repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
uncertainty-project | uncertainty-project-master/Code/core/overlaps.py |
from dataclasses import dataclass, fields
from typing import List
import numpy as np
@dataclass
class Overlaps:
    """Container for the order parameters (overlaps) of the BO / ERM analysis.

    All fields are scalars initialized to NaN so that un-computed quantities
    are easy to detect downstream. Fields prefixed with ``hat`` are the
    conjugate (hat) order parameters of the saddle-point equations.
    """
    # Teacher self-overlap.
    rho : float = float("nan")
    # For BO
    qbo : float = float("nan")
    # should be used when we do experiments (so q is not exactly m)
    mbo : float = float("nan")
    # for ERM
    qerm : float = float("nan")
    m : float = float("nan")
    V : float = float("nan")
    # BO - ERM overlap
    Q : float = float("nan")
    # Conjugate (hat) order parameters.
    hatqbo: float = float("nan")
    hatqerm: float = float("nan")
    hatm : float = float("nan")
    hatV : float = float("nan")
    hatQ : float = float("nan")

    def get_teacher_bo_erm_covariance(self, sigma : float = 0., add_noise : bool = True) -> np.ndarray:
        """Return the 3x3 covariance of the (teacher, BO, ERM) Gaussian fields.

        Parameters
        ----------
        sigma : noise standard deviation; sigma**2 is added to the teacher
            variance when ``add_noise`` is True.
        add_noise : whether the teacher entry includes the label-noise variance.
        """
        teacher_param = self.rho + sigma**2 if add_noise else self.rho
        return np.array([
            [teacher_param, self.qbo, self.m],
            [self.qbo, self.qbo, self.Q],
            [self.m, self.Q, self.qerm]
        ])

    def get_teacher_bo_erm_hat_covariance(self) -> np.ndarray:
        """Return the 3x3 covariance built from the conjugate (hat) overlaps.

        The teacher block is the identity entry (1.0) and is uncorrelated with
        the hat variables, hence the zero off-diagonal first row/column.
        """
        return np.array([
            [1.0, 0 , 0],
            [0 , self.hatqbo , self.hatQ],
            [0 , self.hatQ , self.hatqerm],
        ])
def average_overlaps(overlaps_list : List[Overlaps]) -> Overlaps:
    """Return an Overlaps whose every field is the mean over ``overlaps_list``.

    Any field that cannot be averaged falls back to NaN instead of aborting
    the whole aggregation.
    """
    mean_overlaps = Overlaps()
    for field in fields(mean_overlaps):
        try:
            # ok because we only have float in the overlaps
            mean_value = np.mean([getattr(o, field.name) for o in overlaps_list])
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any averaging failure falls back to NaN.
            mean_value = float('nan')
        setattr(mean_overlaps, field.name, mean_value)
    return mean_overlaps
| 1,616 | 30.096154 | 103 | py |
uncertainty-project | uncertainty-project-master/Code/core/models/amp_erm.py | import sys
from time import time
from abc import ABC
from typing import Tuple
import scipy.optimize as opt
import numpy as np
sys.path.append('..')
sys.path.append('core')
import utility
class ERM:
    """
    NOTE : This code should not be changed in the noiseless case as it's not aware of the noise
    and explicitely depends on the input data

    Channel (gout / dwgout) and prior (fa / fv) functions for AMP with the
    logistic loss and L2 regularization of strength ``lamb``.
    """
    # When True, the proximal operator is computed from the first-order
    # condition (derivative-based solver); otherwise by direct minimization.
    proximal_by_derivation : bool = False

    def __init__(self, lamb : float = 1.) -> None:
        # L2 regularization strength.
        self.lamb = lamb

    def gout(self, w : float, y : int, V : float, **kwargs) -> float:
        """Output-channel function for the logistic loss at mean w, variance V.

        Uses the proximal operator of the logistic loss; returns 0. if the
        proximal computation raises a RuntimeWarning (this only triggers when
        warnings are promoted to errors, e.g. via warnings.filterwarnings).
        """
        logistic = lambda x : np.log(1. + np.exp(-y*x))
        logistic_prime = lambda x : - y / (1. + np.exp(y * x))
        # NOTE(review): the analytic second derivative of log(1 + exp(-y x)) is
        # +y**2 * exp(y x) / (1 + exp(y x))**2 (positive); the leading minus
        # sign below looks inverted -- confirm against the sign convention
        # expected by utility.proximal_operator_by_derivation.
        logistic_second = lambda x : - (y**2) * np.exp(y * x) / (1. + np.exp(y * x))**2
        # should be correct
        try:
            if self.proximal_by_derivation:
                prox = utility.proximal_operator_by_derivation(logistic_prime, logistic_second, w, V)
            else:
                prox = utility.proximal_operator(logistic, w, V)
        except RuntimeWarning as e:
            print(e)
            print('w, y, V = ', w, y, V)
            return 0.
        return (1. / V) * (prox - w)

    def dwgout(self, w : float, y : int, V : float , f : float = None, **kwargs) -> float:
        """Derivative of gout w.r.t. w; pass f = gout(w, y, V) to reuse it."""
        # do not recompute the proximal operator twice, reuse previous computations
        # NOTE(review): `f or ...` treats a legitimate f == 0.0 as "missing"
        # and recomputes gout; harmless for the value but wasteful -- confirm.
        f = f or self.gout(w, y, V)
        # The y inside the cosh can be dropped by symmetry of the function.
        alpha = (2. * np.cosh(0.5 * y * (w + V*f)))**2
        # Sanity check: apparently, for the Onsager term of ERM to be globally
        # positive, dwgout must be negative.
        return - 1. / (alpha + V )

    def channel(self, y : int, w : float, v : float, **kwargs) -> Tuple[float, float]:
        """
        NOTE : We completely discard the sig. (noise) but keep it so the signature is the same as AMP
        and avoid bugs

        Vectorized wrapper: returns (gout, dwgout) evaluated coordinate-wise.
        NOTE(review): outputs are allocated with np.zeros_like(y); if y has an
        integer dtype the float results would be truncated -- presumably y is
        passed as a float array by the caller, verify.
        """
        # Need to do it for each coordinate
        n = len(w)
        g, dg = np.zeros_like(y), np.zeros_like(y)
        for i in range(n):
            g[i] = self.gout(w[i], y[i], v[i])
            dg[i] = self.dwgout(w[i], y[i], v[i], f = g[i])
        return g, dg

    # for the L2 regularization, the functions are the same as the gaussian prior
    def fa(self, Sigma : float, R : float) -> float:
        """
        Input function
        """
        return R / (self.lamb * Sigma + 1.)

    def fv(self, Sigma : float, R : float) -> float:
        """
        Derivative of input function w.r.t. R, multiplied by Sigma
        """
        return Sigma / (self.lamb * Sigma + 1.)

    def prior(self, b : float, A : float) -> Tuple[float, float]:
        """
        Compute f and f' for Bernoulli-Gaussian prior
        Sigma = 1 / A
        R = b / A
        """
        return self.fa(1. / A, b / A), self.fv(1. / A, b / A)
| 2,876 | 33.25 | 123 | py |
uncertainty-project | uncertainty-project-master/Code/core/models/bayes_optimal.py | from typing import Tuple
import warnings
from abc import ABC
import numpy as np
from scipy.special import erfc
H_ = lambda x : 0.5 * erfc(x / np.sqrt(2.))
class BayesOptimal(ABC):
"""
Contains the channel / denoising function used in AMP for the bayes optimal setting
where
y = sgn(w . x) and w has a gaussian prior
"""
def gout(w : float, y : int, V : float, sig : float = 0.) -> float:
"""
Output function (depends on the activation)
arguments :
- sig : STD of noise
"""
delta = 1e-10
# Only change this part to take the noise into account
U = V + sig**2 + delta
try:
deno = np.sqrt(2*np.pi * U) * H_(- y * w / np.sqrt(U)) + delta
x = y * np.exp(-0.5*(w**2. / U)) / deno
except Warning:
print('Error in gout of Bayes')
return 0
return x
def dwgout(w : float, y : int, V : float, sig : float = 0.) -> float:
"""
Derivative of gout with respect to w
"""
U = V + sig**2
delta = 1e-10
g = BayesOptimal.gout(w, y, V, sig)
tmp = np.multiply(g, (np.divide(w, U + delta) + g))
return - np.maximum(tmp, 0.)
# below : alternative way that looks less stable than the other one
# return - (w / V) * g - g**2
def channel(y : int, w : float, v : float, sig : float = 0.) -> float:
return BayesOptimal.gout(w, y, v, sig = sig), BayesOptimal.dwgout(w, y, v, sig = sig)
def fa(Sigma : float, R : float) -> float:
"""
Input function, independent of the variance of gaussian prior
NOTE : Should not depend on the noise in label
"""
return R / (Sigma + 1.)
def fv(Sigma : float, R : float) -> float:
"""
Derivative of input function w.r.t. R, multiplied by Sigma
"""
return Sigma / (Sigma + 1.)
def prior(b : float, A : float) -> Tuple[float, float]:
'''
Compute f and f' for Bernoulli-Gaussian prior
Sigma = 1 / A
R = b / A
'''
return BayesOptimal.fa(1. / A, b / A), BayesOptimal.fv(1. / A, b / A)
| 2,202 | 30.927536 | 93 | py |
uncertainty-project | uncertainty-project-master/Code/experiments/joint_density.py | # joint_density.py
# Used to compute the joint probability distribution between ERM and BO
import itertools
import sys
from typing import Tuple
from tqdm.utils import disp_len
sys.path.append('core')
sys.path.append('experiments')
import matplotlib as mpl
import matplotlib.pyplot as plt
import json
import numpy as np
import pandas as pd
from scipy.integrate import nquad, quad
import scipy.linalg
from scipy.special import erfc, erfcinv
import scipy.stats as stats
from tqdm import tqdm
from core.utility import *
def integrand_p_correct(nu, cov, qbo, a, b, sigma = 0.0):
    """Integrand (over the teacher field nu) of the joint density that the
    Bayes-optimal confidence equals ``a`` and the ERM confidence equals ``b``.

    Parameters: ``cov`` is the 3x3 (teacher, BO, ERM) covariance, ``qbo`` the
    Bayes overlap, ``sigma`` the label-noise std (Delta = sigma**2).
    ``sigmoid_inv`` and ``erfc_prime`` come from core.utility.
    """
    # The label is the sign of the teacher field.
    y = np.sign(nu)
    Delta = sigma**2
    alpha = - y / np.sqrt(2 * (1. - qbo + Delta))
    # Invert the erfc link for the BO confidence and the sigmoid for ERM.
    inv_l = erfcinv(2 * a) / alpha
    inv_lerm = y * sigmoid_inv(b)
    # Jacobian factors of the two changes of variables.
    normalisation_bo = (alpha / 2.) * erfc_prime(alpha * inv_l)
    normalisation_erm = b * (1. - b)
    return stats.multivariate_normal.pdf([nu, inv_l, inv_lerm], mean=np.zeros(3), cov=cov, allow_singular=False) / np.abs(normalisation_bo * normalisation_erm)
def get_p_correct(a, b, qbo, Sigma, sigma = 0.0):
    """Integrate ``integrand_p_correct`` over the teacher field nu on the whole
    real line and return the resulting joint density value."""
    value, _abserr = quad(
        lambda nu: integrand_p_correct(nu, Sigma, qbo, a, b, sigma),
        -np.inf,
        np.inf,
        limit=100,
    )
    return value
def get_p_correct_density(cov, qbo, N, sigma = 0.0, vmin=0., vmax=1.):
    """Tabulate ``get_p_correct`` on an N x N grid of confidence levels.

    Row index i is the Bayes-optimal level, column index j the ERM level;
    each cell is evaluated at the midpoint of its sub-interval so that the
    sum over the grid times the cell area approximates the total mass.
    """
    step = 1. / N
    centers = np.linspace(vmin + step / 2., vmax - step / 2., N)
    density = np.zeros((N, N))
    with tqdm(total=N * N) as progress:
        # i indexes the Bayes-optimal axis, j the ERM axis.
        for i, j in itertools.product(range(N), range(N)):
            density[i, j] = get_p_correct(centers[i], centers[j], qbo, cov, sigma)
            progress.update(1)
    return density
# =========
def get_p_one(a, b, qbo, Sigma_2, sigma : float = 0.):
    """
    Between ERM and Bayes

    Joint density that the Bayes-optimal confidence equals ``a`` and the ERM
    confidence equals ``b``, obtained by inverting both link functions and
    evaluating the underlying 2D Gaussian with covariance ``Sigma_2``.
    """
    Delta = sigma**2
    alpha = - 1 / np.sqrt(2 * (1. - qbo + Delta))
    # Invert the erfc link (Bayes) and the sigmoid link (ERM).
    inv_l = erfcinv(2 * a) / alpha
    inv_lerm = sigmoid_inv(b)
    # Jacobian factors of the two changes of variables.
    normalisation_bo = (alpha / 2.) * erfc_prime(alpha * inv_l)
    normalisation_erm = b * (1. - b)
    return stats.multivariate_normal.pdf([inv_l, inv_lerm], mean=np.zeros(2), cov=Sigma_2, allow_singular=False) / np.abs(normalisation_bo * normalisation_erm)
def get_p_one_density(Sigma_2, qbo, N, sigma = 0.0, vmin=0., vmax=1.):
    """Tabulate ``get_p_one`` on an N x N midpoint grid.

    Rows index the Bayes-optimal confidence, columns the ERM confidence.
    """
    step = 1. / N
    centers = np.linspace(vmin + step / 2., vmax - step / 2., N)
    density = np.zeros((N, N))
    with tqdm(total=N * N) as progress:
        # i indexes the Bayes-optimal axis, j the ERM axis.
        for i, j in itertools.product(range(N), range(N)):
            density[i, j] = get_p_one(centers[i], centers[j], qbo, Sigma_2, sigma)
            progress.update(1)
    return density
# === Between ERM and teacher
def get_p_one_teacher(a, b, rho, Sigma_2, sigma : float = 0.):
    """
    Between ERM and teacher

    Joint density that the noisy-teacher confidence equals ``a`` and the ERM
    confidence equals ``b``, from the 2D Gaussian with covariance ``Sigma_2``.

    NOTE(review): ``rho`` is never used in the body; it is kept in the
    signature for compatibility with existing callers.

    Raises:
        AssertionError: if ``sigma <= 0``; with zero label noise the teacher
            output is deterministic, so no density is defined.
    """
    if sigma <= 0:
        # Explicit check instead of `assert`, which is stripped under `python -O`.
        raise AssertionError("sigma must be > 0: the teacher density is undefined without label noise")
    Delta = sigma**2
    # If Delta = 0.0, no density is defined for the teacher
    alpha = - 1 / np.sqrt(2 * Delta)
    # Invert the erfc link (teacher) and the sigmoid link (ERM).
    inv_l = erfcinv(2 * a) / alpha
    inv_lerm = sigmoid_inv(b)
    # Jacobian factors of the two changes of variables.
    normalisation_teacher = (alpha / 2.) * erfc_prime(alpha * inv_l)
    normalisation_erm = b * (1. - b)
    return stats.multivariate_normal.pdf([inv_l, inv_lerm], mean=np.zeros(2), cov=Sigma_2, allow_singular=False) / np.abs(normalisation_teacher * normalisation_erm)
def get_p_one_teacher_density(Sigma_2, rho, N, sigma = 0.0, vmin=0., vmax=1.):
    """Tabulate ``get_p_one_teacher`` on an N x N midpoint grid.

    Rows index the teacher confidence, columns the ERM confidence.
    """
    step = 1. / N
    centers = np.linspace(vmin + step / 2., vmax - step / 2., N)
    density = np.zeros((N, N))
    with tqdm(total=N * N) as progress:
        # i indexes the teacher axis, j the ERM axis.
        for i, j in itertools.product(range(N), range(N)):
            density[i, j] = get_p_one_teacher(centers[i], centers[j], rho, Sigma_2, sigma)
            progress.update(1)
    return density
# ==== Between Bayes and the teacher
def get_p_one_bo_teacher(a, b, sigma, Sigma_bo_teacher):
    """
    NOTE : Here, we need to add the noise to bayes otherwise it makes no sense
    NOTE 2 : Use teacher WITHOUT noise in the covariance matrix (it 's surely
    equivalent the computations are probably more complicate in that case)
    First arg is for teacher, second is for bayes

    NOTE(review): the docstring line above contradicts the usage in
    get_p_one_bo_teacher_density (where the first grid index is documented as
    Bayes) -- confirm which convention is intended.

    Raises:
        ValueError: if ``Sigma_bo_teacher`` is not a 2x2 matrix (previously a
            bare ``Exception`` with no message).
    """
    if Sigma_bo_teacher.shape != (2, 2):
        raise ValueError(
            "Sigma_bo_teacher must be a 2x2 covariance matrix, got shape {}".format(Sigma_bo_teacher.shape)
        )
    # Bayes overlap sits in the second diagonal entry.
    qbo = Sigma_bo_teacher[1, 1]
    Delta = sigma**2
    alpha_bo = - 1 / np.sqrt(2 * (1. - qbo + Delta))
    alpha_teacher = - 1 / np.sqrt(2 * Delta)
    # Invert the erfc links to recover the underlying Gaussian fields.
    inv_teacher = erfcinv(2 * b) / alpha_teacher
    inv_bo = erfcinv(2 * a) / alpha_bo
    # Jacobian factors of the changes of variables.
    normalisation_bo = (alpha_bo / 2.) * erfc_prime(alpha_bo * inv_bo)
    normalisation_teacher = (alpha_teacher / 2.) * erfc_prime(alpha_teacher * inv_teacher)
    return stats.multivariate_normal.pdf([inv_teacher, inv_bo], mean=np.zeros(2), cov=Sigma_bo_teacher, allow_singular=False) / np.abs(normalisation_bo * normalisation_teacher)
def get_p_one_bo_teacher_density(sigma, Sigma_bo_teacher, N, vmin=0., vmax=1., bo_on_rows = True):
    """
    NOTE : rows will be for bayes, columns for teacher

    Tabulate ``get_p_one_bo_teacher`` on an N x N midpoint grid; the result is
    transposed when ``bo_on_rows`` is False.
    """
    step = 1. / N
    centers = np.linspace(vmin + step / 2., vmax - step / 2., N)
    density = np.zeros((N, N))
    with tqdm(total=N * N) as progress:
        # row indexes the Bayes-optimal axis, col the teacher axis.
        for row, col in itertools.product(range(N), range(N)):
            density[row, col] = get_p_one_bo_teacher(centers[row], centers[col], sigma, Sigma_bo_teacher)
            progress.update(1)
    return density if bo_on_rows else density.T
uncertainty-project | uncertainty-project-master/Code/experiments/display.py | import matplotlib.pyplot as plt
import json
import scipy.ndimage as ndimage
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.colors import LogNorm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib as mpl
import numpy as np
def plot_from_density(density : np.ndarray, vmin : float =0., vmax : float =1., fig = None, ax = None, colorbar=True, mask_vmin=-float('inf'), mask_vmax=float('inf'), sigma=0.0):
    """Render a 2D density as a log-scaled image and return the AxesImage.

    Usual values of mask vmax and vmin : 1e-3, 1e3
    NOTE : We transpose the matrix for whatever reason, don't change this as it will change some experiments
    Consequence : row => columns => x-axis
                  columns => rows => y-axis

    ``sigma`` is the Gaussian smoothing width (0 = no smoothing); values
    outside [mask_vmin, mask_vmax] are masked (shown as the black facecolor).
    If ``ax`` is None, draws on the current pyplot figure; otherwise on ``ax``.
    ``fig`` is accepted for signature compatibility but unused.
    """
    density = density.T
    smoothed = ndimage.gaussian_filter(density, sigma=sigma)
    masked = np.ma.masked_outside(smoothed, mask_vmin, mask_vmax)
    if ax is None:
        plt.clf()
        # Fix: keep a handle on the image so both branches return it
        # (previously `im` was only assigned in the explicit-axes branch).
        im = plt.imshow(masked, extent=(vmin, vmax, vmin, vmax), origin='lower', norm=LogNorm())
        ax = plt.gca()
        ax.set_facecolor((0., 0., 0.))
        if colorbar:
            # NOTE(review): with the default infinite mask bounds LogNorm cannot
            # build a valid scale; pass finite mask_vmin/mask_vmax when
            # requesting a colorbar.
            norm = mpl.colors.LogNorm(vmin=mask_vmin, vmax=mask_vmax)
            plt.colorbar(mpl.cm.ScalarMappable(norm=norm))
            # cbar.set_ticklabels(["1e-7", "1e-5", "1e-3", "1e-1", "1e1"])
    else:
        im = ax.imshow(masked, extent=(vmin, vmax, vmin, vmax), origin='lower', norm=LogNorm())
        ax.set_facecolor((0., 0., 0.))
    return im
| 1,497 | 37.410256 | 186 | py |
featuretools | featuretools-main/featuretools/__main__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/config_init.py | import copy
import logging
import os
import sys
def initialize_logging():
    """Configure the featuretools loggers.

    Per-logger levels may be overridden through environment variables;
    anything unspecified defaults to "info". Records at WARNING and above
    (when explicitly requested via an env var) are routed to stderr,
    everything else to stdout, and propagation is disabled so records are
    not duplicated by ancestor loggers. Safe to call repeatedly: existing
    handlers are replaced.
    """
    loggers = {}

    # Check for environmental variables
    logger_env_vars = {
        "FEATURETOOLS_LOG_LEVEL": "featuretools",
        "FEATURETOOLS_ES_LOG_LEVEL": "featuretools.entityset",
        "FEATURETOOLS_BACKEND_LOG_LEVEL": "featuretools.computation_backend",
    }
    for logger_env, logger in logger_env_vars.items():
        log_level = os.environ.get(logger_env, None)
        if log_level is not None:
            loggers[logger] = log_level

    # Set log level to info if not otherwise specified.
    loggers.setdefault("featuretools", "info")
    loggers.setdefault("featuretools.computation_backend", "info")
    loggers.setdefault("featuretools.entityset", "info")

    fmt = "%(asctime)-15s %(name)s - %(levelname)s %(message)s"
    out_handler = logging.StreamHandler(sys.stdout)
    # Bug fix: the error handler was previously also bound to sys.stdout,
    # making the out/err split a no-op. Error-level loggers now go to stderr.
    err_handler = logging.StreamHandler(sys.stderr)
    out_handler.setFormatter(logging.Formatter(fmt))
    err_handler.setFormatter(logging.Formatter(fmt))

    err_levels = ["WARNING", "ERROR", "CRITICAL"]
    for name, level in list(loggers.items()):
        LEVEL = getattr(logging, level.upper())
        logger = logging.getLogger(name)
        logger.setLevel(LEVEL)
        # Remove any pre-existing handlers so repeated calls stay idempotent.
        for _handler in logger.handlers:
            logger.removeHandler(_handler)
        # NOTE: this comparison is case-sensitive, so only env-var values
        # spelled exactly "WARNING"/"ERROR"/"CRITICAL" select the err handler.
        if level in err_levels:
            logger.addHandler(err_handler)
        else:
            logger.addHandler(out_handler)
        logger.propagate = False
initialize_logging()
class Config:
    """Mutable global configuration store for featuretools.

    Values are handed out as deep copies so callers cannot mutate the
    internal state by accident; use :meth:`set` to change settings.
    """

    def __init__(self):
        self._data = {}
        self.set_to_default()

    def set_to_default(self):
        """Reset the configuration to its built-in defaults."""
        here = os.path.dirname(__file__)
        self._data = {
            "primitive_data_folder": os.path.join(here, "primitives/data"),
        }

    def get(self, key):
        """Return a deep copy of the value stored under ``key``."""
        return copy.deepcopy(self._data[key])

    def get_all(self):
        """Return a deep copy of the whole configuration mapping."""
        return copy.deepcopy(self._data)

    def set(self, values):
        """Merge the mapping ``values`` into the configuration."""
        self._data.update(values)


config = Config()
| 2,078 | 27.479452 | 77 | py |
class UnknownFeature(Exception):
    """Raised when a feature type cannot be recognized."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class UnusedPrimitiveWarning(UserWarning):
    """Warning emitted when primitives specified for DFS were never used."""
| 178 | 21.375 | 49 | py |
# Package release version.
__version__ = "1.26.0"

# Schema versions for serialized EntitySet and feature-list artifacts;
# bumped whenever the corresponding on-disk format changes.
ENTITYSET_SCHEMA_VERSION = "9.0.0"
FEATURES_SCHEMA_VERSION = "10.0.0"
| 93 | 22.5 | 34 | py |
featuretools | featuretools-main/featuretools/__init__.py | # flake8: noqa
from featuretools.version import __version__
from featuretools.config_init import config
from featuretools.entityset.api import *
from featuretools import primitives
from featuretools.synthesis.api import *
from featuretools.primitives import list_primitives, summarize_primitives
from featuretools.computational_backends.api import *
from featuretools import tests
from featuretools.utils.recommend_primitives import get_recommended_primitives
from featuretools.utils.time_utils import *
from featuretools.utils.utils_info import show_info
import featuretools.demo
from featuretools import feature_base
from featuretools import selection
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
Feature,
FeatureBase,
GroupByTransformFeature,
IdentityFeature,
TransformFeature,
graph_feature,
describe_feature,
save_features,
load_features,
)
import logging
import pkg_resources
import sys
import traceback
import warnings
from woodwork import list_logical_types, list_semantic_tags
logger = logging.getLogger("featuretools")

# Call functions registered by other libraries when featuretools is imported
for entry_point in pkg_resources.iter_entry_points("featuretools_initialize"):
    try:
        method = entry_point.load()
        if callable(method):
            method()
    except Exception:
        # Deliberate best-effort: a broken third-party hook must not prevent
        # featuretools itself from importing.
        pass

for entry_point in pkg_resources.iter_entry_points("alteryx_open_src_initialize"):
    try:
        method = entry_point.load()
        if callable(method):
            # Initializers in this group receive the importing package's name.
            method("featuretools")
    except Exception:
        # Same best-effort policy as above.
        pass

# Load in submodules registered by other libraries into Featuretools namespace
for entry_point in pkg_resources.iter_entry_points("featuretools_plugin"):
    try:
        # Expose the plugin as `featuretools.<name>` so users can import it
        # as if it were a built-in submodule.
        sys.modules["featuretools." + entry_point.name] = entry_point.load()
    except Exception:
        message = "Featuretools failed to load plugin {} from library {}. "
        message += "For a full stack trace, set logging to debug."
        logger.warning(message.format(entry_point.name, entry_point.module_name))
        logger.debug(traceback.format_exc())
| 2,152 | 32.640625 | 82 | py |
featuretools | featuretools-main/featuretools/synthesis/dfs.py | import warnings
from featuretools.computational_backends import calculate_feature_matrix
from featuretools.entityset import EntitySet
from featuretools.exceptions import UnusedPrimitiveWarning
from featuretools.synthesis.deep_feature_synthesis import DeepFeatureSynthesis
from featuretools.synthesis.utils import _categorize_features, get_unused_primitives
from featuretools.utils import entry_point
@entry_point("featuretools_dfs")
def dfs(
    dataframes=None,
    relationships=None,
    entityset=None,
    target_dataframe_name=None,
    cutoff_time=None,
    instance_ids=None,
    agg_primitives=None,
    trans_primitives=None,
    groupby_trans_primitives=None,
    allowed_paths=None,
    max_depth=2,
    ignore_dataframes=None,
    ignore_columns=None,
    primitive_options=None,
    seed_features=None,
    drop_contains=None,
    drop_exact=None,
    where_primitives=None,
    max_features=-1,
    cutoff_time_in_index=False,
    save_progress=None,
    features_only=False,
    training_window=None,
    approximate=None,
    chunk_size=None,
    n_jobs=1,
    dask_kwargs=None,
    verbose=False,
    return_types=None,
    progress_callback=None,
    include_cutoff_time=True,
):
    """Calculates a feature matrix and features given a dictionary of dataframes
    and a list of relationships.

    Args:
        dataframes (dict[str -> tuple(DataFrame, str, str, dict[str -> str/Woodwork.LogicalType], dict[str->str/set], boolean)]):
            Dictionary of DataFrames. Entries take the format
            {dataframe name -> (dataframe, index column, time_index, logical_types, semantic_tags, make_index)}.
            Note that only the dataframe is required. If a Woodwork DataFrame is supplied, any other parameters
            will be ignored.

        relationships (list[(str, str, str, str)]): List of relationships
            between dataframes. List items are a tuple with the format
            (parent dataframe name, parent column, child dataframe name, child column).

        entityset (EntitySet): An already initialized entityset. Required if
            dataframes and relationships are not defined.

        target_dataframe_name (str): Name of dataframe on which to make predictions.

        cutoff_time (pd.DataFrame or Datetime or str): Specifies times at which to calculate
            the features for each instance. The resulting feature matrix will use data
            up to and including the cutoff_time. Can either be a DataFrame, a single
            value, or a string that can be parsed into a datetime. If a DataFrame is passed
            the instance ids for which to calculate features must be in a column with the
            same name as the target dataframe index or a column named `instance_id`.
            The cutoff time values in the DataFrame must be in a column with the same name as
            the target dataframe time index or a column named `time`. If the DataFrame has more
            than two columns, any additional columns will be added to the resulting feature
            matrix. If a single value is passed, this value will be used for all instances.

        instance_ids (list): List of instances on which to calculate features. Only
            used if cutoff_time is a single datetime.

        agg_primitives (list[str or AggregationPrimitive], optional): List of Aggregation
            Feature types to apply.

                Default: ["sum", "std", "max", "skew", "min", "mean", "count", "percent_true", "num_unique", "mode"]

        trans_primitives (list[str or TransformPrimitive], optional):
            List of Transform Feature functions to apply.

                Default: ["day", "year", "month", "weekday", "haversine", "num_words", "num_characters"]

        groupby_trans_primitives (list[str or TransformPrimitive], optional):
            list of Transform primitives to make GroupByTransformFeatures with

        allowed_paths (list[list[str]]): Allowed dataframe paths on which to make
            features.

        max_depth (int) : Maximum allowed depth of features.

        ignore_dataframes (list[str], optional): List of dataframes to
            blacklist when creating features.

        ignore_columns (dict[str -> list[str]], optional): List of specific
            columns within each dataframe to blacklist when creating features.

        primitive_options (list[dict[str or tuple[str] -> dict] or dict[str or tuple[str] -> dict, optional]):
            Specify options for a single primitive or a group of primitives.
            Lists of option dicts are used to specify options per input for primitives
            with multiple inputs. Each option ``dict`` can have the following keys:

            ``"include_dataframes"``
                List of dataframes to be included when creating features for
                the primitive(s). All other dataframes will be ignored
                (list[str]).
            ``"ignore_dataframes"``
                List of dataframes to be blacklisted when creating features
                for the primitive(s) (list[str]).
            ``"include_columns"``
                List of specific columns within each dataframe to include when
                creating features for the primitive(s). All other columns
                in a given dataframe will be ignored (dict[str -> list[str]]).
            ``"ignore_columns"``
                List of specific columns within each dataframe to blacklist
                when creating features for the primitive(s) (dict[str ->
                list[str]]).
            ``"include_groupby_dataframes"``
                List of dataframes to be included when finding groupbys. All
                other dataframes will be ignored (list[str]).
            ``"ignore_groupby_dataframes"``
                List of dataframes to blacklist when finding groupbys
                (list[str]).
            ``"include_groupby_columns"``
                List of specific columns within each dataframe to include as
                groupbys, if applicable. All other columns in each
                dataframe will be ignored (dict[str -> list[str]]).
            ``"ignore_groupby_columns"``
                List of specific columns within each dataframe to blacklist
                as groupbys (dict[str -> list[str]]).

        seed_features (list[:class:`.FeatureBase`]): List of manually defined
            features to use.

        drop_contains (list[str], optional): Drop features
            that contains these strings in name.

        drop_exact (list[str], optional): Drop features that
            exactly match these strings in name.

        where_primitives (list[str or PrimitiveBase], optional):
            List of Primitives names (or types) to apply with where clauses.

                Default:

                    ["count"]

        max_features (int, optional) : Cap the number of generated features to
            this number. If -1, no limit.

        features_only (bool, optional): If True, returns the list of
            features without calculating the feature matrix.

        cutoff_time_in_index (bool): If True, return a DataFrame with a MultiIndex
            where the second index is the cutoff time (first is instance id).
            DataFrame will be sorted by (time, instance_id).

        training_window (Timedelta or str, optional):
            Window defining how much time before the cutoff time data
            can be used when calculating features. If ``None`` , all data
            before cutoff time is used. Defaults to ``None``. Month and year
            units are not relative when Pandas Timedeltas are used. Relative
            units should be passed as a Featuretools Timedelta or a string.

        approximate (Timedelta): Bucket size to group instances with similar
            cutoff times by for features with costly calculations. For example,
            if bucket is 24 hours, all instances with cutoff times on the same
            day will use the same calculation for expensive features.

        save_progress (str, optional): Path to save intermediate computational results.

        n_jobs (int, optional): number of parallel processes to use when
            calculating feature matrix

        chunk_size (int or float or None or "cutoff time", optional): Number
            of rows of output feature matrix to calculate at time. If passed an
            integer greater than 0, will try to use that many rows per chunk.
            If passed a float value between 0 and 1 sets the chunk size to that
            percentage of all instances. If passed the string "cutoff time",
            rows are split per cutoff time.

        dask_kwargs (dict, optional): Dictionary of keyword arguments to be
            passed when creating the dask client and scheduler. Even if n_jobs
            is not set, using `dask_kwargs` will enable multiprocessing.
            Main parameters:

            cluster (str or dask.distributed.LocalCluster):
                cluster or address of cluster to send tasks to. If unspecified,
                a cluster will be created.
            diagnostics port (int):
                port number to use for web dashboard. If left unspecified, web
                interface will not be enabled.

            Valid keyword arguments for LocalCluster will also be accepted.

        return_types (list[woodwork.ColumnSchema] or str, optional):
            List of ColumnSchemas defining the types of
            columns to return. If None, defaults to returning all
            numeric, categorical and boolean types. If given as
            the string 'all', returns all available types.

        progress_callback (callable): function to be called with incremental progress updates.
            Has the following parameters:

                update: percentage change (float between 0 and 100) in progress since last call
                progress_percent: percentage (float between 0 and 100) of total computation completed
                time_elapsed: total time in seconds that has elapsed since start of call

        include_cutoff_time (bool): Include data at cutoff times in feature calculations. Defaults to ``True``.

    Returns:
        list[:class:`.FeatureBase`], pd.DataFrame:
            The list of generated feature defintions, and the feature matrix.
            If ``features_only`` is ``True``, the feature matrix will not be generated.

    Examples:
        .. code-block:: python

            from featuretools.primitives import Mean
            # cutoff times per instance
            dataframes = {
                "sessions" : (session_df, "id"),
                "transactions" : (transactions_df, "id", "transaction_time")
            }
            relationships = [("sessions", "id", "transactions", "session_id")]
            feature_matrix, features = dfs(dataframes=dataframes,
                                           relationships=relationships,
                                           target_dataframe_name="transactions",
                                           cutoff_time=cutoff_times)
            feature_matrix

            features = dfs(dataframes=dataframes,
                           relationships=relationships,
                           target_dataframe_name="transactions",
                           features_only=True)
    """
    # Build an EntitySet on the fly when one was not supplied directly.
    if not isinstance(entityset, EntitySet):
        entityset = EntitySet("dfs", dataframes, relationships)

    dfs_object = DeepFeatureSynthesis(
        target_dataframe_name,
        entityset,
        agg_primitives=agg_primitives,
        trans_primitives=trans_primitives,
        groupby_trans_primitives=groupby_trans_primitives,
        max_depth=max_depth,
        where_primitives=where_primitives,
        allowed_paths=allowed_paths,
        drop_exact=drop_exact,
        drop_contains=drop_contains,
        ignore_dataframes=ignore_dataframes,
        ignore_columns=ignore_columns,
        primitive_options=primitive_options,
        max_features=max_features,
        seed_features=seed_features,
    )

    features = dfs_object.build_features(verbose=verbose, return_types=return_types)

    # Warn about any requested primitives that produced no features.
    trans, agg, groupby, where = _categorize_features(features)

    trans_unused = get_unused_primitives(trans_primitives, trans)
    agg_unused = get_unused_primitives(agg_primitives, agg)
    groupby_unused = get_unused_primitives(groupby_trans_primitives, groupby)
    where_unused = get_unused_primitives(where_primitives, where)

    unused_primitives = [trans_unused, agg_unused, groupby_unused, where_unused]
    if any(unused_primitives):
        warn_unused_primitives(unused_primitives)

    if features_only:
        return features

    # Explicit raise instead of `assert`, which is stripped under `python -O`;
    # AssertionError is kept so existing callers that catch it still work.
    if not features:
        raise AssertionError(
            "No features can be generated from the specified primitives. Please make sure the primitives you are using are compatible with the variable types in your data."
        )

    feature_matrix = calculate_feature_matrix(
        features,
        entityset=entityset,
        cutoff_time=cutoff_time,
        instance_ids=instance_ids,
        training_window=training_window,
        approximate=approximate,
        cutoff_time_in_index=cutoff_time_in_index,
        save_progress=save_progress,
        chunk_size=chunk_size,
        n_jobs=n_jobs,
        dask_kwargs=dask_kwargs,
        verbose=verbose,
        progress_callback=progress_callback,
        include_cutoff_time=include_cutoff_time,
    )
    return feature_matrix, features
def warn_unused_primitives(unused_primitives):
    """Emit an UnusedPrimitiveWarning listing primitives DFS never applied.

    ``unused_primitives`` is a list of four lists, ordered as: transform,
    aggregation, groupby-transform, and where primitives; empty groups are
    omitted from the message.
    """
    labels = (
        " trans_primitives: {}\n",
        " agg_primitives: {}\n",
        " groupby_trans_primitives: {}\n",
        " where_primitives: {}\n",
    )
    unused_string = "".join(
        label.format(group)
        for group, label in zip(unused_primitives, labels)
        if group
    )
    warning_msg = (
        "Some specified primitives were not used during DFS:\n{}".format(unused_string)
        + "This may be caused by a using a value of max_depth that is too small, not setting interesting values, "
        + "or it may indicate no compatible columns for the primitive were found in the data. If the DFS call "
        + "contained multiple instances of a primitive in the list above, none of them were used."
    )
    warnings.warn(warning_msg, UnusedPrimitiveWarning)
| 14,419 | 43.782609 | 167 | py |
featuretools | featuretools-main/featuretools/synthesis/deep_feature_synthesis.py | import functools
import logging
import operator
import warnings
from collections import defaultdict
from typing import Any, DefaultDict, Dict, List
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools import primitives
from featuretools.entityset.entityset import LTI_COLUMN_NAME
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
FeatureBase,
GroupByTransformFeature,
IdentityFeature,
TransformFeature,
)
from featuretools.feature_base.cache import CacheType, feature_cache
from featuretools.feature_base.utils import is_valid_input
from featuretools.primitives.base import (
AggregationPrimitive,
PrimitiveBase,
TransformPrimitive,
)
from featuretools.primitives.options_utils import (
filter_groupby_matches_by_options,
filter_matches_by_options,
generate_all_primitive_options,
ignore_dataframe_for_primitive,
)
from featuretools.utils.gen_utils import Library, camel_and_title_to_snake
logger = logging.getLogger("featuretools")
class DeepFeatureSynthesis(object):
"""Automatically produce features for a target dataframe in an Entityset.
Args:
target_dataframe_name (str): Name of dataframe for which to build features.
entityset (EntitySet): Entityset for which to build features.
        agg_primitives (list[str or :class:`.primitives.AggregationPrimitive`], optional):
list of Aggregation Feature types to apply.
Default: ["sum", "std", "max", "skew", "min", "mean", "count", "percent_true", "num_unique", "mode"]
trans_primitives (list[str or :class:`.primitives.TransformPrimitive`], optional):
list of Transform primitives to use.
Default: ["day", "year", "month", "weekday", "haversine", "num_words", "num_characters"]
where_primitives (list[str or :class:`.primitives.PrimitiveBase`], optional):
only add where clauses to these types of Primitives
Default:
["count"]
groupby_trans_primitives (list[str or :class:`.primitives.TransformPrimitive`], optional):
list of Transform primitives to make GroupByTransformFeatures with
max_depth (int, optional) : maximum allowed depth of features.
Default: 2. If -1, no limit.
max_features (int, optional) : Cap the number of generated features to
this number. If -1, no limit.
allowed_paths (list[list[str]], optional): Allowed dataframe paths to make
features for. If None, use all paths.
ignore_dataframes (list[str], optional): List of dataframes to
blacklist when creating features. If None, use all dataframes.
ignore_columns (dict[str -> list[str]], optional): List of specific
columns within each dataframe to blacklist when creating features.
If None, use all columns.
seed_features (list[:class:`.FeatureBase`], optional): List of manually
defined features to use.
drop_contains (list[str], optional): Drop features
that contains these strings in name.
drop_exact (list[str], optional): Drop features that
exactly match these strings in name.
where_stacking_limit (int, optional): Cap the depth of the where features.
Default: 1
primitive_options (dict[str or tuple[str] or PrimitiveBase -> dict or list[dict]], optional):
Specify options for a single primitive or a group of primitives.
Lists of option dicts are used to specify options per input for primitives
with multiple inputs. Each option ``dict`` can have the following keys:
``"include_dataframes"``
List of dataframes to be included when creating features for
the primitive(s). All other dataframes will be ignored
(list[str]).
``"ignore_dataframes"``
List of dataframes to be blacklisted when creating features
for the primitive(s) (list[str]).
``"include_columns"``
List of specific columns within each dataframe to include when
creating features for the primitive(s). All other columns
in a given dataframe will be ignored (dict[str -> list[str]]).
``"ignore_columns"``
List of specific columns within each dataframe to blacklist
when creating features for the primitive(s) (dict[str ->
list[str]]).
``"include_groupby_dataframes"``
List of dataframes to be included when finding groupbys. All
other dataframes will be ignored (list[str]).
``"ignore_groupby_dataframes"``
List of dataframes to blacklist when finding groupbys
(list[str]).
``"include_groupby_columns"``
List of specific columns within each dataframe to include as
groupbys, if applicable. All other columns in each
dataframe will be ignored (dict[str -> list[str]]).
``"ignore_groupby_columns"``
List of specific columns within each dataframe to blacklist
as groupbys (dict[str -> list[str]]).
"""
    def __init__(
        self,
        target_dataframe_name,
        entityset,
        agg_primitives=None,
        trans_primitives=None,
        where_primitives=None,
        groupby_trans_primitives=None,
        max_depth=2,
        max_features=-1,
        allowed_paths=None,
        ignore_dataframes=None,
        ignore_columns=None,
        primitive_options=None,
        seed_features=None,
        drop_contains=None,
        drop_exact=None,
        where_stacking_limit=1,
    ):
        """Validate the arguments and normalize them into instance state.

        See the class docstring for descriptions of all parameters.

        Raises:
            KeyError: If ``target_dataframe_name`` is not in ``entityset``.
            TypeError: If ``ignore_dataframes`` is not a list, or
                ``ignore_columns`` is malformed.
            ValueError: If a primitive name is unknown, the wrong kind, or
                incompatible with the entityset's dataframe library.
        """
        if target_dataframe_name not in entityset.dataframe_dict:
            es_name = entityset.id or "entity set"
            msg = "Provided target dataframe %s does not exist in %s" % (
                target_dataframe_name,
                es_name,
            )
            raise KeyError(msg)
        # Multiple calls to dfs() should start with a fresh cache
        feature_cache.clear_all()
        feature_cache.enabled = True
        # need to change max_depth to None because DFS terminates when <0
        if max_depth == -1:
            max_depth = None
        # if just one dataframe, set max depth to 1 (transform stacking rule)
        if len(entityset.dataframe_dict) == 1 and (max_depth is None or max_depth > 1):
            warnings.warn(
                "Only one dataframe in entityset, changing max_depth to "
                "1 since deeper features cannot be created",
            )
            max_depth = 1
        self.max_depth = max_depth
        self.max_features = max_features
        # Store allowed paths as a set of tuples for O(1) membership checks.
        self.allowed_paths = allowed_paths
        if self.allowed_paths:
            self.allowed_paths = set()
            for path in allowed_paths:
                self.allowed_paths.add(tuple(path))
        if ignore_dataframes is None:
            self.ignore_dataframes = set()
        else:
            if not isinstance(ignore_dataframes, list):
                raise TypeError("ignore_dataframes must be a list")
            assert (
                target_dataframe_name not in ignore_dataframes
            ), "Can't ignore target_dataframe!"
            self.ignore_dataframes = set(ignore_dataframes)
        self.ignore_columns = _build_ignore_columns(ignore_columns)
        self.target_dataframe_name = target_dataframe_name
        self.es = entityset
        # Determine which dataframe library backs this entityset so
        # incompatible primitives can be rejected below.
        for library in Library:
            if library.value == self.es.dataframe_type:
                df_library = library
                break
        aggregation_primitive_dict = primitives.get_aggregation_primitives()
        transform_primitive_dict = primitives.get_transform_primitives()
        if agg_primitives is None:
            agg_primitives = [
                p
                for p in primitives.get_default_aggregation_primitives()
                if df_library in p.compatibility
            ]
        # Primitive lists are sorted so feature generation is deterministic.
        self.agg_primitives = sorted(
            [
                check_primitive(
                    p,
                    "aggregation",
                    aggregation_primitive_dict,
                    transform_primitive_dict,
                )
                for p in agg_primitives
            ],
        )
        if trans_primitives is None:
            trans_primitives = [
                p
                for p in primitives.get_default_transform_primitives()
                if df_library in p.compatibility
            ]
        self.trans_primitives = sorted(
            [
                check_primitive(
                    p,
                    "transform",
                    aggregation_primitive_dict,
                    transform_primitive_dict,
                )
                for p in trans_primitives
            ],
        )
        if where_primitives is None:
            where_primitives = [primitives.Count]
        self.where_primitives = sorted(
            [
                check_primitive(
                    p,
                    "where",
                    aggregation_primitive_dict,
                    transform_primitive_dict,
                )
                for p in where_primitives
            ],
        )
        if groupby_trans_primitives is None:
            groupby_trans_primitives = []
        self.groupby_trans_primitives = sorted(
            [
                check_primitive(
                    p,
                    "groupby transform",
                    aggregation_primitive_dict,
                    transform_primitive_dict,
                )
                for p in groupby_trans_primitives
            ],
        )
        if primitive_options is None:
            primitive_options = {}
        all_primitives = (
            self.trans_primitives
            + self.agg_primitives
            + self.where_primitives
            + self.groupby_trans_primitives
        )
        bad_primitives = [
            prim.name for prim in all_primitives if df_library not in prim.compatibility
        ]
        if bad_primitives:
            msg = "Selected primitives are incompatible with {} EntitySets: {}"
            raise ValueError(msg.format(df_library.value, ", ".join(bad_primitives)))
        # Expand per-primitive options and fold any primitive-level ignore
        # lists into the global ignore sets.
        (
            self.primitive_options,
            self.ignore_dataframes,
            self.ignore_columns,
        ) = generate_all_primitive_options(
            all_primitives,
            primitive_options,
            self.ignore_dataframes,
            self.ignore_columns,
            self.es,
        )
        self.seed_features = sorted(seed_features or [], key=lambda f: f.unique_name())
        self.drop_exact = drop_exact or []
        self.drop_contains = drop_contains or []
        self.where_stacking_limit = where_stacking_limit
def build_features(self, return_types=None, verbose=False):
"""Automatically builds feature definitions for target
dataframe using Deep Feature Synthesis algorithm
Args:
return_types (list[woodwork.ColumnSchema] or str, optional):
List of ColumnSchemas defining the types of
columns to return. If None, defaults to returning all
numeric, categorical and boolean types. If given as
the string 'all', use all available return types.
verbose (bool, optional): If True, print progress.
Returns:
list[BaseFeature]: Returns a list of
features for target dataframe, sorted by feature depth
(shallow first).
"""
all_features = {}
self.where_clauses = defaultdict(set)
if return_types is None:
return_types = [
ColumnSchema(semantic_tags=["numeric"]),
ColumnSchema(semantic_tags=["category"]),
ColumnSchema(logical_type=Boolean),
ColumnSchema(logical_type=BooleanNullable),
]
elif return_types == "all":
pass
else:
msg = "return_types must be a list, or 'all'"
assert isinstance(return_types, list), msg
self._run_dfs(
self.es[self.target_dataframe_name],
RelationshipPath([]),
all_features,
max_depth=self.max_depth,
)
new_features = list(all_features[self.target_dataframe_name].values())
def filt(f):
# remove identity features of the ID field of the target dataframe
if (
isinstance(f, IdentityFeature)
and f.dataframe_name == self.target_dataframe_name
and f.column_name == self.es[self.target_dataframe_name].ww.index
):
return False
return True
# filter out features with undesired return types
if return_types != "all":
new_features = [
f
for f in new_features
if any(
True
for schema in return_types
if is_valid_input(f.column_schema, schema)
)
]
new_features = list(filter(filt, new_features))
new_features.sort(key=lambda f: f.get_depth())
new_features = self._filter_features(new_features)
if self.max_features > 0:
new_features = new_features[: self.max_features]
if verbose:
print("Built {} features".format(len(new_features)))
verbose = None
return new_features
def _filter_features(self, features):
assert isinstance(self.drop_exact, list), "drop_exact must be a list"
assert isinstance(self.drop_contains, list), "drop_contains must be a list"
f_keep = []
for f in features:
keep = True
for contains in self.drop_contains:
if contains in f.get_name():
keep = False
break
if f.get_name() in self.drop_exact:
keep = False
if keep:
f_keep.append(f)
return f_keep
    def _run_dfs(self, dataframe, relationship_path, all_features, max_depth):
        """
        Create features for the provided dataframe

        Args:
            dataframe (DataFrame): Dataframe for which to create features.
            relationship_path (RelationshipPath): The path to this dataframe.
            all_features (dict[dataframe name -> dict[str -> BaseFeature]]):
                Dict containing a dict for each dataframe. Each nested dict
                has features as values with their ids as keys.
            max_depth (int) : Maximum allowed depth of features.
        """
        # A negative depth budget means this branch of the recursion is done.
        if max_depth is not None and max_depth < 0:
            return
        all_features[dataframe.ww.name] = {}
        """
        Step 1 - Create identity features
        """
        self._add_identity_features(all_features, dataframe)
        """
        Step 2 - Recursively build features for each dataframe in a backward relationship
        """
        backward_dataframes = self.es.get_backward_dataframes(dataframe.ww.name)
        for b_dataframe_id, sub_relationship_path in backward_dataframes:
            # Skip if we've already created features for this dataframe.
            if b_dataframe_id in all_features:
                continue
            if b_dataframe_id in self.ignore_dataframes:
                continue
            new_path = relationship_path + sub_relationship_path
            if (
                self.allowed_paths
                and tuple(new_path.dataframes()) not in self.allowed_paths
            ):
                continue
            # Each relationship hop consumes one unit of the depth budget.
            new_max_depth = None
            if max_depth is not None:
                new_max_depth = max_depth - 1
            self._run_dfs(
                dataframe=self.es[b_dataframe_id],
                relationship_path=new_path,
                all_features=all_features,
                max_depth=new_max_depth,
            )
        """
        Step 3 - Create aggregation features for all deep backward relationships
        """
        backward_dataframes = self.es.get_backward_dataframes(
            dataframe.ww.name,
            deep=True,
        )
        for b_dataframe_id, sub_relationship_path in backward_dataframes:
            if b_dataframe_id in self.ignore_dataframes:
                continue
            new_path = relationship_path + sub_relationship_path
            if (
                self.allowed_paths
                and tuple(new_path.dataframes()) not in self.allowed_paths
            ):
                continue
            self._build_agg_features(
                parent_dataframe=self.es[dataframe.ww.name],
                child_dataframe=self.es[b_dataframe_id],
                all_features=all_features,
                max_depth=max_depth,
                relationship_path=sub_relationship_path,
            )
        """
        Step 4 - Create transform features of identity and aggregation features
        """
        self._build_transform_features(all_features, dataframe, max_depth=max_depth)
        """
        Step 5 - Recursively build features for each dataframe in a forward relationship
        """
        forward_dataframes = self.es.get_forward_dataframes(dataframe.ww.name)
        for f_dataframe_id, sub_relationship_path in forward_dataframes:
            # Skip if we've already created features for this dataframe.
            if f_dataframe_id in all_features:
                continue
            if f_dataframe_id in self.ignore_dataframes:
                continue
            new_path = relationship_path + sub_relationship_path
            if (
                self.allowed_paths
                and tuple(new_path.dataframes()) not in self.allowed_paths
            ):
                continue
            new_max_depth = None
            if max_depth is not None:
                new_max_depth = max_depth - 1
            self._run_dfs(
                dataframe=self.es[f_dataframe_id],
                relationship_path=new_path,
                all_features=all_features,
                max_depth=new_max_depth,
            )
        """
        Step 6 - Create direct features for forward relationships
        """
        forward_dataframes = self.es.get_forward_dataframes(dataframe.ww.name)
        for f_dataframe_id, sub_relationship_path in forward_dataframes:
            if f_dataframe_id in self.ignore_dataframes:
                continue
            new_path = relationship_path + sub_relationship_path
            if (
                self.allowed_paths
                and tuple(new_path.dataframes()) not in self.allowed_paths
            ):
                continue
            self._build_forward_features(
                all_features=all_features,
                relationship_path=sub_relationship_path,
                max_depth=max_depth,
            )
        """
        Step 7 - Create transform features of direct features
        """
        self._build_transform_features(
            all_features,
            dataframe,
            max_depth=max_depth,
            require_direct_input=True,
        )
        # now that all features are added, build where clauses
        self._build_where_clauses(all_features, dataframe)
def _handle_new_feature(self, new_feature, all_features):
"""Adds new feature to the dict
Args:
new_feature (:class:`.FeatureBase`): New feature being
checked.
all_features (dict[dataframe name -> dict[str -> BaseFeature]]):
Dict containing a dict for each dataframe. Each nested dict
has features as values with their ids as keys.
Returns:
dict[PrimitiveBase -> dict[feature id -> feature]]: Dict of
features with any new features.
Raises:
Exception: Attempted to add a single feature multiple times
"""
dataframe_name = new_feature.dataframe_name
name = new_feature.unique_name()
# Warn if this feature is already present, and it is not a seed feature.
# It is expected that a seed feature could also be generated by dfs.
if name in all_features[dataframe_name] and name not in (
f.unique_name() for f in self.seed_features
):
logger.warning(
"Attempting to add feature %s which is already "
"present. This is likely a bug." % new_feature,
)
return
all_features[dataframe_name][name] = new_feature
def _add_identity_features(self, all_features, dataframe):
"""converts all columns from the given dataframe into features
Args:
all_features (dict[dataframe name -> dict[str -> BaseFeature]]):
Dict containing a dict for each dataframe. Each nested dict
has features as values with their ids as keys.
dataframe (DataFrame): DataFrame to calculate features for.
"""
for col in dataframe.columns:
if col in self.ignore_columns[dataframe.ww.name] or col == LTI_COLUMN_NAME:
continue
new_f = IdentityFeature(self.es[dataframe.ww.name].ww[col])
self._handle_new_feature(all_features=all_features, new_feature=new_f)
# add seed features, if any, for dfs to build on top of
# if there are any multi output features, this will build on
# top of each output of the feature.
for f in self.seed_features:
if f.dataframe_name == dataframe.ww.name:
self._handle_new_feature(all_features=all_features, new_feature=f)
    def _build_where_clauses(self, all_features, dataframe):
        """Traverses all identity features and creates a Compare for
        each one, based on some heuristics

        Args:
            all_features (dict[dataframe name -> dict[str -> BaseFeature]]):
                Dict containing a dict for each dataframe. Each nested dict
                has features as values with their ids as keys.

            dataframe (DataFrame): DataFrame to calculate features for.
        """

        def is_valid_feature(f):
            # Where clauses are built on identity features, or on direct
            # features whose base feature exposes a column_name.
            if isinstance(f, IdentityFeature):
                return True
            if isinstance(f, DirectFeature) and getattr(
                f.base_features[0],
                "column_name",
                None,
            ):
                return True
            return False

        for feat in [
            f for f in all_features[dataframe.ww.name].values() if is_valid_feature(f)
        ]:
            # Get interesting_values from the EntitySet that was passed, which
            # is assumed to be the most recent version of the EntitySet.
            # Features can contain a stale EntitySet reference without
            # interesting_values
            if isinstance(feat, DirectFeature):
                df = feat.base_features[0].dataframe_name
                col = feat.base_features[0].column_name
            else:
                df = feat.dataframe_name
                col = feat.column_name
            metadata = self.es[df].ww.columns[col].metadata
            interesting_values = metadata.get("interesting_values")
            if interesting_values:
                for val in interesting_values:
                    # ``feat == val`` builds a boolean Compare feature that is
                    # later used as a where clause for aggregations.
                    self.where_clauses[dataframe.ww.name].add(feat == val)
    def _build_transform_features(
        self,
        all_features,
        dataframe,
        max_depth=0,
        require_direct_input=False,
    ):
        """Creates trans_features for all the columns in a dataframe

        Args:
            all_features (dict[dataframe name: dict->[str->:class:`BaseFeature`]]):
                Dict containing a dict for each dataframe. Each nested dict
                has features as values with their ids as keys

            dataframe (DataFrame): DataFrame to calculate features for.

            max_depth (int, optional): Maximum allowed depth of input features.

            require_direct_input (bool, optional): If True, only build
                transform features whose inputs (or groupby) include a
                DirectFeature, and skip inputs that are all DirectFeatures
                from the same relationship path.
        """
        new_max_depth = None
        if max_depth is not None:
            new_max_depth = max_depth - 1
        # Keep track of features to add until the end to avoid applying
        # transform primitives to features that were also built by transform primitives
        features_to_add = []
        for trans_prim in self.trans_primitives:
            current_options = self.primitive_options.get(
                trans_prim,
                self.primitive_options.get(trans_prim.name),
            )
            if ignore_dataframe_for_primitive(current_options, dataframe):
                continue
            input_types = trans_prim.input_types
            matching_inputs = self._get_matching_inputs(
                all_features,
                dataframe,
                new_max_depth,
                input_types,
                trans_prim,
                current_options,
                require_direct_input=require_direct_input,
                feature_filter=not_a_transform_input,
            )
            for matching_input in matching_inputs:
                if not can_stack_primitive_on_inputs(trans_prim, matching_input):
                    continue
                # Only stack on single-output base features.
                if not any(
                    True for bf in matching_input if bf.number_output_features != 1
                ):
                    new_f = TransformFeature(matching_input, primitive=trans_prim)
                    features_to_add.append(new_f)
        for groupby_prim in self.groupby_trans_primitives:
            current_options = self.primitive_options.get(
                groupby_prim,
                self.primitive_options.get(groupby_prim.name),
            )
            if ignore_dataframe_for_primitive(current_options, dataframe, groupby=True):
                continue
            input_types = groupby_prim.input_types[:]
            matching_inputs = self._get_matching_inputs(
                all_features,
                dataframe,
                new_max_depth,
                input_types,
                groupby_prim,
                current_options,
                feature_filter=not_a_transform_input,
            )
            # get columns to use as groupbys, use IDs as default unless other groupbys specified
            if any(
                True
                for option in current_options
                if dataframe.ww.name in option.get("include_groupby_columns", [])
            ):
                column_schemas = "all"
            else:
                column_schemas = [ColumnSchema(semantic_tags=["foreign_key"])]
            groupby_matches = self._features_by_type(
                all_features=all_features,
                dataframe=dataframe,
                max_depth=new_max_depth,
                column_schemas=column_schemas,
            )
            groupby_matches = filter_groupby_matches_by_options(
                groupby_matches,
                current_options,
            )
            for matching_input in matching_inputs:
                if not can_stack_primitive_on_inputs(groupby_prim, matching_input):
                    continue
                if any(True for bf in matching_input if bf.number_output_features != 1):
                    continue
                # These two flags are only computed when require_direct_input
                # is True, and are only read below under the same condition
                # (and the second only when the first is True), so the
                # walrus-scoped binding is safe.
                if require_direct_input:
                    if any_direct_in_matching_input := any(
                        isinstance(bf, DirectFeature) for bf in matching_input
                    ):
                        all_direct_and_same_path_in_matching_input = (
                            _all_direct_and_same_path(matching_input)
                        )
                for groupby in groupby_matches:
                    if require_direct_input:
                        # If require_direct_input, require a DirectFeature in input or as a
                        # groupby, and don't create features of inputs/groupbys which are
                        # all direct features with the same relationship path
                        #
                        # If we require_direct_input, we skip Feature generation
                        # in the following two cases:
                        # (1) --> There are no DirectFeatures in the matching input,
                        #         and groupby is not a DirectFeature
                        # (2) --> All of the matching input and groupby are DirectFeatures
                        #         with the same relationship path
                        groupby_is_direct = isinstance(groupby[0], DirectFeature)
                        # Checks case (1)
                        if not any_direct_in_matching_input:
                            if not groupby_is_direct:
                                continue
                        elif all_direct_and_same_path_in_matching_input:
                            # Checks case (2)
                            if (
                                groupby_is_direct
                                and groupby[0].relationship_path
                                == matching_input[0].relationship_path
                            ):
                                continue
                    new_f = GroupByTransformFeature(
                        list(matching_input),
                        groupby=groupby[0],
                        primitive=groupby_prim,
                    )
                    features_to_add.append(new_f)
        for new_f in features_to_add:
            self._handle_new_feature(all_features=all_features, new_feature=new_f)
    def _build_forward_features(self, all_features, relationship_path, max_depth=0):
        """Create DirectFeatures on the child dataframe for each feature of
        the parent dataframe reachable through the first relationship in
        ``relationship_path``.

        Args:
            all_features (dict[dataframe name -> dict[str -> BaseFeature]]):
                Dict containing a dict for each dataframe.
            relationship_path (RelationshipPath): Forward path whose first
                relationship links the child to the parent dataframe.
            max_depth (int): Maximum allowed depth of parent features to copy.
        """
        _, relationship = relationship_path[0]
        child_dataframe_name = relationship.child_dataframe.ww.name
        parent_dataframe = relationship.parent_dataframe
        features = self._features_by_type(
            all_features=all_features,
            dataframe=parent_dataframe,
            max_depth=max_depth,
            column_schemas="all",
        )
        for f in features:
            if self._feature_in_relationship_path(relationship_path, f):
                continue
            # limits allowing direct features of agg_feats with where clauses
            if isinstance(f, AggregationFeature):
                deep_base_features = [f] + f.get_dependencies(deep=True)
                for feat in deep_base_features:
                    if isinstance(feat, AggregationFeature) and feat.where is not None:
                        # NOTE(review): this ``continue`` only advances the
                        # inner loop, so the DirectFeature below is still
                        # created for ``f``. If the intent (per the comment
                        # above) is to skip ``f`` entirely, this should
                        # instead break out and continue the outer loop —
                        # confirm intended behavior before changing.
                        continue
            new_f = DirectFeature(f, child_dataframe_name, relationship=relationship)
            self._handle_new_feature(all_features=all_features, new_feature=new_f)
    def _build_agg_features(
        self,
        all_features,
        parent_dataframe,
        child_dataframe,
        max_depth,
        relationship_path,
    ):
        """Create AggregationFeatures on ``parent_dataframe`` from features of
        ``child_dataframe``, plus where-clause variants limited by
        ``self.where_primitives`` and ``self.where_stacking_limit``.

        Args:
            all_features (dict[dataframe name -> dict[str -> BaseFeature]]):
                Dict containing a dict for each dataframe.
            parent_dataframe (DataFrame): Dataframe the new features belong to.
            child_dataframe (DataFrame): Dataframe supplying input features.
            max_depth (int): Maximum allowed depth of input features.
            relationship_path (RelationshipPath): Backward path from parent
                to child.
        """
        new_max_depth = None
        if max_depth is not None:
            new_max_depth = max_depth - 1
        for agg_prim in self.agg_primitives:
            current_options = self.primitive_options.get(
                agg_prim,
                self.primitive_options.get(agg_prim.name),
            )
            if ignore_dataframe_for_primitive(current_options, child_dataframe):
                continue

            def feature_filter(f):
                # Remove direct features of parent dataframe and features in relationship path.
                return (
                    not _direct_of_dataframe(f, parent_dataframe)
                ) and not self._feature_in_relationship_path(relationship_path, f)

            input_types = agg_prim.input_types
            matching_inputs = self._get_matching_inputs(
                all_features,
                child_dataframe,
                new_max_depth,
                input_types,
                agg_prim,
                current_options,
                feature_filter=feature_filter,
            )
            matching_inputs = filter_matches_by_options(
                matching_inputs,
                current_options,
            )
            wheres = list(self.where_clauses[child_dataframe.ww.name])
            for matching_input in matching_inputs:
                if not can_stack_primitive_on_inputs(agg_prim, matching_input):
                    continue
                new_f = AggregationFeature(
                    matching_input,
                    parent_dataframe_name=parent_dataframe.ww.name,
                    relationship_path=relationship_path,
                    primitive=agg_prim,
                )
                self._handle_new_feature(new_f, all_features)
                # limit the stacking of where features
                # count up the number of where features
                # in this feature and its dependencies
                feat_wheres = []
                for f in matching_input:
                    if isinstance(f, AggregationFeature) and f.where is not None:
                        feat_wheres.append(f)
                    for feat in f.get_dependencies(deep=True):
                        if (
                            isinstance(feat, AggregationFeature)
                            and feat.where is not None
                        ):
                            feat_wheres.append(feat)
                if len(feat_wheres) >= self.where_stacking_limit:
                    continue
                # limits the aggregation feature by the given allowed feature types.
                if not any(
                    True
                    for primitive in self.where_primitives
                    if issubclass(type(agg_prim), type(primitive))
                ):
                    continue
                for where in wheres:
                    # limits the where feats so they are different than base feats
                    base_names = [f.unique_name() for f in new_f.base_features]
                    if any(
                        True
                        for base_feat in where.base_features
                        if base_feat.unique_name() in base_names
                    ):
                        continue
                    new_f = AggregationFeature(
                        matching_input,
                        parent_dataframe_name=parent_dataframe.ww.name,
                        relationship_path=relationship_path,
                        where=where,
                        primitive=agg_prim,
                    )
                    self._handle_new_feature(new_f, all_features)
    def _features_by_type(
        self,
        all_features,
        dataframe,
        max_depth,
        column_schemas=None,
    ):
        """Return the already-built features of ``dataframe`` whose depth is
        at most ``max_depth`` and whose column schema matches one of
        ``column_schemas``. Passing the string "all" disables schema
        filtering. Multi-output features are expanded into one entry per
        output slice.
        """
        if max_depth is not None and max_depth < 0:
            return []
        if dataframe.ww.name not in all_features:
            return []

        def expand_features(feature) -> List[Any]:
            """Internal method to return either the single feature
            or the output features

            Args:
                feature (Feature): Feature instance

            Returns:
                List[Any]: list of features
            """
            outputs = feature.number_output_features
            if outputs > 1:
                return [feature[i] for i in range(outputs)]
            return [feature]

        # Build the complete list of features prior to processing
        selected_features = [
            expand_features(feature)
            for feature in all_features[dataframe.ww.name].values()
        ]
        # Flatten the list of per-feature lists into one list.
        selected_features = functools.reduce(operator.iconcat, selected_features, [])
        column_schemas = column_schemas if column_schemas else set()
        if max_depth is None and column_schemas == "all":
            return selected_features
        # assigning seed_features locally adds a slight performance benefit by not having to look
        # up the property for each round of the comprehension
        seed_features = self.seed_features
        if max_depth is not None:
            selected_features = [
                feature
                for feature in selected_features
                if get_feature_depth(feature, stop_at=seed_features) <= max_depth
            ]

        def valid_input(column_schema) -> bool:
            """Helper method to validate the feature schema
            to the allowed column_schemas

            Args:
                column_schema (ColumnSchema): feature column schema

            Returns:
                bool: True if valid
            """
            return any(
                True
                for schema in column_schemas
                if is_valid_input(column_schema, schema)
            )

        if column_schemas and column_schemas != "all":
            selected_features = [
                feature
                for feature in selected_features
                if valid_input(feature.column_schema)
            ]
        return selected_features
def _feature_in_relationship_path(self, relationship_path, feature):
# must be identity feature to be in the relationship path
if not isinstance(feature, IdentityFeature):
return False
for _, relationship in relationship_path:
if (
relationship.child_name == feature.dataframe_name
and relationship._child_column_name == feature.column_name
):
return True
if (
relationship.parent_name == feature.dataframe_name
and relationship._parent_column_name == feature.column_name
):
return True
return False
    def _get_matching_inputs(
        self,
        all_features,
        dataframe,
        max_depth,
        input_types,
        primitive,
        primitive_options,
        require_direct_input=False,
        feature_filter=None,
    ):
        """Find all tuples of existing features on ``dataframe`` that satisfy
        ``primitive``'s input types, then filter them by primitive options,
        the direct-input requirement, and the numeric-foreign-key rule.

        Args:
            all_features (dict[dataframe name -> dict[str -> BaseFeature]]):
                Dict containing a dict for each dataframe.
            dataframe (DataFrame): DataFrame supplying candidate features.
            max_depth (int): Maximum allowed depth of candidate features.
            input_types (list): Primitive input types; either one signature
                (list of ColumnSchema) or a list of alternative signatures.
            primitive (PrimitiveBase): Primitive the inputs are for.
            primitive_options (list[dict]): Options to filter matches with.
            require_direct_input (bool): If True, each match must contain a
                DirectFeature and matches that are all DirectFeatures with
                the same relationship path are discarded.
            feature_filter (callable, optional): Predicate applied to
                candidate features before matching.

        Returns:
            Collection of feature tuples (may be a list or set).
        """
        # Normalize a single signature into a list of signatures.
        if not isinstance(input_types[0], list):
            input_types = [input_types]
        matching_inputs = []
        for input_type in input_types:
            features = self._features_by_type(
                all_features=all_features,
                dataframe=dataframe,
                max_depth=max_depth,
                column_schemas=list(input_type),
            )
            if not features:
                continue
            if feature_filter:
                features = [f for f in features if feature_filter(f)]
            matches = match(
                input_type,
                features,
                commutative=primitive.commutative,
                require_direct_input=require_direct_input,
            )
            matching_inputs.extend(matches)
        # everything following depends on populated matching_inputs
        if not matching_inputs:
            return matching_inputs
        if require_direct_input:
            # Don't create trans features of inputs which are all direct
            # features with the same relationship_path.
            matching_inputs = {
                inputs
                for inputs in matching_inputs
                if not _all_direct_and_same_path(inputs)
            }
        matching_inputs = filter_matches_by_options(
            matching_inputs,
            primitive_options,
            commutative=primitive.commutative,
        )
        # Don't build features on numeric foreign key columns
        matching_inputs = [
            match
            for match in matching_inputs
            if not _match_contains_numeric_foreign_key(match)
        ]
        return matching_inputs
def _match_contains_numeric_foreign_key(match):
    """Return True if any feature in *match* is a numeric foreign-key column."""
    numeric_fk = ColumnSchema(semantic_tags={"foreign_key", "numeric"})
    for feature in match:
        if is_valid_input(feature.column_schema, numeric_fk):
            return True
    return False
def not_a_transform_input(feature):
    """
    Verifies transform inputs are not transform features or direct features of transform features

    Returns True if a transform primitive can stack on the feature, and False if it cannot.
    """
    root_primitive = _find_root_primitive(feature)
    if isinstance(root_primitive, TransformPrimitive):
        return False
    return True
def _find_root_primitive(feature):
    """
    If a feature is a DirectFeature, finds the primitive of
    the "original" base feature (iteratively following the chain of
    DirectFeatures back to its root).
    """
    current = feature
    while isinstance(current, DirectFeature):
        current = current.base_features[0]
    return current.primitive
def can_stack_primitive_on_inputs(primitive, inputs):
    """
    Checks if features in inputs can be used with supplied primitive
    using the stacking rules.

    Returns True if stacking is possible, and False if not.
    """
    primitive_class = primitive.__class__
    # Convert stack_on / stack_on_exclude lists to tuples so they can be used
    # directly with isinstance().
    tup_primitive_stack_on = (
        tuple(primitive.stack_on) if primitive.stack_on is not None else None
    )
    tup_primitive_stack_on_exclude = (
        tuple(primitive.stack_on_exclude)
        if primitive.stack_on_exclude is not None
        else tuple()
    )
    primitive_stack_on_self: bool = primitive.stack_on_self
    for feature in inputs:
        # In the case that the feature is a DirectFeature, the feature's primitive will be a PrimitiveBase object.
        # However, we want to check stacking rules with the primitive the DirectFeature is based on.
        f_primitive = _find_root_primitive(feature)
        # Hard rejections first: self-stacking when disallowed, explicit
        # exclusions, and multi-output base features.
        if not primitive_stack_on_self and isinstance(f_primitive, primitive_class):
            return False
        if isinstance(f_primitive, tup_primitive_stack_on_exclude):
            return False
        if feature.number_output_features > 1:
            return False
        if f_primitive.base_of_exclude is not None and isinstance(
            primitive,
            tuple(f_primitive.base_of_exclude),
        ):
            return False
        # Acceptance rules: the feature passes if any of these allow-checks
        # matches; otherwise fall through to the final rejection.
        if primitive_stack_on_self and isinstance(f_primitive, primitive_class):
            continue
        if tup_primitive_stack_on is None or isinstance(
            f_primitive,
            tup_primitive_stack_on,
        ):
            continue
        if f_primitive.base_of is None:
            continue
        if primitive_class in f_primitive.base_of:
            continue
        return False
    return True
def match_by_schema(features, column_schema):
    """Return the features whose column schema is a valid input for
    *column_schema*."""
    return [
        feature
        for feature in features
        if is_valid_input(feature.column_schema, column_schema)
    ]
def match(
    input_types,
    features,
    replace=False,
    commutative=False,
    require_direct_input=False,
):
    """Recursively find all combinations of *features* whose column schemas
    satisfy *input_types*, one feature per input slot.

    Args:
        input_types (list[ColumnSchema]): Required schema for each input slot.
        features (list[FeatureBase]): Candidate features.
        replace (bool): If True, the same feature may fill more than one slot.
        commutative (bool): If True, orderings of the same features count as a
            single match; results are canonically sorted by feature name.
        require_direct_input (bool): If True, every match must contain at
            least one DirectFeature.

    Returns:
        For a single input type, a list of 1-tuples; otherwise a set of
        feature tuples.
    """
    to_match = input_types[0]
    matches = match_by_schema(features, to_match)
    # Base case: one slot left — each matching feature is its own match.
    if len(input_types) == 1:
        return [
            (m,)
            for m in matches
            if (not require_direct_input or isinstance(m, DirectFeature))
        ]
    matching_inputs = set()
    for m in matches:
        copy = features[:]
        if not replace:
            copy = [c for c in copy if c.unique_name() != m.unique_name()]
        # If we need a DirectFeature and this is not a DirectFeature then one of the rest must be.
        still_require_direct_input = require_direct_input and not isinstance(
            m,
            DirectFeature,
        )
        rest = match(
            input_types[1:],
            copy,
            replace,
            require_direct_input=still_require_direct_input,
        )
        for r in rest:
            new_match = [m] + list(r)
            # commutative uses frozenset instead of tuple because it doesn't
            # want multiple orderings of the same input
            if commutative:
                new_match = frozenset(new_match)
            else:
                new_match = tuple(new_match)
            matching_inputs.add(new_match)
    if commutative:
        # Canonicalize each frozenset into a tuple sorted by feature name.
        matching_inputs = {
            tuple(sorted(s, key=lambda x: x.get_name().lower()))
            for s in matching_inputs
        }
    return matching_inputs
def handle_primitive(primitive):
    """Instantiate *primitive* if it is a class, then verify the result is a
    PrimitiveBase instance."""
    instance = primitive if isinstance(primitive, PrimitiveBase) else primitive()
    assert isinstance(instance, PrimitiveBase), "must be a primitive"
    return instance
def check_primitive(
    primitive,
    prim_type,
    aggregation_primitive_dict,
    transform_primitive_dict,
):
    """Resolve *primitive* (a name string, class, or instance) into a
    primitive instance and validate it is the right kind for *prim_type*.

    Args:
        primitive (str, type, or PrimitiveBase): Primitive to resolve.
        prim_type (str): One of "transform", "groupby transform",
            "aggregation", or "where"; selects the lookup dict and the
            required superclass.
        aggregation_primitive_dict (dict[str -> type]): Known aggregation
            primitives keyed by snake-case name.
        transform_primitive_dict (dict[str -> type]): Known transform
            primitives keyed by snake-case name.

    Returns:
        PrimitiveBase: The resolved primitive instance.

    Raises:
        ValueError: If a primitive name is unknown or the resolved primitive
            is not of the required kind.
    """
    if prim_type in ("transform", "groupby transform"):
        prim_dict = transform_primitive_dict
        supertype = TransformPrimitive
        arg_name = (
            "trans_primitives"
            if prim_type == "transform"
            else "groupby_trans_primitives"
        )
        s = "a transform"
    if prim_type in ("aggregation", "where"):
        prim_dict = aggregation_primitive_dict
        supertype = AggregationPrimitive
        arg_name = (
            "agg_primitives" if prim_type == "aggregation" else "where_primitives"
        )
        s = "an aggregation"
    # String names are normalized to snake case before the dict lookup.
    if isinstance(primitive, str):
        prim_string = camel_and_title_to_snake(primitive)
        if prim_string not in prim_dict:
            raise ValueError(
                "Unknown {} primitive {}. "
                "Call ft.primitives.list_primitives() to get"
                " a list of available primitives".format(prim_type, prim_string),
            )
        primitive = prim_dict[prim_string]
    primitive = handle_primitive(primitive)
    if not isinstance(primitive, supertype):
        raise ValueError(
            "Primitive {} in {} is not {} "
            "primitive".format(type(primitive), arg_name, s),
        )
    return primitive
def _all_direct_and_same_path(input_features: List[FeatureBase]) -> bool:
    """Return True only if every feature is a DirectFeature sharing the
    relationship_path of the first feature in the list.
    """
    reference_path = input_features[0].relationship_path
    return all(
        isinstance(feat, DirectFeature) and feat.relationship_path == reference_path
        for feat in input_features
    )
def _build_ignore_columns(input_dict: Dict[str, List[str]]) -> DefaultDict[str, set]:
"""Iterates over the input dictionary to build the ignore_columns defaultdict.
Expects the input_dict's keys to be strings, and values to be lists of strings.
Throws a TypeError if they are not.
"""
ignore_columns = defaultdict(set)
if input_dict is not None:
for df_name, cols in input_dict.items():
if not isinstance(df_name, str) or not isinstance(cols, list):
raise TypeError("ignore_columns should be dict[str -> list]")
elif not all(isinstance(c, str) for c in cols):
raise TypeError("list in ignore_columns must only have string values")
ignore_columns[df_name] = set(cols)
return ignore_columns
def _direct_of_dataframe(feature, parent_dataframe):
    """Return True if ``feature`` is a DirectFeature whose parent dataframe
    name matches ``parent_dataframe``'s Woodwork name.
    """
    if not isinstance(feature, DirectFeature):
        return False
    return feature.parent_dataframe_name == parent_dataframe.ww.name
def get_feature_depth(feature, stop_at=None):
    """Memoized wrapper around ``feature.get_depth()``.

    Why here and not in FeatureBase? Putting it in FeatureBase was causing
    some weird pickle errors in spark tests in 3.9 and this keeps the caching
    local to DFS.
    """
    # Cache key combines feature name, dataframe name, and the stop_at bound,
    # since get_depth's result depends on all three.
    hash_key = hash(f"{feature.get_name()}{feature.dataframe_name}{stop_at}")
    # NOTE: a cached depth of 0 is falsy, so depth-0 features fall through and
    # are recomputed (and re-added) every call — harmless but not a cache hit.
    if cached_depth := feature_cache.get(CacheType.DEPTH, hash_key):
        return cached_depth
    depth = feature.get_depth(stop_at=stop_at)
    feature_cache.add(CacheType.DEPTH, hash_key, depth)
    return depth
| 47,691 | 35.406107 | 114 | py |
featuretools | featuretools-main/featuretools/synthesis/utils.py | from featuretools.feature_base import (
AggregationFeature,
FeatureOutputSlice,
GroupByTransformFeature,
TransformFeature,
)
from featuretools.utils.gen_utils import camel_and_title_to_snake
def _categorize_features(features):
    """Categorize each feature by its primitive type in a set of primitives along with any dependencies"""
    transform = set()
    agg = set()
    groupby = set()
    where = set()
    seen = set()

    # Iterative depth-first walk over each feature and its dependency tree,
    # deduplicating by feature name.
    stack = list(features)
    while stack:
        feat = stack.pop()
        if feat.get_name() in seen:
            continue
        # Output slices are categorized by their underlying base feature.
        if isinstance(feat, FeatureOutputSlice):
            feat = feat.base_feature
        if isinstance(feat, AggregationFeature):
            if feat.where:
                where.add(feat.primitive.name)
            else:
                agg.add(feat.primitive.name)
        elif isinstance(feat, GroupByTransformFeature):
            groupby.add(feat.primitive.name)
        elif isinstance(feat, TransformFeature):
            transform.add(feat.primitive.name)
        seen.add(feat.get_name())
        deps = feat.get_dependencies()
        if deps:
            stack.extend(deps)

    return transform, agg, groupby, where
def get_unused_primitives(specified, used):
    """Get a list of unused primitives based on a list of specified primitives and a list of output features"""
    if not specified:
        return []
    # Normalize every entry (string name or primitive object) to a snake-case
    # primitive name before diffing against the used set.
    names = set()
    for primitive in specified:
        if isinstance(primitive, str):
            names.add(camel_and_title_to_snake(primitive))
        else:
            names.add(primitive.name)
    return sorted(names.difference(used))
| 1,830 | 28.063492 | 111 | py |
featuretools | featuretools-main/featuretools/synthesis/api.py | # flake8: noqa
from featuretools.synthesis.deep_feature_synthesis import DeepFeatureSynthesis
from featuretools.synthesis.dfs import dfs
from featuretools.synthesis.encode_features import encode_features
from featuretools.synthesis.get_valid_primitives import get_valid_primitives
| 281 | 46 | 78 | py |
featuretools | featuretools-main/featuretools/synthesis/encode_features.py | import logging
import pandas as pd
from featuretools.computational_backends.utils import get_ww_types_from_features
from featuretools.utils.gen_utils import make_tqdm_iterator
logger = logging.getLogger("featuretools")
DEFAULT_TOP_N = 10
def encode_features(
    feature_matrix,
    features,
    top_n=DEFAULT_TOP_N,
    include_unknown=True,
    to_encode=None,
    inplace=False,
    drop_first=False,
    verbose=False,
):
    """Encode categorical features

    Args:
        feature_matrix (pd.DataFrame): Dataframe of features.
        features (list[PrimitiveBase]): Feature definitions in feature_matrix.
        top_n (int or dict[string -> int]): Number of top values to include.
            If dict[string -> int] is used, key is feature name and value is
            the number of top values to include for that feature.
            If a feature's name is not in dictionary, a default value of 10 is used.
        include_unknown (pd.DataFrame): Add feature encoding an unknown class.
            defaults to True
        to_encode (list[str]): List of feature names to encode.
            features not in this list are unencoded in the output matrix
            defaults to encode all necessary features.
        inplace (bool): Encode feature_matrix in place. Defaults to False.
        drop_first (bool): Whether to get k-1 dummies out of k categorical
            levels by removing the first level.
            defaults to False
        verbose (str): Print progress info.

    Returns:
        (pd.Dataframe, list) : encoded feature_matrix, encoded features

    Example:
        .. ipython:: python
            :suppress:

            from featuretools.tests.testing_utils import make_ecommerce_entityset
            import featuretools as ft
            es = make_ecommerce_entityset()

        .. ipython:: python

            f1 = ft.Feature(es["log"].ww["product_id"])
            f2 = ft.Feature(es["log"].ww["purchased"])
            f3 = ft.Feature(es["log"].ww["value"])

            features = [f1, f2, f3]
            ids = [0, 1, 2, 3, 4, 5]
            feature_matrix = ft.calculate_feature_matrix(features, es,
                                                         instance_ids=ids)

            fm_encoded, f_encoded = ft.encode_features(feature_matrix,
                                                       features)
            f_encoded

            fm_encoded, f_encoded = ft.encode_features(feature_matrix,
                                                       features, top_n=2)
            f_encoded

            fm_encoded, f_encoded = ft.encode_features(feature_matrix, features,
                                                       include_unknown=False)
            f_encoded

            fm_encoded, f_encoded = ft.encode_features(feature_matrix, features,
                                                       to_encode=['purchased'])
            f_encoded

            fm_encoded, f_encoded = ft.encode_features(feature_matrix, features,
                                                       drop_first=True)
            f_encoded
    """
    if not isinstance(feature_matrix, pd.DataFrame):
        msg = "feature_matrix must be a Pandas DataFrame"
        raise TypeError(msg)

    # Work on a copy unless the caller explicitly asked for in-place encoding.
    if inplace:
        X = feature_matrix
    else:
        X = feature_matrix.copy()

    old_feature_names = set()
    for feature in features:
        for fname in feature.get_feature_names():
            assert fname in X.columns, "Feature %s not found in feature matrix" % (
                fname
            )
            old_feature_names.add(fname)

    # Columns present in the matrix but not produced by any feature are
    # carried through untouched.
    pass_through = [col for col in X.columns if col not in old_feature_names]

    if verbose:
        iterator = make_tqdm_iterator(
            iterable=features,
            total=len(features),
            desc="Encoding pass 1",
            unit="feature",
        )
    else:
        iterator = features

    new_feature_list = []
    kept_columns = []
    encoded_columns = []
    columns_info = feature_matrix.ww.columns

    for f in iterator:
        # TODO: features with multiple columns are not encoded by this method,
        # which can cause an "encoded" matrix with non-numeric values
        is_discrete = {"category", "foreign_key"}.intersection(
            f.column_schema.semantic_tags,
        )
        if f.number_output_features > 1 or not is_discrete:
            if f.number_output_features > 1:
                logger.warning(
                    "Feature %s has multiple columns and will not "
                    "be encoded. This may result in a matrix with"
                    " non-numeric values." % (f),
                )
            new_feature_list.append(f)
            kept_columns.extend(f.get_feature_names())
            continue

        if to_encode is not None and f.get_name() not in to_encode:
            new_feature_list.append(f)
            kept_columns.extend(f.get_feature_names())
            continue

        val_counts = X[f.get_name()].value_counts()
        # Remove 0 count category values
        val_counts = val_counts[val_counts > 0].to_frame()
        index_name = val_counts.index.name
        val_counts = val_counts.rename(columns={val_counts.columns[0]: "count"})
        if index_name is None:
            if "index" in val_counts.columns:
                index_name = "level_0"
            else:
                index_name = "index"
        val_counts.reset_index(inplace=True)
        # Sort by count (desc) with the value itself as a deterministic
        # tie-breaker.
        val_counts = val_counts.sort_values(["count", index_name], ascending=False)
        val_counts.set_index(index_name, inplace=True)
        select_n = top_n
        if isinstance(top_n, dict):
            select_n = top_n.get(f.get_name(), DEFAULT_TOP_N)
        if drop_first:
            # BUG FIX: use the per-feature resolved count (select_n), not the
            # raw top_n argument — top_n may be a dict, and min(int, dict)
            # raises a TypeError.
            select_n = min(len(val_counts), select_n)
            select_n = max(select_n - 1, 1)
        unique = val_counts.head(select_n).index.tolist()
        for label in unique:
            add = f == label
            add_name = add.get_name()
            new_feature_list.append(add)
            new_col = X[f.get_name()] == label
            new_col.rename(add_name, inplace=True)
            encoded_columns.append(new_col)

        if include_unknown:
            # Extra boolean column flagging values outside the selected top-n.
            unknown = f.isin(unique).NOT().rename(f.get_name() + " is unknown")
            unknown_name = unknown.get_name()
            new_feature_list.append(unknown)
            new_col = ~X[f.get_name()].isin(unique)
            new_col.rename(unknown_name, inplace=True)
            encoded_columns.append(new_col)

        if inplace:
            X.drop(f.get_name(), axis=1, inplace=True)

    kept_columns.extend(pass_through)
    if inplace:
        for encoded_column in encoded_columns:
            X[encoded_column.name] = encoded_column
    else:
        X = pd.concat([X[kept_columns]] + encoded_columns, axis=1)

    entityset = new_feature_list[0].entityset
    ww_init_kwargs = get_ww_types_from_features(new_feature_list, entityset)
    # Grab ww metadata from feature matrix since it may be more exact
    for column in kept_columns:
        ww_init_kwargs["logical_types"][column] = columns_info[column].logical_type
        ww_init_kwargs["semantic_tags"][column] = columns_info[column].semantic_tags
        ww_init_kwargs["column_origins"][column] = columns_info[column].origin
    X.ww.init(**ww_init_kwargs)
    return X, new_feature_list
featuretools | featuretools-main/featuretools/synthesis/__init__.py | # flake8: noqa
from featuretools.synthesis.api import *
| 56 | 18 | 40 | py |
featuretools | featuretools-main/featuretools/synthesis/get_valid_primitives.py | from featuretools.primitives import AggregationPrimitive, TransformPrimitive
from featuretools.primitives.utils import (
get_aggregation_primitives,
get_transform_primitives,
)
from featuretools.synthesis.deep_feature_synthesis import DeepFeatureSynthesis
from featuretools.synthesis.utils import _categorize_features, get_unused_primitives
from featuretools.utils.gen_utils import Library
def get_valid_primitives(
    entityset,
    target_dataframe_name,
    max_depth=2,
    selected_primitives=None,
    **dfs_kwargs,
):
    """
    Returns two lists of primitives (transform and aggregation) containing
    primitives that can be applied to the specific target dataframe to create
    features. If the optional 'selected_primitives' parameter is not used,
    all discoverable primitives will be considered.

    Note:
        When using a ``max_depth`` greater than 1, some primitives returned by
        this function may not create any features if passed to DFS alone. These
        primitives relied on features created by other primitives as input
        (primitive stacking).

    Args:
        entityset (EntitySet): An already initialized entityset
        target_dataframe_name (str): Name of dataframe to create features for.
        max_depth (int, optional): Maximum allowed depth of features.
        selected_primitives(list[str or AggregationPrimitive/TransformPrimitive], optional):
            list of primitives to consider when looking for valid primitives.
            If None, all primitives will be considered
        dfs_kwargs (keywords): Additional keyword arguments to pass as keyword arguments to
            the DeepFeatureSynthesis object. Should not include ``max_depth``, ``agg_primitives``,
            or ``trans_primitives``, as those are passed in explicity.

    Returns:
        list[AggregationPrimitive], list[TransformPrimitive]:
            The list of valid aggregation primitives and the list of valid
            transform primitives.
    """
    agg_primitives = []
    trans_primitives = []
    available_aggs = get_aggregation_primitives()
    available_trans = get_transform_primitives()

    # Resolve which compute library (pandas/Dask/Spark) backs this entityset,
    # so primitives can be filtered by compatibility.
    # NOTE(review): if entityset.dataframe_type matched no Library value,
    # df_library would be left unbound and the code below would raise a
    # NameError — presumably unreachable for a valid EntitySet; confirm.
    for library in Library:
        if library.value == entityset.dataframe_type:
            df_library = library
            break

    if selected_primitives:
        # Normalize each selection (string name, class, or instance) and keep
        # only primitives compatible with the entityset's compute library.
        for prim in selected_primitives:
            if not isinstance(prim, str):
                if issubclass(prim, AggregationPrimitive):
                    prim_list = agg_primitives
                elif issubclass(prim, TransformPrimitive):
                    prim_list = trans_primitives
                else:
                    raise ValueError(
                        f"Selected primitive {prim} is not an "
                        "AggregationPrimitive, TransformPrimitive, or str",
                    )
            elif prim in available_aggs:
                prim = available_aggs[prim]
                prim_list = agg_primitives
            elif prim in available_trans:
                prim = available_trans[prim]
                prim_list = trans_primitives
            else:
                raise ValueError(f"'{prim}' is not a recognized primitive name")
            if df_library in prim.compatibility:
                prim_list.append(prim)
    else:
        # No selection given: consider every known compatible primitive.
        agg_primitives = [
            agg for agg in available_aggs.values() if df_library in agg.compatibility
        ]
        trans_primitives = [
            trans
            for trans in available_trans.values()
            if df_library in trans.compatibility
        ]

    # Run DFS feature construction (without calculation) to find out which
    # primitives actually produce features for the target dataframe.
    dfs_object = DeepFeatureSynthesis(
        target_dataframe_name,
        entityset,
        agg_primitives=agg_primitives,
        trans_primitives=trans_primitives,
        max_depth=max_depth,
        **dfs_kwargs,
    )

    features = dfs_object.build_features()

    trans, agg, _, _ = _categorize_features(features)

    trans_unused = get_unused_primitives(trans_primitives, trans)
    agg_unused = get_unused_primitives(agg_primitives, agg)

    # switch from str to class
    agg_unused = [available_aggs[name] for name in agg_unused]
    trans_unused = [available_trans[name] for name in trans_unused]

    used_agg_prims = set(agg_primitives).difference(set(agg_unused))
    used_trans_prims = set(trans_primitives).difference(set(trans_unused))
    return list(used_agg_prims), list(used_trans_prims)
featuretools | featuretools-main/featuretools/tests/conftest.py | import contextlib
import copy
import os
import composeml as cp
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, Integer
from featuretools import EntitySet, demo
from featuretools.primitives import AggregationPrimitive, TransformPrimitive
from featuretools.tests.testing_utils import make_ecommerce_entityset, to_pandas
from featuretools.utils.gen_utils import import_or_none
from featuretools.utils.spark_utils import pd_to_spark_clean
@pytest.fixture()
def dask_cluster():
    """Yield a local Dask distributed cluster; skip the test if Dask is absent."""
    distributed = pytest.importorskip(
        "distributed",
        reason="Dask not installed, skipping",
    )
    if not distributed:
        return
    with distributed.LocalCluster() as cluster:
        yield cluster
@pytest.fixture()
def three_worker_dask_cluster():
distributed = pytest.importorskip(
"distributed",
reason="Dask not installed, skipping",
)
if distributed:
with distributed.LocalCluster(n_workers=3) as cluster:
yield cluster
@pytest.fixture(scope="session", autouse=True)
def spark_session():
    # Session-wide SparkSession shared by all Spark tests. Returns None
    # (implicitly) when pyspark is not installed, since import_or_none
    # yields a falsy value in that case.
    sql = import_or_none("pyspark.sql")

    if sql:
        spark = (
            sql.SparkSession.builder.master("local[2]")
            .config(
                "spark.driver.extraJavaOptions",
                "-Dio.netty.tryReflectionSetAccessible=True",
            )
            .config("spark.sql.shuffle.partitions", "2")
            .config("spark.driver.bindAddress", "127.0.0.1")
            .getOrCreate()
        )

        return spark
@pytest.fixture(scope="session")
def make_es():
return make_ecommerce_entityset()
@pytest.fixture(scope="session")
def make_int_es():
return make_ecommerce_entityset(with_integer_time_index=True)
@pytest.fixture
def pd_es(make_es):
return copy.deepcopy(make_es)
@pytest.fixture
def pd_int_es(make_int_es):
return copy.deepcopy(make_int_es)
@pytest.fixture
def dask_int_es(pd_int_es):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
es = EntitySet(id=pd_int_es.id)
for df in pd_int_es.dataframes:
dd_df = dd.from_pandas(df.reset_index(drop=True), npartitions=4)
dd_df.ww.init(schema=df.ww.schema)
es.add_dataframe(dd_df)
for rel in pd_int_es.relationships:
es.add_relationship(
rel.parent_dataframe.ww.name,
rel._parent_column_name,
rel.child_dataframe.ww.name,
rel._child_column_name,
)
return es
@pytest.fixture
def spark_int_es(pd_int_es):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
es = EntitySet(id=pd_int_es.id)
for df in pd_int_es.dataframes:
cleaned_df = pd_to_spark_clean(df).reset_index(drop=True)
spark_df = ps.from_pandas(cleaned_df)
spark_df.ww.init(schema=df.ww.schema)
es.add_dataframe(spark_df)
for rel in pd_int_es.relationships:
es.add_relationship(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
return es
@pytest.fixture(params=["pd_int_es", "dask_int_es", "spark_int_es"])
def int_es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def dask_es(pd_es):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
es = EntitySet(id=pd_es.id)
for df in pd_es.dataframes:
dd_df = dd.from_pandas(df.reset_index(drop=True), npartitions=4)
dd_df.ww.init(schema=df.ww.schema)
es.add_dataframe(dd_df)
for rel in pd_es.relationships:
es.add_relationship(
rel.parent_dataframe.ww.name,
rel._parent_column_name,
rel.child_dataframe.ww.name,
rel._child_column_name,
)
return es
@pytest.fixture
def spark_es(pd_es):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
es = EntitySet(id=pd_es.id)
for df in pd_es.dataframes:
cleaned_df = pd_to_spark_clean(df).reset_index(drop=True)
spark_df = ps.from_pandas(cleaned_df)
spark_df.ww.init(schema=df.ww.schema)
es.add_dataframe(spark_df)
for rel in pd_es.relationships:
es.add_relationship(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
return es
@pytest.fixture(params=["pd_es", "dask_es", "spark_es"])
def es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def pd_latlong_df():
df = pd.DataFrame({"idx": [0, 1, 2], "latLong": [pd.NA, (1, 2), (pd.NA, pd.NA)]})
return df
@pytest.fixture
def dask_latlong_df(pd_latlong_df):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
return dd.from_pandas(pd_latlong_df.reset_index(drop=True), npartitions=4)
@pytest.fixture
def spark_latlong_df(pd_latlong_df):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
cleaned_df = pd_to_spark_clean(pd_latlong_df)
pdf = ps.from_pandas(cleaned_df)
return pdf
@pytest.fixture(params=["pd_latlong_df", "dask_latlong_df", "spark_latlong_df"])
def latlong_df(request):
return request.getfixturevalue(request.param)
@pytest.fixture(params=["pd_diamond_es", "dask_diamond_es", "spark_diamond_es"])
def diamond_es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def pd_diamond_es():
countries_df = pd.DataFrame({"id": range(2), "name": ["US", "Canada"]})
regions_df = pd.DataFrame(
{
"id": range(3),
"country_id": [0, 0, 1],
"name": ["Northeast", "South", "Quebec"],
},
).astype({"name": "category"})
stores_df = pd.DataFrame(
{
"id": range(5),
"region_id": [0, 1, 2, 2, 1],
"square_ft": [2000, 3000, 1500, 2500, 2700],
},
)
customers_df = pd.DataFrame(
{
"id": range(5),
"region_id": [1, 0, 0, 1, 1],
"name": ["A", "B", "C", "D", "E"],
},
)
transactions_df = pd.DataFrame(
{
"id": range(8),
"store_id": [4, 4, 2, 3, 4, 0, 1, 1],
"customer_id": [3, 0, 2, 4, 3, 3, 2, 3],
"amount": [100, 40, 45, 83, 13, 94, 27, 81],
},
)
dataframes = {
"countries": (countries_df, "id"),
"regions": (regions_df, "id"),
"stores": (stores_df, "id"),
"customers": (customers_df, "id"),
"transactions": (transactions_df, "id"),
}
relationships = [
("countries", "id", "regions", "country_id"),
("regions", "id", "stores", "region_id"),
("regions", "id", "customers", "region_id"),
("stores", "id", "transactions", "store_id"),
("customers", "id", "transactions", "customer_id"),
]
return EntitySet(
id="ecommerce_diamond",
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture
def dask_diamond_es(pd_diamond_es):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
dataframes = {}
for df in pd_diamond_es.dataframes:
dd_df = dd.from_pandas(df, npartitions=2)
dd_df.ww.init(schema=df.ww.schema)
dataframes[df.ww.name] = (dd_df,)
relationships = [
(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
for rel in pd_diamond_es.relationships
]
return EntitySet(
id=pd_diamond_es.id,
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture
def spark_diamond_es(pd_diamond_es):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
dataframes = {}
for df in pd_diamond_es.dataframes:
spark_df = ps.from_pandas(pd_to_spark_clean(df))
spark_df.ww.init(schema=df.ww.schema)
dataframes[df.ww.name] = (spark_df,)
relationships = [
(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
for rel in pd_diamond_es.relationships
]
return EntitySet(
id=pd_diamond_es.id,
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture(
params=["pd_default_value_es", "dask_default_value_es", "spark_default_value_es"],
)
def default_value_es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def pd_default_value_es():
transactions = pd.DataFrame(
{"id": [1, 2, 3, 4], "session_id": ["a", "a", "b", "c"], "value": [1, 1, 1, 1]},
)
sessions = pd.DataFrame({"id": ["a", "b"]})
es = EntitySet()
es.add_dataframe(dataframe_name="transactions", dataframe=transactions, index="id")
es.add_dataframe(dataframe_name="sessions", dataframe=sessions, index="id")
es.add_relationship("sessions", "id", "transactions", "session_id")
return es
@pytest.fixture
def dask_default_value_es(pd_default_value_es):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
dataframes = {}
for df in pd_default_value_es.dataframes:
dd_df = dd.from_pandas(df, npartitions=4)
dd_df.ww.init(schema=df.ww.schema)
dataframes[df.ww.name] = (dd_df,)
relationships = [
(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
for rel in pd_default_value_es.relationships
]
return EntitySet(
id=pd_default_value_es.id,
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture
def spark_default_value_es(pd_default_value_es):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
dataframes = {}
for df in pd_default_value_es.dataframes:
spark_df = ps.from_pandas(pd_to_spark_clean(df))
spark_df.ww.init(schema=df.ww.schema)
dataframes[df.ww.name] = (spark_df,)
relationships = [
(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
for rel in pd_default_value_es.relationships
]
return EntitySet(
id=pd_default_value_es.id,
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture(
params=["pd_home_games_es", "dask_home_games_es", "spark_home_games_es"],
)
def home_games_es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def pd_home_games_es():
teams = pd.DataFrame({"id": range(3), "name": ["Breakers", "Spirit", "Thorns"]})
games = pd.DataFrame(
{
"id": range(5),
"home_team_id": [2, 2, 1, 0, 1],
"away_team_id": [1, 0, 2, 1, 0],
"home_team_score": [3, 0, 1, 0, 4],
"away_team_score": [2, 1, 2, 0, 0],
},
)
dataframes = {"teams": (teams, "id"), "games": (games, "id")}
relationships = [("teams", "id", "games", "home_team_id")]
return EntitySet(dataframes=dataframes, relationships=relationships)
@pytest.fixture
def dask_home_games_es(pd_home_games_es):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
dataframes = {}
for df in pd_home_games_es.dataframes:
dd_df = dd.from_pandas(df, npartitions=2)
dd_df.ww.init(schema=df.ww.schema)
dataframes[df.ww.name] = (dd_df,)
relationships = [
(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
for rel in pd_home_games_es.relationships
]
return EntitySet(
id=pd_home_games_es.id,
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture
def spark_home_games_es(pd_home_games_es):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
dataframes = {}
for df in pd_home_games_es.dataframes:
spark_df = ps.from_pandas(pd_to_spark_clean(df))
spark_df.ww.init(schema=df.ww.schema)
dataframes[df.ww.name] = (spark_df,)
relationships = [
(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
for rel in pd_home_games_es.relationships
]
return EntitySet(
id=pd_home_games_es.id,
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture
def games_es(home_games_es):
return home_games_es.add_relationship("teams", "id", "games", "away_team_id")
@pytest.fixture
def pd_mock_customer():
return demo.load_mock_customer(return_entityset=True, random_seed=0)
@pytest.fixture
def dd_mock_customer(pd_mock_customer):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
dataframes = {}
for df in pd_mock_customer.dataframes:
dd_df = dd.from_pandas(df.reset_index(drop=True), npartitions=4)
dd_df.ww.init(schema=df.ww.schema)
dataframes[df.ww.name] = (
dd_df,
df.ww.index,
df.ww.time_index,
df.ww.logical_types,
)
relationships = [
(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
for rel in pd_mock_customer.relationships
]
return EntitySet(
id=pd_mock_customer.id,
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture
def spark_mock_customer(pd_mock_customer):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
dataframes = {}
for df in pd_mock_customer.dataframes:
cleaned_df = pd_to_spark_clean(df).reset_index(drop=True)
dataframes[df.ww.name] = (
ps.from_pandas(cleaned_df),
df.ww.index,
df.ww.time_index,
df.ww.logical_types,
)
relationships = [
(
rel._parent_dataframe_name,
rel._parent_column_name,
rel._child_dataframe_name,
rel._child_column_name,
)
for rel in pd_mock_customer.relationships
]
return EntitySet(
id=pd_mock_customer.id,
dataframes=dataframes,
relationships=relationships,
)
@pytest.fixture(params=["pd_mock_customer", "dd_mock_customer", "spark_mock_customer"])
def mock_customer(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def lt(es):
    """Build label times for the 'log' dataframe using composeml's LabelMaker."""

    def label_func(df):
        # Label is True when the window's summed 'value' exceeds 10.
        return df["value"].sum() > 10

    kwargs = {
        "time_index": "datetime",
        "labeling_function": label_func,
        "window_size": "1m",
    }
    # composeml renamed the target-id argument in 0.10.0; pick the right one.
    if parse(cp.__version__) >= parse("0.10.0"):
        kwargs["target_dataframe_index"] = "id"
    else:
        kwargs["target_dataframe_name"] = "id"  # pragma: no cover

    lm = cp.LabelMaker(**kwargs)

    df = es["log"]
    # Dask/Spark-backed dataframes must be materialized before searching.
    df = to_pandas(df)
    labels = lm.search(df, num_examples_per_instance=-1)
    labels = labels.rename(columns={"cutoff_time": "time"})
    return labels
@pytest.fixture(params=["pd_dataframes", "dask_dataframes", "spark_dataframes"])
def dataframes(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def pd_dataframes():
    """Return the pandas cards/transactions dataframe dictionary used in DFS tests."""
    cards = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
    transactions = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            "card_id": [1, 2, 1, 3, 4, 5],
            "transaction_time": [10, 12, 13, 20, 21, 20],
            "fraud": [True, False, False, False, True, True],
        },
    )
    # Tuples are (dataframe, index) and (dataframe, index, time_index).
    return {
        "cards": (cards, "id"),
        "transactions": (transactions, "id", "transaction_time"),
    }
@pytest.fixture
def dask_dataframes():
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
transactions_df = pd.DataFrame(
{
"id": [1, 2, 3, 4, 5, 6],
"card_id": [1, 2, 1, 3, 4, 5],
"transaction_time": [10, 12, 13, 20, 21, 20],
"fraud": [True, False, False, False, True, True],
},
)
cards_df = dd.from_pandas(cards_df, npartitions=2)
transactions_df = dd.from_pandas(transactions_df, npartitions=2)
cards_ltypes = {"id": Integer}
transactions_ltypes = {
"id": Integer,
"card_id": Integer,
"transaction_time": Integer,
"fraud": Boolean,
}
dataframes = {
"cards": (cards_df, "id", None, cards_ltypes),
"transactions": (
transactions_df,
"id",
"transaction_time",
transactions_ltypes,
),
}
return dataframes
@pytest.fixture
def spark_dataframes():
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
cards_df = ps.DataFrame({"id": [1, 2, 3, 4, 5]})
transactions_df = ps.DataFrame(
{
"id": [1, 2, 3, 4, 5, 6],
"card_id": [1, 2, 1, 3, 4, 5],
"transaction_time": [10, 12, 13, 20, 21, 20],
"fraud": [True, False, False, False, True, True],
},
)
cards_ltypes = {"id": Integer}
transactions_ltypes = {
"id": Integer,
"card_id": Integer,
"transaction_time": Integer,
"fraud": Boolean,
}
dataframes = {
"cards": (cards_df, "id", None, cards_ltypes),
"transactions": (
transactions_df,
"id",
"transaction_time",
transactions_ltypes,
),
}
return dataframes
@pytest.fixture
def relationships():
return [("cards", "id", "transactions", "card_id")]
@pytest.fixture(params=["pd_transform_es", "dask_transform_es", "spark_transform_es"])
def transform_es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def pd_transform_es():
# Create dataframe
df = pd.DataFrame(
{
"a": [14, 12, 10],
"b": [False, False, True],
"b1": [True, True, False],
"b12": [4, 5, 6],
"P": [10, 15, 12],
},
)
es = EntitySet(id="test")
# Add dataframe to entityset
es.add_dataframe(
dataframe_name="first",
dataframe=df,
index="index",
make_index=True,
)
return es
@pytest.fixture
def dask_transform_es(pd_transform_es):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
es = EntitySet(id=pd_transform_es.id)
for df in pd_transform_es.dataframes:
es.add_dataframe(
dataframe_name=df.ww.name,
dataframe=dd.from_pandas(df, npartitions=2),
index=df.ww.index,
logical_types=df.ww.logical_types,
)
return es
@pytest.fixture
def spark_transform_es(pd_transform_es):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
es = EntitySet(id=pd_transform_es.id)
for df in pd_transform_es.dataframes:
es.add_dataframe(
dataframe_name=df.ww.name,
dataframe=ps.from_pandas(df),
index=df.ww.index,
logical_types=df.ww.logical_types,
)
return es
@pytest.fixture(
params=[
"divide_by_zero_es_pd",
"divide_by_zero_es_dask",
"divide_by_zero_es_spark",
],
)
def divide_by_zero_es(request):
return request.getfixturevalue(request.param)
@pytest.fixture
def divide_by_zero_es_pd():
df = pd.DataFrame(
{
"id": [0, 1, 2, 3],
"col1": [1, 0, -3, 4],
"col2": [0, 0, 0, 4],
},
)
return EntitySet("data", {"zero": (df, "id", None)})
@pytest.fixture
def divide_by_zero_es_dask(divide_by_zero_es_pd):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
es = EntitySet(id=divide_by_zero_es_pd.id)
for df in divide_by_zero_es_pd.dataframes:
es.add_dataframe(
dataframe_name=df.ww.name,
dataframe=dd.from_pandas(df, npartitions=2),
index=df.ww.index,
logical_types=df.ww.logical_types,
)
return es
@pytest.fixture
def divide_by_zero_es_spark(divide_by_zero_es_pd):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
es = EntitySet(id=divide_by_zero_es_pd.id)
for df in divide_by_zero_es_pd.dataframes:
es.add_dataframe(
dataframe_name=df.ww.name,
dataframe=ps.from_pandas(df),
index=df.ww.index,
logical_types=df.ww.logical_types,
)
return es
@pytest.fixture
def window_series_pd():
return pd.Series(
range(20),
index=pd.date_range(start="2020-01-01", end="2020-01-20"),
)
@pytest.fixture
def window_date_range_pd():
return pd.date_range(start="2022-11-1", end="2022-11-5", periods=30)
@pytest.fixture
def rolling_outlier_series_pd():
return pd.Series(
[0] * 4 + [10] + [0] * 4 + [10] + [0] * 5,
index=pd.date_range(start="2020-01-01", end="2020-01-15", periods=15),
)
@pytest.fixture
def postal_code_dataframe_pd():
df = pd.DataFrame(
{
"string_dtype": pd.Series(["90210", "60018", "10010", "92304-4201"]),
"int_dtype": pd.Series([10000, 20000, 30000]).astype("category"),
"has_nulls": pd.Series([np.nan, 20000, 30000]).astype("category"),
},
)
return df
@pytest.fixture
def postal_code_dataframe_pyspark(postal_code_dataframe_pd):
ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
df = ps.from_pandas(postal_code_dataframe_pd)
return df
@pytest.fixture
def postal_code_dataframe_dask(postal_code_dataframe_pd):
dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
df = dd.from_pandas(
postal_code_dataframe_pd,
npartitions=1,
).categorize()
return df
@pytest.fixture(
    params=[
        "postal_code_dataframe_pd",
        "postal_code_dataframe_pyspark",
        "postal_code_dataframe_dask",
    ],
)
def postal_code_dataframe(request):
    """Parametrized fixture yielding the postal-code frame on each backend,
    with Woodwork initialized so every column uses the PostalCode logical type."""
    df = request.getfixturevalue(request.param)
    df.ww.init(
        logical_types={
            "string_dtype": "PostalCode",
            "int_dtype": "PostalCode",
            "has_nulls": "PostalCode",
        },
    )
    return df
def create_test_credentials(test_path):
    """Write a minimal AWS shared-credentials file containing a 'test' profile."""
    lines = [
        "[test]\n",
        "aws_access_key_id=AKIAIOSFODNN7EXAMPLE\n",
        "aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n",
    ]
    with open(test_path, "w+") as f:
        f.writelines(lines)
def create_test_config(test_path_config):
    """Write a minimal AWS config file containing a 'test' profile."""
    lines = [
        "[profile test]\n",
        "region=us-east-2\n",
        "output=text\n",
    ]
    with open(test_path_config, "w+") as f:
        f.writelines(lines)
@pytest.fixture
def setup_test_profile(monkeypatch, tmp_path):
    """Point AWS environment variables at temporary credential/config files
    for a 'test' profile, then remove the files after the test completes."""
    cache = tmp_path.joinpath(".cache")
    cache.mkdir()
    test_path = str(cache.joinpath("test_credentials"))
    test_path_config = str(cache.joinpath("test_config"))
    # Redirect AWS credential resolution to the temp files and clear any
    # real credentials from the environment so they cannot leak into tests.
    monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE", test_path)
    monkeypatch.setenv("AWS_CONFIG_FILE", test_path_config)
    monkeypatch.delenv("AWS_ACCESS_KEY_ID", raising=False)
    monkeypatch.delenv("AWS_SECRET_ACCESS_KEY", raising=False)
    monkeypatch.setenv("AWS_PROFILE", "test")
    # Best-effort cleanup of leftovers from a previous run.
    with contextlib.suppress(OSError):
        os.remove(test_path)
        os.remove(test_path_config)  # pragma: no cover
    create_test_credentials(test_path)
    create_test_config(test_path_config)
    yield
    os.remove(test_path)
    os.remove(test_path_config)
@pytest.fixture
def test_aggregation_primitive():
    """Return a minimal AggregationPrimitive subclass operating on numeric columns."""
    class TestAgg(AggregationPrimitive):
        name = "test"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        # empty stack_on: no other primitives may be stacked on this one
        stack_on = []
    return TestAgg
@pytest.fixture
def test_transform_primitive():
    """Return a minimal TransformPrimitive subclass operating on numeric columns."""
    class TestTransform(TransformPrimitive):
        name = "test"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        # empty stack_on: no other primitives may be stacked on this one
        stack_on = []
    return TestTransform
@pytest.fixture
def strings_that_have_triggered_errors_before():
    """Regression inputs: strings that previously caused errors in string-handling
    code. Do not edit the literals -- they must reproduce the original failures."""
    return [
        " ",
        '"This Borderlands game here"" is the perfect conclusion to the ""Borderlands 3"" line, which focuses on the fans ""favorite character and gives the players the opportunity to close for a long time some very important questions about\'s character and the memorable scenery with which the players interact.',
    ]
| 25,391 | 27.43449 | 315 | py |
featuretools | featuretools-main/featuretools/tests/test_version.py | from featuretools import __version__
def test_version():
    """Sanity check that the package reports the expected release version."""
    expected = "1.26.0"
    assert __version__ == expected
| 94 | 14.833333 | 36 | py |
featuretools | featuretools-main/featuretools/tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/demo_tests/test_demo_data.py | import urllib.request
import pandas as pd
import pytest
from featuretools import EntitySet
from featuretools.demo import load_flight, load_mock_customer, load_retail, load_weather
@pytest.fixture(autouse=True)
def set_testing_headers():
    """Install a urllib opener that adds a 'Testing: True' header to demo-data
    downloads, and restore default opener behavior afterwards.

    The original fixture installed the opener globally and never undid it,
    leaking the custom header into any urllib request made after these tests.
    """
    opener = urllib.request.build_opener()
    opener.addheaders = [("Testing", "True")]
    urllib.request.install_opener(opener)
    yield
    # Install a fresh default opener so the custom header does not persist
    # beyond this test.
    urllib.request.install_opener(urllib.request.build_opener())
def test_load_retail_diff():
    """load_retail(nrows=...) should return an EntitySet with exactly that many
    order_products rows for each requested size."""
    first_count = 10
    first_es = load_retail(nrows=first_count)
    assert isinstance(first_es, EntitySet)
    assert first_es["order_products"].shape[0] == first_count

    second_count = 11
    second_es = load_retail(nrows=second_count)
    assert second_es["order_products"].shape[0] == second_count
def test_mock_customer():
    """load_mock_customer should build an EntitySet with the requested table sizes."""
    sizes = {
        "customers": 4,
        "products": 3,
        "sessions": 30,
        "transactions": 400,
    }
    es = load_mock_customer(
        n_customers=sizes["customers"],
        n_products=sizes["products"],
        n_sessions=sizes["sessions"],
        n_transactions=sizes["transactions"],
        random_seed=0,
        return_entityset=True,
    )
    assert isinstance(es, EntitySet)
    actual_names = {df.ww.name for df in es.dataframes}
    assert actual_names == {"transactions", "products", "sessions", "customers"}
    for name, expected_len in sizes.items():
        assert len(es[name]) == expected_len
def test_load_flight():
    """The filtered flight demo should contain four tables with known shapes."""
    es = load_flight(
        month_filter=[1],
        categorical_filter={"origin_city": ["Charlotte, NC"]},
        return_single_table=False,
        nrows=1000,
    )
    assert isinstance(es, EntitySet)
    expected_shapes = {
        "airports": (11, 3),
        "flights": (13, 9),
        "trip_logs": (103, 21),
        "airlines": (1, 1),
    }
    for name, shape in expected_shapes.items():
        assert es[name].shape == shape
def test_weather():
    """load_weather returns an EntitySet by default and a DataFrame on request."""
    es = load_weather()
    assert isinstance(es, EntitySet)
    assert es["temperatures"].shape == (3650, 3)

    single_table = load_weather(return_single_table=True)
    assert isinstance(single_table, pd.DataFrame)
| 2,170 | 28.739726 | 88 | py |
featuretools | featuretools-main/featuretools/tests/demo_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/synthesis/test_dfs_method.py | from unittest.mock import patch
import composeml as cp
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import NaturalLanguage
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE,
)
from featuretools.entityset import EntitySet, Timedelta
from featuretools.exceptions import UnusedPrimitiveWarning
from featuretools.primitives import GreaterThanScalar, Max, Mean, Min, Sum
from featuretools.primitives.base import AggregationPrimitive, TransformPrimitive
from featuretools.synthesis import dfs
from featuretools.synthesis.deep_feature_synthesis import DeepFeatureSynthesis
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library, import_or_none, is_instance
dd = import_or_none("dask.dataframe")
@pytest.fixture
def datetime_es():
    """EntitySet of cards and time-indexed transactions for cutoff-time tests.

    Transactions span 2011-2014 (including a leap day) so tests can exercise
    training windows and cutoff times.
    """
    cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
    transactions_df = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5],
            "card_id": [1, 1, 5, 1, 5],
            "transaction_time": pd.to_datetime(
                [
                    "2011-2-28 04:00",
                    "2012-2-28 05:00",
                    "2012-2-29 06:00",
                    "2012-3-1 08:00",
                    "2014-4-1 10:00",
                ],
            ),
            "fraud": [True, False, False, False, True],
        },
    )
    datetime_es = EntitySet(id="fraud_data")
    datetime_es = datetime_es.add_dataframe(
        dataframe_name="transactions",
        dataframe=transactions_df,
        index="id",
        time_index="transaction_time",
    )
    datetime_es = datetime_es.add_dataframe(
        dataframe_name="cards",
        dataframe=cards_df,
        index="id",
    )
    # one card -> many transactions
    datetime_es = datetime_es.add_relationship("cards", "id", "transactions", "card_id")
    datetime_es.add_last_time_indexes()
    return datetime_es
def test_dfs_empty_features():
error_text = "No features can be generated from the specified primitives. Please make sure the primitives you are using are compatible with the variable types in your data."
teams = pd.DataFrame({"id": range(3), "name": ["Breakers", "Spirit", "Thorns"]})
games = pd.DataFrame(
{
"id": range(5),
"home_team_id": [2, 2, 1, 0, 1],
"away_team_id": [1, 0, 2, 1, 0],
"home_team_score": [3, 0, 1, 0, 4],
"away_team_score": [2, 1, 2, 0, 0],
},
)
dataframes = {
"teams": (teams, "id", None, {"name": "natural_language"}),
"games": (games, "id"),
}
relationships = [("teams", "id", "games", "home_team_id")]
with patch.object(DeepFeatureSynthesis, "build_features", return_value=[]):
features = dfs(
dataframes,
relationships,
target_dataframe_name="teams",
features_only=True,
)
assert features == []
with pytest.raises(AssertionError, match=error_text), patch.object(
DeepFeatureSynthesis,
"build_features",
return_value=[],
):
dfs(
dataframes,
relationships,
target_dataframe_name="teams",
features_only=False,
)
def test_passing_strings_to_logical_types_dfs():
teams = pd.DataFrame({"id": range(3), "name": ["Breakers", "Spirit", "Thorns"]})
games = pd.DataFrame(
{
"id": range(5),
"home_team_id": [2, 2, 1, 0, 1],
"away_team_id": [1, 0, 2, 1, 0],
"home_team_score": [3, 0, 1, 0, 4],
"away_team_score": [2, 1, 2, 0, 0],
},
)
dataframes = {
"teams": (teams, "id", None, {"name": "natural_language"}),
"games": (games, "id"),
}
relationships = [("teams", "id", "games", "home_team_id")]
features = dfs(
dataframes,
relationships,
target_dataframe_name="teams",
features_only=True,
)
name_logical_type = features[0].dataframe["name"].ww.logical_type
assert isinstance(name_logical_type, NaturalLanguage)
def test_accepts_cutoff_time_df(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3], "time": [10, 12, 15]})
feature_matrix, features = dfs(
dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
)
feature_matrix = to_pandas(feature_matrix, index="id", sort_index=True)
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
@pytest.mark.skipif("not dd")
def test_warns_cutoff_time_dask(dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3], "time": [10, 12, 15]})
cutoff_times_df = dd.from_pandas(cutoff_times_df, npartitions=2)
match = (
"cutoff_time should be a Pandas DataFrame: "
"computing cutoff_time, this may take a while"
)
with pytest.warns(UserWarning, match=match):
dfs(
dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
)
def test_accepts_cutoff_time_compose(dataframes, relationships):
    """dfs should accept a composeml-generated label times frame as cutoff_time."""

    def fraud_occurred(df):
        return df["fraud"].any()

    kwargs = {
        "time_index": "transaction_time",
        "labeling_function": fraud_occurred,
        "window_size": 1,
    }
    # composeml renamed the target argument in 0.10.0
    if parse(cp.__version__) >= parse("0.10.0"):
        kwargs["target_dataframe_index"] = "card_id"
    else:
        kwargs["target_dataframe_name"] = "card_id"  # pragma: no cover
    label_maker = cp.LabelMaker(**kwargs)
    transactions_df = to_pandas(dataframes["transactions"][0])
    labels = label_maker.search(transactions_df, num_examples_per_instance=-1)
    labels["time"] = pd.to_numeric(labels["time"])
    labels.rename({"card_id": "id"}, axis=1, inplace=True)

    feature_matrix, features = dfs(
        dataframes=dataframes,
        relationships=relationships,
        target_dataframe_name="cards",
        cutoff_time=labels,
    )
    feature_matrix = to_pandas(feature_matrix, index="id")
    assert len(feature_matrix.index) == 6
    # the label column rides along as one extra column
    assert len(feature_matrix.columns) == len(features) + 1
def test_accepts_single_cutoff_time(dataframes, relationships):
    """A scalar cutoff_time should be applied to every instance."""
    fm, feature_defs = dfs(
        dataframes=dataframes,
        relationships=relationships,
        target_dataframe_name="transactions",
        cutoff_time=20,
    )
    fm = to_pandas(fm, index="id")
    assert fm.shape == (5, len(feature_defs))
def test_accepts_no_cutoff_time(dataframes, relationships):
    """Without a cutoff_time, dfs computes features for the given instance ids."""
    fm, feature_defs = dfs(
        dataframes=dataframes,
        relationships=relationships,
        target_dataframe_name="transactions",
        instance_ids=[1, 2, 3, 5, 6],
    )
    fm = to_pandas(fm, index="id")
    assert fm.shape == (5, len(feature_defs))
def test_ignores_instance_ids_if_cutoff_df(dataframes, relationships):
    """When a cutoff_time frame is given, instance_ids should be ignored."""
    cutoffs = pd.DataFrame({"instance_id": [1, 2, 3], "time": [10, 12, 15]})
    fm, feature_defs = dfs(
        dataframes=dataframes,
        relationships=relationships,
        target_dataframe_name="transactions",
        cutoff_time=cutoffs,
        instance_ids=[1, 2, 3, 4, 5],
    )
    fm = to_pandas(fm, index="id")
    # only the three rows from the cutoff frame, not the five instance ids
    assert fm.shape == (3, len(feature_defs))
def test_approximate_features(pd_dataframes, relationships):
# TODO: Update to use Dask dataframes when issue #985 is closed
cutoff_times_df = pd.DataFrame(
{"instance_id": [1, 3, 1, 5, 3, 6], "time": [11, 16, 16, 26, 17, 22]},
)
# force column to BooleanNullable
pd_dataframes["transactions"] += ({"fraud": "BooleanNullable"},)
feature_matrix, features = dfs(
dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
approximate=5,
cutoff_time_in_index=True,
)
direct_agg_feat_name = "cards.PERCENT_TRUE(transactions.fraud)"
assert len(feature_matrix.index) == 6
assert len(feature_matrix.columns) == len(features)
truth_values = pd.Series(data=[1.0, 0.5, 0.5, 1.0, 0.5, 1.0])
assert (feature_matrix[direct_agg_feat_name] == truth_values.values).all()
def test_all_columns(pd_dataframes, relationships):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3], "time": [10, 12, 15]})
instance_ids = [1, 2, 3, 4, 5]
feature_matrix, features = dfs(
dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
instance_ids=instance_ids,
agg_primitives=[Max, Mean, Min, Sum],
trans_primitives=[],
groupby_trans_primitives=["cum_sum"],
max_depth=3,
allowed_paths=None,
ignore_dataframes=None,
ignore_columns=None,
seed_features=None,
)
assert len(feature_matrix.index) == 3
assert len(feature_matrix.columns) == len(features)
def test_features_only(dataframes, relationships):
if len(dataframes["transactions"]) > 3:
dataframes["transactions"][3]["fraud"] = "BooleanNullable"
else:
dataframes["transactions"] += ({"fraud": "BooleanNullable"},)
features = dfs(
dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
features_only=True,
)
# pandas creates 11 features
# dask creates 10 features (no skew)
# spark creates 9 features (no skew, no percent_true)
if isinstance(dataframes["transactions"][0], pd.DataFrame):
expected_features = 11
elif is_instance(dataframes["transactions"][0], dd, "DataFrame"):
expected_features = 10
else:
expected_features = 9
assert len(features) == expected_features
def test_accepts_relative_training_window(datetime_es):
# TODO: Update to use Dask dataframes when issue #882 is closed
feature_matrix, _ = dfs(entityset=datetime_es, target_dataframe_name="transactions")
feature_matrix_2, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"),
)
feature_matrix_3, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"),
training_window=Timedelta("3 months"),
)
feature_matrix_4, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-4-1 04:00"),
training_window="3 months",
)
assert (feature_matrix.index == [1, 2, 3, 4, 5]).all()
assert (feature_matrix_2.index == [1, 2, 3, 4]).all()
assert (feature_matrix_3.index == [2, 3, 4]).all()
assert (feature_matrix_4.index == [2, 3, 4]).all()
# Test case for leap years
feature_matrix_5, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-2-29 04:00"),
training_window=Timedelta("1 year"),
include_cutoff_time=True,
)
assert (feature_matrix_5.index == [2]).all()
feature_matrix_5, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-2-29 04:00"),
training_window=Timedelta("1 year"),
include_cutoff_time=False,
)
assert (feature_matrix_5.index == [1, 2]).all()
def test_accepts_pd_timedelta_training_window(datetime_es):
    """training_window may be given as a pandas Timedelta."""
    # TODO: Update to use Dask dataframes when issue #882 is closed
    window = pd.Timedelta(61, "D")
    fm, _ = dfs(
        entityset=datetime_es,
        target_dataframe_name="transactions",
        cutoff_time=pd.Timestamp("2012-3-31 04:00"),
        training_window=window,
    )
    assert list(fm.index) == [2, 3, 4]
def test_accepts_pd_dateoffset_training_window(datetime_es):
# TODO: Update to use Dask dataframes when issue #882 is closed
feature_matrix, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-3-31 04:00"),
training_window=pd.DateOffset(months=2),
)
feature_matrix_2, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.Timestamp("2012-3-31 04:00"),
training_window=pd.offsets.BDay(44),
)
assert (feature_matrix.index == [2, 3, 4]).all()
assert (feature_matrix.index == feature_matrix_2.index).all()
def test_accepts_datetime_and_string_offset(datetime_es):
feature_matrix, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time=pd.to_datetime("2012-3-31 04:00"),
training_window=pd.DateOffset(months=2),
)
feature_matrix_2, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time="2012-3-31 04:00",
training_window=pd.offsets.BDay(44),
)
assert (feature_matrix.index == [2, 3, 4]).all()
assert (feature_matrix.index == feature_matrix_2.index).all()
def test_handles_pandas_parser_error(datetime_es):
with pytest.raises(ValueError):
_, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time="2--012-----3-----31 04:00",
training_window=pd.DateOffset(months=2),
)
def test_handles_pandas_overflow_error(datetime_es):
# pandas 1.5.0 raises ValueError, older versions raised OverflowError
with pytest.raises((OverflowError, ValueError)):
_, _ = dfs(
entityset=datetime_es,
target_dataframe_name="transactions",
cutoff_time="200000000000000000000000000000000000000000000000000000000000000000-3-31 04:00",
training_window=pd.DateOffset(months=2),
)
def test_warns_with_unused_primitives(es):
    """Unused trans/agg primitives should raise UnusedPrimitiveWarning at
    max_depth=1 and raise nothing once max_depth allows them to be used."""
    import warnings

    if es.dataframe_type == Library.SPARK:
        pytest.skip("Spark throws extra warnings")
    trans_primitives = ["num_characters", "num_words", "add_numeric"]
    agg_primitives = [Max, "min"]
    warning_text = (
        "Some specified primitives were not used during DFS:\n"
        + " trans_primitives: ['add_numeric']\n agg_primitives: ['max', 'min']\n"
        + "This may be caused by a using a value of max_depth that is too small, not setting interesting values, "
        + "or it may indicate no compatible columns for the primitive were found in the data. If the DFS call "
        + "contained multiple instances of a primitive in the list above, none of them were used."
    )
    with pytest.warns(UnusedPrimitiveWarning) as record:
        dfs(
            entityset=es,
            target_dataframe_name="customers",
            trans_primitives=trans_primitives,
            agg_primitives=agg_primitives,
            max_depth=1,
            features_only=True,
        )
    assert record[0].message.args[0] == warning_text

    # Should not raise a warning. pytest.warns(None) is deprecated in pytest 7
    # and removed in pytest 8; record warnings with the stdlib instead.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        dfs(
            entityset=es,
            target_dataframe_name="customers",
            trans_primitives=trans_primitives,
            agg_primitives=agg_primitives,
            max_depth=2,
            features_only=True,
        )
    assert not record
def test_no_warns_with_camel_and_title_case(es):
    """Primitive names in camelCase or TitleCase should resolve without warnings."""
    import warnings

    for trans_primitive in ["isNull", "IsNull"]:
        # Should not raise a UnusedPrimitiveWarning warning.
        # pytest.warns(None) is deprecated in pytest 7 and removed in pytest 8;
        # record warnings with the stdlib instead.
        with warnings.catch_warnings(record=True) as record:
            warnings.simplefilter("always")
            dfs(
                entityset=es,
                target_dataframe_name="customers",
                trans_primitives=[trans_primitive],
                max_depth=1,
                features_only=True,
            )
        assert not record

    for agg_primitive in ["numUnique", "NumUnique"]:
        # Should not raise a UnusedPrimitiveWarning warning
        with warnings.catch_warnings(record=True) as record:
            warnings.simplefilter("always")
            dfs(
                entityset=es,
                target_dataframe_name="customers",
                agg_primitives=[agg_primitive],
                max_depth=2,
                features_only=True,
            )
        assert not record
def test_does_not_warn_with_stacking_feature(pd_es):
    """Stacking a transform on top of an aggregation should not emit warnings."""
    import warnings

    # pytest.warns(None) is deprecated in pytest 7 and removed in pytest 8;
    # record warnings with the stdlib instead.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        dfs(
            entityset=pd_es,
            target_dataframe_name="régions",
            agg_primitives=["percent_true"],
            trans_primitives=[GreaterThanScalar(5)],
            primitive_options={
                "greater_than_scalar": {"include_dataframes": ["stores"]},
            },
            features_only=True,
        )
    assert not record
def test_warns_with_unused_where_primitives(es):
if es.dataframe_type == Library.SPARK:
pytest.skip("Spark throws extra warnings")
warning_text = (
"Some specified primitives were not used during DFS:\n"
+ " where_primitives: ['count', 'sum']\n"
+ "This may be caused by a using a value of max_depth that is too small, not setting interesting values, "
+ "or it may indicate no compatible columns for the primitive were found in the data. If the DFS call "
+ "contained multiple instances of a primitive in the list above, none of them were used."
)
with pytest.warns(UnusedPrimitiveWarning) as record:
dfs(
entityset=es,
target_dataframe_name="customers",
agg_primitives=["count"],
where_primitives=["sum", "count"],
max_depth=1,
features_only=True,
)
assert record[0].message.args[0] == warning_text
def test_warns_with_unused_groupby_primitives(pd_es):
    """An inapplicable groupby primitive should warn on one target and not on
    another where it applies."""
    import warnings

    warning_text = (
        "Some specified primitives were not used during DFS:\n"
        + " groupby_trans_primitives: ['cum_sum']\n"
        + "This may be caused by a using a value of max_depth that is too small, not setting interesting values, "
        + "or it may indicate no compatible columns for the primitive were found in the data. If the DFS call "
        + "contained multiple instances of a primitive in the list above, none of them were used."
    )
    with pytest.warns(UnusedPrimitiveWarning) as record:
        dfs(
            entityset=pd_es,
            target_dataframe_name="sessions",
            groupby_trans_primitives=["cum_sum"],
            max_depth=1,
            features_only=True,
        )
    assert record[0].message.args[0] == warning_text

    # Should not raise a warning. pytest.warns(None) is deprecated in pytest 7
    # and removed in pytest 8; record warnings with the stdlib instead.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        dfs(
            entityset=pd_es,
            target_dataframe_name="customers",
            groupby_trans_primitives=["cum_sum"],
            max_depth=1,
            features_only=True,
        )
    assert not record
def test_warns_with_unused_custom_primitives(pd_es):
    """Custom primitives that cannot be applied should raise UnusedPrimitiveWarning,
    and should be silent on targets where they apply."""
    import warnings

    class AboveTen(TransformPrimitive):
        name = "above_ten"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})

    trans_primitives = [AboveTen]
    warning_text = (
        "Some specified primitives were not used during DFS:\n"
        + " trans_primitives: ['above_ten']\n"
        + "This may be caused by a using a value of max_depth that is too small, not setting interesting values, "
        + "or it may indicate no compatible columns for the primitive were found in the data. If the DFS call "
        + "contained multiple instances of a primitive in the list above, none of them were used."
    )
    with pytest.warns(UnusedPrimitiveWarning) as record:
        dfs(
            entityset=pd_es,
            target_dataframe_name="sessions",
            trans_primitives=trans_primitives,
            max_depth=1,
            features_only=True,
        )
    assert record[0].message.args[0] == warning_text

    # Should not raise a warning. pytest.warns(None) is deprecated in pytest 7
    # and removed in pytest 8; use the stdlib recorder. The original code also
    # never asserted on this recorder, making the check vacuous -- assert that
    # no warnings were emitted.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        dfs(
            entityset=pd_es,
            target_dataframe_name="customers",
            trans_primitives=trans_primitives,
            max_depth=1,
            features_only=True,
        )
    assert not record

    class MaxAboveTen(AggregationPrimitive):
        name = "max_above_ten"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})

    agg_primitives = [MaxAboveTen]
    warning_text = (
        "Some specified primitives were not used during DFS:\n"
        + " agg_primitives: ['max_above_ten']\n"
        + "This may be caused by a using a value of max_depth that is too small, not setting interesting values, "
        + "or it may indicate no compatible columns for the primitive were found in the data. If the DFS call "
        + "contained multiple instances of a primitive in the list above, none of them were used."
    )
    with pytest.warns(UnusedPrimitiveWarning) as record:
        dfs(
            entityset=pd_es,
            target_dataframe_name="stores",
            agg_primitives=agg_primitives,
            max_depth=1,
            features_only=True,
        )
    assert record[0].message.args[0] == warning_text

    # Should not raise a warning (see note above about pytest.warns(None)
    # and the previously missing assertion).
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        dfs(
            entityset=pd_es,
            target_dataframe_name="sessions",
            agg_primitives=agg_primitives,
            max_depth=1,
            features_only=True,
        )
    assert not record
def test_calls_progress_callback(dataframes, relationships):
class MockProgressCallback:
def __init__(self):
self.progress_history = []
self.total_update = 0
self.total_progress_percent = 0
def __call__(self, update, progress_percent, time_elapsed):
self.total_update += update
self.total_progress_percent = progress_percent
self.progress_history.append(progress_percent)
mock_progress_callback = MockProgressCallback()
dfs(
dataframes=dataframes,
relationships=relationships,
target_dataframe_name="transactions",
progress_callback=mock_progress_callback,
)
# second to last entry is the last update from feature calculation
assert np.isclose(
mock_progress_callback.progress_history[-2],
FEATURE_CALCULATION_PERCENTAGE * 100,
)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
def test_calls_progress_callback_cluster(pd_dataframes, relationships, dask_cluster):
class MockProgressCallback:
def __init__(self):
self.progress_history = []
self.total_update = 0
self.total_progress_percent = 0
def __call__(self, update, progress_percent, time_elapsed):
self.total_update += update
self.total_progress_percent = progress_percent
self.progress_history.append(progress_percent)
mock_progress_callback = MockProgressCallback()
dkwargs = {"cluster": dask_cluster.scheduler.address}
dfs(
dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
progress_callback=mock_progress_callback,
dask_kwargs=dkwargs,
)
assert np.isclose(mock_progress_callback.total_update, 100.0)
assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
def test_dask_kwargs(pd_dataframes, relationships, dask_cluster):
cutoff_times_df = pd.DataFrame({"instance_id": [1, 2, 3], "time": [10, 12, 15]})
feature_matrix, features = dfs(
dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
)
dask_kwargs = {"cluster": dask_cluster.scheduler.address}
feature_matrix_2, features_2 = dfs(
dataframes=pd_dataframes,
relationships=relationships,
target_dataframe_name="transactions",
cutoff_time=cutoff_times_df,
dask_kwargs=dask_kwargs,
)
assert all(
f1.unique_name() == f2.unique_name() for f1, f2 in zip(features, features_2)
)
for column in feature_matrix:
for x, y in zip(feature_matrix[column], feature_matrix_2[column]):
assert (pd.isnull(x) and pd.isnull(y)) or (x == y)
| 24,820 | 33.425798 | 177 | py |
featuretools | featuretools-main/featuretools/tests/synthesis/test_dask_dfs.py | import pandas as pd
import pytest
from woodwork.logical_types import (
Datetime,
Double,
Integer,
IntegerNullable,
NaturalLanguage,
)
from featuretools import dfs
from featuretools.entityset import EntitySet
from featuretools.utils.gen_utils import import_or_none
dd = import_or_none("dask.dataframe")
@pytest.mark.skipif("not dd")
def test_single_table_dask_entityset():
primitives_list = [
"absolute",
"is_weekend",
"year",
"day",
"num_characters",
"num_words",
]
dask_es = EntitySet(id="dask_es")
df = pd.DataFrame(
{
"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [
pd.to_datetime("2019-01-10"),
pd.to_datetime("2019-02-03"),
pd.to_datetime("2019-01-01"),
pd.to_datetime("2017-08-25"),
],
"strings": ["I am a string", "23", "abcdef ghijk", ""],
},
)
values_dd = dd.from_pandas(df, npartitions=2)
ltypes = {"values": Integer, "dates": Datetime, "strings": NaturalLanguage}
dask_es.add_dataframe(
dataframe_name="data",
dataframe=values_dd,
index="id",
logical_types=ltypes,
)
dask_fm, _ = dfs(
entityset=dask_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
)
pd_es = EntitySet(id="pd_es")
pd_es.add_dataframe(
dataframe_name="data",
dataframe=df,
index="id",
logical_types=ltypes,
)
fm, _ = dfs(
entityset=pd_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
)
# Use the same columns and make sure both indexes are sorted the same
# update the type of the future index column so it doesn't conflict with the pandas fm
dask_fm = dask_fm.compute().astype({"id": "int64"})
dask_computed_fm = dask_fm.set_index("id").loc[fm.index][fm.columns]
pd.testing.assert_frame_equal(fm, dask_computed_fm, check_dtype=False)
@pytest.mark.skipif("not dd")
def test_single_table_dask_entityset_ids_not_sorted():
primitives_list = [
"absolute",
"is_weekend",
"year",
"day",
"num_characters",
"num_words",
]
dask_es = EntitySet(id="dask_es")
df = pd.DataFrame(
{
"id": [2, 0, 1, 3],
"values": [1, 12, -34, 27],
"dates": [
pd.to_datetime("2019-01-10"),
pd.to_datetime("2019-02-03"),
pd.to_datetime("2019-01-01"),
pd.to_datetime("2017-08-25"),
],
"strings": ["I am a string", "23", "abcdef ghijk", ""],
},
)
values_dd = dd.from_pandas(df, npartitions=2)
ltypes = {"values": Integer, "dates": Datetime, "strings": NaturalLanguage}
dask_es.add_dataframe(
dataframe_name="data",
dataframe=values_dd,
index="id",
logical_types=ltypes,
)
dask_fm, _ = dfs(
entityset=dask_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
)
pd_es = EntitySet(id="pd_es")
pd_es.add_dataframe(
dataframe_name="data",
dataframe=df,
index="id",
logical_types=ltypes,
)
fm, _ = dfs(
entityset=pd_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
)
# Make sure both indexes are sorted the same
dask_fm = dask_fm.compute().astype({"id": "int64"})
pd.testing.assert_frame_equal(
fm,
dask_fm.set_index("id").loc[fm.index],
check_dtype=False,
)
@pytest.mark.skipif("not dd")
def test_single_table_dask_entityset_with_instance_ids():
primitives_list = [
"absolute",
"is_weekend",
"year",
"day",
"num_characters",
"num_words",
]
instance_ids = [0, 1, 3]
dask_es = EntitySet(id="dask_es")
df = pd.DataFrame(
{
"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [
pd.to_datetime("2019-01-10"),
pd.to_datetime("2019-02-03"),
pd.to_datetime("2019-01-01"),
pd.to_datetime("2017-08-25"),
],
"strings": ["I am a string", "23", "abcdef ghijk", ""],
},
)
values_dd = dd.from_pandas(df, npartitions=2)
ltypes = {"values": Integer, "dates": Datetime, "strings": NaturalLanguage}
dask_es.add_dataframe(
dataframe_name="data",
dataframe=values_dd,
index="id",
logical_types=ltypes,
)
dask_fm, _ = dfs(
entityset=dask_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
instance_ids=instance_ids,
)
pd_es = EntitySet(id="pd_es")
pd_es.add_dataframe(
dataframe_name="data",
dataframe=df,
index="id",
logical_types=ltypes,
)
fm, _ = dfs(
entityset=pd_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
instance_ids=instance_ids,
)
# Make sure both indexes are sorted the same
dask_fm = dask_fm.compute().astype({"id": "int64"})
pd.testing.assert_frame_equal(
fm,
dask_fm.set_index("id").loc[fm.index],
check_dtype=False,
)
@pytest.mark.skipif("not dd")
def test_single_table_dask_entityset_single_cutoff_time():
primitives_list = [
"absolute",
"is_weekend",
"year",
"day",
"num_characters",
"num_words",
]
dask_es = EntitySet(id="dask_es")
df = pd.DataFrame(
{
"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27],
"dates": [
pd.to_datetime("2019-01-10"),
pd.to_datetime("2019-02-03"),
pd.to_datetime("2019-01-01"),
pd.to_datetime("2017-08-25"),
],
"strings": ["I am a string", "23", "abcdef ghijk", ""],
},
)
values_dd = dd.from_pandas(df, npartitions=2)
ltypes = {"values": Integer, "dates": Datetime, "strings": NaturalLanguage}
dask_es.add_dataframe(
dataframe_name="data",
dataframe=values_dd,
index="id",
logical_types=ltypes,
)
dask_fm, _ = dfs(
entityset=dask_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
cutoff_time=pd.Timestamp("2019-01-05 04:00"),
)
pd_es = EntitySet(id="pd_es")
pd_es.add_dataframe(
dataframe_name="data",
dataframe=df,
index="id",
logical_types=ltypes,
)
fm, _ = dfs(
entityset=pd_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
cutoff_time=pd.Timestamp("2019-01-05 04:00"),
)
# Make sure both indexes are sorted the same
dask_fm = dask_fm.compute().astype({"id": "int64"})
pd.testing.assert_frame_equal(
fm,
dask_fm.set_index("id").loc[fm.index],
check_dtype=False,
)
@pytest.mark.skipif("not dd")
def test_single_table_dask_entityset_cutoff_time_df():
primitives_list = [
"absolute",
"is_weekend",
"year",
"day",
"num_characters",
"num_words",
]
dask_es = EntitySet(id="dask_es")
df = pd.DataFrame(
{
"id": [0, 1, 2],
"values": [1, 12, -34],
"dates": [
pd.to_datetime("2019-01-10"),
pd.to_datetime("2019-02-03"),
pd.to_datetime("2019-01-01"),
],
"strings": ["I am a string", "23", "abcdef ghijk"],
},
)
values_dd = dd.from_pandas(df, npartitions=2)
ltypes = {"values": IntegerNullable, "dates": Datetime, "strings": NaturalLanguage}
dask_es.add_dataframe(
dataframe_name="data",
dataframe=values_dd,
index="id",
time_index="dates",
logical_types=ltypes,
)
ids = [0, 1, 2, 0]
times = [
pd.Timestamp("2019-01-05 04:00"),
pd.Timestamp("2019-01-05 04:00"),
pd.Timestamp("2019-01-05 04:00"),
pd.Timestamp("2019-01-15 04:00"),
]
labels = [True, False, True, False]
cutoff_times = pd.DataFrame(
{"id": ids, "time": times, "labels": labels},
columns=["id", "time", "labels"],
)
dask_fm, _ = dfs(
entityset=dask_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
cutoff_time=cutoff_times,
)
pd_es = EntitySet(id="pd_es")
pd_es.add_dataframe(
dataframe_name="data",
dataframe=df,
index="id",
time_index="dates",
logical_types=ltypes,
)
fm, _ = dfs(
entityset=pd_es,
target_dataframe_name="data",
trans_primitives=primitives_list,
cutoff_time=cutoff_times,
)
# Because row ordering with Dask is not guaranteed, we need to sort on two columns to make sure that values
# for instance id 0 are compared correctly. Also, make sure the index column has the same dtype.
fm = fm.sort_values(["id", "labels"])
dask_fm = dask_fm.compute().astype({"id": "int64"})
dask_fm = dask_fm.set_index("id").sort_values(["id", "labels"])
pd.testing.assert_frame_equal(fm, dask_fm, check_dtype=False)
@pytest.mark.skipif("not dd")
def test_single_table_dask_entityset_dates_not_sorted():
    """A Dask EntitySet whose time index is not pre-sorted should still match
    the pandas result."""
    dask_es = EntitySet(id="dask_es")
    df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "values": [1, 12, -34, 27],
            "dates": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
        },
    )
    primitives_list = ["absolute", "is_weekend", "year", "day"]
    values_dd = dd.from_pandas(df, npartitions=1)
    ltypes = {
        "values": Integer,
        "dates": Datetime,
    }
    dask_es.add_dataframe(
        dataframe_name="data",
        dataframe=values_dd,
        index="id",
        time_index="dates",
        logical_types=ltypes,
    )
    dask_fm, _ = dfs(
        entityset=dask_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        max_depth=1,
    )
    pd_es = EntitySet(id="pd_es")
    pd_es.add_dataframe(
        dataframe_name="data",
        dataframe=df,
        index="id",
        time_index="dates",
        logical_types=ltypes,
    )
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        max_depth=1,
    )
    dask_fm = dask_fm.compute().astype({"id": "int64"})
    pd.testing.assert_frame_equal(
        fm,
        dask_fm.set_index("id").loc[fm.index],
        check_dtype=False,
    )
@pytest.mark.skipif("not dd")
def test_dask_entityset_secondary_time_index():
    """A secondary time index on a Dask EntitySet should yield the same feature
    matrix as the equivalent pandas EntitySet."""
    log_df = pd.DataFrame()
    log_df["id"] = [0, 1, 2, 3]
    log_df["scheduled_time"] = pd.to_datetime(
        ["2019-01-01", "2019-01-01", "2019-01-01", "2019-01-01"],
    )
    log_df["departure_time"] = pd.to_datetime(
        [
            "2019-02-01 09:00",
            "2019-02-06 10:00",
            "2019-02-12 10:00",
            "2019-03-01 11:30",
        ],
    )
    log_df["arrival_time"] = pd.to_datetime(
        [
            "2019-02-01 11:23",
            "2019-02-06 12:45",
            "2019-02-12 13:53",
            "2019-03-01 14:07",
        ],
    )
    log_df["delay"] = [-2, 10, 60, 0]
    log_df["flight_id"] = [0, 1, 0, 1]
    log_dask = dd.from_pandas(log_df, npartitions=2)
    flights_df = pd.DataFrame()
    flights_df["id"] = [0, 1, 2, 3]
    flights_df["origin"] = ["BOS", "LAX", "BOS", "LAX"]
    flights_dask = dd.from_pandas(flights_df, npartitions=2)
    pd_es = EntitySet("flights")
    dask_es = EntitySet("flights_dask")
    log_ltypes = {
        "scheduled_time": Datetime,
        "departure_time": Datetime,
        "arrival_time": Datetime,
        "delay": Double,
    }
    pd_es.add_dataframe(
        dataframe_name="logs",
        dataframe=log_df,
        index="id",
        time_index="scheduled_time",
        secondary_time_index={"arrival_time": ["departure_time", "delay"]},
        logical_types=log_ltypes,
    )
    dask_es.add_dataframe(
        dataframe_name="logs",
        dataframe=log_dask,
        index="id",
        logical_types=log_ltypes,
        semantic_tags={"flight_id": "foreign_key"},
        time_index="scheduled_time",
        secondary_time_index={"arrival_time": ["departure_time", "delay"]},
    )
    pd_es.add_dataframe(dataframe_name="flights", dataframe=flights_df, index="id")
    flights_ltypes = pd_es["flights"].ww.logical_types
    dask_es.add_dataframe(
        dataframe_name="flights",
        dataframe=flights_dask,
        index="id",
        logical_types=flights_ltypes,
    )
    pd_es.add_relationship("flights", "id", "logs", "flight_id")
    dask_es.add_relationship("flights", "id", "logs", "flight_id")
    cutoff_df = pd.DataFrame()
    cutoff_df["id"] = [0, 1, 1]
    cutoff_df["time"] = pd.to_datetime(["2019-02-02", "2019-02-02", "2019-02-20"])
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="logs",
        cutoff_time=cutoff_df,
        agg_primitives=["max"],
        trans_primitives=["month"],
    )
    dask_fm, _ = dfs(
        entityset=dask_es,
        target_dataframe_name="logs",
        cutoff_time=cutoff_df,
        agg_primitives=["max"],
        trans_primitives=["month"],
    )
    # Make sure both matrices are sorted the same
    # Also need to account for index differences
    dask_fm_computed = dask_fm.compute().astype({"id": "int64"}).set_index("id")
    pd.testing.assert_frame_equal(
        fm.sort_values("delay"),
        dask_fm_computed.sort_values("delay"),
        check_dtype=False,
    )
| 14,056 | 26.401559 | 111 | py |
featuretools | featuretools-main/featuretools/tests/synthesis/test_encode_features.py | import pandas as pd
import pytest
from featuretools import EntitySet, calculate_feature_matrix, dfs
from featuretools.feature_base import Feature, IdentityFeature
from featuretools.primitives import NMostCommon
from featuretools.synthesis import encode_features
def test_encodes_features(pd_es):
    """Encoding should respect both the top_n limit and include_unknown."""
    feature_list = [
        IdentityFeature(pd_es["log"].ww[column])
        for column in ("product_id", "purchased", "value")
    ]
    matrix = calculate_feature_matrix(
        feature_list,
        pd_es,
        instance_ids=[0, 1, 2, 3, 4, 5],
    )
    # Default settings: categorical values expand plus an "unknown" column.
    _, encoded_defs = encode_features(matrix, feature_list)
    assert len(encoded_defs) == 6
    # Restricting top_n trims the encoded column count.
    _, encoded_defs = encode_features(matrix, feature_list, top_n=2)
    assert len(encoded_defs) == 5
    # Dropping the unknown column also reduces the count.
    _, encoded_defs = encode_features(
        matrix,
        feature_list,
        include_unknown=False,
    )
    assert len(encoded_defs) == 5
def test_dask_errors_encode_features(dask_es):
    """encode_features must reject a non-pandas (Dask) feature matrix."""
    feature_list = [
        IdentityFeature(dask_es["log"].ww[column])
        for column in ("product_id", "purchased", "value")
    ]
    matrix = calculate_feature_matrix(
        feature_list,
        dask_es,
        instance_ids=[0, 1, 2, 3, 4, 5],
    )
    with pytest.raises(TypeError, match="feature_matrix must be a Pandas DataFrame"):
        encode_features(matrix, feature_list)
def test_inplace_encodes_features(pd_es):
    """inplace=True should mutate the supplied feature matrix directly."""
    feature_list = [IdentityFeature(pd_es["log"].ww["product_id"])]
    matrix = calculate_feature_matrix(
        feature_list,
        pd_es,
        instance_ids=[0, 1, 2, 3, 4, 5],
    )
    original_shape = matrix.shape
    # Without inplace, the original matrix must be left untouched.
    encoded, _ = encode_features(matrix, feature_list)
    assert encoded.shape != original_shape
    assert matrix.shape == original_shape
    # With inplace, the input matrix itself is transformed.
    encoded, _ = encode_features(matrix, feature_list, inplace=True)
    assert encoded.shape == matrix.shape
def test_to_encode_features(pd_es):
    """The to_encode argument should restrict which features are one-hot
    encoded, leaving other columns with their original dtypes."""
    f1 = IdentityFeature(pd_es["log"].ww["product_id"])
    f2 = IdentityFeature(pd_es["log"].ww["value"])
    f3 = IdentityFeature(pd_es["log"].ww["datetime"])
    features = [f1, f2, f3]
    feature_matrix = calculate_feature_matrix(
        features,
        pd_es,
        instance_ids=[0, 1, 2, 3, 4, 5],
    )
    feature_matrix_encoded, _ = encode_features(feature_matrix, features)
    feature_matrix_encoded_shape = feature_matrix_encoded.shape
    # to_encode should keep product_id as a string and datetime as a date,
    # and not have the same shape as previous encoded matrix due to fewer encoded features
    to_encode = []
    feature_matrix_encoded, _ = encode_features(
        feature_matrix,
        features,
        to_encode=to_encode,
    )
    assert feature_matrix_encoded_shape != feature_matrix_encoded.shape
    assert feature_matrix_encoded["datetime"].dtype == "datetime64[ns]"
    assert feature_matrix_encoded["product_id"].dtype == "category"
    to_encode = ["value"]
    feature_matrix_encoded, _ = encode_features(
        feature_matrix,
        features,
        to_encode=to_encode,
    )
    assert feature_matrix_encoded_shape != feature_matrix_encoded.shape
    assert feature_matrix_encoded["datetime"].dtype == "datetime64[ns]"
    assert feature_matrix_encoded["product_id"].dtype == "category"
def test_encode_features_handles_pass_columns(pd_es):
    """Extra pass-through columns from the cutoff time frame (e.g. "label")
    should survive encoding untouched."""
    f1 = IdentityFeature(pd_es["log"].ww["product_id"])
    f2 = IdentityFeature(pd_es["log"].ww["value"])
    features = [f1, f2]
    cutoff_time = pd.DataFrame(
        {
            "instance_id": range(6),
            "time": pd_es["log"]["datetime"][0:6],
            "label": [i % 2 for i in range(6)],
        },
        columns=["instance_id", "time", "label"],
    )
    feature_matrix = calculate_feature_matrix(features, pd_es, cutoff_time)
    assert "label" in feature_matrix.columns
    feature_matrix_encoded, _ = encode_features(feature_matrix, features)
    feature_matrix_encoded_shape = feature_matrix_encoded.shape
    # to_encode should keep product_id as a string, and not create 3 additional columns
    to_encode = []
    feature_matrix_encoded, _ = encode_features(
        feature_matrix,
        features,
        to_encode=to_encode,
    )
    assert feature_matrix_encoded_shape != feature_matrix_encoded.shape
    to_encode = ["value"]
    feature_matrix_encoded, _ = encode_features(
        feature_matrix,
        features,
        to_encode=to_encode,
    )
    assert feature_matrix_encoded_shape != feature_matrix_encoded.shape
    assert "label" in feature_matrix_encoded.columns
def test_encode_features_catches_features_mismatch(pd_es):
    """Passing a feature not present in the matrix must raise AssertionError."""
    product_feat = IdentityFeature(pd_es["log"].ww["product_id"])
    value_feat = IdentityFeature(pd_es["log"].ww["value"])
    session_feat = IdentityFeature(pd_es["log"].ww["session_id"])
    cutoff_time = pd.DataFrame(
        {
            "instance_id": range(6),
            "time": pd_es["log"]["datetime"][0:6],
            "label": [i % 2 for i in range(6)],
        },
        columns=["instance_id", "time", "label"],
    )
    # The matrix is built without session_id on purpose.
    matrix = calculate_feature_matrix([product_feat, value_feat], pd_es, cutoff_time)
    assert "label" in matrix.columns
    with pytest.raises(
        AssertionError,
        match="Feature session_id not found in feature matrix",
    ):
        encode_features(matrix, [product_feat, session_feat])
def test_encode_unknown_features():
    """A literal "unknown" category value must coexist with the synthetic
    "is unknown" column produced by the encoder."""
    frame = pd.DataFrame({"category": ["unknown", "b", "c", "d", "e"]}).astype(
        {"category": "category"},
    )
    entity_set = EntitySet("test")
    entity_set.add_dataframe(
        dataframe_name="a",
        dataframe=frame,
        index="index",
        make_index=True,
    )
    matrix, defs = dfs(entityset=entity_set, target_dataframe_name="a")
    # Specify unknown token for replacement
    encoded, _ = encode_features(matrix, defs, include_unknown=True)
    expected_columns = [
        "category = unknown",
        "category = e",
        "category = d",
        "category = c",
        "category = b",
        "category is unknown",
    ]
    assert list(encoded.columns) == expected_columns
def test_encode_features_topn(pd_es):
    """Multi-output NMostCommon features should encode to exactly one column
    per expected output name, with no duplicates."""
    topn = Feature(
        Feature(pd_es["log"].ww["product_id"]),
        parent_dataframe_name="customers",
        primitive=NMostCommon(n=3),
    )
    features, feature_defs = dfs(
        entityset=pd_es,
        instance_ids=[0, 1, 2],
        target_dataframe_name="customers",
        agg_primitives=[NMostCommon(n=3)],
    )
    features_enc, feature_defs_enc = encode_features(
        features,
        feature_defs,
        include_unknown=True,
    )
    assert topn.unique_name() in [feat.unique_name() for feat in feature_defs_enc]
    for name in topn.get_feature_names():
        assert name in features_enc.columns
        assert features_enc.columns.tolist().count(name) == 1
def test_encode_features_drop_first():
    """drop_first should omit one dummy column per encoded categorical."""
    frame = pd.DataFrame({"category": ["ao", "b", "c", "d", "e"]}).astype(
        {"category": "category"},
    )
    entity_set = EntitySet("test")
    entity_set.add_dataframe(
        dataframe_name="a",
        dataframe=frame,
        index="index",
        make_index=True,
    )
    matrix, defs = dfs(entityset=entity_set, target_dataframe_name="a")
    # Five categories encode to four columns once the first is dropped.
    encoded, _ = encode_features(
        matrix,
        defs,
        drop_first=True,
        include_unknown=False,
    )
    assert len(encoded.columns) == 4
    # top_n=3 combined with drop_first leaves two columns.
    encoded, defs = encode_features(
        matrix,
        defs,
        top_n=3,
        drop_first=True,
        include_unknown=False,
    )
    assert len(encoded.columns) == 2
def test_encode_features_handles_dictionary_input(pd_es):
    """top_n may be a dict mapping feature names to per-feature limits; an
    empty dict behaves like the default."""
    f1 = IdentityFeature(pd_es["log"].ww["product_id"])
    f2 = IdentityFeature(pd_es["log"].ww["purchased"])
    f3 = IdentityFeature(pd_es["log"].ww["session_id"])
    features = [f1, f2, f3]
    feature_matrix = calculate_feature_matrix(features, pd_es, instance_ids=range(16))
    feature_matrix_encoded, features_encoded = encode_features(feature_matrix, features)
    true_values = [
        "product_id = coke zero",
        "product_id = toothpaste",
        "product_id = car",
        "product_id = brown bag",
        "product_id = taco clock",
        "product_id = Haribo sugar-free gummy bears",
        "product_id is unknown",
        "purchased",
        "session_id = 0",
        "session_id = 1",
        "session_id = 4",
        "session_id = 3",
        "session_id = 5",
        "session_id = 2",
        "session_id is unknown",
    ]
    assert len(features_encoded) == 15
    for col in true_values:
        assert col in list(feature_matrix_encoded.columns)
    # An empty dict behaves the same as no top_n limit.
    top_n_dict = {}
    feature_matrix_encoded, features_encoded = encode_features(
        feature_matrix,
        features,
        top_n=top_n_dict,
    )
    assert len(features_encoded) == 15
    for col in true_values:
        assert col in list(feature_matrix_encoded.columns)
    # Per-feature limits trim each feature independently.
    top_n_dict = {f1.get_name(): 4, f3.get_name(): 3}
    feature_matrix_encoded, features_encoded = encode_features(
        feature_matrix,
        features,
        top_n=top_n_dict,
    )
    assert len(features_encoded) == 10
    true_values = [
        "product_id = coke zero",
        "product_id = toothpaste",
        "product_id = car",
        "product_id = brown bag",
        "product_id is unknown",
        "purchased",
        "session_id = 0",
        "session_id = 1",
        "session_id = 4",
        "session_id is unknown",
    ]
    for col in true_values:
        assert col in list(feature_matrix_encoded.columns)
    # Dropping the unknown columns removes one column per limited feature.
    feature_matrix_encoded, features_encoded = encode_features(
        feature_matrix,
        features,
        top_n=top_n_dict,
        include_unknown=False,
    )
    true_values = [
        "product_id = coke zero",
        "product_id = toothpaste",
        "product_id = car",
        "product_id = brown bag",
        "purchased",
        "session_id = 0",
        "session_id = 1",
        "session_id = 4",
    ]
    assert len(features_encoded) == 8
    for col in true_values:
        assert col in list(feature_matrix_encoded.columns)
def test_encode_features_matches_calculate_feature_matrix():
    """Recalculating the encoded feature defs should reproduce the encoded
    matrix exactly, including its Woodwork schema."""
    frame = pd.DataFrame({"category": ["b", "c", "d", "e"]}).astype(
        {"category": "category"},
    )
    entity_set = EntitySet("test")
    entity_set.add_dataframe(
        dataframe_name="a",
        dataframe=frame,
        index="index",
        make_index=True,
    )
    matrix, defs = dfs(entityset=entity_set, target_dataframe_name="a")
    encoded_matrix, encoded_defs = encode_features(
        matrix,
        defs,
        to_encode=["category"],
    )
    recalculated = calculate_feature_matrix(encoded_defs, entityset=entity_set)
    pd.testing.assert_frame_equal(encoded_matrix, recalculated)
    assert recalculated.ww._schema == encoded_matrix.ww._schema
| 11,258 | 30.188366 | 90 | py |
featuretools | featuretools-main/featuretools/tests/synthesis/test_spark_dfs.py | import pandas as pd
import pytest
from woodwork.logical_types import (
Datetime,
Double,
Integer,
IntegerNullable,
NaturalLanguage,
)
from featuretools import dfs
from featuretools.entityset import EntitySet
from featuretools.utils.gen_utils import import_or_none
ps = import_or_none("pyspark.pandas")
@pytest.mark.skipif("not ps")
def test_single_table_spark_entityset():
    """DFS on a single-table Spark EntitySet should match the pandas result."""
    primitives_list = [
        "absolute",
        "is_weekend",
        "year",
        "day",
        "num_characters",
        "num_words",
    ]
    spark_es = EntitySet(id="spark_es")
    df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "values": [1, 12, -34, 27],
            "dates": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
            "strings": ["I am a string", "23", "abcdef ghijk", ""],
        },
    )
    values_dd = ps.from_pandas(df)
    ltypes = {"values": Integer, "dates": Datetime, "strings": NaturalLanguage}
    spark_es.add_dataframe(
        dataframe_name="data",
        dataframe=values_dd,
        index="id",
        logical_types=ltypes,
    )
    spark_fm, _ = dfs(
        entityset=spark_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
    )
    pd_es = EntitySet(id="pd_es")
    pd_es.add_dataframe(
        dataframe_name="data",
        dataframe=df,
        index="id",
        logical_types=ltypes,
    )
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
    )
    spark_fm = spark_fm.to_pandas().astype({"id": "int64"})
    spark_computed_fm = spark_fm.set_index("id").loc[fm.index][fm.columns]
    # Spark dtypes are different for categorical - set the pandas fm to have the same dtypes before comparing
    pd.testing.assert_frame_equal(
        fm.astype(spark_computed_fm.dtypes),
        spark_computed_fm,
    )
@pytest.mark.skipif("not ps")
def test_single_table_spark_entityset_ids_not_sorted():
    """An unsorted index column on a Spark EntitySet should still produce the
    same feature matrix as pandas."""
    primitives_list = [
        "absolute",
        "is_weekend",
        "year",
        "day",
        "num_characters",
        "num_words",
    ]
    spark_es = EntitySet(id="spark_es")
    df = pd.DataFrame(
        {
            "id": [2, 0, 1, 3],
            "values": [1, 12, -34, 27],
            "dates": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
            "strings": ["I am a string", "23", "abcdef ghijk", ""],
        },
    )
    values_dd = ps.from_pandas(df)
    ltypes = {
        "values": Integer,
        "dates": Datetime,
        "strings": NaturalLanguage,
    }
    spark_es.add_dataframe(
        dataframe_name="data",
        dataframe=values_dd,
        index="id",
        logical_types=ltypes,
    )
    spark_fm, _ = dfs(
        entityset=spark_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
    )
    pd_es = EntitySet(id="pd_es")
    pd_es.add_dataframe(
        dataframe_name="data",
        dataframe=df,
        index="id",
        logical_types=ltypes,
    )
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
    )
    spark_fm = spark_fm.to_pandas().astype({"id": "int64"})
    spark_computed_fm = spark_fm.set_index("id").loc[fm.index]
    # Spark dtypes are different for categorical - set the pandas fm to have the same dtypes before comparing
    pd.testing.assert_frame_equal(
        fm.astype(spark_computed_fm.dtypes),
        spark_computed_fm,
    )
@pytest.mark.skipif("not ps")
def test_single_table_spark_entityset_with_instance_ids():
    """Restricting DFS to specific instance ids should work identically on a
    Spark EntitySet and a pandas EntitySet."""
    primitives_list = [
        "absolute",
        "is_weekend",
        "year",
        "day",
        "num_characters",
        "num_words",
    ]
    instance_ids = [0, 1, 3]
    spark_es = EntitySet(id="spark_es")
    df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "values": [1, 12, -34, 27],
            "dates": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
            "strings": ["I am a string", "23", "abcdef ghijk", ""],
        },
    )
    values_dd = ps.from_pandas(df)
    ltypes = {"values": Integer, "dates": Datetime, "strings": NaturalLanguage}
    spark_es.add_dataframe(
        dataframe_name="data",
        dataframe=values_dd,
        index="id",
        logical_types=ltypes,
    )
    spark_fm, _ = dfs(
        entityset=spark_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        instance_ids=instance_ids,
    )
    pd_es = EntitySet(id="pd_es")
    pd_es.add_dataframe(
        dataframe_name="data",
        dataframe=df,
        index="id",
        logical_types=ltypes,
    )
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        instance_ids=instance_ids,
    )
    spark_fm = spark_fm.to_pandas().astype({"id": "int64"})
    spark_computed_fm = spark_fm.set_index("id").loc[fm.index]
    # Spark dtypes are different for categorical - set the pandas fm to have the same dtypes before comparing
    pd.testing.assert_frame_equal(
        fm.astype(spark_computed_fm.dtypes),
        spark_computed_fm,
    )
@pytest.mark.skipif("not ps")
def test_single_table_spark_entityset_single_cutoff_time():
    """A single scalar cutoff time should behave the same on Spark and pandas
    EntitySets."""
    primitives_list = [
        "absolute",
        "is_weekend",
        "year",
        "day",
        "num_characters",
        "num_words",
    ]
    spark_es = EntitySet(id="spark_es")
    df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "values": [1, 12, -34, 27],
            "dates": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
            "strings": ["I am a string", "23", "abcdef ghijk", ""],
        },
    )
    values_dd = ps.from_pandas(df)
    ltypes = {"values": Integer, "dates": Datetime, "strings": NaturalLanguage}
    spark_es.add_dataframe(
        dataframe_name="data",
        dataframe=values_dd,
        index="id",
        logical_types=ltypes,
    )
    spark_fm, _ = dfs(
        entityset=spark_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        cutoff_time=pd.Timestamp("2019-01-05 04:00"),
    )
    pd_es = EntitySet(id="pd_es")
    pd_es.add_dataframe(
        dataframe_name="data",
        dataframe=df,
        index="id",
        logical_types=ltypes,
    )
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        cutoff_time=pd.Timestamp("2019-01-05 04:00"),
    )
    spark_fm = spark_fm.to_pandas().astype({"id": "int64"})
    spark_computed_fm = spark_fm.set_index("id").loc[fm.index]
    # Spark dtypes are different for categorical - set the pandas fm to have the same dtypes before comparing
    pd.testing.assert_frame_equal(
        fm.astype(spark_computed_fm.dtypes),
        spark_computed_fm,
    )
@pytest.mark.skipif("not ps")
def test_single_table_spark_entityset_cutoff_time_df():
    """A cutoff-time DataFrame should yield the same features on a Spark
    EntitySet as on the equivalent pandas EntitySet."""
    primitives_list = [
        "absolute",
        "is_weekend",
        "year",
        "day",
        "num_characters",
        "num_words",
    ]
    spark_es = EntitySet(id="spark_es")
    df = pd.DataFrame(
        {
            "id": [0, 1, 2],
            "values": [1, 12, -34],
            "dates": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
            ],
            "strings": ["I am a string", "23", "abcdef ghijk"],
        },
    )
    values_dd = ps.from_pandas(df)
    ltypes = {"values": IntegerNullable, "dates": Datetime, "strings": NaturalLanguage}
    spark_es.add_dataframe(
        dataframe_name="data",
        dataframe=values_dd,
        index="id",
        time_index="dates",
        logical_types=ltypes,
    )
    # Instance id 0 appears twice with different cutoff times on purpose.
    ids = [0, 1, 2, 0]
    times = [
        pd.Timestamp("2019-01-05 04:00"),
        pd.Timestamp("2019-01-05 04:00"),
        pd.Timestamp("2019-01-05 04:00"),
        pd.Timestamp("2019-01-15 04:00"),
    ]
    labels = [True, False, True, False]
    cutoff_times = pd.DataFrame(
        {"id": ids, "time": times, "labels": labels},
        columns=["id", "time", "labels"],
    )
    spark_fm, _ = dfs(
        entityset=spark_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        cutoff_time=cutoff_times,
    )
    pd_es = EntitySet(id="pd_es")
    pd_es.add_dataframe(
        dataframe_name="data",
        dataframe=df,
        index="id",
        time_index="dates",
        logical_types=ltypes,
    )
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        cutoff_time=cutoff_times,
    )
    # Because row ordering with spark is not guaranteed, we need to sort on two columns to make sure that values
    # for instance id 0 are compared correctly. Also, make sure the index column has the same dtype.
    fm = fm.sort_values(["id", "labels"])
    spark_fm = spark_fm.to_pandas().astype({"id": "int64"})
    spark_fm = spark_fm.set_index("id").sort_values(["id", "labels"])
    # Convert pandas categorical columns to strings so their dtypes line up
    # with the Spark output before comparing.
    for column in fm.columns:
        if fm[column].dtype.name == "category":
            fm[column] = fm[column].astype("Int64").astype("string")
    pd.testing.assert_frame_equal(
        fm.astype(spark_fm.dtypes),
        spark_fm,
        check_dtype=False,
    )
@pytest.mark.skipif("not ps")
def test_single_table_spark_entityset_dates_not_sorted():
    """A Spark EntitySet whose time index is not pre-sorted should still match
    the pandas result."""
    spark_es = EntitySet(id="spark_es")
    df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "values": [1, 12, -34, 27],
            "dates": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
        },
    )
    primitives_list = ["absolute", "is_weekend", "year", "day"]
    values_dd = ps.from_pandas(df)
    ltypes = {
        "values": Integer,
        "dates": Datetime,
    }
    spark_es.add_dataframe(
        dataframe_name="data",
        dataframe=values_dd,
        index="id",
        time_index="dates",
        logical_types=ltypes,
    )
    spark_fm, _ = dfs(
        entityset=spark_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        max_depth=1,
    )
    pd_es = EntitySet(id="pd_es")
    pd_es.add_dataframe(
        dataframe_name="data",
        dataframe=df,
        index="id",
        time_index="dates",
        logical_types=ltypes,
    )
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="data",
        trans_primitives=primitives_list,
        max_depth=1,
    )
    spark_fm = spark_fm.to_pandas().astype({"id": "int64"})
    spark_fm = spark_fm.set_index("id").loc[fm.index]
    pd.testing.assert_frame_equal(fm.astype(spark_fm.dtypes), spark_fm)
@pytest.mark.skipif("not ps")
def test_spark_entityset_secondary_time_index():
    """A secondary time index on a Spark EntitySet should yield the same
    feature matrix as the equivalent pandas EntitySet."""
    log_df = pd.DataFrame()
    log_df["id"] = [0, 1, 2, 3]
    log_df["scheduled_time"] = pd.to_datetime(
        ["2019-01-01", "2019-01-01", "2019-01-01", "2019-01-01"],
    )
    log_df["departure_time"] = pd.to_datetime(
        [
            "2019-02-01 09:00",
            "2019-02-06 10:00",
            "2019-02-12 10:00",
            "2019-03-01 11:30",
        ],
    )
    log_df["arrival_time"] = pd.to_datetime(
        [
            "2019-02-01 11:23",
            "2019-02-06 12:45",
            "2019-02-12 13:53",
            "2019-03-01 14:07",
        ],
    )
    log_df["delay"] = [-2, 10, 60, 0]
    log_df["flight_id"] = [0, 1, 0, 1]
    log_spark = ps.from_pandas(log_df)
    flights_df = pd.DataFrame()
    flights_df["id"] = [0, 1, 2, 3]
    flights_df["origin"] = ["BOS", "LAX", "BOS", "LAX"]
    flights_spark = ps.from_pandas(flights_df)
    pd_es = EntitySet("flights")
    spark_es = EntitySet("flights_spark")
    log_ltypes = {
        "scheduled_time": Datetime,
        "departure_time": Datetime,
        "arrival_time": Datetime,
        "delay": Double,
    }
    pd_es.add_dataframe(
        dataframe_name="logs",
        dataframe=log_df,
        index="id",
        logical_types=log_ltypes,
        semantic_tags={"flight_id": "foreign_key"},
        time_index="scheduled_time",
        secondary_time_index={"arrival_time": ["departure_time", "delay"]},
    )
    spark_es.add_dataframe(
        dataframe_name="logs",
        dataframe=log_spark,
        index="id",
        logical_types=log_ltypes,
        semantic_tags={"flight_id": "foreign_key"},
        time_index="scheduled_time",
        secondary_time_index={"arrival_time": ["departure_time", "delay"]},
    )
    pd_es.add_dataframe(dataframe_name="flights", dataframe=flights_df, index="id")
    flights_ltypes = pd_es["flights"].ww.logical_types
    spark_es.add_dataframe(
        dataframe_name="flights",
        dataframe=flights_spark,
        index="id",
        logical_types=flights_ltypes,
    )
    pd_es.add_relationship("flights", "id", "logs", "flight_id")
    spark_es.add_relationship("flights", "id", "logs", "flight_id")
    cutoff_df = pd.DataFrame()
    cutoff_df["id"] = [0, 1, 1]
    cutoff_df["time"] = pd.to_datetime(["2019-02-02", "2019-02-02", "2019-02-20"])
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="logs",
        cutoff_time=cutoff_df,
        agg_primitives=["max"],
        trans_primitives=["month"],
    )
    spark_fm, _ = dfs(
        entityset=spark_es,
        target_dataframe_name="logs",
        cutoff_time=cutoff_df,
        agg_primitives=["max"],
        trans_primitives=["month"],
    )
    # Make sure both matrices are sorted the same
    # Also make sure index has same dtype
    spark_fm = spark_fm.to_pandas().astype({"id": "int64"})
    spark_fm = spark_fm.set_index("id").sort_values("delay")
    fm = fm.sort_values("delay")
    # Spark output for MONTH columns will be of string type without decimal points,
    # while pandas will contain decimals - we need to convert before comparing
    for column in fm.columns:
        if fm[column].dtype.name == "category":
            fm[column] = fm[column].astype("Int64").astype("string")
    pd.testing.assert_frame_equal(
        fm,
        spark_fm,
        check_categorical=False,
        check_dtype=False,
    )
| 14,974 | 26.990654 | 113 | py |
featuretools | featuretools-main/featuretools/tests/synthesis/test_deep_feature_synthesis.py | import copy
import re
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools import EntitySet, Feature, GroupByTransformFeature
from featuretools.entityset.entityset import LTI_COLUMN_NAME
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
IdentityFeature,
TransformFeature,
)
from featuretools.feature_base.utils import is_valid_input
from featuretools.primitives import (
Absolute,
AddNumeric,
Count,
CumCount,
CumMean,
CumMin,
CumSum,
Day,
Diff,
Equal,
Hour,
IsIn,
IsNull,
Last,
Mean,
Mode,
Month,
Negate,
NMostCommon,
Not,
NotEqual,
NumCharacters,
NumTrue,
NumUnique,
RollingCount,
RollingMax,
RollingMean,
RollingMin,
RollingOutlierCount,
RollingSTD,
Sum,
TimeSincePrevious,
TransformPrimitive,
Trend,
Year,
)
from featuretools.synthesis import DeepFeatureSynthesis
from featuretools.tests.testing_utils import (
feature_with_name,
make_ecommerce_entityset,
number_of_features_with_name_like,
)
from featuretools.utils.gen_utils import Library
def test_makes_agg_features_from_str(es):
    """Aggregation primitives given as strings should be resolved and applied."""
    builder = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=["sum"],
        trans_primitives=[],
    )
    assert feature_with_name(builder.build_features(), "SUM(log.value)")
def test_makes_agg_features_from_mixed_str(es):
    """String names and primitive classes may be mixed in agg_primitives."""
    builder = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Count, "sum"],
        trans_primitives=[],
    )
    built = builder.build_features()
    assert feature_with_name(built, "SUM(log.value)")
    assert feature_with_name(built, "COUNT(log)")
def test_makes_agg_features(es):
    """Primitive classes in agg_primitives should produce aggregation features."""
    builder = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[],
    )
    assert feature_with_name(builder.build_features(), "SUM(log.value)")
def test_only_makes_supplied_agg_feat(es):
    """Only the supplied aggregation primitive (Sum) should appear anywhere in
    the generated feature stack."""
    kwargs = dict(
        target_dataframe_name="customers",
        entityset=es,
        max_depth=3,
    )
    dfs_obj = DeepFeatureSynthesis(agg_primitives=[Sum], **kwargs)
    features = dfs_obj.build_features()
    def find_other_agg_features(features):
        # Features that either are a non-Sum aggregation themselves or are
        # built directly on top of one.
        return [
            f
            for f in features
            if (isinstance(f, AggregationFeature) and not isinstance(f.primitive, Sum))
            or len(
                [
                    g
                    for g in f.base_features
                    if isinstance(g, AggregationFeature)
                    and not isinstance(g.primitive, Sum)
                ],
            )
            > 0
        ]
    other_agg_features = find_other_agg_features(features)
    assert len(other_agg_features) == 0
def test_errors_unsupported_primitives(es):
    """Primitives incompatible with the EntitySet's backend library should
    raise a ValueError naming the offending primitives."""
    bad_trans_prim = CumSum()
    bad_agg_prim = NumUnique()
    # Simulate primitives that declare support for no backend at all.
    bad_trans_prim.compatibility, bad_agg_prim.compatibility = [], []
    library = es.dataframe_type
    error_text = "Selected primitives are incompatible with {} EntitySets: cum_sum, num_unique".format(
        library.value,
    )
    with pytest.raises(ValueError, match=error_text):
        DeepFeatureSynthesis(
            target_dataframe_name="sessions",
            entityset=es,
            agg_primitives=[bad_agg_prim],
            trans_primitives=[bad_trans_prim],
        )
def test_errors_unsupported_primitives_spark(spark_es):
    """Spark-incompatible primitives should raise when used with a Spark
    EntitySet."""
    bad_trans_prim = CumSum()
    bad_agg_prim = NumUnique()
    # Mark both primitives as unsupported on Spark.
    bad_trans_prim.spark_compatible, bad_agg_prim.spark_compatible = False, False
    error_text = "Selected primitives are incompatible with Spark EntitySets: cum_sum"
    with pytest.raises(ValueError, match=error_text):
        DeepFeatureSynthesis(
            target_dataframe_name="sessions",
            entityset=spark_es,
            agg_primitives=[bad_agg_prim],
            trans_primitives=[bad_trans_prim],
        )
def test_error_for_missing_target_dataframe(es):
    """An unknown target dataframe name should raise a KeyError, both for a
    named entity set and for one without an id."""
    with pytest.raises(
        KeyError,
        match="Provided target dataframe missing_dataframe does not exist in ecommerce",
    ):
        DeepFeatureSynthesis(
            target_dataframe_name="missing_dataframe",
            entityset=es,
            agg_primitives=[Last],
            trans_primitives=[],
            ignore_dataframes=["log"],
        )
    es_without_id = EntitySet()
    with pytest.raises(
        KeyError,
        match="Provided target dataframe missing_dataframe does not exist in entity set",
    ):
        DeepFeatureSynthesis(
            target_dataframe_name="missing_dataframe",
            entityset=es_without_id,
            agg_primitives=[Last],
            trans_primitives=[],
            ignore_dataframes=["log"],
        )
def test_ignores_dataframes(es):
    """ignore_dataframes must be a list, and listed dataframes must not appear
    among any feature's dependencies."""
    with pytest.raises(TypeError, match="ignore_dataframes must be a list"):
        DeepFeatureSynthesis(
            target_dataframe_name="sessions",
            entityset=es,
            agg_primitives=[Sum],
            trans_primitives=[],
            ignore_dataframes="log",
        )
    builder = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[],
        ignore_dataframes=["log"],
    )
    for feat in builder.build_features():
        dependency_dataframes = [
            dep.dataframe_name for dep in feat.get_dependencies(deep=True)
        ]
        assert "log" not in dependency_dataframes
def test_ignores_columns(es):
    """Columns listed in ignore_columns should never appear as identity
    dependencies of any generated feature."""
    builder = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[],
        ignore_columns={"log": ["value"]},
    )
    for feat in builder.build_features():
        log_columns = [
            dep.column_name
            for dep in feat.get_dependencies(deep=True)
            if isinstance(dep, IdentityFeature) and dep.dataframe_name == "log"
        ]
        assert "value" not in log_columns
def test_ignore_columns_input_type(es):
    """A non-list value in ignore_columns should raise a TypeError."""
    # need to use string literals to avoid regex params
    error_msg = r"ignore_columns should be dict\[str -> list\]"
    with pytest.raises(TypeError, match=error_msg):
        DeepFeatureSynthesis(
            target_dataframe_name="log",
            entityset=es,
            ignore_columns={"log": "value"},
        )
def test_ignore_columns_with_nonstring_values(es):
    """Non-string entries inside an ignore_columns list should raise."""
    with pytest.raises(
        TypeError,
        match="list in ignore_columns must only have string values",
    ):
        DeepFeatureSynthesis(
            target_dataframe_name="log",
            entityset=es,
            ignore_columns={"log": ["a", "b", 3]},
        )
def test_ignore_columns_with_nonstring_keys(es):
    """Non-string keys in the ignore_columns dict should raise a TypeError."""
    # need to use string literals to avoid regex params
    error_msg = r"ignore_columns should be dict\[str -> list\]"
    with pytest.raises(TypeError, match=error_msg):
        DeepFeatureSynthesis(
            target_dataframe_name="log",
            entityset=es,
            ignore_columns={1: ["a", "b", "c"]},
        )
def test_makes_dfeatures(es):
    """Direct features from a parent dataframe should be built by default."""
    builder = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[],
        trans_primitives=[],
    )
    assert feature_with_name(builder.build_features(), "customers.age")
def test_makes_trans_feat(es):
    """Transform primitives should be applied to the datetime column."""
    builder = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=es,
        agg_primitives=[],
        trans_primitives=[Hour],
    )
    assert feature_with_name(builder.build_features(), "HOUR(datetime)")
def test_handles_diff_dataframe_groupby(pd_es):
    """Diff should be built grouped by each foreign-key column of the target."""
    builder = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[],
        groupby_trans_primitives=[Diff],
    )
    built = builder.build_features()
    assert feature_with_name(built, "DIFF(value) by session_id")
    assert feature_with_name(built, "DIFF(value) by product_id")
def test_handles_time_since_previous_dataframe_groupby(pd_es):
    """TimeSincePrevious can be used as a groupby transform primitive."""
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[],
        groupby_trans_primitives=[TimeSincePrevious],
    )
    features = dfs_obj.build_features()
    assert feature_with_name(features, "TIME_SINCE_PREVIOUS(datetime) by session_id")
# TODO: re-enable the commented-out test below once CumMean groupby support works
# def test_handles_cumsum_dataframe_groupby(pd_es):
# dfs_obj = DeepFeatureSynthesis(target_dataframe_name='sessions',
# entityset=pd_es,
# agg_primitives=[],
# trans_primitives=[CumMean])
# features = dfs_obj.build_features()
# assert (feature_with_name(features, u'customers.CUM_MEAN(age by région_id)'))
def test_only_makes_supplied_trans_feat(es):
    """Only the requested transform primitive (Hour) appears in the output.

    Checks both the features themselves and their base features for any
    TransformFeature built from a primitive other than Hour.
    """
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=es,
        agg_primitives=[],
        trans_primitives=[Hour],
    )
    features = dfs_obj.build_features()
    # Collect transform features (direct or as a base feature) whose primitive
    # is not Hour; there should be none.
    other_trans_features = [
        f
        for f in features
        if (isinstance(f, TransformFeature) and not isinstance(f.primitive, Hour))
        or len(
            [
                g
                for g in f.base_features
                if isinstance(g, TransformFeature) and not isinstance(g.primitive, Hour)
            ],
        )
        > 0
    ]
    assert len(other_trans_features) == 0
def test_makes_dfeatures_of_agg_primitives(es):
    """A direct feature of a parent-level aggregation is built."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=["max"],
        trans_primitives=[],
    )
    built = synth.build_features()
    assert feature_with_name(built, "customers.MAX(log.value)")
def test_makes_agg_features_of_trans_primitives(es):
    """Aggregations stack on top of transform features (MEAN of NUM_CHARACTERS)."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Mean],
        trans_primitives=[NumCharacters],
    )
    built = synth.build_features()
    assert feature_with_name(built, "MEAN(log.NUM_CHARACTERS(comments))")
def test_makes_agg_features_with_where(es):
    """Count features with WHERE clauses are built from interesting values."""
    # TODO: Update to work with Dask and Spark `es` fixture when issue #978 is closed
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask EntitySets do not support add_interesting_values")
    es.add_interesting_values()
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Count],
        where_primitives=[Count],
        trans_primitives=[],
    )
    features = dfs_obj.build_features()
    assert feature_with_name(features, "COUNT(log WHERE priority_level = 0)")
    # make sure they are made using direct features too
    assert feature_with_name(features, "COUNT(log WHERE products.department = food)")
def test_make_groupby_features(pd_es):
    """cum_sum as a groupby transform yields a 'by session_id' feature."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[],
        groupby_trans_primitives=["cum_sum"],
    )
    built = synth.build_features()
    assert feature_with_name(built, "CUM_SUM(value) by session_id")
def test_make_indirect_groupby_features(pd_es):
    """Groupby transforms are also applied to direct (parent) features."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[],
        groupby_trans_primitives=["cum_sum"],
    )
    built = synth.build_features()
    assert feature_with_name(built, "CUM_SUM(products.rating) by session_id")
def test_make_groupby_features_with_id(pd_es):
    """cum_count can group by the same foreign-key column it counts."""
    # Need to convert customer_id to categorical column in order to build desired feature
    pd_es["sessions"].ww.set_types(
        logical_types={"customer_id": "Categorical"},
        semantic_tags={"customer_id": "foreign_key"},
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[],
        groupby_trans_primitives=["cum_count"],
    )
    features = dfs_obj.build_features()
    assert feature_with_name(features, "CUM_COUNT(customer_id) by customer_id")
def test_make_groupby_features_with_diff_id(pd_es):
    """cum_count can count one id column while grouping by a different one."""
    # Need to convert cohort to categorical column in order to build desired feature
    pd_es["customers"].ww.set_types(
        logical_types={"cohort": "Categorical"},
        semantic_tags={"cohort": "foreign_key"},
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[],
        groupby_trans_primitives=["cum_count"],
    )
    features = dfs_obj.build_features()
    groupby_with_diff_id = "CUM_COUNT(cohort) by région_id"
    assert feature_with_name(features, groupby_with_diff_id)
def test_make_groupby_features_with_agg(pd_es):
    """Aggregation primitives stack on top of groupby transform features."""
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="cohorts",
        entityset=pd_es,
        agg_primitives=["sum"],
        trans_primitives=[],
        groupby_trans_primitives=["cum_sum"],
    )
    features = dfs_obj.build_features()
    agg_on_groupby_name = "SUM(customers.CUM_SUM(age) by région_id)"
    assert feature_with_name(features, agg_on_groupby_name)
def test_bad_groupby_feature(es):
    """An aggregation primitive name in groupby_trans_primitives raises ValueError."""
    msg = re.escape(
        "Unknown groupby transform primitive max. "
        "Call ft.primitives.list_primitives() to get "
        "a list of available primitives",
    )
    with pytest.raises(ValueError, match=msg):
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["sum"],
            trans_primitives=[],
            groupby_trans_primitives=["Max"],  # "Max" is an agg primitive, not a groupby transform
        )
@pytest.mark.parametrize(
    "rolling_primitive",
    [
        RollingMax,
        RollingMean,
        RollingMin,
        RollingOutlierCount,
        RollingSTD,
    ],
)
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (7, 3),
        ("7d", "3d"),
    ],
)
def test_make_rolling_features(window_length, gap, rolling_primitive, pd_es):
    """Rolling primitives with numeric and offset-string windows produce features.

    The expected feature name includes the primitive's configured
    window_length, gap, and min_periods.
    """
    rolling_primitive_obj = rolling_primitive(
        window_length=window_length,
        gap=gap,
        min_periods=5,
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[rolling_primitive_obj],
    )
    features = dfs_obj.build_features()
    rolling_transform_name = f"{rolling_primitive.name.upper()}(datetime, value_many_nans, window_length={window_length}, gap={gap}, min_periods=5)"
    assert feature_with_name(features, rolling_transform_name)
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (7, 3),
        ("7d", "3d"),
    ],
)
def test_make_rolling_count_off_datetime_feature(window_length, gap, pd_es):
    """RollingCount built from a datetime column produces the expected feature.

    Note: the `gap` parameter is deliberately passed as min_periods here, and
    the expected feature name reflects that.
    """
    rolling_count = RollingCount(window_length=window_length, min_periods=gap)
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[rolling_count],
    )
    features = dfs_obj.build_features()
    rolling_transform_name = (
        f"ROLLING_COUNT(datetime, window_length={window_length}, min_periods={gap})"
    )
    assert feature_with_name(features, rolling_transform_name)
def test_abides_by_max_depth_param(es):
    """No built feature exceeds the configured max_depth, for depths 0-3."""
    for i in [0, 1, 2, 3]:
        dfs_obj = DeepFeatureSynthesis(
            target_dataframe_name="sessions",
            entityset=es,
            agg_primitives=[Sum],
            trans_primitives=[],
            max_depth=i,
        )
        features = dfs_obj.build_features()
        for f in features:
            assert f.get_depth() <= i
def test_max_depth_single_table(transform_es):
    """With a single-dataframe entityset, max_depth is capped at 1 with a warning.

    max_depth of -1 (unlimited) or 2 triggers a UserWarning and is reduced to
    1; max_depth of 0 produces only identity (depth-0) features.
    """
    assert len(transform_es.dataframe_dict) == 1

    def make_dfs_obj(max_depth):
        # Helper: build a DFS object for the given max_depth on the single table.
        dfs_obj = DeepFeatureSynthesis(
            target_dataframe_name="first",
            entityset=transform_es,
            trans_primitives=[AddNumeric],
            max_depth=max_depth,
        )
        return dfs_obj

    for i in [-1, 0, 1, 2]:
        if i in [-1, 2]:
            match = (
                "Only one dataframe in entityset, changing max_depth to 1 "
                "since deeper features cannot be created"
            )
            with pytest.warns(UserWarning, match=match):
                dfs_obj = make_dfs_obj(i)
        else:
            dfs_obj = make_dfs_obj(i)
        features = dfs_obj.build_features()
        assert len(features) > 0
        if i != 0:
            # at least one depth 1 feature made
            assert any([f.get_depth() == 1 for f in features])
            # no depth 2 or higher even with max_depth=2
            assert all([f.get_depth() <= 1 for f in features])
        else:
            # no depth 1 or higher features with max_depth=0
            assert all([f.get_depth() == 0 for f in features])
def test_drop_contains(es):
    """drop_contains removes any feature whose name contains the given substring."""
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[],
        max_depth=1,
        seed_features=[],
        drop_contains=[],
    )
    features = dfs_obj.build_features()
    # Pick an arbitrary built feature and use a prefix of its name as the filter.
    to_drop = features[2]
    partial_name = to_drop.get_name()[:5]
    dfs_drop = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[],
        max_depth=1,
        seed_features=[],
        drop_contains=[partial_name],
    )
    features = dfs_drop.build_features()
    assert to_drop.get_name() not in [f.get_name() for f in features]
def test_drop_exact(es):
    """drop_exact removes the feature whose full name matches exactly."""
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[],
        max_depth=1,
        seed_features=[],
        drop_exact=[],
    )
    features = dfs_obj.build_features()
    # Pick an arbitrary built feature and drop it by exact name.
    to_drop = features[2]
    name = to_drop.get_name()
    dfs_drop = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[],
        max_depth=1,
        seed_features=[],
        drop_exact=[name],
    )
    features = dfs_drop.build_features()
    assert name not in [f.get_name() for f in features]
def test_seed_features(es):
    """Seed features appear in the output and are built upon by aggregations."""
    seed_feature_sessions = (
        Feature(es["log"].ww["id"], parent_dataframe_name="sessions", primitive=Count)
        > 2
    )
    seed_feature_log = Feature(es["log"].ww["comments"], primitive=NumCharacters)
    # Expected stacked feature: MEAN of the seed transform, aggregated to sessions.
    session_agg = Feature(
        seed_feature_log,
        parent_dataframe_name="sessions",
        primitive=Mean,
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Mean],
        trans_primitives=[],
        max_depth=2,
        seed_features=[seed_feature_sessions, seed_feature_log],
    )
    features = dfs_obj.build_features()
    assert seed_feature_sessions.get_name() in [f.get_name() for f in features]
    assert session_agg.get_name() in [f.get_name() for f in features]
def test_does_not_make_agg_of_direct_of_target_dataframe(es):
    """Aggregations of direct features of the target dataframe are not built."""
    # TODO: Update to work with Dask and Spark supported primitive
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask EntitySets do not support the Last primitive")
    count_sessions = Feature(
        es["sessions"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[Last],
        trans_primitives=[],
        max_depth=2,
        seed_features=[count_sessions],
    )
    features = dfs_obj.build_features()
    # this feature is meaningless because customers.COUNT(sessions) is already defined on
    # the customers dataframe
    assert not feature_with_name(features, "LAST(sessions.customers.COUNT(sessions))")
    assert not feature_with_name(features, "LAST(sessions.customers.age)")
def test_dfs_builds_on_seed_features_more_than_max_depth(es):
    """Features one level beyond a seed are built, but not two levels beyond.

    Seed features themselves and depth-1 features relative to them appear;
    a feature two levels past a seed exceeds max_depth=1 and must not.
    """
    # TODO: Update to work with Dask and Spark supported primitive
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask EntitySets do not support the Last and Mode primitives")
    seed_feature_sessions = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    seed_feature_log = Feature(es["log"].ww["datetime"], primitive=Hour)
    session_agg = Feature(
        seed_feature_log,
        parent_dataframe_name="sessions",
        primitive=Last,
    )
    # Depth of this feat is 2 relative to session_agg, the seed feature,
    # which is greater than max_depth so it shouldn't be built
    session_agg_trans = DirectFeature(
        Feature(session_agg, parent_dataframe_name="customers", primitive=Mode),
        "sessions",
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Last, Count],
        trans_primitives=[],
        max_depth=1,
        seed_features=[seed_feature_sessions, seed_feature_log],
    )
    features = dfs_obj.build_features()
    assert seed_feature_sessions.get_name() in [f.get_name() for f in features]
    assert session_agg.get_name() in [f.get_name() for f in features]
    assert session_agg_trans.get_name() not in [f.get_name() for f in features]
def test_dfs_includes_seed_features_greater_than_max_depth(es):
    """A seed feature deeper than max_depth is still included in the output."""
    session_agg = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="sessions",
        primitive=Sum,
    )
    customer_agg = Feature(
        session_agg,
        parent_dataframe_name="customers",
        primitive=Mean,
    )
    assert customer_agg.get_depth() == 2
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[Mean],
        trans_primitives=[],
        max_depth=1,  # smaller than the seed's depth of 2
        seed_features=[customer_agg],
    )
    features = dfs_obj.build_features()
    assert feature_with_name(features=features, name=customer_agg.get_name())
def test_allowed_paths(es):
    """allowed_paths restricts feature building to the listed relationship paths.

    Unconstrained DFS builds features through both customers->sessions and
    customers->sessions->log; constraining to [customers, sessions] removes
    features that require traversing down to log.
    """
    # TODO: Update to work with Dask and Spark supported primitive
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask EntitySets do not support the Last primitive")
    kwargs = dict(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[Last],
        trans_primitives=[],
        max_depth=2,
        seed_features=[],
    )
    dfs_unconstrained = DeepFeatureSynthesis(**kwargs)
    features_unconstrained = dfs_unconstrained.build_features()
    unconstrained_names = [f.get_name() for f in features_unconstrained]
    customers_session_feat = Feature(
        es["sessions"].ww["device_type"],
        parent_dataframe_name="customers",
        primitive=Last,
    )
    customers_session_log_feat = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Last,
    )
    assert customers_session_feat.get_name() in unconstrained_names
    assert customers_session_log_feat.get_name() in unconstrained_names
    dfs_constrained = DeepFeatureSynthesis(
        allowed_paths=[["customers", "sessions"]], **kwargs
    )
    features = dfs_constrained.build_features()
    names = [f.get_name() for f in features]
    assert customers_session_feat.get_name() in names
    assert customers_session_log_feat.get_name() not in names
def test_max_features(es):
    """max_features caps the number of returned features; -1 means unlimited."""
    kwargs = dict(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[],
        max_depth=2,
        seed_features=[],
    )
    dfs_unconstrained = DeepFeatureSynthesis(**kwargs)
    features_unconstrained = dfs_unconstrained.build_features()
    dfs_unconstrained_with_arg = DeepFeatureSynthesis(max_features=-1, **kwargs)
    feats_unconstrained_with_arg = dfs_unconstrained_with_arg.build_features()
    dfs_constrained = DeepFeatureSynthesis(max_features=1, **kwargs)
    features = dfs_constrained.build_features()
    assert len(features_unconstrained) == len(feats_unconstrained_with_arg)
    assert len(features) == 1
def test_where_primitives(es):
    """where_primitives controls which primitives get WHERE-clause variants.

    By default only Count gets WHERE features; with where_primitives=["sum"],
    only Sum does. WHERE features may be built on transform (Absolute) bases.
    """
    es.add_interesting_values(dataframe_name="sessions", values={"device_type": [0]})
    kwargs = dict(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[Count, Sum],
        trans_primitives=[Absolute],
        max_depth=3,
    )
    dfs_unconstrained = DeepFeatureSynthesis(**kwargs)
    dfs_constrained = DeepFeatureSynthesis(where_primitives=["sum"], **kwargs)
    features_unconstrained = dfs_unconstrained.build_features()
    features = dfs_constrained.build_features()
    where_feats_unconstrained = [
        f
        for f in features_unconstrained
        if isinstance(f, AggregationFeature) and f.where is not None
    ]
    where_feats = [
        f for f in features if isinstance(f, AggregationFeature) and f.where is not None
    ]
    assert len(where_feats_unconstrained) >= 1
    # Default where_primitives includes Count but not Sum.
    assert (
        len([f for f in where_feats_unconstrained if isinstance(f.primitive, Sum)]) == 0
    )
    assert (
        len([f for f in where_feats_unconstrained if isinstance(f.primitive, Count)])
        > 0
    )
    # With where_primitives=["sum"], the situation is reversed.
    assert len([f for f in where_feats if isinstance(f.primitive, Sum)]) > 0
    assert len([f for f in where_feats if isinstance(f.primitive, Count)]) == 0
    # WHERE features can depend on transform features (Absolute).
    assert (
        len(
            [
                d
                for f in where_feats
                for d in f.get_dependencies(deep=True)
                if isinstance(d.primitive, Absolute)
            ],
        )
        > 0
    )
def test_stacking_where_primitives(es):
    """where_stacking_limit controls stacking of WHERE features on WHERE bases.

    With the default limit (1), no WHERE feature is built on top of another
    WHERE feature; with limit 2, such stacked features appear.
    """
    # TODO: Update to work with Dask supported primitive
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask and Spark EntitySets do not support the Last primitive")
    es = copy.deepcopy(es)  # avoid mutating the shared fixture's interesting values
    es.add_interesting_values(dataframe_name="sessions", values={"device_type": [0]})
    es.add_interesting_values(
        dataframe_name="log",
        values={"product_id": ["coke_zero"]},
    )
    kwargs = dict(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[Count, Last],
        max_depth=3,
    )
    dfs_where_stack_limit_1 = DeepFeatureSynthesis(
        where_primitives=["last", Count], **kwargs
    )
    dfs_where_stack_limit_2 = DeepFeatureSynthesis(
        where_primitives=["last", Count], where_stacking_limit=2, **kwargs
    )
    stack_limit_1_features = dfs_where_stack_limit_1.build_features()
    stack_limit_2_features = dfs_where_stack_limit_2.build_features()
    where_stack_1_feats = [
        f
        for f in stack_limit_1_features
        if isinstance(f, AggregationFeature) and f.where is not None
    ]
    where_stack_2_feats = [
        f
        for f in stack_limit_2_features
        if isinstance(f, AggregationFeature) and f.where is not None
    ]
    assert len(where_stack_1_feats) >= 1
    assert len(where_stack_2_feats) >= 1
    assert len([f for f in where_stack_1_feats if isinstance(f.primitive, Last)]) > 0
    assert len([f for f in where_stack_1_feats if isinstance(f.primitive, Count)]) > 0
    assert len([f for f in where_stack_2_feats if isinstance(f.primitive, Last)]) > 0
    assert len([f for f in where_stack_2_feats if isinstance(f.primitive, Count)]) > 0
    # Collect WHERE features whose base features are themselves WHERE features.
    stacked_where_limit_1_feats = []
    stacked_where_limit_2_feats = []
    where_double_where_tuples = [
        (where_stack_1_feats, stacked_where_limit_1_feats),
        (where_stack_2_feats, stacked_where_limit_2_feats),
    ]
    for where_list, double_where_list in where_double_where_tuples:
        for feature in where_list:
            for base_feat in feature.base_features:
                if (
                    isinstance(base_feat, AggregationFeature)
                    and base_feat.where is not None
                ):
                    double_where_list.append(feature)
    assert len(stacked_where_limit_1_feats) == 0
    assert len(stacked_where_limit_2_feats) > 0
def test_where_different_base_feats(es):
    """WHERE and non-WHERE variants of a feature never share a unique name."""
    es.add_interesting_values(dataframe_name="sessions", values={"device_type": [0]})
    kwargs = dict(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[Sum, Count],
        where_primitives=[Sum, Count],
        max_depth=3,
    )
    dfs_unconstrained = DeepFeatureSynthesis(**kwargs)
    features = dfs_unconstrained.build_features()
    where_feats = [
        f.unique_name()
        for f in features
        if isinstance(f, AggregationFeature) and f.where is not None
    ]
    not_where_feats = [
        f.unique_name()
        for f in features
        if isinstance(f, AggregationFeature) and f.where is None
    ]
    for name in not_where_feats:
        assert name not in where_feats
def test_dfeats_where(es):
    """Direct features of WHERE aggregations and WHERE clauses on direct bases work."""
    # TODO: Update to work with Dask `es` fixture when issue #978 is closed
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask and Spark EntitySets do not support add_interesting_values")
    es.add_interesting_values()
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Count],
        trans_primitives=[],
    )
    features = dfs_obj.build_features()
    # test to make sure we build direct features of agg features with where clause
    assert feature_with_name(features, "customers.COUNT(log WHERE priority_level = 0)")
    assert feature_with_name(
        features,
        "COUNT(log WHERE products.department = electronics)",
    )
def test_commutative(es):
    """Commutative primitives (AddNumeric) are built only once per operand pair."""
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=es,
        agg_primitives=[Sum],
        trans_primitives=[AddNumeric],
        max_depth=3,
    )
    feats = dfs_obj.build_features()
    add_feats = [f for f in feats if isinstance(f.primitive, AddNumeric)]
    # Check that there are no two AddNumeric features with the same base
    # features.
    unordered_args = set()
    for f in add_feats:
        arg1, arg2 = f.base_features
        args_set = frozenset({arg1.unique_name(), arg2.unique_name()})
        unordered_args.add(args_set)
    assert len(add_feats) == len(unordered_args)
def test_transform_consistency(transform_es):
    """Transform feature ordering and naming is deterministic and canonical."""
    # Generate features
    transform_es["first"].ww.set_types(
        logical_types={"b": "BooleanNullable", "b1": "BooleanNullable"},
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="first",
        entityset=transform_es,
        trans_primitives=["and", "add_numeric", "or"],
        max_depth=1,
    )
    feature_defs = dfs_obj.build_features()
    # Check for correct ordering of features
    assert feature_with_name(feature_defs, "a")
    assert feature_with_name(feature_defs, "b")
    assert feature_with_name(feature_defs, "b1")
    assert feature_with_name(feature_defs, "b12")
    assert feature_with_name(feature_defs, "P")
    assert feature_with_name(feature_defs, "AND(b, b1)")
    assert not feature_with_name(
        feature_defs,
        "AND(b1, b)",
    )  # make sure it doesn't exist the other way
    assert feature_with_name(feature_defs, "a + P")
    assert feature_with_name(feature_defs, "b12 + P")
    assert feature_with_name(feature_defs, "a + b12")
    assert feature_with_name(feature_defs, "OR(b, b1)")
def test_transform_no_stack_agg(es):
    """Transforms are not stacked onto multi-output aggregations (NMostCommon)."""
    # TODO: Update to work with Dask and Spark supported primitives
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask EntitySets do not support the NMostCommon primitive")
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[NMostCommon],
        trans_primitives=[NotEqual],
        max_depth=3,
    )
    feature_defs = dfs_obj.build_features()
    assert not feature_with_name(
        feature_defs,
        "id != N_MOST_COMMON(sessions.device_type)",
    )
def test_initialized_trans_prim(es):
    """An instantiated transform primitive (IsIn with arguments) is usable."""
    isin_prim = IsIn(list_of_outputs=["coke zero"])
    synth = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=es,
        agg_primitives=[],
        trans_primitives=[isin_prim],
    )
    built = synth.build_features()
    assert feature_with_name(built, "product_id.isin(['coke zero'])")
def test_initialized_agg_prim(es):
    """An instantiated aggregation primitive (NMostCommon(n=3)) is usable.

    Fix: renamed the local instance from PascalCase ``ThreeMost`` to
    snake_case ``three_most`` — PEP 8 reserves PascalCase for classes, and
    the name denoted an instance, not a class.
    """
    # TODO: Update to work with Dask and Spark supported primitives
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask EntitySets do not support the NMostCommon primitive")
    three_most = NMostCommon(n=3)
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[three_most],
        trans_primitives=[],
    )
    features = dfs_obj.build_features()
    assert feature_with_name(features, "N_MOST_COMMON(log.subregioncode)")
def test_return_types(es):
    """build_features(return_types=...) filters output by column schema.

    None defaults to discrete+numeric; a schema list restricts to those
    schemas; "all" returns everything including datetimes.
    """
    # TODO: Update to work with Dask and Spark supported primitive
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Dask and Spark EntitySets do not support the NMostCommon primitive",
        )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Count, NMostCommon],
        trans_primitives=[Absolute, Hour, IsIn],
    )
    discrete = ColumnSchema(semantic_tags={"category"})
    numeric = ColumnSchema(semantic_tags={"numeric"})
    datetime = ColumnSchema(logical_type=Datetime)
    f1 = dfs_obj.build_features(return_types=None)
    f2 = dfs_obj.build_features(return_types=[discrete])
    f3 = dfs_obj.build_features(return_types="all")
    f4 = dfs_obj.build_features(return_types=[datetime])
    f1_types = [f.column_schema for f in f1]
    f2_types = [f.column_schema for f in f2]
    f3_types = [f.column_schema for f in f3]
    f4_types = [f.column_schema for f in f4]
    # return_types=None: discrete and numeric, no datetime
    assert any([is_valid_input(schema, discrete) for schema in f1_types])
    assert any([is_valid_input(schema, numeric) for schema in f1_types])
    assert not any([is_valid_input(schema, datetime) for schema in f1_types])
    # return_types=[discrete]: only discrete
    assert any([is_valid_input(schema, discrete) for schema in f2_types])
    assert not any([is_valid_input(schema, numeric) for schema in f2_types])
    assert not any([is_valid_input(schema, datetime) for schema in f2_types])
    # return_types="all": every schema present
    assert any([is_valid_input(schema, discrete) for schema in f3_types])
    assert any([is_valid_input(schema, numeric) for schema in f3_types])
    assert any([is_valid_input(schema, datetime) for schema in f3_types])
    # return_types=[datetime]: only datetime
    assert not any([is_valid_input(schema, discrete) for schema in f4_types])
    assert not any([is_valid_input(schema, numeric) for schema in f4_types])
    assert any([is_valid_input(schema, datetime) for schema in f4_types])
def test_checks_primitives_correct_type(es):
    """Mixing primitive kinds (transform in agg_primitives etc.) raises ValueError."""
    error_text = (
        "Primitive <class \\'featuretools\\.primitives\\.standard\\."
        "transform\\.datetime\\.hour\\.Hour\\'> in "
        "agg_primitives is not an aggregation primitive"
    )
    with pytest.raises(ValueError, match=error_text):
        DeepFeatureSynthesis(
            target_dataframe_name="sessions",
            entityset=es,
            agg_primitives=[Hour],
            trans_primitives=[],
        )
    error_text = (
        "Primitive <class \\'featuretools\\.primitives\\.standard\\."
        "aggregation\\.sum_primitive\\.Sum\\'> in trans_primitives "
        "is not a transform primitive"
    )
    with pytest.raises(ValueError, match=error_text):
        DeepFeatureSynthesis(
            target_dataframe_name="sessions",
            entityset=es,
            agg_primitives=[],
            trans_primitives=[Sum],
        )
def test_makes_agg_features_along_multiple_paths(diamond_es):
    """Aggregations are built through every relationship path to transactions."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="regions",
        entityset=diamond_es,
        agg_primitives=["mean"],
        trans_primitives=[],
    )
    built = synth.build_features()
    for path_feature in (
        "MEAN(customers.transactions.amount)",
        "MEAN(stores.transactions.amount)",
    ):
        assert feature_with_name(built, path_feature)
def test_makes_direct_features_through_multiple_relationships(games_es):
    """Direct features are built through every forward/backward team relationship.

    games has two foreign keys into teams (home/away), so each combination of
    forward path, backward path, and score column must yield a feature.
    """
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="games",
        entityset=games_es,
        agg_primitives=["mean"],
        trans_primitives=[],
    )
    features = dfs_obj.build_features()
    teams = ["home", "away"]
    for forward in teams:
        for backward in teams:
            for col in teams:
                f = "teams[%s_team_id].MEAN(games[%s_team_id].%s_team_score)" % (
                    forward,
                    backward,
                    col,
                )
                assert feature_with_name(features, f)
def test_stacks_multioutput_features(es):
    """Single-output primitives can stack on individual outputs of multi-output ones."""
    # TODO: Update to work with Dask and Spark supported primitive
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Dask EntitySets do not support the NumUnique and NMostCommon primitives",
        )

    class TestTime(TransformPrimitive):
        # Custom 6-output transform splitting a datetime into its components.
        name = "test_time"
        input_types = [ColumnSchema(logical_type=Datetime)]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 6

        def get_function(self):
            def test_f(x):
                times = pd.Series(x)
                units = ["year", "month", "day", "hour", "minute", "second"]
                return [times.apply(lambda x: getattr(x, unit)) for unit in units]

            return test_f

    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[NumUnique, NMostCommon(n=3)],
        trans_primitives=[TestTime, Diff],
        max_depth=4,
    )
    feat = dfs_obj.build_features()
    # NUM_UNIQUE must be built on each indexed output of N_MOST_COMMON.
    for i in range(3):
        f = "NUM_UNIQUE(sessions.N_MOST_COMMON(log.countrycode)[%d])" % i
        assert feature_with_name(feat, f)
def test_seed_multi_output_feature_stacking(es):
    """Primitives stack on each output of a multi-output seed feature."""
    # TODO: Update to work with Dask and Spark supported primitive
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Dask EntitySets do not support the NMostCommon and NumUnique primitives",
        )
    threecommon = NMostCommon(3)
    tc = Feature(
        es["log"].ww["product_id"],
        parent_dataframe_name="sessions",
        primitive=threecommon,
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        seed_features=[tc],
        agg_primitives=[NumUnique],
        trans_primitives=[],
        max_depth=4,
    )
    feat = dfs_obj.build_features()
    # NUM_UNIQUE must be built on each indexed output of the seed.
    for i in range(3):
        f = "NUM_UNIQUE(sessions.N_MOST_COMMON(log.product_id)[%d])" % i
        assert feature_with_name(feat, f)
def test_makes_direct_features_along_multiple_paths(diamond_es):
    """Direct features are built along both forward paths to regions."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="transactions",
        entityset=diamond_es,
        max_depth=3,
        agg_primitives=[],
        trans_primitives=[],
    )
    built = synth.build_features()
    for path_feature in ("customers.regions.name", "stores.regions.name"):
        assert feature_with_name(built, path_feature)
def test_does_not_make_trans_of_single_direct_feature(es):
    """Transforms apply before the direct step, not on a lone direct feature."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[],
        trans_primitives=["weekday"],
        max_depth=2,
    )
    built = synth.build_features()
    # The transform of the direct feature must not exist...
    assert not feature_with_name(built, "WEEKDAY(customers.signup_date)")
    # ...but the direct feature of the transform must.
    assert feature_with_name(built, "customers.WEEKDAY(signup_date)")
def test_makes_trans_of_multiple_direct_features(diamond_es):
    """Transforms combine direct features across dataframes/paths, not same-path pairs."""
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="transactions",
        entityset=diamond_es,
        agg_primitives=["mean"],
        trans_primitives=[Equal],
        max_depth=4,
    )
    features = dfs_obj.build_features()
    # Make trans of direct and non-direct
    assert feature_with_name(features, "amount = stores.MEAN(transactions.amount)")
    # Make trans of direct features on different dataframes
    assert feature_with_name(
        features,
        "customers.MEAN(transactions.amount) = stores.square_ft",
    )
    # Make trans of direct features on same dataframe with different paths.
    assert feature_with_name(features, "customers.regions.name = stores.regions.name")
    # Don't make trans of direct features with same path.
    assert not feature_with_name(
        features,
        "stores.square_ft = stores.MEAN(transactions.amount)",
    )
    assert not feature_with_name(
        features,
        "stores.MEAN(transactions.amount) = stores.square_ft",
    )
    # The naming of the below is confusing but this is a direct feature of a transform.
    assert feature_with_name(features, "stores.MEAN(transactions.amount) = square_ft")
def test_makes_direct_of_agg_of_trans_on_target(es):
    """A direct feature of an aggregation of a transform on the target is built."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=es,
        agg_primitives=["mean"],
        trans_primitives=[Absolute],
        max_depth=3,
    )
    built = synth.build_features()
    assert feature_with_name(built, "sessions.MEAN(log.ABSOLUTE(value))")
def test_primitive_options_errors(es):
    """Malformed primitive_options raise errors; unknown names only warn.

    Bad option keys/types and conflicting options raise KeyError/TypeError;
    referencing a dataframe or column not in the entityset emits a single
    UserWarning instead.
    """
    wrong_key_options = {"mean": {"ignore_dataframe": ["sessions"]}}
    wrong_type_list = {"mean": {"ignore_dataframes": "sessions"}}
    wrong_type_dict = {"mean": {"ignore_columns": {"sessions": "product_id"}}}
    conflicting_primitive_options = {
        ("count", "mean"): {"ignore_dataframes": ["sessions"]},
        "mean": {"include_dataframes": ["sessions"]},
    }
    invalid_dataframe = {"mean": {"include_dataframes": ["invalid_dataframe"]}}
    invalid_column_dataframe = {
        "mean": {"include_columns": {"invalid_dataframe": ["product_id"]}},
    }
    invalid_column = {"mean": {"include_columns": {"sessions": ["invalid_column"]}}}
    key_error_text = "Unrecognized primitive option 'ignore_dataframe' for mean"
    list_error_text = "Incorrect type formatting for 'ignore_dataframes' for mean"
    dict_error_text = "Incorrect type formatting for 'ignore_columns' for mean"
    conflicting_error_text = "Multiple options found for primitive mean"
    invalid_dataframe_warning = "Dataframe 'invalid_dataframe' not in entityset"
    invalid_column_warning = "Column 'invalid_column' not in dataframe 'sessions'"
    with pytest.raises(KeyError, match=key_error_text):
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["mean"],
            trans_primitives=[],
            primitive_options=wrong_key_options,
        )
    with pytest.raises(TypeError, match=list_error_text):
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["mean"],
            trans_primitives=[],
            primitive_options=wrong_type_list,
        )
    with pytest.raises(TypeError, match=dict_error_text):
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["mean"],
            trans_primitives=[],
            primitive_options=wrong_type_dict,
        )
    with pytest.raises(KeyError, match=conflicting_error_text):
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["mean"],
            trans_primitives=[],
            primitive_options=conflicting_primitive_options,
        )
    with pytest.warns(UserWarning, match=invalid_dataframe_warning) as record:
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["mean"],
            trans_primitives=[],
            primitive_options=invalid_dataframe,
        )
    assert len(record) == 1
    with pytest.warns(UserWarning, match=invalid_dataframe_warning) as record:
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["mean"],
            trans_primitives=[],
            primitive_options=invalid_column_dataframe,
        )
    assert len(record) == 1
    with pytest.warns(UserWarning, match=invalid_column_warning) as record:
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["mean"],
            trans_primitives=[],
            primitive_options=invalid_column,
        )
    assert len(record) == 1
def test_primitive_options(es):
    """Per-primitive include/ignore options restrict dataframes and columns.

    Verifies each option kind (include_columns, include_dataframes,
    ignore_dataframes, ignore_columns) by inspecting the deep dependencies
    of the built features for each affected primitive.
    """
    options = {
        "sum": {"include_columns": {"customers": ["age"]}},
        "mean": {"include_dataframes": ["customers"]},
        "mode": {"ignore_dataframes": ["sessions"]},
        "num_unique": {"ignore_columns": {"customers": ["engagement_level"]}},
    }
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="cohorts",
        entityset=es,
        primitive_options=options,
    )
    features = dfs_obj.build_features()
    for f in features:
        deps = f.get_dependencies(deep=True)
        df_names = [d.dataframe_name for d in deps]
        columns = [d for d in deps if isinstance(d, IdentityFeature)]
        if isinstance(f.primitive, Sum):
            # Sum may only use customers.age among customers columns.
            for identity_base in columns:
                if identity_base.dataframe_name == "customers":
                    assert identity_base.get_name() == "age"
        if isinstance(f.primitive, Mean):
            # Mean may only touch the customers dataframe.
            assert all([df_name in ["customers"] for df_name in df_names])
        if isinstance(f.primitive, Mode):
            # Mode must not touch sessions.
            assert "sessions" not in df_names
        if isinstance(f.primitive, NumUnique):
            # NumUnique must not use customers.engagement_level.
            for identity_base in columns:
                assert not (
                    identity_base.dataframe_name == "customers"
                    and identity_base.get_name() == "engagement_level"
                )
    options = {
        "month": {"ignore_columns": {"customers": ["birthday"]}},
        "day": {"include_columns": {"customers": ["signup_date", "upgrade_date"]}},
        "num_characters": {"ignore_dataframes": ["customers"]},
        "year": {"include_dataframes": ["customers"]},
    }
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        agg_primitives=[],
        ignore_dataframes=["cohort"],
        primitive_options=options,
    )
    features = dfs_obj.build_features()
    assert not any([isinstance(f, NumCharacters) for f in features])
    for f in features:
        deps = f.get_dependencies(deep=True)
        df_names = [d.dataframe_name for d in deps]
        columns = [d for d in deps if isinstance(d, IdentityFeature)]
        if isinstance(f.primitive, Month):
            # Month must not use customers.birthday.
            for identity_base in columns:
                assert not (
                    identity_base.dataframe_name == "customers"
                    and identity_base.get_name() == "birthday"
                )
        if isinstance(f.primitive, Day):
            # Day may only use the two included customers columns.
            for identity_base in columns:
                if identity_base.dataframe_name == "customers":
                    assert (
                        identity_base.get_name() == "signup_date"
                        or identity_base.get_name() == "upgrade_date"
                    )
        if isinstance(f.primitive, Year):
            # Year may only touch the customers dataframe.
            assert all([df_name in ["customers"] for df_name in df_names])
def test_primitive_options_with_globals(es):
    """Per-primitive options compose with the global ``ignore_dataframes`` /
    ``ignore_columns`` DFS arguments; a primitive-level ``include_*`` setting
    overrides the global ignores for that primitive only."""
    # non-overlapping ignore_dataframes
    options = {"mode": {"ignore_dataframes": ["sessions"]}}
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="cohorts",
        entityset=es,
        ignore_dataframes=["régions"],
        primitive_options=options,
    )
    features = dfs_obj.build_features()
    for f in features:
        deps = f.get_dependencies(deep=True)
        df_names = [d.dataframe_name for d in deps]
        # global ignore applies to every feature; primitive option only to Mode
        assert "régions" not in df_names
        if isinstance(f.primitive, Mode):
            assert "sessions" not in df_names
    # non-overlapping ignore_columns
    options = {"num_unique": {"ignore_columns": {"customers": ["engagement_level"]}}}
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        ignore_columns={"customers": ["région_id"]},
        primitive_options=options,
    )
    features = dfs_obj.build_features()
    for f in features:
        deps = f.get_dependencies(deep=True)
        columns = [d for d in deps if isinstance(d, IdentityFeature)]
        for identity_base in columns:
            assert not (
                identity_base.dataframe_name == "customers"
                and identity_base.get_name() == "région_id"
            )
        if isinstance(f.primitive, NumUnique):
            for identity_base in columns:
                assert not (
                    identity_base.dataframe_name == "customers"
                    and identity_base.get_name() == "engagement_level"
                )
    # Overlapping globals/options with ignore_dataframes
    options = {
        "mode": {
            "include_dataframes": ["sessions", "customers"],
            "ignore_columns": {"customers": ["région_id"]},
        },
        "num_unique": {
            "include_dataframes": ["sessions", "customers"],
            "include_columns": {"sessions": ["device_type"], "customers": ["age"]},
        },
        "month": {"ignore_columns": {"cohorts": ["cohort_end"]}},
    }
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="cohorts",
        entityset=es,
        ignore_dataframes=["sessions"],
        ignore_columns={"customers": ["age"]},
        primitive_options=options,
    )
    features = dfs_obj.build_features()
    for f in features:
        assert f.primitive.name != "month"
        # ignoring cohorts means no features are created
        assert not isinstance(f.primitive, Month)
        deps = f.get_dependencies(deep=True)
        df_names = [d.dataframe_name for d in deps]
        columns = [d for d in deps if isinstance(d, IdentityFeature)]
        if isinstance(f.primitive, Mode):
            # Mode's include_dataframes overrides the global ignore of 'sessions'
            assert [all([df_name in ["sessions", "customers"] for df_name in df_names])]
            for identity_base in columns:
                assert not (
                    identity_base.dataframe_name == "customers"
                    and (
                        identity_base.get_name() == "age"
                        or identity_base.get_name() == "région_id"
                    )
                )
        elif isinstance(f.primitive, NumUnique):
            assert [all([df_name in ["sessions", "customers"] for df_name in df_names])]
            for identity_base in columns:
                if identity_base.dataframe_name == "sessions":
                    assert identity_base.get_name() == "device_type"
        # All other primitives ignore 'sessions' and 'age'
        else:
            assert "sessions" not in df_names
            for identity_base in columns:
                assert not (
                    identity_base.dataframe_name == "customers"
                    and identity_base.get_name() == "age"
                )
def test_primitive_options_groupbys(pd_es):
    """The groupby-specific option keys (``include_groupby_dataframes``,
    ``ignore_groupby_dataframes``, ``include_groupby_columns``,
    ``ignore_groupby_columns``) restrict which columns a groupby transform
    primitive may group by."""
    options = {
        "cum_count": {"include_groupby_dataframes": ["log", "customers"]},
        "cum_sum": {"ignore_groupby_dataframes": ["sessions"]},
        "cum_mean": {
            "ignore_groupby_columns": {
                "customers": ["région_id"],
                "log": ["session_id"],
            },
        },
        "cum_min": {
            "include_groupby_columns": {"sessions": ["customer_id", "device_type"]},
        },
    }
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[],
        max_depth=3,
        groupby_trans_primitives=["cum_sum", "cum_count", "cum_min", "cum_mean"],
        primitive_options=options,
    )
    features = dfs_obj.build_features()
    for f in features:
        if isinstance(f, GroupByTransformFeature):
            # options constrain the groupby column, not the aggregated input
            deps = f.groupby.get_dependencies(deep=True)
            df_names = [d.dataframe_name for d in deps] + [f.groupby.dataframe_name]
            columns = [d for d in deps if isinstance(d, IdentityFeature)]
            columns += [f.groupby] if isinstance(f.groupby, IdentityFeature) else []
            if isinstance(f.primitive, CumMean):
                for identity_groupby in columns:
                    assert not (
                        identity_groupby.dataframe_name == "customers"
                        and identity_groupby.get_name() == "région_id"
                    )
                    assert not (
                        identity_groupby.dataframe_name == "log"
                        and identity_groupby.get_name() == "session_id"
                    )
            if isinstance(f.primitive, CumCount):
                assert all([name in ["log", "customers"] for name in df_names])
            if isinstance(f.primitive, CumSum):
                assert "sessions" not in df_names
            if isinstance(f.primitive, CumMin):
                for identity_groupby in columns:
                    if identity_groupby.dataframe_name == "sessions":
                        assert (
                            identity_groupby.get_name() == "customer_id"
                            or identity_groupby.get_name() == "device_type"
                        )
def test_primitive_options_multiple_inputs(es):
    """Multi-input primitives take a list of option dicts, one per input;
    mismatched counts and unknown primitive names raise, and matched
    per-input options constrain each input independently."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Dask and Spark EntitySets do not support various primitives used in this test",
        )
    too_many_options = {
        "mode": [{"include_dataframes": ["logs"]}, {"ignore_dataframes": ["sessions"]}],
    }
    error_msg = "Number of options does not match number of inputs for primitive mode"
    with pytest.raises(AssertionError, match=error_msg):
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=["mode"],
            trans_primitives=[],
            primitive_options=too_many_options,
        )
    unknown_primitive = Trend()
    unknown_primitive.name = "unknown_primitive"
    unknown_primitive_option = {
        "unknown_primitive": [
            {"include_dataframes": ["logs"]},
            {"ignore_dataframes": ["sessions"]},
        ],
    }
    error_msg = "Unknown primitive with name 'unknown_primitive'"
    with pytest.raises(ValueError, match=error_msg):
        DeepFeatureSynthesis(
            target_dataframe_name="customers",
            entityset=es,
            agg_primitives=[unknown_primitive],
            trans_primitives=[],
            primitive_options=unknown_primitive_option,
        )
    # per-input options: first input must avoid 'value', second must be 'datetime'
    options1 = {
        "trend": [
            {"include_dataframes": ["log"], "ignore_columns": {"log": ["value"]}},
            {"include_dataframes": ["log"], "include_columns": {"log": ["datetime"]}},
        ],
    }
    dfs_obj1 = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=["trend"],
        trans_primitives=[],
        primitive_options=options1,
    )
    features1 = dfs_obj1.build_features()
    for f in features1:
        deps = f.get_dependencies()
        df_names = [d.dataframe_name for d in deps]
        columns = [d.get_name() for d in deps]
        if f.primitive.name == "trend":
            assert all([df_name in ["log"] for df_name in df_names])
            assert "datetime" in columns
            if len(columns) == 2:
                assert "value" != columns[0]
    # same options keyed by the primitive class must give identical features
    options2 = {
        Trend: [
            {"include_dataframes": ["log"], "ignore_columns": {"log": ["value"]}},
            {"include_dataframes": ["log"], "include_columns": {"log": ["datetime"]}},
        ],
    }
    dfs_obj2 = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=["trend"],
        trans_primitives=[],
        primitive_options=options2,
    )
    features2 = dfs_obj2.build_features()
    assert set(features2) == set(features1)
def test_primitive_options_class_names(es):
    """Options may be keyed by primitive name or primitive class, and the two
    spellings produce identical features; supplying both keys for the same
    primitive raises a conflict error."""
    options1 = {"mean": {"include_dataframes": ["customers"]}}
    options2 = {Mean: {"include_dataframes": ["customers"]}}
    bad_options = {
        "mean": {"include_dataframes": ["customers"]},
        Mean: {"ignore_dataframes": ["customers"]},
    }
    conflicting_error_text = "Multiple options found for primitive mean"
    primitives = [["mean"], [Mean]]
    options = [options1, options2]
    features = []
    # 2 primitive spellings x 2 option spellings -> 4 feature sets, all equal
    for primitive in primitives:
        with pytest.raises(KeyError, match=conflicting_error_text):
            DeepFeatureSynthesis(
                target_dataframe_name="cohorts",
                entityset=es,
                agg_primitives=primitive,
                trans_primitives=[],
                primitive_options=bad_options,
            )
        for option in options:
            dfs_obj = DeepFeatureSynthesis(
                target_dataframe_name="cohorts",
                entityset=es,
                agg_primitives=primitive,
                trans_primitives=[],
                primitive_options=option,
            )
            features.append(set(dfs_obj.build_features()))
    for f in features[0]:
        deps = f.get_dependencies(deep=True)
        df_names = [d.dataframe_name for d in deps]
        if isinstance(f.primitive, Mean):
            assert all(df_name == "customers" for df_name in df_names)
    assert features[0] == features[1] == features[2] == features[3]
def test_primitive_options_instantiated_primitive(es):
    """Options keyed by a primitive instance apply only to that instance; a
    generic class-name option for the same primitive triggers a warning and is
    not applied to the instance."""
    warning_msg = (
        "Options present for primitive instance and generic "
        "primitive class \\(mean\\), primitive instance will not use generic "
        "options"
    )
    skipna_mean = Mean(skipna=False)
    options = {
        skipna_mean: {"include_dataframes": ["stores"]},
        "mean": {"ignore_dataframes": ["stores"]},
    }
    with pytest.warns(UserWarning, match=warning_msg):
        dfs_obj = DeepFeatureSynthesis(
            target_dataframe_name="régions",
            entityset=es,
            agg_primitives=["mean", skipna_mean],
            trans_primitives=[],
            primitive_options=options,
        )
    features = dfs_obj.build_features()
    for f in features:
        deps = f.get_dependencies(deep=True)
        df_names = [d.dataframe_name for d in deps]
        # instance-level option wins for skipna_mean; generic option for the rest
        if f.primitive == skipna_mean:
            assert all(df_name == "stores" for df_name in df_names)
        elif isinstance(f.primitive, Mean):
            assert "stores" not in df_names
def test_primitive_options_commutative(es):
    """For a commutative primitive with per-input include_columns, exactly one
    feature is built per primitive, with inputs in option order rather than
    duplicated permutations."""
    class AddThree(TransformPrimitive):
        # three-input commutative test primitive: names itself "a + b + c"
        name = "add_three"
        input_types = [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(semantic_tags={"numeric"}),
        ]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        commutative = True
        compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
        def generate_name(self, base_feature_names):
            return "%s + %s + %s" % (
                base_feature_names[0],
                base_feature_names[1],
                base_feature_names[2],
            )
    options = {
        "add_numeric": [
            {"include_columns": {"log": ["value_2"]}},
            {"include_columns": {"log": ["value"]}},
        ],
        AddThree: [
            {"include_columns": {"log": ["value_2"]}},
            {"include_columns": {"log": ["value_many_nans"]}},
            {"include_columns": {"log": ["value"]}},
        ],
    }
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=es,
        agg_primitives=[],
        trans_primitives=[AddNumeric, AddThree],
        primitive_options=options,
        max_depth=1,
    )
    features = dfs_obj.build_features()
    add_numeric = [f for f in features if isinstance(f.primitive, AddNumeric)]
    assert len(add_numeric) == 1
    deps = add_numeric[0].get_dependencies(deep=True)
    assert deps[0].get_name() == "value_2" and deps[1].get_name() == "value"
    add_three = [f for f in features if isinstance(f.primitive, AddThree)]
    assert len(add_three) == 1
    deps = add_three[0].get_dependencies(deep=True)
    assert (
        deps[0].get_name() == "value_2"
        and deps[1].get_name() == "value_many_nans"
        and deps[2].get_name() == "value"
    )
def test_primitive_options_include_over_exclude(es):
    """When a dataframe appears in both ignore_dataframes and
    include_dataframes for a primitive, the include wins."""
    conflicting_options = {
        "mean": {"ignore_dataframes": ["stores"], "include_dataframes": ["stores"]},
    }
    synth = DeepFeatureSynthesis(
        target_dataframe_name="régions",
        entityset=es,
        agg_primitives=["mean"],
        trans_primitives=[],
        primitive_options=conflicting_options,
    )
    built = synth.build_features()
    mean_features = [f for f in built if isinstance(f.primitive, Mean)]
    # at least one Mean feature must exist, and each must depend on 'stores'
    assert mean_features
    for feature in mean_features:
        dep_dataframes = [d.dataframe_name for d in feature.get_dependencies(deep=True)]
        assert "stores" in dep_dataframes
def test_primitive_ordering():
    """Reversing the order of every primitive/seed-feature list must change
    neither which features DFS creates nor the order they come back in."""
    es = make_ecommerce_entityset()
    trans_prims = [AddNumeric, Absolute, "divide_numeric", NotEqual, "is_null"]
    groupby_trans_prim = ["cum_mean", CumMin, CumSum]
    agg_prims = [NMostCommon(n=3), Sum, Mean, Mean(skipna=False), "min", "max"]
    where_prims = ["count", Sum]
    seed_num_chars = Feature(
        es["customers"].ww["favorite_quote"],
        primitive=NumCharacters,
    )
    seed_is_null = Feature(es["customers"].ww["age"], primitive=IsNull)
    seed_features = [seed_num_chars, seed_is_null]
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        trans_primitives=trans_prims,
        groupby_trans_primitives=groupby_trans_prim,
        agg_primitives=agg_prims,
        where_primitives=where_prims,
        seed_features=seed_features,
        max_features=-1,
        max_depth=2,
    )
    features1 = dfs_obj.build_features()
    # reverse every input list in place, then rebuild
    trans_prims.reverse()
    groupby_trans_prim.reverse()
    agg_prims.reverse()
    where_prims.reverse()
    seed_features.reverse()
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="customers",
        entityset=es,
        trans_primitives=trans_prims,
        groupby_trans_primitives=groupby_trans_prim,
        agg_primitives=agg_prims,
        where_primitives=where_prims,
        seed_features=seed_features,
        max_features=-1,
        max_depth=2,
    )
    features2 = dfs_obj.build_features()
    assert len(features1) == len(features2)
    for i in range(len(features2)):
        assert features1[i].unique_name() == features2[i].unique_name()
def test_no_transform_stacking():
    """Transform primitives must not stack on other transforms: the expected
    feature list contains no nested -(-(..)) or chained add/negate names."""
    df1 = pd.DataFrame({"id": [0, 1, 2, 3], "A": [0, 1, 2, 3]})
    df2 = pd.DataFrame({"first_id": [0, 1, 1, 3], "B": [99, 88, 77, 66]})
    dataframes = {"first": (df1, "id"), "second": (df2, "index")}
    relationships = [("first", "id", "second", "first_id")]
    es = EntitySet("data", dataframes, relationships)
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="second",
        entityset=es,
        trans_primitives=["negate", "add_numeric"],
        agg_primitives=["sum"],
        max_depth=4,
    )
    feature_defs = dfs_obj.build_features()
    # exact expected feature names; transforms only stack across aggregations
    expected = [
        "first_id",
        "B",
        "-(B)",
        "first.A",
        "first.SUM(second.B)",
        "first.-(A)",
        "B + first.A",
        "first.SUM(second.-(B))",
        "first.A + SUM(second.B)",
        "first.-(SUM(second.B))",
        "B + first.SUM(second.B)",
        "first.A + SUM(second.-(B))",
        "first.SUM(second.-(B)) + SUM(second.B)",
        "first.-(SUM(second.-(B)))",
        "B + first.SUM(second.-(B))",
    ]
    assert len(feature_defs) == len(expected)
    for feature_name in expected:
        assert feature_with_name(feature_defs, feature_name)
def test_builds_seed_features_on_foreign_key_col(es):
    """A seed feature defined on a foreign-key column is kept by DFS even
    though DFS would not build features on that column itself."""
    negated_customer_id = Feature(es["sessions"].ww["customer_id"], primitive=Negate)
    synth = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[],
        trans_primitives=[],
        max_depth=2,
        seed_features=[negated_customer_id],
    )
    built = synth.build_features()
    assert feature_with_name(built, "-(customer_id)")
def test_does_not_build_features_on_last_time_index_col(es):
    """The internal last-time-index column must never leak into feature names."""
    es.add_last_time_indexes()
    built = DeepFeatureSynthesis(target_dataframe_name="log", entityset=es).build_features()
    assert all(LTI_COLUMN_NAME not in feature.get_name() for feature in built)
def test_builds_features_using_all_input_types(es):
    """Boolean and BooleanNullable columns are both valid inputs for
    transform, groupby-transform, and aggregation primitives."""
    if es.dataframe_type == Library.SPARK:
        pytest.skip("NumTrue primitive not compatible with Spark")
    # add a BooleanNullable twin of the existing Boolean 'purchased' column
    new_log_df = es["log"]
    new_log_df.ww["purchased_nullable"] = es["log"]["purchased"]
    new_log_df.ww.set_types(logical_types={"purchased_nullable": "boolean_nullable"})
    es.replace_dataframe("log", new_log_df)
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=es,
        trans_primitives=[Not],
        max_depth=1,
    )
    trans_features = dfs_obj.build_features()
    assert feature_with_name(trans_features, "NOT(purchased)")
    assert feature_with_name(trans_features, "NOT(purchased_nullable)")
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=es,
        groupby_trans_primitives=[Not],
        max_depth=1,
    )
    groupby_trans_features = dfs_obj.build_features()
    assert feature_with_name(groupby_trans_features, "NOT(purchased) by session_id")
    assert feature_with_name(
        groupby_trans_features,
        "NOT(purchased_nullable) by session_id",
    )
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        trans_primitives=[],
        agg_primitives=[NumTrue],
    )
    agg_features = dfs_obj.build_features()
    assert feature_with_name(agg_features, "NUM_TRUE(log.purchased)")
    assert feature_with_name(agg_features, "NUM_TRUE(log.purchased_nullable)")
def test_make_groupby_features_with_depth_none(pd_es):
    """max_depth=-1 is converted to None internally; groupby transform
    features must still be generated along that code path."""
    synth = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[],
        groupby_trans_primitives=["cum_sum"],
        max_depth=-1,
    )
    built = synth.build_features()
    assert feature_with_name(built, "CUM_SUM(value) by session_id")
def test_check_stacking_when_building_transform_features(pd_es):
    """A primitive listed in ``base_of_exclude`` must not have that transform
    stacked on top of it, while the unrestricted sibling primitive does."""
    class NewMean(Mean):
        name = "NEW_MEAN"
        base_of_exclude = [Absolute]
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[NewMean, "mean"],
        trans_primitives=["absolute"],
        max_depth=-1,
    )
    features = dfs_obj.build_features()
    assert number_of_features_with_name_like(features, "ABSOLUTE(MEAN") > 0
    assert number_of_features_with_name_like(features, "ABSOLUTE(NEW_MEAN") == 0
def test_check_stacking_when_building_groupby_features(pd_es):
    """``base_of_exclude`` is honored when stacking groupby transform
    primitives on aggregation outputs."""
    class NewMean(Mean):
        name = "NEW_MEAN"
        base_of_exclude = [CumSum]
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=[NewMean, "mean"],
        groupby_trans_primitives=["cum_sum"],
        max_depth=5,
    )
    features = dfs_obj.build_features()
    assert number_of_features_with_name_like(features, "CUM_SUM(MEAN") > 0
    assert number_of_features_with_name_like(features, "CUM_SUM(NEW_MEAN") == 0
def test_check_stacking_when_building_agg_features(pd_es):
    """``base_of_exclude`` on a transform primitive prevents aggregations
    from being built on top of that transform's output."""
    class NewAbsolute(Absolute):
        name = "NEW_ABSOLUTE"
        base_of_exclude = [Mean]
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="log",
        entityset=pd_es,
        agg_primitives=["mean"],
        trans_primitives=[NewAbsolute, "absolute"],
        max_depth=5,
    )
    features = dfs_obj.build_features()
    assert number_of_features_with_name_like(features, "MEAN(log.ABSOLUTE") > 0
    assert number_of_features_with_name_like(features, "MEAN(log.NEW_ABSOLUTE") == 0
| 69,688 | 32.488227 | 148 | py |
featuretools | featuretools-main/featuretools/tests/synthesis/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/synthesis/test_get_valid_primitives.py | import pytest
from woodwork.column_schema import ColumnSchema
from featuretools.primitives import (
AggregationPrimitive,
Count,
Hour,
IsIn,
Not,
TimeSincePrevious,
TransformPrimitive,
)
from featuretools.synthesis.get_valid_primitives import get_valid_primitives
from featuretools.utils.gen_utils import Library
def test_get_valid_primitives_selected_primitives(es):
    """Restricting to [Hour, Count] returns each in its proper category;
    a target with no valid candidates yields empty lists."""
    aggs, transforms = get_valid_primitives(
        es,
        "log",
        selected_primitives=[Hour, Count],
    )
    assert set(aggs) == {Count}
    assert set(transforms) == {Hour}
    aggs, transforms = get_valid_primitives(
        es,
        "products",
        selected_primitives=[Hour],
        max_depth=1,
    )
    assert not aggs
    assert not transforms
def test_get_valid_primitives_selected_primitives_strings(es):
    """String primitive names behave the same as primitive classes."""
    aggs, transforms = get_valid_primitives(
        es,
        "log",
        selected_primitives=["hour", "count"],
    )
    assert set(aggs) == {Count}
    assert set(transforms) == {Hour}
    aggs, transforms = get_valid_primitives(
        es,
        "products",
        selected_primitives=["hour"],
        max_depth=1,
    )
    assert not aggs
    assert not transforms
def test_invalid_primitive(es):
    """Unknown primitive names and non-primitive objects each raise a
    descriptive ValueError."""
    with pytest.raises(ValueError, match="'foobar' is not a recognized primitive name"):
        get_valid_primitives(
            es,
            target_dataframe_name="log",
            selected_primitives=["foobar"],
        )
    bad_type_msg = (
        "Selected primitive <enum 'Library'> "
        "is not an AggregationPrimitive, TransformPrimitive, or str"
    )
    with pytest.raises(ValueError, match=bad_type_msg):
        get_valid_primitives(
            es,
            target_dataframe_name="log",
            selected_primitives=[Library],
        )
def test_primitive_compatibility(es):
    """TimeSincePrevious is only reported valid for pandas-backed entitysets."""
    _, transforms = get_valid_primitives(
        es,
        "customers",
        selected_primitives=[TimeSincePrevious],
    )
    expected_count = 1 if es.dataframe_type == Library.PANDAS else 0
    assert len(transforms) == expected_count
def test_get_valid_primitives_custom_primitives(pd_es):
    """Custom primitives are not returned unless registered, and passing a
    custom primitive by name (rather than class) raises."""
    class ThreeMostCommonCat(AggregationPrimitive):
        name = "n_most_common_categorical"
        input_types = [ColumnSchema(semantic_tags={"category"})]
        return_type = ColumnSchema(semantic_tags={"category"})
        number_output_features = 3
    class AddThree(TransformPrimitive):
        name = "add_three"
        input_types = [
            ColumnSchema(semantic_tags="numeric"),
            ColumnSchema(semantic_tags="numeric"),
            ColumnSchema(semantic_tags="numeric"),
        ]
        return_type = ColumnSchema(semantic_tags="numeric")
        commutative = True
        compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    agg_prims, trans_prims = get_valid_primitives(pd_es, "log")
    assert ThreeMostCommonCat not in agg_prims
    assert AddThree not in trans_prims
    # by class is fine; by (unregistered) name must raise
    with pytest.raises(
        ValueError,
        match="'add_three' is not a recognized primitive name",
    ):
        agg_prims, trans_prims = get_valid_primitives(
            pd_es,
            "log",
            2,
            [ThreeMostCommonCat, "add_three"],
        )
def test_get_valid_primitives_all_primitives(es):
    """With no selection, standard primitives show up in their categories."""
    aggs, transforms = get_valid_primitives(es, "customers")
    assert Count in aggs
    assert Hour in transforms
def test_get_valid_primitives_single_table(transform_es):
    """A single-dataframe entityset warns about max_depth and returns no
    aggregation primitives."""
    expected_warning = "Only one dataframe in entityset, changing max_depth to 1 since deeper features cannot be created"
    with pytest.warns(UserWarning, match=expected_warning):
        aggs, transforms = get_valid_primitives(transform_es, "first")
    assert not aggs
    assert IsIn in transforms
def test_get_valid_primitives_with_dfs_kwargs(es):
    """Extra DFS keyword arguments (ignore_columns, ignore_dataframes) are
    forwarded and further restrict which primitives come back valid."""
    agg_prims, trans_prims = get_valid_primitives(
        es,
        "customers",
        selected_primitives=[Hour, Count, Not],
    )
    assert set(agg_prims) == set([Count])
    assert set(trans_prims) == set([Hour, Not])
    # Can use other dfs parameters and they get applied
    agg_prims, trans_prims = get_valid_primitives(
        es,
        "customers",
        selected_primitives=[Hour, Count, Not],
        ignore_columns={"customers": ["loves_ice_cream"]},
    )
    assert set(agg_prims) == set([Count])
    assert set(trans_prims) == set([Hour])
    agg_prims, trans_prims = get_valid_primitives(
        es,
        "products",
        selected_primitives=[Hour, Count],
        ignore_dataframes=["log"],
    )
    assert set(agg_prims) == set()
    assert set(trans_prims) == set()
| 4,778 | 28.140244 | 108 | py |
featuretools | featuretools-main/featuretools/tests/profiling/dfs_profile.py | """
dfs_profile.py
Helper module to allow profiling of the dfs operations. At some point we may
want to use pstats to output the results to a log, but I'm anticipating that
LookingGlass will provide the performance data we want.
Notes:
- output currently goes to the root directory and is in dfs_profile.stats
- *.stats is gitignored
- it loads the retail demo entityset and targets its customers dataframe
- max_depth > 2 is very slow (currently)
- stats output can be viewed online with https://nejc.saje.info/pstats-viewer.html
"""
import cProfile
from pathlib import Path
import featuretools as ft
import featuretools.demo as demo
from featuretools.synthesis.dfs import dfs
# Profile feature *definition* only (features_only=True): no feature matrix
# is computed, so this measures DFS enumeration, not calculation.
es = demo.load_retail()
all_aggs = ft.primitives.get_aggregation_primitives()
all_trans = ft.primitives.get_transform_primitives()
profiler = cProfile.Profile(builtins=False)
profiler.enable()
feature_defs = dfs(
    entityset=es,
    target_dataframe_name="customers",
    trans_primitives=all_trans,
    agg_primitives=all_aggs,
    max_depth=2,
    features_only=True,
)
profiler.disable()
# stats land in the current working directory
profiler.dump_stats(Path.cwd() / "dfs_profile.stats")
| 1,118 | 27.692308 | 84 | py |
featuretools | featuretools-main/featuretools/tests/profiling/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/utils_tests/test_utils_info.py | import os
import pytest
from featuretools import __version__
from featuretools.utils import (
get_featuretools_root,
get_installed_packages,
get_sys_info,
show_info,
)
@pytest.fixture
def this_dir():
    """Absolute path of the directory containing this test module."""
    return os.path.dirname(os.path.abspath(__file__))
def test_show_info(capsys):
    """show_info prints the version, install directory, and system sections."""
    show_info()
    printed = capsys.readouterr().out
    for expected_fragment in (
        "Featuretools version",
        "Featuretools installation directory:",
        __version__,
        "SYSTEM INFO",
    ):
        assert expected_fragment in printed
def test_sys_info():
    """get_sys_info reports at least the standard platform/locale keys."""
    reported_keys = [key for key, _ in get_sys_info()]
    expected_keys = {
        "python",
        "python-bits",
        "OS",
        "OS-release",
        "machine",
        "processor",
        "byteorder",
        "LC_ALL",
        "LANG",
        "LOCALE",
    }
    assert expected_keys.issubset(reported_keys)
def test_installed_packages():
    """Core runtime requirements appear among the installed packages."""
    def normalize(name):
        # Per PEP 426, package names compare case-insensitively and
        # '-' / '_' are interchangeable.
        return name.lower().replace("-", "_")
    installed = {normalize(pkg) for pkg in get_installed_packages()}
    for requirement in ("pandas", "numpy", "tqdm", "cloudpickle", "psutil"):
        assert normalize(requirement) in installed
def test_get_featuretools_root(this_dir):
    """The reported root is two directory levels above this test module."""
    expected_root = os.path.abspath(os.path.join(this_dir, os.pardir, os.pardir))
    assert get_featuretools_root() == expected_root
| 1,514 | 21.954545 | 76 | py |
featuretools | featuretools-main/featuretools/tests/utils_tests/test_config.py | import logging
import os
from featuretools.config_init import initialize_logging
# Environment variables that control featuretools logger levels, mapped to
# the (non-default) values these tests temporarily set them to.
logging_env_vars = {
    "FEATURETOOLS_LOG_LEVEL": "debug",
    "FEATURETOOLS_ES_LOG_LEVEL": "critical",
    "FEATURETOOLS_BACKEND_LOG_LEVEL": "error",
}
def test_logging_defaults():
    """All featuretools loggers default to INFO when the env vars are unset.

    Fix: restoration of the environment now happens in a ``finally`` block,
    so a failing assertion can no longer leak the mutated environment into
    subsequent tests.
    """
    # Remove the logging env vars, remembering any pre-existing values.
    old_env_vars = {}
    for env_var in logging_env_vars:
        old_env_vars[env_var] = os.environ.pop(env_var, None)
    try:
        initialize_logging()
        main_logger = logging.getLogger("featuretools")
        assert main_logger.getEffectiveLevel() == logging.INFO
        es_logger = logging.getLogger("featuretools.entityset")
        assert es_logger.getEffectiveLevel() == logging.INFO
        backend_logger = logging.getLogger("featuretools.computation_backend")
        assert backend_logger.getEffectiveLevel() == logging.INFO
    finally:
        # Put back only the vars that existed before the test ran.
        for env_var, value in old_env_vars.items():
            if value is not None:
                os.environ[env_var] = value
def test_logging_set_via_env():
    """Env vars override the default logger levels (debug/critical/error).

    Fixes two environment leaks in the original: (1) vars that were
    originally UNSET were left set after the test because the restore loop
    only re-assigned non-None values; (2) there was no ``finally``, so a
    failing assertion skipped restoration entirely.
    """
    old_env_vars = {}
    for env_var, value in logging_env_vars.items():
        old_env_vars[env_var] = os.environ.get(env_var)
        os.environ[env_var] = value
    try:
        initialize_logging()
        main_logger = logging.getLogger("featuretools")
        assert main_logger.getEffectiveLevel() == logging.DEBUG
        es_logger = logging.getLogger("featuretools.entityset")
        assert es_logger.getEffectiveLevel() == logging.CRITICAL
        backend_logger = logging.getLogger("featuretools.computation_backend")
        assert backend_logger.getEffectiveLevel() == logging.ERROR
    finally:
        for env_var, value in old_env_vars.items():
            if value is None:
                # Var was not set before the test: remove it again.
                os.environ.pop(env_var, None)
            else:
                os.environ[env_var] = value
| 1,703 | 33.08 | 74 | py |
featuretools | featuretools-main/featuretools/tests/utils_tests/test_gen_utils.py | import pandas as pd
import pytest
from woodwork import list_logical_types as ww_list_logical_types
from woodwork import list_semantic_tags as ww_list_semantic_tags
from featuretools import list_logical_types, list_semantic_tags
from featuretools.utils.gen_utils import (
camel_and_title_to_snake,
import_or_none,
import_or_raise,
is_instance,
)
# Optional dependency: None when Dask isn't installed (Dask tests skip via skipif).
dd = import_or_none("dask.dataframe")
def test_import_or_raise_errors():
    """A nonexistent module raises ImportError carrying the supplied message."""
    missing_module = "_featuretools"
    with pytest.raises(ImportError, match="error message"):
        import_or_raise(missing_module, "error message")
def test_import_or_raise_imports():
    """An importable module is returned and immediately usable."""
    math_mod = import_or_raise("math", "error message")
    assert math_mod.ceil(0.1) == 1
def test_import_or_none():
    """Returns the module when importable, otherwise None."""
    assert import_or_none("math").ceil(0.1) == 1
    assert import_or_none("_featuretools") is None
@pytest.fixture
def df():
    """Small pandas DataFrame with a single integer 'id' column (0..4)."""
    return pd.DataFrame({"id": list(range(5))})
def test_is_instance_single_module(df):
    """is_instance accepts a single module and a single class name."""
    assert is_instance(df, pd, "DataFrame")
@pytest.mark.skipif("not dd")
def test_is_instance_multiple_modules(df):
    """Module and classname tuples pair positionally in is_instance."""
    dask_df = dd.from_pandas(df, npartitions=2)
    assert is_instance(df, (dd, pd), "DataFrame")
    assert is_instance(dask_df, (dd, pd), "DataFrame")
    assert is_instance(dask_df["id"], (dd, pd), ("Series", "DataFrame"))
    # swapped classnames no longer match their paired modules
    assert not is_instance(dask_df["id"], (dd, pd), ("DataFrame", "Series"))
def test_is_instance_errors_mismatch():
    """Mismatched module/classname counts raise a ValueError."""
    expected = "Number of modules does not match number of classnames"
    with pytest.raises(ValueError, match=expected):
        is_instance("abc", pd, ("DataFrame", "Series"))
def test_is_instance_none_module(df):
    """None entries in the module tuple are skipped rather than raising."""
    assert not is_instance(df, None, "DataFrame")
    assert is_instance(df, (None, pd), "DataFrame")
    assert is_instance(df, (None, pd), ("Series", "DataFrame"))
def test_list_logical_types():
    """featuretools re-exports woodwork's logical-type listing unchanged."""
    assert list_logical_types().equals(ww_list_logical_types())
def test_list_semantic_tags():
    """featuretools re-exports woodwork's semantic-tag listing unchanged."""
    assert list_semantic_tags().equals(ww_list_semantic_tags())
def test_camel_and_title_to_snake():
    """Camel/title case converts to snake_case; digit runs become their own
    tokens, and already-snake input passes through unchanged."""
    expected_by_input = {
        "Top3Words": "top_3_words",
        "top3Words": "top_3_words",
        "Top100Words": "top_100_words",
        "top100Words": "top_100_words",
        "Top41": "top_41",
        "top41": "top_41",
        "41TopWords": "41_top_words",
        "TopThreeWords": "top_three_words",
        "topThreeWords": "top_three_words",
        "top_three_words": "top_three_words",
        "over_65": "over_65",
        "65_and_over": "65_and_over",
        "USDValue": "usd_value",
    }
    for raw, expected in expected_by_input.items():
        assert camel_and_title_to_snake(raw) == expected
| 3,014 | 32.131868 | 75 | py |
featuretools | featuretools-main/featuretools/tests/utils_tests/test_entry_point.py | import pandas as pd
import pytest
from featuretools import dfs
@pytest.fixture
def pd_entry_point_dfs():
    """Pandas (cards, transactions) frames for entry-point detection tests."""
    cards = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
    transactions = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            "card_id": [1, 2, 1, 3, 4, 5],
            "transaction_time": [10, 12, 13, 20, 21, 20],
            "fraud": [True, False, True, False, True, True],
        },
    )
    return cards, transactions
@pytest.fixture
def dask_entry_point_dfs(pd_entry_point_dfs):
    """Dask versions of the pandas fixture frames; skips when Dask is absent."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    cards, transactions = pd_entry_point_dfs
    return (
        dd.from_pandas(cards, npartitions=2),
        dd.from_pandas(transactions, npartitions=2),
    )
@pytest.fixture(params=["pd_entry_point_dfs", "dask_entry_point_dfs"])
def entry_points_dfs(request):
    # Parametrized indirection: yields the pandas pair, then the Dask pair.
    return request.getfixturevalue(request.param)
class MockEntryPoint(object):
    """Fake entry point that records how the instrumented call went.

    Stands in for a real ``pkg_resources`` entry point: ``load()`` and
    calling the instance both return ``self``, so the entry-point hooks
    land on this object, which simply stores whatever it is given.
    """

    def on_call(self, kwargs):
        # Keyword arguments the instrumented function was invoked with.
        self.kwargs = kwargs

    def on_error(self, error, runtime):
        # Exception raised by the instrumented function.
        self.error = error

    def on_return(self, return_value, runtime):
        # Value returned by the instrumented function.
        self.return_value = return_value

    def load(self):
        return self

    def __call__(self):
        return self
class MockPkgResources(object):
    """Stub for the ``pkg_resources`` module exposing one fixed entry point."""

    def __init__(self, entry_point):
        self.entry_point = entry_point

    def iter_entry_points(self, name):
        # The real API filters by group name; the stub ignores it.
        return [self.entry_point]
def test_entry_point(es, monkeypatch):
    """The dfs entry-point hook receives the call kwargs and return value."""
    entry_point = MockEntryPoint()
    # overrides a module used in the entry_point decorator for dfs
    # so the decorator will use this mock entry point
    monkeypatch.setitem(
        dfs.__globals__["entry_point"].__globals__,
        "pkg_resources",
        MockPkgResources(entry_point),
    )
    fm, fl = dfs(entityset=es, target_dataframe_name="customers")
    assert "entityset" in entry_point.kwargs.keys()
    assert "target_dataframe_name" in entry_point.kwargs.keys()
    assert (fm, fl) == entry_point.return_value
def test_entry_point_error(es, monkeypatch):
    """Exceptions raised inside dfs are forwarded to the entry point's on_error."""
    entry_point = MockEntryPoint()
    monkeypatch.setitem(
        dfs.__globals__["entry_point"].__globals__,
        "pkg_resources",
        MockPkgResources(entry_point),
    )
    with pytest.raises(KeyError):
        dfs(entityset=es, target_dataframe_name="missing_dataframe")
    assert isinstance(entry_point.error, KeyError)
def test_entry_point_detect_arg(monkeypatch, entry_points_dfs):
    """dfs detects positional dataframes/relationships args and forwards the
    corresponding keyword names to the entry point."""
    cards_df = entry_points_dfs[0]
    transactions_df = entry_points_dfs[1]
    # NOTE(review): the fixture-provided frames above are immediately shadowed
    # by fresh pandas frames below, so the pd/dask parametrization of
    # entry_points_dfs is effectively disabled and only pandas is exercised.
    # Removing the shadowing may require woodwork typing for the Dask path —
    # confirm before changing.
    cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
    transactions_df = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            "card_id": [1, 2, 1, 3, 4, 5],
            "transaction_time": [10, 12, 13, 20, 21, 20],
            "fraud": [True, False, True, False, True, True],
        },
    )
    dataframes = {
        "cards": (cards_df, "id"),
        "transactions": (transactions_df, "id", "transaction_time"),
    }
    relationships = [("cards", "id", "transactions", "card_id")]
    entry_point = MockEntryPoint()
    monkeypatch.setitem(
        dfs.__globals__["entry_point"].__globals__,
        "pkg_resources",
        MockPkgResources(entry_point),
    )
    fm, fl = dfs(dataframes, relationships, target_dataframe_name="cards")
    assert "dataframes" in entry_point.kwargs.keys()
    assert "relationships" in entry_point.kwargs.keys()
    assert "target_dataframe_name" in entry_point.kwargs.keys()
| 3,506 | 29.763158 | 85 | py |
featuretools | featuretools-main/featuretools/tests/utils_tests/test_recommend_primitives.py | import logging
import pandas as pd
import pytest
from woodwork.logical_types import NaturalLanguage
from woodwork.table_schema import ColumnSchema
from featuretools import EntitySet
from featuretools.primitives import Day, TransformPrimitive
from featuretools.utils.recommend_primitives import (
DEFAULT_EXCLUDED_PRIMITIVES,
TIME_SERIES_PRIMITIVES,
_recommend_non_numeric_primitives,
_recommend_skew_numeric_primitives,
get_recommended_primitives,
)
@pytest.fixture
def moderate_right_skewed_df():
    """Single-column frame whose values are moderately right skewed."""
    samples = [2, 3, 4, 4, 4, 5, 5, 7, 9, 11, 12, 13, 15]
    return pd.DataFrame({"moderately right skewed": samples})
@pytest.fixture
def heavy_right_skewed_df():
    """Single-column frame whose values are heavily right skewed."""
    samples = [1, 1, 1, 1, 2, 2, 3, 3, 4, 5, 9, 11, 13]
    return pd.DataFrame({"heavy right skewed": samples})
@pytest.fixture
def left_skewed_df():
    """Single-column frame whose values are left skewed."""
    samples = [2, 3, 4, 5, 7, 9, 11, 11, 11, 12, 12, 12, 13, 15]
    return pd.DataFrame({"left skewed": samples})
@pytest.fixture
def skewed_df_zeros():
    """Skewed column containing zeros and a negative value."""
    samples = [-1, 0, 0, 1, 2, 2, 3, 4, 5, 7, 9]
    return pd.DataFrame({"zeros": samples})
@pytest.fixture
def normal_df():
    """Roughly symmetric (non-skewed) single-column frame."""
    samples = [2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 9, 10, 11]
    return pd.DataFrame({"normal": samples})
@pytest.fixture
def right_skew_moderate_and_heavy_df(moderate_right_skewed_df, heavy_right_skewed_df):
    """Two right-skewed columns (moderate and heavy) side by side."""
    frames = [moderate_right_skewed_df, heavy_right_skewed_df]
    return pd.concat(frames, axis=1)
@pytest.fixture
def es_with_skewed_dfs(
    moderate_right_skewed_df,
    heavy_right_skewed_df,
    left_skewed_df,
    skewed_df_zeros,
    normal_df,
    right_skew_moderate_and_heavy_df,
):
    """EntitySet containing each skew-shaped frame as its own dataframe."""
    es = EntitySet()
    named_frames = [
        ("moderate_right_skewed_df", moderate_right_skewed_df),
        ("heavy_right_skewed_df", heavy_right_skewed_df),
        ("left_skewed_df", left_skewed_df),
        ("skewed_df_zeros", skewed_df_zeros),
        ("normal_df", normal_df),
        ("right_skew_moderate_and_heavy_df", right_skew_moderate_and_heavy_df),
    ]
    for dataframe_name, frame in named_frames:
        es.add_dataframe(frame, dataframe_name, "dataframe")
    return es
def test_recommend_skew_numeric_primitives(es_with_skewed_dfs):
    """Skew-correcting primitives are only recommended for right-skewed
    columns without non-positive values: square_root for moderate skew,
    natural_logarithm for heavy skew, nothing otherwise."""
    skew_pair = {"square_root", "natural_logarithm"}
    extended_candidates = [
        "cosine",
        "square_root",
        "natural_logarithm",
        "sine",
    ]
    # (dataframe name, candidate primitives, expected recommendation)
    cases = [
        ("moderate_right_skewed_df", extended_candidates, {"square_root"}),
        ("heavy_right_skewed_df", skew_pair, {"natural_logarithm"}),
        ("left_skewed_df", skew_pair, set()),
        ("skewed_df_zeros", skew_pair, set()),
        ("normal_df", skew_pair, set()),
        ("right_skew_moderate_and_heavy_df", skew_pair, skew_pair),
    ]
    for dataframe_name, candidates, expected in cases:
        recommended = _recommend_skew_numeric_primitives(
            es_with_skewed_dfs,
            dataframe_name,
            candidates,
        )
        assert recommended == expected
def test_recommend_non_numeric_primitives(make_es):
    """Only the primitives applicable to the customers table's non-numeric
    columns are recommended from the candidate list."""
    customers_es = EntitySet()
    customers_es.add_dataframe(make_es["customers"])
    candidates = [
        "day",
        "num_characters",
        "natural_logarithm",
        "sine",
    ]
    recommended = _recommend_non_numeric_primitives(
        customers_es,
        "customers",
        candidates,
    )
    assert recommended == {"day", "num_characters"}
def test_recommend_skew_numeric_primitives_exception(make_es, caplog):
    """A primitive that raises during evaluation is skipped (not recommended)
    and the failure is logged."""
    class MockExceptionPrimitive(TransformPrimitive):
        """Mock primitive whose function always raises, to exercise error logging."""
        name = "mock_primitive_with_exception"
        input_types = [ColumnSchema(logical_type=NaturalLanguage)]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def get_function(self):
            def make_exception(column):
                raise Exception("this primitive has an exception")
            return make_exception
    ecom_es_customers = EntitySet()
    ecom_es_customers.add_dataframe(make_es["customers"])
    valid_primitives = [MockExceptionPrimitive(), Day()]
    # Enable propagation so caplog (attached to the root logger) captures the
    # featuretools logger's message; restored to False after the call.
    logger = logging.getLogger("featuretools")
    logger.propagate = True
    actual_recommendations = _recommend_non_numeric_primitives(
        ecom_es_customers,
        "customers",
        valid_primitives,
    )
    logger.propagate = False
    # Only Day survives; the raising primitive must not be recommended.
    expected_recommendations = set(["day"])
    assert expected_recommendations == actual_recommendations
    assert (
        "Exception with feature MOCK_PRIMITIVE_WITH_EXCEPTION(favorite_quote) with primitive mock_primitive_with_exception: this primitive has an exception"
        in caplog.text
    )
def test_get_recommended_primitives_time_series(make_es):
    """With a time index set and time-series mode on, every time-series
    primitive appears in the recommendations."""
    log_es = EntitySet()
    log_es.add_dataframe(make_es["log"])
    log_es["log"].ww.set_time_index("datetime")
    recommended = get_recommended_primitives(
        log_es,
        True,
    )
    assert all(ts_prim in recommended for ts_prim in TIME_SERIES_PRIMITIVES)
def test_get_recommended_primitives(make_es):
    """With time-series mode off, the customers table yields the expected
    datetime/text/email/numeric recommendations and no time-series primitives."""
    ecom_es_customers = EntitySet()
    ecom_es_customers.add_dataframe(make_es["customers"])
    actual_recommendations = get_recommended_primitives(
        ecom_es_customers,
        False,
    )
    # Subset check only: recommendations may contain more than these.
    expected_recommendations = [
        "day",
        "num_characters",
        "natural_logarithm",
        "punctuation_count",
        "mean_characters_per_word",
        "is_weekend",
        "whitespace_count",
        "median_word_length",
        "month",
        "total_word_length",
        "weekday",
        "day_of_year",
        "week",
        "quarter",
        "email_address_to_domain",
        "number_of_common_words",
        "num_words",
        "num_unique_separators",
        "age",
        "year",
        "is_leap_year",
        "days_in_month",
        "is_free_email_domain",
        "number_of_unique_words",
    ]
    for prim in expected_recommendations:
        assert prim in actual_recommendations
    # Time-series primitives must be absent when the flag is False.
    for ts_prim in TIME_SERIES_PRIMITIVES:
        assert ts_prim not in actual_recommendations
def test_get_recommended_primitives_exclude(make_es):
    """Primitives added to the exclusion list never appear in the output."""
    customers_es = EntitySet()
    customers_es.add_dataframe(make_es["customers"])
    extra_exclude = ["num_characters", "natural_logarithm"]
    recommended = get_recommended_primitives(
        customers_es,
        False,
        DEFAULT_EXCLUDED_PRIMITIVES + extra_exclude,
    )
    assert not any(excluded in recommended for excluded in extra_exclude)
def test_get_recommended_primitives_empty_es_error():
    """An EntitySet without any dataframe raises a descriptive IndexError."""
    with pytest.raises(
        IndexError,
        match="No DataFrame in EntitySet found. Please add a DataFrame.",
    ):
        get_recommended_primitives(EntitySet(), False)
def test_get_recommended_primitives_multi_table_es_error(make_es):
    """Multi-table EntitySets are rejected with a descriptive IndexError."""
    expected_message = (
        "Multi-table EntitySets are currently not supported. "
        "Please only use a single table EntitySet."
    )
    with pytest.raises(IndexError, match=expected_message):
        get_recommended_primitives(make_es, False)
| 7,844 | 28.055556 | 156 | py |
featuretools | featuretools-main/featuretools/tests/utils_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/utils_tests/test_description_utils.py | from featuretools.utils.description_utils import convert_to_nth
def test_first():
    """Numbers ending in 1 (except the teens) take the 'st' suffix."""
    for number in (1, 21, 131):
        assert convert_to_nth(number) == f"{number}st"
def test_second():
    """Numbers ending in 2 (except the teens) take the 'nd' suffix."""
    for number in (2, 22, 232):
        assert convert_to_nth(number) == f"{number}nd"
def test_third():
    """Numbers ending in 3 (except the teens) take the 'rd' suffix."""
    for number in (3, 23, 133):
        assert convert_to_nth(number) == f"{number}rd"
def test_nth():
    """Everything else — including 11/12/13 and their hundreds — takes 'th'."""
    for number in (4, 11, 12, 13, 111, 112, 113):
        assert convert_to_nth(number) == f"{number}th"
| 787 | 25.266667 | 63 | py |
featuretools | featuretools-main/featuretools/tests/utils_tests/test_trie.py | from featuretools.utils import Trie
def test_get_node():
    """get_node returns a live sub-trie: reads and writes through it are
    visible from the root."""
    trie = Trie(default=lambda: "default")
    trie.get_node([1, 2, 3]).value = "123"
    trie.get_node([1, 2, 4]).value = "124"
    subtrie = trie.get_node([1, 2])
    assert subtrie.get_node([3]).value == "123"
    assert subtrie.get_node([4]).value == "124"
    subtrie.get_node([4, 5]).value = "1245"
    assert trie.get_node([1, 2, 4, 5]).value == "1245"
def test_setting_and_getting():
    """Unset nodes report the default; set values persist and can be updated."""
    trie = Trie(default=lambda: "default")
    assert trie.get_node([1, 2, 3]).value == "default"
    trie.get_node([1, 2, 3]).value = "123"
    trie.get_node([1, 2, 4]).value = "124"
    assert trie.get_node([1, 2, 3]).value == "123"
    assert trie.get_node([1, 2, 4]).value == "124"
    # Intermediate nodes created along the way keep the default value.
    assert trie.get_node([1]).value == "default"
    trie.get_node([1]).value = "1"
    assert trie.get_node([1]).value == "1"
    trie.get_node([1, 2, 3]).value = "updated"
    assert trie.get_node([1, 2, 3]).value == "updated"
def test_iteration():
    """Iteration yields (path, value) pairs in prefix order, including the
    default-valued intermediate nodes."""
    trie = Trie(default=lambda: "default", path_constructor=tuple)
    trie.get_node((1, 2, 3)).value = "123"
    trie.get_node((1, 2, 4)).value = "124"
    expected = [
        ((), "default"),
        ((1,), "default"),
        ((1, 2), "default"),
        ((1, 2, 3), "123"),
        ((1, 2, 4), "124"),
    ]
    for position, entry in enumerate(trie):
        assert entry == expected[position]
| 1,309 | 25.734694 | 63 | py |
featuretools | featuretools-main/featuretools/tests/utils_tests/test_time_utils.py | from datetime import datetime, timedelta
from itertools import chain
import numpy as np
import pandas as pd
import pytest
from featuretools.utils import convert_time_units, make_temporal_cutoffs
from featuretools.utils.time_utils import (
calculate_trend,
convert_datetime_to_floats,
convert_timedelta_to_floats,
)
def test_make_temporal_cutoffs():
    """make_temporal_cutoffs expands each (instance, cutoff) pair into a
    window of earlier cutoff times, via num_windows, window_size+start,
    or num_windows+start.

    Fix: ``actual_instances`` was an ``itertools.chain`` iterator, which the
    first comparison loop exhausted — the instance-id checks for the second
    and third result frames silently zipped against an empty iterator and
    asserted nothing. It is now a list so every comparison actually runs.
    """
    instance_ids = pd.Series(range(10))
    cutoffs = pd.date_range(start="1/2/2015", periods=10, freq="1d")
    # Case 1: num_windows only — two windows of 1h ending at each cutoff.
    temporal_cutoffs_by_nwindows = make_temporal_cutoffs(
        instance_ids,
        cutoffs,
        window_size="1h",
        num_windows=2,
    )
    assert temporal_cutoffs_by_nwindows.shape[0] == 20
    # Each instance id appears once per window; a list so it can be reused.
    actual_instances = list(chain.from_iterable([[i, i] for i in range(10)]))
    actual_times = [
        "1/1/2015 23:00:00",
        "1/2/2015 00:00:00",
        "1/2/2015 23:00:00",
        "1/3/2015 00:00:00",
        "1/3/2015 23:00:00",
        "1/4/2015 00:00:00",
        "1/4/2015 23:00:00",
        "1/5/2015 00:00:00",
        "1/5/2015 23:00:00",
        "1/6/2015 00:00:00",
        "1/6/2015 23:00:00",
        "1/7/2015 00:00:00",
        "1/7/2015 23:00:00",
        "1/8/2015 00:00:00",
        "1/8/2015 23:00:00",
        "1/9/2015 00:00:00",
        "1/9/2015 23:00:00",
        "1/10/2015 00:00:00",
        "1/10/2015 23:00:00",
        "1/11/2015 00:00:00",
        # NOTE(review): 21st entry is unused — zip truncates to the 20 rows.
        "1/11/2015 23:00:00",
    ]
    actual_times = [pd.Timestamp(c) for c in actual_times]
    for computed, actual in zip(
        temporal_cutoffs_by_nwindows["instance_id"],
        actual_instances,
    ):
        assert computed == actual
    for computed, actual in zip(temporal_cutoffs_by_nwindows["time"], actual_times):
        assert computed == actual
    # Case 2: window_size with explicit per-instance start times.
    cutoffs = [pd.Timestamp("1/2/2015")] * 9 + [pd.Timestamp("1/3/2015")]
    starts = [pd.Timestamp("1/1/2015")] * 9 + [pd.Timestamp("1/2/2015")]
    actual_times = ["1/1/2015 00:00:00", "1/2/2015 00:00:00"] * 9
    actual_times += ["1/2/2015 00:00:00", "1/3/2015 00:00:00"]
    actual_times = [pd.Timestamp(c) for c in actual_times]
    temporal_cutoffs_by_wsz_start = make_temporal_cutoffs(
        instance_ids,
        cutoffs,
        window_size="1d",
        start=starts,
    )
    for computed, actual in zip(
        temporal_cutoffs_by_wsz_start["instance_id"],
        actual_instances,
    ):
        assert computed == actual
    for computed, actual in zip(temporal_cutoffs_by_wsz_start["time"], actual_times):
        assert computed == actual
    # Case 3: num_windows with explicit start times.
    cutoffs = [pd.Timestamp("1/2/2015")] * 9 + [pd.Timestamp("1/3/2015")]
    starts = [pd.Timestamp("1/1/2015")] * 10
    actual_times = ["1/1/2015 00:00:00", "1/2/2015 00:00:00"] * 9
    actual_times += ["1/1/2015 00:00:00", "1/3/2015 00:00:00"]
    actual_times = [pd.Timestamp(c) for c in actual_times]
    temporal_cutoffs_by_nw_start = make_temporal_cutoffs(
        instance_ids,
        cutoffs,
        num_windows=2,
        start=starts,
    )
    for computed, actual in zip(
        temporal_cutoffs_by_nw_start["instance_id"],
        actual_instances,
    ):
        assert computed == actual
    for computed, actual in zip(temporal_cutoffs_by_nw_start["time"], actual_times):
        assert computed == actual
def test_convert_time_units():
    """Two of each unit's worth of seconds converts to exactly 2; an unknown
    unit raises ValueError."""
    seconds_per_unit = {
        "years": 31540000,
        "months": 2628000,
        "days": 86400,
        "hours": 3600,
        "minutes": 60,
        "seconds": 1,
        "milliseconds": 0.001,
        "nanoseconds": 0.000000001,
    }
    for unit, seconds in seconds_per_unit.items():
        assert convert_time_units(seconds * 2, unit) == 2
        # float inputs go through floating-point math, so compare approximately
        assert np.isclose(convert_time_units(float(seconds * 2), unit), 2)
    with pytest.raises(ValueError, match="Invalid unit given, make sure it is plural"):
        convert_time_units("jnkwjgn", 10)
@pytest.mark.parametrize(
    "dt, expected_floats",
    [
        (
            # sub-day resolution: values are minutes since the Unix epoch
            # (e.g. 2010-01-01 11:45 -> 14610 days * 1440 + 705 = 21039105.0)
            pd.Series(
                [
                    datetime(2010, 1, 1, 11, 45, 0),
                    datetime(2010, 1, 1, 12, 55, 15),
                    datetime(2010, 1, 1, 11, 57, 30),
                    datetime(2010, 1, 1, 11, 12),
                    datetime(2010, 1, 1, 11, 12, 15),
                ],
            ),
            pd.Series([21039105.0, 21039175.25, 21039117.5, 21039072.0, 21039072.25]),
        ),
        (
            # daily resolution: values are days since the Unix epoch
            # (2017-01-01 -> 17167.0)
            pd.Series(
                list(pd.date_range(start="2017-01-01", freq="1d", periods=3))
                + list(pd.date_range(start="2017-01-10", freq="2d", periods=4))
                + list(pd.date_range(start="2017-01-22", freq="1d", periods=7)),
            ),
            pd.Series(
                [
                    17167.0,
                    17168.0,
                    17169.0,
                    17176.0,
                    17178.0,
                    17180.0,
                    17182.0,
                    17188.0,
                    17189.0,
                    17190.0,
                    17191.0,
                    17192.0,
                    17193.0,
                    17194.0,
                ],
            ),
        ),
    ],
)
def test_convert_datetime_floats(dt, expected_floats):
    """Datetime series convert to epoch-based floats at a fitting resolution."""
    actual_floats = convert_datetime_to_floats(dt)
    pd.testing.assert_series_equal(pd.Series(actual_floats), expected_floats)
@pytest.mark.parametrize(
    "td, expected_floats",
    [
        (
            # pd.Timedelta inputs; expected values are fractions of a day
            # (bare-integer Timedelta like 120000000 is nanoseconds)
            pd.Series(
                [
                    pd.Timedelta(2, "day"),
                    pd.Timedelta(120000000),
                    pd.Timedelta(48, "sec"),
                    pd.Timedelta(30, "min"),
                    pd.Timedelta(12, "hour"),
                ],
            ),
            pd.Series(
                [
                    2.0,
                    1.388888888888889e-06,
                    0.0005555555555555556,
                    0.020833333333333332,
                    0.5,
                ],
            ),
        ),
        (
            # stdlib datetime.timedelta inputs convert the same way
            pd.Series(
                [
                    timedelta(days=4),
                    timedelta(milliseconds=4000000),
                    timedelta(hours=2, seconds=49),
                ],
            ),
            pd.Series([4.0, 0.0462962962962963, 0.08390046296296297]),
        ),
    ],
)
def test_convert_timedelta_to_floats(td, expected_floats):
    """Timedelta series convert to floats measured in days."""
    actual_floats = convert_timedelta_to_floats(td)
    pd.testing.assert_series_equal(pd.Series(actual_floats), expected_floats)
@pytest.mark.parametrize(
    "series,expected_trends",
    [
        (
            # using datetimes: +5 per day -> slope 5.0
            pd.Series(
                data=[0, 5, 10],
                index=[
                    datetime(2019, 1, 1),
                    datetime(2019, 1, 2),
                    datetime(2019, 1, 3),
                ],
            ),
            5.0,
        ),
        (
            # using pd.Timestamp
            pd.Series(
                data=[0, -5, 3],
                index=pd.date_range(start="2019-01-01", freq="1D", periods=3),
            ),
            1.4999999999999998,
        ),
        (
            # exponential growth still yields a finite linear slope
            pd.Series(
                data=[1, 2, 4, 8, 16],
                index=pd.date_range(start="2019-01-01", freq="1D", periods=5),
            ),
            3.6000000000000005,
        ),
        (
            # using pd.Timedelta with no change in time
            pd.Series(
                data=[1, 2, 3],
                index=[
                    pd.Timedelta(120000000),
                    pd.Timedelta(120000000),
                    pd.Timedelta(120000000),
                ],
            ),
            0,
        ),
    ],
)
def test_calculate_trend(series, expected_trends):
    """calculate_trend fits a linear slope (per day) over a time-indexed series."""
    actual_trends = calculate_trend(series)
    assert np.isclose(actual_trends, expected_trends)
| 7,746 | 29.261719 | 86 | py |
featuretools | featuretools-main/featuretools/tests/testing_utils/generate_fake_dataframe.py | import random
from datetime import datetime as dt
import pandas as pd
import woodwork.type_sys.type_system as ww_type_system
from woodwork import logical_types
# Sample values used to populate a fake column of each supported Woodwork
# logical type; keys are the logical type class names.
logical_type_mapping = {
    logical_types.Boolean.__name__: [True, False],
    logical_types.BooleanNullable.__name__: [True, False, pd.NA],
    logical_types.Categorical.__name__: ["A", "B", "C"],
    logical_types.Datetime.__name__: [
        dt(2020, 1, 1, 12, 0, 0),
        dt(2020, 6, 1, 12, 0, 0),
    ],
    logical_types.Double.__name__: [1.2, 2.3, 3.4],
    logical_types.Integer.__name__: [1, 2, 3],
    logical_types.IntegerNullable.__name__: [1, 2, 3, pd.NA],
    logical_types.EmailAddress.__name__: [
        "john.smith@example.com",
        "sally.jones@example.com",
    ],
    logical_types.LatLong.__name__: [(1, 2), (3, 4)],
    logical_types.NaturalLanguage.__name__: [
        "This is sentence 1",
        "This is sentence 2",
    ],
    logical_types.Ordinal.__name__: [1, 2, 3],
    logical_types.URL.__name__: ["https://www.example.com", "https://www.example2.com"],
    logical_types.PostalCode.__name__: ["60018", "60018-0123"],
}
def flatten_list(nested_list):
    """Concatenate the sub-lists of ``nested_list`` into one flat list."""
    flattened = []
    for sublist in nested_list:
        flattened.extend(sublist)
    return flattened
def generate_fake_dataframe(
    col_defs=[("f_1", "Numeric"), ("f_2", "Datetime", "time_index")],
    n_rows=10,
    df_name="df",
):
    # NOTE(review): mutable default for col_defs — never mutated here, but a
    # tuple default would be safer; verify before changing.
    """Build a Woodwork-initialized DataFrame from (name, logical_type[, tags])
    column definitions, with deterministic pseudo-random sample values."""
    def randomize(values_):
        # Fixed seed keeps generated frames reproducible across calls.
        random.seed(10)
        values = values_.copy()
        random.shuffle(values)
        return values
    def gen_series(values):
        # Tile the sample values to at least n_rows, shuffle, then truncate.
        values = [values] * n_rows
        if isinstance(values, list):
            values = flatten_list(values)
        return randomize(values)[:n_rows]
    def get_tags(lt, tags=set()):
        # Combine Woodwork's standard tags with any user tags, minus the
        # index tags (those are passed via ww.init kwargs instead).
        inferred_tags = ww_type_system.str_to_logical_type(lt).standard_tags
        assert isinstance(inferred_tags, set)
        return inferred_tags.union(tags) - {"index", "time_index"}
    other_kwargs = {}
    df = pd.DataFrame()
    lt_dict = {}
    tags_dict = {}
    for name, lt_name, *rest in col_defs:
        if lt_name in logical_type_mapping:
            values = logical_type_mapping[lt_name]
            if lt_name == logical_types.Ordinal.__name__:
                # Ordinal needs an explicit ordering built from the samples.
                lt = logical_types.Ordinal(order=values)
            else:
                lt = lt_name
            values = gen_series(values)
        else:
            raise Exception(f"Unknown logical type {lt_name}")
        lt_dict[name] = lt
        if len(rest):
            tags = rest[0]
            # index/time_index columns get dedicated, ordered values.
            if "index" in tags:
                other_kwargs["index"] = name
                values = range(n_rows)
            if "time_index" in tags:
                other_kwargs["time_index"] = name
                values = pd.date_range("2000-01-01", periods=n_rows)
            tags_dict[name] = get_tags(lt_name, tags)
        else:
            tags_dict[name] = get_tags(lt_name)
        s = pd.Series(values, name=name)
        df = pd.concat([df, s], axis=1)
    df.ww.init(
        name=df_name,
        logical_types=lt_dict,
        semantic_tags=tags_dict,
        **other_kwargs,
    )
    return df
| 3,103 | 29.431373 | 88 | py |
featuretools | featuretools-main/featuretools/tests/testing_utils/features.py | import re
from featuretools.entityset.relationship import RelationshipPath
def feature_with_name(features, name):
    """Return True if any feature in ``features`` has the given name."""
    return any(feature.get_name() == name for feature in features)
def number_of_features_with_name_like(features, pattern):
    """Count features whose name contains ``pattern`` as a literal substring
    (the pattern is regex-escaped before matching, so metacharacters are
    matched verbatim)."""
    matcher = re.compile(re.escape(pattern))
    return sum(1 for feature in features if matcher.search(feature.get_name()))
def backward_path(es, dataframe_ids):
    """
    Build a backward RelationshipPath walking parent -> child through the
    given dataframes. Assumes only one such path is possible.
    """
    steps = []
    for parent_name, child_name in zip(dataframe_ids[:-1], dataframe_ids[1:]):
        relationship = next(
            rel
            for rel in es.get_forward_relationships(child_name)
            if rel._parent_dataframe_name == parent_name
        )
        steps.append((False, relationship))
    return RelationshipPath(steps)
def forward_path(es, dataframe_ids):
    """
    Build a forward RelationshipPath walking child -> parent through the
    given dataframes. Assumes only one such path is possible.
    """
    steps = []
    for child_name, parent_name in zip(dataframe_ids[:-1], dataframe_ids[1:]):
        relationship = next(
            rel
            for rel in es.get_forward_relationships(child_name)
            if rel._parent_dataframe_name == parent_name
        )
        steps.append((True, relationship))
    return RelationshipPath(steps)
def check_rename(feat, new_name, new_names):
    """Rename ``feat`` and verify the copy differs from the original only in
    its name(s), not in its base features or dataframe."""
    renamed = feat.rename(new_name)
    assert feat.unique_name() != renamed.unique_name()
    assert feat.get_name() != renamed.get_name()
    original_base = feat.base_features[0].generate_name()
    assert original_base == renamed.base_features[0].generate_name()
    assert feat.dataframe_name == renamed.dataframe_name
    assert feat.get_feature_names() != renamed.get_feature_names()
    check_names(renamed, new_name, new_names)
def check_names(feat, new_name, new_names):
    """Assert ``feat`` reports the expected single name and per-output names."""
    assert (feat.get_name(), feat.get_feature_names()) == (new_name, new_names)
| 2,278 | 28.217949 | 85 | py |
featuretools | featuretools-main/featuretools/tests/testing_utils/cluster.py | from psutil import virtual_memory
def mock_cluster(
    n_workers=1,
    threads_per_worker=1,
    diagnostics_port=8787,
    memory_limit=None,
    **dask_kwarg,
):
    """Stand-in for a Dask cluster factory: echo back the four main
    constructor arguments as a tuple; extra kwargs are accepted and ignored."""
    config = (n_workers, threads_per_worker, diagnostics_port, memory_limit)
    return config
class MockClient:
    """Bare-bones stand-in for a distributed Client used in Dask tests."""
    def __init__(self, cluster):
        # keep the cluster handle just like a real client would
        self.cluster = cluster
    def scheduler_info(self):
        """Report one worker whose memory limit is the machine's total RAM."""
        worker = {"memory_limit": virtual_memory().total}
        return {"workers": {"worker 1": worker}}
def get_mock_client_cluster():
    # Return the (client class, cluster factory) pair used to stub out Dask.
    return MockClient, mock_cluster
| 510 | 20.291667 | 82 | py |
featuretools | featuretools-main/featuretools/tests/testing_utils/mock_ds.py | from datetime import datetime
import numpy as np
import pandas as pd
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Filepath,
Integer,
IPAddress,
LatLong,
NaturalLanguage,
Ordinal,
PersonFullName,
PhoneNumber,
PostalCode,
SubRegionCode,
)
from featuretools.entityset import EntitySet
def make_ecommerce_entityset(with_integer_time_index=False):
    """Make an entityset with the following shape::

           R       Régions
          / \\     .
         S   C     Stores, Customers
             |     .
         S   P     Sessions, Products
          \\ /     .
           L       Log

    When ``with_integer_time_index`` is True, datetime columns become
    integers and the entityset id gets an ``_int_time_index`` suffix.
    """
    dataframes = make_ecommerce_dataframes(
        with_integer_time_index=with_integer_time_index,
    )
    dataframe_names = dataframes.keys()
    es_id = "ecommerce"
    if with_integer_time_index:
        es_id += "_int_time_index"
    logical_types = make_logical_types(with_integer_time_index=with_integer_time_index)
    semantic_tags = make_semantic_tags()
    time_indexes = make_time_indexes(with_integer_time_index=with_integer_time_index)
    es = EntitySet(id=es_id)
    for df_name in dataframe_names:
        # Only some dataframes (customers, log) have time-index config.
        time_index = time_indexes.get(df_name, None)
        ti_name = None
        secondary = None
        if time_index is not None:
            ti_name = time_index["name"]
            secondary = time_index["secondary"]
        df = dataframes[df_name]
        es.add_dataframe(
            df,
            dataframe_name=df_name,
            index="id",
            logical_types=logical_types[df_name],
            semantic_tags=semantic_tags[df_name],
            time_index=ti_name,
            secondary_time_index=secondary,
        )
    # Split the cohort columns out of customers into their own dataframe.
    es.normalize_dataframe(
        "customers",
        "cohorts",
        "cohort",
        additional_columns=["cohort_name"],
        make_time_index=True,
        new_dataframe_time_index="cohort_end",
    )
    # Wire up the parent/child relationships shown in the diagram above.
    es.add_relationships(
        [
            ("régions", "id", "customers", "région_id"),
            ("régions", "id", "stores", "région_id"),
            ("customers", "id", "sessions", "customer_id"),
            ("sessions", "id", "log", "session_id"),
            ("products", "id", "log", "product_id"),
        ],
    )
    return es
def make_ecommerce_dataframes(with_integer_time_index=False):
    """Build the raw pandas DataFrames for the mock e-commerce dataset:
    régions, stores, products, customers, sessions and log. When
    ``with_integer_time_index`` is True, datetime columns are replaced with
    small integers."""
    region_df = pd.DataFrame(
        {"id": ["United States", "Mexico"], "language": ["en", "sp"]},
    )
    store_df = pd.DataFrame(
        {
            "id": range(6),
            "région_id": ["United States"] * 3 + ["Mexico"] * 2 + [np.nan],
            "num_square_feet": list(range(30000, 60000, 6000)) + [np.nan],
        },
    )
    product_df = pd.DataFrame(
        {
            "id": [
                "Haribo sugar-free gummy bears",
                "car",
                "toothpaste",
                "brown bag",
                "coke zero",
                "taco clock",
            ],
            "department": [
                "food",
                "electronics",
                "health",
                "food",
                "food",
                "electronics",
            ],
            "rating": [3.5, 4.0, 4.5, 1.5, 5.0, 5.0],
            "url": [
                "google.com",
                "https://www.featuretools.com/",
                "amazon.com",
                "www.featuretools.com",
                "bit.ly",
                "featuretools.com/demos/",
            ],
        },
    )
    customer_times = {
        "signup_date": [
            datetime(2011, 4, 8),
            datetime(2011, 4, 9),
            datetime(2011, 4, 6),
        ],
        # some point after signup date
        "upgrade_date": [
            datetime(2011, 4, 10),
            datetime(2011, 4, 11),
            datetime(2011, 4, 7),
        ],
        "cancel_date": [
            datetime(2011, 6, 8),
            datetime(2011, 10, 9),
            datetime(2012, 1, 6),
        ],
        "birthday": [datetime(1993, 3, 8), datetime(1926, 8, 2), datetime(1993, 4, 20)],
    }
    if with_integer_time_index:
        customer_times["signup_date"] = [6, 7, 4]
        customer_times["upgrade_date"] = [18, 26, 5]
        customer_times["cancel_date"] = [27, 28, 29]
        customer_times["birthday"] = [2, 1, 3]
    customer_df = pd.DataFrame(
        {
            "id": pd.Categorical([0, 1, 2]),
            "age": [33, 25, 56],
            "région_id": ["United States"] * 3,
            "cohort": [0, 1, 0],
            "cohort_name": ["Early Adopters", "Late Adopters", "Early Adopters"],
            "loves_ice_cream": [True, False, True],
            "favorite_quote": [
                "The proletariat have nothing to lose but their chains",
                "Capitalism deprives us all of self-determination",
                "All members of the working classes must seize the "
                "means of production.",
            ],
            "signup_date": customer_times["signup_date"],
            # some point after signup date
            "upgrade_date": customer_times["upgrade_date"],
            "cancel_date": customer_times["cancel_date"],
            "cancel_reason": ["reason_1", "reason_2", "reason_1"],
            "engagement_level": [1, 3, 2],
            "full_name": ["Mr. John Doe", "Doe, Mrs. Jane", "James Brown"],
            "email": ["john.smith@example.com", np.nan, "team@featuretools.com"],
            "phone_number": ["555-555-5555", "555-555-5555", "1-(555)-555-5555"],
            "birthday": customer_times["birthday"],
        },
    )
    # Mix of valid/odd IP addresses and file paths to exercise those types.
    ips = [
        "192.168.0.1",
        "2001:4860:4860::8888",
        "0.0.0.0",
        "192.168.1.1:2869",
        np.nan,
        np.nan,
    ]
    filepaths = [
        "/home/user/docs/Letter.txt",
        "./inthisdir",
        "C:\\user\\docs\\Letter.txt",
        "~/.rcinfo",
        "../../greatgrandparent",
        "data.json",
    ]
    session_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4, 5],
            "customer_id": pd.Categorical([0, 0, 0, 1, 1, 2]),
            "device_type": [0, 1, 1, 0, 0, 1],
            "device_name": ["PC", "Mobile", "Mobile", "PC", "PC", "Mobile"],
            "ip": ips,
            "filepath": filepaths,
        },
    )
    # 17 log timestamps grouped per session (or integers 8-17, 19-25).
    times = list(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)],
    )
    if with_integer_time_index:
        times = list(range(8, 18)) + list(range(19, 26))
    values = list(
        [i * 5 for i in range(5)]
        + [i * 1 for i in range(4)]
        + [0]
        + [i * 5 for i in range(2)]
        + [i * 7 for i in range(3)]
        + [np.nan] * 2,
    )
    values_2 = list(
        [i * 2 for i in range(5)]
        + [i * 1 for i in range(4)]
        + [0]
        + [i * 2 for i in range(2)]
        + [i * 3 for i in range(3)]
        + [np.nan] * 2,
    )
    values_many_nans = list(
        [np.nan] * 5
        + [i * 1 for i in range(4)]
        + [0]
        + [np.nan] * 2
        + [i * 3 for i in range(3)]
        + [np.nan] * 2,
    )
    # LatLong columns are built as (value, value_2) tuple pairs.
    latlong = list([(values[i], values_2[i]) for i, _ in enumerate(values)])
    latlong2 = list([(values_2[i], -values[i]) for i, _ in enumerate(values)])
    # Postal/country/subregion codes include valid and invalid examples.
    zipcodes = list(
        ["02116"] * 5
        + ["02116-3899"] * 4
        + ["0"]
        + ["1234567890"] * 2
        + ["12345-6789"] * 2
        + [np.nan] * 3,
    )
    countrycodes = list(["US"] * 5 + ["AL"] * 4 + [np.nan] * 5 + ["ALB"] * 2 + ["USA"])
    subregioncodes = list(
        ["US-AZ"] * 5 + ["US-MT"] * 4 + [np.nan] * 3 + ["UG-219"] * 2 + ["ZM-06"] * 3,
    )
    log_df = pd.DataFrame(
        {
            "id": range(17),
            "session_id": [0] * 5 + [1] * 4 + [2] * 1 + [3] * 2 + [4] * 3 + [5] * 2,
            "product_id": ["coke zero"] * 3
            + ["car"] * 2
            + ["toothpaste"] * 3
            + ["brown bag"] * 2
            + ["Haribo sugar-free gummy bears"]
            + ["coke zero"] * 4
            + ["taco clock"] * 2,
            "datetime": times,
            "value": values,
            "value_2": values_2,
            "latlong": latlong,
            "latlong2": latlong2,
            "zipcode": zipcodes,
            "countrycode": countrycodes,
            "subregioncode": subregioncodes,
            "value_many_nans": values_many_nans,
            "priority_level": [0] * 2 + [1] * 5 + [0] * 6 + [2] * 2 + [1] * 2,
            "purchased": [True] * 11 + [False] * 4 + [True, False],
            "url": ["https://www.featuretools.com/"] * 2
            + ["amazon.com"] * 2
            + [
                "www.featuretools.com",
                "bit.ly",
                "featuretools.com/demos/",
                # NOTE(review): adjacent string literals concatenate into ONE
                # entry here (possibly a missing comma); the concatenation
                # keeps the column at the required 17 entries — confirm intent
                # before "fixing".
                "www.google.co.in/" "http://lplay.google.co.in",
                " ",
                "invalid_url",
                "an",
                "microsoft.com/search/",
            ]
            + [np.nan] * 5,
            "email_address": ["john.smith@example.com", np.nan, "team@featuretools.com"]
            * 5
            + [" prefix@space.com", "suffix@space.com "],
            "comments": [coke_zero_review()]
            + ["I loved it"] * 2
            + car_reviews()
            + toothpaste_reviews()
            + brown_bag_reviews()
            + [gummy_review()]
            + ["I loved it"] * 4
            + taco_clock_reviews(),
        },
    )
    return {
        "régions": region_df,
        "stores": store_df,
        "products": product_df,
        "customers": customer_df,
        "sessions": session_df,
        "log": log_df,
    }
def make_semantic_tags():
    """Return per-dataframe semantic-tag overrides for the mock dataset:
    foreign-key columns plus the customers' date-of-birth tag."""
    return {
        "customers": {"région_id": "foreign_key", "birthday": "date_of_birth"},
        "sessions": {"customer_id": "foreign_key"},
        "log": {"session_id": "foreign_key"},
        "products": {},
        "stores": {"région_id": "foreign_key"},
        "régions": {},
    }
def make_logical_types(with_integer_time_index=False):
    """Return the Woodwork logical type for every column of each mock
    dataframe. When ``with_integer_time_index`` is True, the datetime
    columns are declared as Integer instead."""
    region_logical_types = {"id": Categorical, "language": Categorical}
    store_logical_types = {
        "id": Integer,
        "région_id": Categorical,
        "num_square_feet": Double,
    }
    product_logical_types = {
        "id": Categorical,
        "rating": Double,
        "department": Categorical,
        "url": URL,
    }
    customer_logical_types = {
        "id": Integer,
        "age": Integer,
        "région_id": Categorical,
        "loves_ice_cream": Boolean,
        "favorite_quote": NaturalLanguage,
        "signup_date": Datetime(datetime_format="%Y-%m-%d"),
        "upgrade_date": Datetime(datetime_format="%Y-%m-%d"),
        "cancel_date": Datetime(datetime_format="%Y-%m-%d"),
        "cancel_reason": Categorical,
        "engagement_level": Ordinal(order=[1, 2, 3]),
        "full_name": PersonFullName,
        "email": EmailAddress,
        "phone_number": PhoneNumber,
        "birthday": Datetime(datetime_format="%Y-%m-%d"),
        "cohort_name": Categorical,
        "cohort": Integer,
    }
    session_logical_types = {
        "id": Integer,
        "customer_id": Integer,
        "device_type": Categorical,
        "device_name": Categorical,
        "ip": IPAddress,
        "filepath": Filepath,
    }
    log_logical_types = {
        "id": Integer,
        "session_id": Integer,
        "product_id": Categorical,
        "datetime": Datetime(datetime_format="%Y-%m-%d"),
        "value": Double,
        "value_2": Double,
        "latlong": LatLong,
        "latlong2": LatLong,
        "zipcode": PostalCode,
        "countrycode": CountryCode,
        "subregioncode": SubRegionCode,
        "value_many_nans": Double,
        "priority_level": Ordinal(order=[0, 1, 2]),
        "purchased": Boolean,
        "url": URL,
        "email_address": EmailAddress,
        "comments": NaturalLanguage,
    }
    if with_integer_time_index:
        # Datetime-typed time columns are replaced with plain integers.
        log_logical_types["datetime"] = Integer
        customer_logical_types["signup_date"] = Integer
        customer_logical_types["upgrade_date"] = Integer
        customer_logical_types["cancel_date"] = Integer
        customer_logical_types["birthday"] = Integer
    return {
        "customers": customer_logical_types,
        "sessions": session_logical_types,
        "log": log_logical_types,
        "products": product_logical_types,
        "stores": store_logical_types,
        "régions": region_logical_types,
    }
def make_time_indexes(with_integer_time_index=False):
    """Return time-index configuration for the dataframes that have one.

    The flag is accepted for call-site symmetry with the other ``make_*``
    helpers but does not change the result (column names are the same
    either way).
    """
    customer_index = {
        "name": "signup_date",
        "secondary": {"cancel_date": ["cancel_reason"]},
    }
    log_index = {"name": "datetime", "secondary": None}
    return {"customers": customer_index, "log": log_index}
def coke_zero_review():
    """Return a long natural-language product review used as sample text for
    the log dataframe's ``comments`` column. The literal below is runtime
    data — do not reformat it."""
    return """
When it comes to Coca-Cola products, people tend to be die-hard fans. Many of us know someone who can't go a day without a Diet Coke (or two or three). And while Diet Coke has been a leading sugar-free soft drink since it was first released in 1982, it came to light that young adult males shied away from this beverage — identifying diet cola as a woman's drink. The company's answer to that predicament came in 2005 - in the form of a shiny black can - with the release of Coca-Cola Zero.
While Diet Coke was created with its own flavor profile and not as a sugar-free version of the original, Coca-Cola Zero aims to taste just like the "real Coke flavor." Despite their polar opposite advertising campaigns, the contents and nutritional information of the two sugar-free colas is nearly identical. With that information in hand we at HuffPost Taste needed to know: Which of these two artificially-sweetened Coca-Cola beverages actually tastes better? And can you even tell the difference between them?
Before we get to the results of our taste test, here are the facts:
Diet Coke
Motto: Always Great Tast
Nutritional Information: Many say that a can of Diet Coke actually contains somewhere between 1-4 calories, but if a serving size contains fewer than 5 calories a company is not obligated to note it in its nutritional information. Diet Coke's nutritional information reads 0 Calories, 0g Fat, 40mg Sodium, 0g Total Carbs, 0g Protein.
Ingredients: Carbonated water, caramel color, aspartame, phosphoric acid, potassium benzonate, natural flavors, citric acid, caffeine.
Artificial sweetener: Aspartame
Coca-Cola Zero
Motto: Real Coca-Cola Taste AND Zero Calories
Nutritional Information: While the label clearly advertises this beverage as a zero calorie cola, we are not entirely certain that its minimal calorie content is simply not required to be noted in the nutritional information. Coca-Cola Zero's nutritional information reads 0 Calories, 0g Fat, 40mg Sodium, 0g Total Carbs, 0g Protein.
Artificial sweetener: Aspartame and acesulfame potassium
Ingredients: Carbonated water, caramel color, phosphoric acid, aspartame, potassium benzonate, natural flavors, potassium citrate, acesulfame potassium, caffeine.
The Verdict:
Twenty-four editors blind-tasted the two cokes, side by side, and...
54 percent of our tasters were able to distinguish Diet Coke from Coca-Cola Zero
50 percent of our tasters preferred Diet Coke to Coca-Cola Zero, and vice versa
Here’s what our tasters thought of the two sugar-free soft drinks:
Diet Coke: "Tastes fake right away." "Much fresher brighter, crisper." "Has the wonderful flavors of Diet Coke’s artificial sweeteners."
Coca-Cola Zero: "Has more of a sharply sweet aftertaste I associate with diet sodas." "Tastes more like regular coke, less like fake sweetener." "Has an odd taste." "Tastes more like regular." "Very sweet."
Overall comments: "That was a lot more difficult than I though it would be." "Both equally palatable." A few people said Diet Coke tasted much better ... unbeknownst to them, they were actually referring to Coca-Cola Zero.
IN SUMMARY: It is a real toss up. There is not one artificially-sweetened Coca-Cola beverage that outshines the other. So how do people choose between one or the other? It is either a matter of personal taste, or maybe the marketing campaigns will influence their choice.
"""
def gummy_review() -> str:
    """Return one long-form novelty candy review (Haribo gummy bears) as a raw string.

    The text is verbatim review fixture data; do not edit its wording.
    """
    return """
The place: BMO Harris Bradley Center
The event: Bucks VS Spurs
The snack: Satan's Diarrhea Hate Bears made by Haribo
I recently took my 4 year old son to his first NBA game. He was very excited to go to the game, and I was excited because we had fantastic seats. Row C center court to be exact. I've never sat that close before. I've never had to go DOWN stairs to get to my seats. 24 stairs to get to my seats to be exact.
His favorite candy is Skittles. Mine are anything gummy. I snuck in a bag of skittles for my son, and grabbed a handful of gummy bears for myself, to be later known as Satan's Diarrhea Hate Bears, that I received for Christmas in bulk from my parents, and put them in a zip lock bag.
After the excitement of the 1st quarter has ended I take my son out to get him a bottled water and myself a beer. We return to our seats to enjoy our candy and drinks.
..............fast forward until 1 minute before half time...........
I have begun to sweat a sweat that is only meant for a man on mile 19 of a marathon. I have kicked out my legs out so straight that I am violently pushing the gentleman wearing a suit seat in front of me forward. He is not happy, I do not care. My hands are on the side of my seat not unlike that of a gymnast on a pommel horse, lifting me off my chair. My son is oblivious to what is happening next to him, after all, there is a mascot running around somewhere and he is eating candy.
I realize that at some point in the very near to immediate future I am going to have to allow this lava from Satan to forcefully expel itself from my innards. I also realize that I have to walk up 24 stairs just to get to level ground in hopes to make it to the bathroom. I’ll just have to sit here stiff as a board for a few moments waiting for the pain to subside. About 30 seconds later there is a slight calm in the storm of the violent hurricane that is going on in my lower intestine. I muster the courage to gently relax every muscle in my lower half and stand up. My son stands up next to me and we start to ascend up the stairs. I take a very careful and calculated step up the first stair. Then a very loud horn sounds. Halftime. Great. It’s going to be crowded. The horn also seems to have awaken the Satan's Diarrhea Hate Bears that are having a mosh pit in my stomach. It literally felt like an avalanche went down my stomach and I again have to tighten every muscle and stand straight up and focus all my energy on my poor sphincter to tighten up and perform like it has never performed before. Taking another step would be the worst idea possible, the flood gates would open. Don’t worry, Daddy has a plan. I some how mumble the question, “want to play a game?” to my son, he of course says “yes”. My idea is to hop on both feet allllll the way up the stairs, using the center railing to propel me up each stair. My son is always up for a good hopping game, so he complies and joins in on the “fun”. Some old lady 4 steps up thinks its cute that we are doing this, obviously she wasn’t looking at the panic on my face. 3 rows behind her a man about the same age as me, who must have had similar situations, notices the fear/panic/desperation on my face understands the danger that I along with my pants and anyone within a 5 yard radius spray zone are in. He just mouths the words “good luck man” to me and I press on. 
Half way up and there is no leakage, but my legs are getting tired and my sphincter has never endured this amount of pressure for this long of time. 16 steps/hops later…….4 steps to go…….My son trips and falls on the stairs, I have two options: keep going knowing he will catch up or bend down to pick him up relieving my sphincter of all the pressure and commotion while ruining the day of roughly the 50 people that are now watching a grown man hop up stairs while sweating profusely next to a 4 year old boy.
Luckily he gets right back up and we make it to the top of the stairs. Good, the hard part was over. Or so I thought. I managed to waddle like a penguin, or someone who is about to poop their pants in 2.5 seconds, to the men's room only to find that every stall is being used. EVERY STALL. It's halftime, of course everyone has to poop at that moment. I don't know if I can wait any longer, do I go ahead and fulfil the dream of every high school boy and poop in the urinal? What kind of an example would that set for my son? On the other hand, what kind of an example would it be for his father to fill his pants with a substance that probably will be unrecognizable to man. Suddenly a stall door opens, and I think I manage to actually levitate over to the stall. I my son follows me in, luckily it was the handicap stall so there was room for him to be out of the way. I get my pants off and start to sit. I know what taking a giant poo feels like. I also know what vomiting feels like. I can now successfully say that I know what it is like to vomit out my butt. I wasn't pooping, those Satan's Diarrhea Hate Bears did something to my insides that made my sphincter vomit our the madness.
I am now conscious of my surroundings. Other than the war that the bottom half of my body is currently having with this porcelain chair, it is quiet as a pin drop in the bathroom. The other men in there can sense that something isn't right, no one has heard anyone ever poop vomit before.
I can sense that the worst part is over. But its not stopping, nor can I physically stop it at this point, I am leaking..it's horrible. I call out "does anyone have a diaper?" hoping that some gentleman was changing a baby. Nothing. No one said a word. I know people are in there, I can see the toes of shoes pointed in my direction under the stall.. "DOES ANYONE HAVE A DIAPER!?!" I am screaming, my son is now crying, he thinks he is witnessing the death of his father. I can't even assure him that I will make it.
Not a word was said, but a diaper was thrown over the stall. I catch it, line my underwear with it, put my pants back on, and walk out of that bathroom like a champ. We go straight to our seats, grab out coats and go home. As we are walking out, the gentleman that wished me good luck earlier simply put his fist out, and I happily bumped it.
My son asks me, "Daddy, why are we leaving early?"
"Well son, I need to change my diaper"
"""
def taco_clock_reviews() -> list:
    """Return two short taco-clock product reviews as raw strings.

    The strings are verbatim review fixture data; do not edit their wording.
    """
    return [
        """
This timer does what it is supposed to do. Setup is elementary. Replacing the old one (after 12 years) was relatively easy. It has performed flawlessly since. I'm delighted I could find an esoteric product like this at Amazon. Their service, and the customer reviews, are just excellent.
""",
        """
Funny, cute clock. A little spendy for how light the clock is, but its hard to find a taco clock.
""",
    ]
def brown_bag_reviews() -> list:
    """Return two paper gift-bag product reviews as raw strings.

    The strings are verbatim review fixture data; do not edit their wording.
    """
    return [
        """
These bags looked exactly like I'd hoped, however, the handles broke off of almost every single bag as soon as items were placed in them! I used these as gift bags for out-of-town guests at my wedding, so imagine my embarassment as the handles broke off as I was handing them out. I would not recommend purchaing these bags unless you plan to fill them with nothing but paper! Anything heavier will cause the handles to snap right off.
""",
        """
I purchased these in August 2014 from Big Blue Supplies. I have no problem with the seller, these arrived new condition, fine shape.
I do have a slight problem with the bags. In case someone might want to know, the handles on these bags are set inside against the top. Then a piece of Kraft type packing tape is placed over the handles to hold them in place. On some of the bags, the tape is already starting to peel off. I would be really hesitant about using these bags unless I reinforced the current tape with a different adhesive.
I will keep the bags, and make a tape of a holiday or decorative theme and place over in order to make certain the handles stay in place.
Also in case anybody is wondering, the label on the plastic packaging bag states these are from ORIENTAL TRADING COMPANY. On the bottom of each bag it is stamped MADE IN CHINA. Again, I will be placing a sticker over that.
Even the dollar store bags I normally purchase do not have that stamped on the bottom in such prominent lettering. I purchased these because they were plain and I wanted to decorate them.
I do not think I would purchase again for all the reasons stated above.
Another thing for those still wanting to purchase, the ones I received were: 12 3/4 inches high not including handle, 10 1/4 inches wide and a 5 1/4 inch depth.
""",
    ]
def car_reviews() -> list:
    """Return two long-form car reviews (Ford F-150 and Tesla Model S 90D) as raw strings.

    The strings are verbatim review fixture data; do not edit their wording.
    """
    return [
        """
The full-size pickup truck and the V-8 engine were supposed to be inseparable, like the internet and cat videos. You can’t have one without the other—or so we thought.
In America’s most popular vehicle, the Ford F-150, two turbocharged six-cylinder engines marketed under the EcoBoost name have dethroned the naturally aspirated V-8. Ford’s new 2.7-liter twin-turbo V-6 is the popular choice, while the 3.5-liter twin-turbo V-6 is the top performer. The larger six allows for greater hauling capacity, accelerates the truck more quickly, and swills less gas in EPA testing than the V-8 alternative. It’s enough to make even old-school truck buyers acknowledge that there actually is a replacement for displacement.
And yet a V-8 in a big pickup truck still feels so natural, so right. In the F-150, the Coyote 5.0-liter V-8 is tuned for torque more so than power, yet it still revs with an enthusiastic giddy-up that reminds us that this engine’s other job is powering the Mustang. The response follows the throttle pedal faithfully while the six-speed automatic clicks through gears smoothly and easily. Together they pull this 5220-pound F-150 to 60 mph in 6.3 seconds, which is 0.4 second quicker than the 5.3-liter Chevrolet Silverado with the six-speed automatic and 0.9 second quicker than the 5.3 Silverado with the new eight-speed auto. The 3.5-liter EcoBoost, though, can do the deed another half-second quicker, but its synthetic soundtrack doesn’t have the rich, multilayered tone of the V-8.
It wasn’t until we saddled our test truck with a 6400-pound trailer (well under its 9000-pound rating) that we fully understood the case for upgrading to the 3.5-liter EcoBoost. The twin-turbo engine offers an extra 2500 pounds of towing capability and handles lighter tasks with considerably less strain. The 5.0-liter truck needs more revs and a wider throttle opening to accelerate its load, so we were often coaxed into pressing the throttle to the floor for even modest acceleration. The torquier EcoBoost engine offers a heartier response at part throttle.
In real-world, non-towing situations, the twin-turbo 3.5-liter doesn’t deliver on its promise of increased fuel economy, with both the 5.0-liter V-8 and that V-6 returning 16 mpg in our hands. But given the 3.5-liter’s virtues, we can forgive it that trespass.
Trucks Are the New Luxury
Pickups once were working-class transportation. Today, they’re proxy luxury vehicles—or at least that’s how they’re priced. If you think our test truck’s $57,240 window sticker is steep, consider that our model, the Lariat, is merely a mid-spec trim. There are three additional grades—King Ranch, Platinum, and Limited—positioned and priced above it, plus the 3.5-liter EcoBoost that costs an extra $400 as well as a plethora of options to inflate the price past 60 grand. Squint and you can almost see the six-figure trucks of the future on the horizon.
For the most part, though, the equipment in this particular Lariat lives up to the price tag. The driver and passenger seats are heated and cooled, with 10-way power adjustability and supple leather. The technology includes blind-spot monitoring, navigation, and a 110-volt AC outlet. Nods to utility include spotlights built into the side mirrors and Ford’s Pro Trailer Backup Assist, which makes reversing with a trailer as easy as turning a tiny knob on the dashboard.
Middle-Child Syndrome
In the F-150, Ford has a trifecta of engines (the fourth, a naturally aspirated 3.5-liter V-6, is best left to the fleet operators). The 2.7-liter twin-turbo V-6 delivers remarkable performance at an affordable price. The 3.5-liter twin-turbo V-6 is the workhorse, with power, torque, and hauling capability to spare. Compared with those two logical options, the middle-child 5.0-liter V-8 is the right-brain choice. Its strongest selling points may be its silky power delivery and the familiar V-8 rumble. That’s a flimsy argument when it comes to rationalizing a $50,000-plus purchase, though, so perhaps it’s no surprise that today’s boosted six-cylinders are now the engines of choice in the F-150.
""",
        """
THE GOOD
The Tesla Model S 90D's electric drivetrain is substantially more efficient than any internal combustion engine, and gives the car smooth and quick acceleration. All-wheel drive comes courtesy of a smart dual motor system. The new Autopilot feature eases the stress of stop-and-go traffic and long road trips.
THE BAD
Even at Tesla's Supercharger stations, recharging the battery takes significantly longer than refilling an internal combustion engine car's gas tank, limiting where you can drive. Tesla hasn't improved its infotainment system much from the Model S' launch.
THE BOTTOM LINE
Among the different flavors of Tesla Model S, the 90D is the one to get, exhibiting the best range and all-wheel drive, while offering an uncomplicated, next-generation driving experience that shows very well against equally priced competitors.
REVIEW SPECIFICATIONS PHOTOS
Roadshow Automobiles Tesla 2016 Tesla Model S
Having tested driver assistance systems in many cars, and even ridden in fully self-driving cars, I should have been ready for Tesla's new Autopilot feature. But engaging it while cruising the freeway in the Model S 90D, I kept my foot hovering over the brake.
My trepidation didn't come so much from the adaptive cruise control, which kept the Model S following traffic ahead at a set distance, but from the self-steering, this part of Autopilot managing to keep the Model S well-centered in its lane with no help from me. Over many miles, I built up more trust in the system, letting the car do the steering in situations from bumper-to-bumper traffic and a winding road through the hills.
2016 Tesla Model S 90DEnlarge Image
Although the middle of the Model S range, the 90D offers the best range and a wealth of useful tech, such as Autopilot self-driving.
Wayne Cunningham/Roadshow
Tesla added Autopilot to its Model S line as an option last year, along with all-wheel-drive. More recently, the high-tech automaker improved its batteries, upgrading its cars from their former 65 and 85 kilowatt-hour capacity to 70 and 90 kilowatt-hour. The example I drove, the 90D, represents all these advances.
More importantly, the 90D is the current range-leader among the Model S line, boasting 288 miles on a full battery charge.
The Model S' improvements fall outside of typical automotive industry product cycles, fulfilling Tesla's promise of acting more like a technology company, constantly building and deploying new features. Tesla accomplishes that goal partially through over-the-air software updates, improving existing cars, but the 90D presents significant hardware updates over the original Model S launched four years ago.
Sit and go
Of course, this Model S exhibited the ease of use of the original. Walking up to the car with the key fob in my pocket, it automatically unlocked. When I got in the car, it powered up without me having to push a start button, so I only needed to put it in drive to get on the road.
Likewise, the design hasn't changed, its sleek, hatchback four-door body offering excellent cargo room, both front and back, and seating space. The cabin feels less cramped than most cars due to the lack of a transmission tunnel and a dashboard bare of buttons or dials.
2016 Tesla Model S 90DEnlarge Image
The flat floor in the Model S' cabin makes for enhanced passenger room.
Wayne Cunningham/Roadshow
The big, 17-inch touchscreen in the center of the dashboard shows navigation, stereo, phone, energy consumption and car settings. I easily went from full-screen to a split-screen view, the windows showing each appearing instantly. A built-in 4G/LTE data connection powers Google maps and Internet-based audio. The LCD instrument panel in front of me showed my speed, energy usage, remaining range, and intelligently swapped audio information for turn-by-turn directions when started navigation.
The instrument panel actually made the experience of driving under Autopilot more comfortable, reassuring me with graphics that showed when the Model S' sensors were detecting the lane lines and the traffic around me. Impressively, the sensors could differentiate, as shown on the screen's graphics, a passenger car from a big truck.
At speed on the freeway, Autopilot smoothly maintained the car's position in its lane, and when I took my hands off the wheel for too long, it flashed a warning on the instrument panel. In stop-and-go traffic approaching a toll booth, the car did an even better job of self-driving, recognizing traffic around it and maintaining appropriate distances.
Handling surprise
Taking over the driving myself, the ride quality proved as comfortable as any sport-luxury car, as this Model S had its optional air suspension. The electric power steering is well-tuned, turning the wheels with a quiet, natural feel and good heft.
Audi S7 vs Tesla Model S
Shootout: Audi S7 vs. Tesla Model S
Wayne Cunningham/Roadshow
The biggest surprise came when I spent the day doing laps at the Thunderhill Raceway, negotiating a series of tight, technical turns in competition with an Audi S7. I expected the Model S to get out-of-shape in the turns, but instead it proved steady and solid. The Model S' 4,647-pound curb weight made it less than ideal for a track test, but much of that weight is in the battery pack, mounted low in the chassis. That low center of gravity helped limit body roll, ensuring good grip from all four tires. In the turns, the Model S felt nicely balanced, although not entirely nimble.
Helping its grip was its native all-wheel drive, gained from having motors driving each set of wheels. The combined output of the motors comes to 417 horsepower and 485 pound-feet of torque, those numbers expressed in 0-to-60 mph times of well under 5 seconds. That thrust made for fast runs down the race track's straightaways, or simply giving me the ability to take advantage of gaps in traffic on public roads.
288 miles is more than enough for most people's daily driving needs, and if you plug in every night, you will wake up to a fully charged car every morning. The Model S makes for a far different experience than driving an internal combustion car, where you need to go to a gas station to refuel. However, longer trips in the Model S require some planning, such as scheduling stops at Tesla's free Supercharger stations.
Charging times are much lengthier than refilling a tank with gasoline. From a Level 2, 240-volt station, you get 29 miles added every hour. Tesla's Supercharger, a Level 3 charger, takes 75 minutes to fully recharge the Model S 90D's battery.
2016 Tesla Model S 90DEnlarge Image
Despite its high initial price, the Model S 90D costs less to run on a daily basis than a combustion engine car.
Wayne Cunningham/Roadshow
Low maintenance
The 2016 Tesla Model S 90D adds features to keep it competitive against the internal combustion cars in its sport luxury set. More importantly, it remains very easy to live with. In fact, the electric drivetrain should mean greatly decreased maintenance, as there are fewer moving parts. The EPA estimates that annual electricity costs for the Model S 90D should run $650, much less than buying gasoline for an equivalent internal combustion car.
Lengthy charging times mean longer trips are either out of the question or require more planning than with an internal combustion car. And while the infotainment system responds quickly to touch inputs and offers useful screens, it hasn't changed much in four years. Most notably, Tesla hasn't added any music apps beyond the ones it launched with. Along with new, useful apps, it would be nice to have some themes or other aesthetic changes to the infotainment interface.
The Model S 90D's base price of $88,000 puts it out of reach of the average buyer, and the model I drove was optioned up to around $95,000. Against its Audi, BMW and Mercedes-Benz competition, however, it makes a compelling argument, especially for its uncomplicated nature.
""",
    ]
def toothpaste_reviews() -> list:
    """Return three toothpaste reviews/articles as raw strings.

    The strings are verbatim review fixture data; do not edit their wording.
    """
    return [
        """
Toothpaste can do more harm than good
The next time a patient innocently asks me, “What’s the best toothpaste to use?” I’m going to unleash a whole Chunky Soup can of “You Want The Truth? You CAN’T HANDLE THE TRUTH!!!” Gosh, that’s such an overused movie quote. Sorry about that, but still.
If you’re a dental professional, isn’t this the most annoying question you get, day after day? Do you even care which toothpaste your patients use?
No. You don’t. Asking a dentist what toothpaste to use is like asking your physician which bar of soap or body scrub you should use to clean your skin. Your dentist and dental hygienist have never seen a tube of toothpaste that singlehandedly improves the health of all patients in their practice, and the reason is simple:
Toothpaste is a cosmetic.
We brush our teeth so that out mouths no longer taste like… mouth. Mouth tastes gross, right? It tastes like putrefied skin. It tastes like tongue cheese. It tastes like Cream of Barf.
On the other hand, toothpaste has been exquisitely designed to bring you a brisk rush of York Peppermint Patty, or Triple Cinnamon Heaven, or whatever flavor that drives those tubes off of the shelves in the confusing dental aisle of your local supermarket or drugstore.
Toothpaste definitely tastes better than Cream of Barf. And that’s why you use it. Not because it’s good for you. You use toothpaste because it tastes good, and because it makes you accept your mouth as part of your face again.
From a marketing perspective, all of the other things that are in your toothpaste are in there to give it additional perceived value. So let’s deconstruct these ingredients, shall we?
1. Fluoride.
This was probably the first additive to toothpaste that brought it under the jurisdiction of the Food & Drug Administration and made toothpaste part drug, part cosmetic. Over time, a fluoride toothpaste can improve the strength of teeth, but the fluoride itself does nothing to make teeth cleaner. Some people are scared of fluoride so they don’t use it. Their choice. Professionally speaking, I know that the benefits of a fluoride additive far outweigh the risks.
2. Foam.
Sodium Lauryl Sulfate is soap. Soap has a creamy, thick texture that American tongues especially like and equate to the feeling of cleanliness. There’s not enough surfactant, though, in toothpaste foam to break up the goo that grows on your teeth. If these bubbles scrubbed, you’d better believe that they would also scrub your delicate gum tissues into a bloody pulp.
3. Abrasive particles.
Most toothpastes use hydrated silica as the grit that polishes teeth. You’re probably most familiar with it as the clear beady stuff in the “Do Not Eat” packets. Depending on the size and shape of the particles, silica is the whitening ingredient in most whitening toothpastes. But whitening toothpaste cannot get your teeth any whiter than a professional dental cleaning, because it only cleans the surface. Two weeks to a whiter smile? How about 30 minutes with your hygienist? It’s much more efficient and less harsh.
4. Desensitizers.
Teeth that are sensitive to hot, cold, sweets, or a combination can benefit from the addition of potassium nitrate or stannous fluoride to a toothpaste. This is more of a palliative treatment, when the pain is the problem. Good old Time will usually make teeth feel better, too, unless the pain is coming from a cavity. Yeah, I’m talking to you, the person who is trying to heal the hole in their tooth with Sensodyne.
5. Tartar control.
It burns! It burns! If your toothpaste has a particular biting flavor, it might contain tetrasodium pyrophosphate, an ingredient that is supposed to keep calcium phosphate salts (tartar, or calculus) from fossilizing on the back of your lower front teeth. A little tartar on your teeth doesn’t harm you unless it gets really thick and you can no longer keep it clean. One problem with tartar control toothpastes is that in order for the active ingredient to work, it has to be dissolved in a stronger detergent than usual, which can affect people that are sensitive to a high pH.
6. Triclosan.
This antimicrobial is supposed to reduce infections between the gum and tooth. However, if you just keep the germs off of your teeth in the first place it’s pretty much a waste of an extra ingredient. Its safety has been questioned but, like fluoride, the bulk of the scientific research easily demonstrates that the addition of triclosan in toothpaste does much more good than harm.
Why toothpaste can be bad for you.
Let’s just say it’s not the toothpaste’s fault. It’s yours. The toothpaste is just the co-dependent enabler. You’re the one with the problem.
Remember, toothpaste is a cosmetic, first and foremost. It doesn’t clean your teeth by itself. Just in case you think I’m making this up I’ve included clinical studies in the references at the end of this article that show how ineffective toothpaste really is.
peasized
• You’re using too much.
Don’t be so suggestible! Toothpaste ads show you how to use up the tube more quickly. Just use 1/3 as much, the size of a pea. It will still taste good, I promise! And too much foam can make you lose track of where your teeth actually are located.
• You’re not taking enough time.
At least two minutes. Any less and you’re missing spots. Just ’cause it tastes better doesn’t mean you did a good job.
• You’re not paying attention.
I’ve seen people brush the same four spots for two minutes and miss the other 60% of their mouth.brushguide The toothbrush needs to touch every crevice of every tooth, not just where it lands when you go into autopilot and start thinking about what you’re going to wear that day. It’s the toothbrush friction that cleans your teeth, not the cleaning product. Plaque is a growth, like the pink or grey mildew that grows around the edges of your shower. You’ve gotta rub it off to get it off. No tooth cleaning liquid, paste, creme, gel, or powder is going to make as much of a difference as your attention to detail will.
The solution.
Use what you like. It’s that simple. If it tastes good and feels clean to you, you’ll use it more often, brush longer, feel better, be healthier.
You can use baking soda, or coconut oil, or your favorite toothpaste, or even just plain water. The key is to have a good technique and to brush often. A music video makes this demonstration a little more fun than your usual lecture at the dental office, although, in my opinion you really still need to feel what it is like to MASH THE BRISTLES OF A SOFT TOOTHBRUSH INTO YOUR GUMS:
A little more serious video from my pal Dr. Mark Burhenne where he demonstrates how to be careful with your toothbrush bristles:
Final word.
♬ It’s all about that Bass, ’bout that Bass, no bubbles. ♬ Heh, dentistry in-joke there.
Seriously, though, the bottom line is that your paste will mask brushing technique issues, so don’t put so much faith in the power of toothpaste.
Also you may have heard that some toothpastes contain decorative plastic that can get swallowed. Yeah, that was a DentalBuzz report I wrote that went viral earlier this year. And while I can’t claim total victory on that front, at least the company in question has promised that the plastic will no longer be added to their toothpaste lines very soon due to the overwhelming amount of letters, emails, and phone calls that they received as a result of people reading that article and making a difference.
But now I’m tired of talking about toothpaste.
Next topic?
I’m bringing pyorrhea back.
""",
        """
I’ve been a user of Colgate Total Whitening Toothpaste for many years because I’ve always tried to maintain a healthy smile (I’m a receptionist so I need a white smile). But because I drink coffee at least twice a day (sometimes more!) and a lot of herbal teas, I’ve found that using just this toothpaste alone doesn’t really get my teeth white...
The best way to get white teeth is to really try some professional products specifically for tooth whitening. I’ve tried a few products, like Crest White Strips and found that the strips are really not as good as the trays. Although the Crest White Strips are easy to use, they really DO NOT cover your teeth perfectly like some other professional dental whitening kits. This Product did cover my teeth well however because of their custom heat trays, and whitening my teeth A LOT. I would say if you really want white teeth, use the Colgate Toothpaste and least 2 times a day, along side a professional Gel product like Shine Whitening.
""",
        """
The first feature is the price, and it is right.
Next, I consider whether it will be neat to use. It is. Sometimes when I buy those new hard plastic containers, they actually get messy. Also I cannot get all the toothpaste out. It is easy to get the paste out of Colgate Total Whitening Paste without spraying it all over the cabinet.
If it does not taste good, I won't use it. Some toothpaste burns my mouth so bad that brushing my teeth is a painful experience. This one doesn't burn. It tastes simply the way toothpaste is supposed to taste.
Whitening is important. This one is supposed ot whiten. After spending money to whiten my teeth, I need a product to help ward off the bad effects of coffee and tea.
Avoiding all kinds of oral pathology is a major consideration. This toothpaste claims that it can help fight cavities, gingivitis, plaque, tartar, and bad breath.
I hope this product stays on the market a long time and does not change.
""",
    ]
| 47,055 | 61.741333 | 2,445 | py |
featuretools | featuretools-main/featuretools/tests/testing_utils/es_utils.py | import pandas as pd
from featuretools.utils.gen_utils import import_or_none, is_instance
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")
def to_pandas(df, index=None, sort_index=False, int_index=False):
    """Testing helper: coerce a Dask or Spark dataframe/series to pandas.

    A pandas object is returned unchanged. Otherwise the input is converted,
    after which ``index`` (if given) is set as the index, the frame is sorted
    on it when ``sort_index`` is True, and — for Dask DataFrames only — the
    computed index is coerced to Int64 when ``int_index`` is True.

    Returns:
        pandas DataFrame or Series
    """
    # Fast path: nothing to convert.
    if isinstance(df, (pd.DataFrame, pd.Series)):
        return df

    looks_dask = is_instance(df, (dd, dd), ("DataFrame", "Series"))
    looks_spark = is_instance(df, (ps, ps), ("DataFrame", "Series"))
    if looks_dask:
        converted = df.compute()
    if looks_spark:
        converted = df.to_pandas()

    if index:
        converted = converted.set_index(index)
    if sort_index:
        converted = converted.sort_index()
    # Dask computation can yield an object index; normalize it on request.
    if int_index and is_instance(df, dd, "DataFrame"):
        converted.index = pd.Index(converted.index, dtype="Int64")
    return converted
def get_df_tags(df):
    """Collect each column's Woodwork semantic tags, minus ``index`` and
    ``time_index``, in the form expected by a Woodwork init call."""
    reserved = {"time_index", "index"}
    return {col: df.ww.semantic_tags[col] - reserved for col in df.columns}
| 1,532 | 30.285714 | 115 | py |
featuretools | featuretools-main/featuretools/tests/testing_utils/__init__.py | # flake8: noqa
from featuretools.tests.testing_utils.cluster import (
MockClient,
mock_cluster,
get_mock_client_cluster,
)
from featuretools.tests.testing_utils.es_utils import get_df_tags, to_pandas
from featuretools.tests.testing_utils.features import (
feature_with_name,
number_of_features_with_name_like,
backward_path,
forward_path,
check_rename,
check_names,
)
from featuretools.tests.testing_utils.mock_ds import make_ecommerce_entityset
| 482 | 27.411765 | 77 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/test_primitives.py | from featuretools.tests.entry_point_tests.utils import (
_import_featuretools,
_install_featuretools_primitives,
_python,
_uninstall_featuretools_primitives,
)
def test_entry_point():
    """Install the test primitives package and verify entry-point loading behavior.

    Checks that a valid new primitive becomes importable, an invalid primitive
    logs a load failure, and a duplicate of a built-in primitive is ignored
    with a warning. Installs and uninstalls the package via pip as a side effect.
    """
    _install_featuretools_primitives()
    featuretools_log = _import_featuretools("debug").stdout.decode()
    new_primitive = _python("-c", "from featuretools.primitives import NewPrimitive")
    _uninstall_featuretools_primitives()
    # Valid primitive should import cleanly after installation
    assert new_primitive.returncode == 0
    invalid_primitive = 'Featuretools failed to load "invalid" primitives from "featuretools_primitives.invalid_primitive". '
    invalid_primitive += "For a full stack trace, set logging to debug."
    assert invalid_primitive in featuretools_log
    existing_primitive = 'While loading primitives via "existing" entry point, '
    existing_primitive += 'ignored primitive "Sum" from "featuretools_primitives.existing_primitive" because a primitive '
    existing_primitive += 'with that name already exists in "featuretools.primitives.standard.aggregation.sum_primitive"'
    assert existing_primitive in featuretools_log
| 1,104 | 45.041667 | 125 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/utils.py | import os
import subprocess
import sys
def _get_path_to_add_ons(*args):
pwd = os.path.dirname(__file__)
return os.path.join(pwd, "add-ons", *args)
def _python(*args):
command = [sys.executable, *args]
return subprocess.run(command, stdout=subprocess.PIPE)
def _install_featuretools_plugin():
    """Pip-install the featuretools_plugin test add-on in editable mode.

    Changes the working directory to the add-on folder as a side effect.
    """
    os.chdir(_get_path_to_add_ons("featuretools_plugin"))
    return _python("-m", "pip", "install", "-e", ".")
def _uninstall_featuretools_plugin():
    """Pip-uninstall the featuretools_plugin test add-on without prompting."""
    return _python("-m", "pip", "uninstall", "featuretools_plugin", "-y")
def _install_featuretools_primitives():
    """Pip-install the featuretools_primitives test add-on in editable mode.

    Changes the working directory to the add-on folder as a side effect.
    """
    os.chdir(_get_path_to_add_ons("featuretools_primitives"))
    return _python("-m", "pip", "install", "-e", ".")
def _uninstall_featuretools_primitives():
    """Pip-uninstall the featuretools_primitives test add-on without prompting."""
    return _python("-m", "pip", "uninstall", "featuretools_primitives", "-y")
def _import_featuretools(level=None):
    """Import featuretools in a subprocess, optionally setting FEATURETOOLS_LOG_LEVEL first."""
    statements = []
    if level:
        statements.append("import os")
        statements.append('os.environ["FEATURETOOLS_LOG_LEVEL"] = "%s"' % level)
    statements.append("import featuretools;")
    return _python("-c", ";".join(statements))
| 1,040 | 23.785714 | 77 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/test_plugin.py | from featuretools.tests.entry_point_tests.utils import (
_import_featuretools,
_install_featuretools_plugin,
_uninstall_featuretools_plugin,
)
def test_plugin_warning():
    """Verify that a failing plugin logs a warning, with the traceback only at debug level.

    Installs and uninstalls the featuretools_plugin test package via pip.
    """
    _install_featuretools_plugin()
    warning = _import_featuretools("warning").stdout.decode()
    debug = _import_featuretools("debug").stdout.decode()
    _uninstall_featuretools_plugin()
    message = (
        "Featuretools failed to load plugin module from library featuretools_plugin"
    )
    traceback = "NotImplementedError: plugin not implemented"
    # The failure message appears at both levels; the traceback only at debug
    assert message in warning
    assert traceback not in warning
    assert message in debug
    assert traceback in debug
| 671 | 28.217391 | 84 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_primitives/setup.py | from setuptools import find_packages, setup
# Minimal test package that registers primitives through the
# "featuretools_primitives" entry point group: one new primitive, one that
# raises on import, and one that duplicates a built-in primitive name.
setup(
    name="featuretools_primitives",
    packages=find_packages(),
    entry_points={
        "featuretools_primitives": [
            "new = featuretools_primitives.new_primitive",
            "invalid = featuretools_primitives.invalid_primitive",
            "existing = featuretools_primitives.existing_primitive",
        ],
    },
)
| 389 | 26.857143 | 68 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_primitives/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_primitives/featuretools_primitives/invalid_primitive.py | raise NotImplementedError("invalid primitive")
| 47 | 23 | 46 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_primitives/featuretools_primitives/existing_primitive.py | from featuretools.primitives.base import AggregationPrimitive
class Sum(AggregationPrimitive):
    """A primitive that should currently exist for testing.

    Its name collides with the built-in ``Sum`` primitive, so entry-point
    loading is expected to skip it and log a warning.
    """
    pass
| 170 | 20.375 | 62 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_primitives/featuretools_primitives/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_primitives/featuretools_primitives/new_primitive.py | from featuretools.primitives.base import TransformPrimitive
class NewPrimitive(TransformPrimitive):
    """A primitive that should not currently exist for testing.

    Entry-point loading is expected to make it importable from
    ``featuretools.primitives`` after installation.
    """
    pass
| 179 | 21.5 | 66 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_plugin/setup.py | from setuptools import setup
# Minimal test package that registers a module through the
# "featuretools_plugin" entry point group; the module raises on import to
# exercise Featuretools' plugin-failure logging.
setup(
    name="featuretools_plugin",
    packages=["featuretools_plugin"],
    entry_points={
        "featuretools_plugin": [
            "module = featuretools_plugin",
        ],
    },
)
| 223 | 17.666667 | 43 | py |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_plugin/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/entry_point_tests/add-ons/featuretools_plugin/featuretools_plugin/__init__.py | raise NotImplementedError("plugin not implemented")
| 52 | 25.5 | 51 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_timedelta.py | import pandas as pd
import pytest
from dateutil.relativedelta import relativedelta
from featuretools.entityset import Timedelta
from featuretools.feature_base import Feature
from featuretools.primitives import Count
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.wrangle import _check_timedelta
def test_timedelta_equality():
    """Equal value/unit Timedeltas compare equal; non-Timedelta values do not."""
    ten_days = Timedelta(10, "d")
    assert ten_days == Timedelta(10, "d")
    assert ten_days != 1
def test_singular():
    """make_singular strips a trailing 's' and leaves singular forms unchanged."""
    for unit in ("Month", "Months"):
        assert Timedelta.make_singular(unit) == "Month"
def test_delta_with_observations(es):
    """Observation-unit deltas are relative and cannot be added to timestamps."""
    four_delta = Timedelta(4, "observations")
    assert not four_delta.is_absolute()
    assert four_delta.get_value("o") == 4
    neg_four_delta = -four_delta
    assert not neg_four_delta.is_absolute()
    assert neg_four_delta.get_value("o") == -4
    time = pd.to_datetime("2019-05-01")
    error_txt = "Invalid unit"
    with pytest.raises(Exception, match=error_txt):
        time + four_delta
    with pytest.raises(Exception, match=error_txt):
        time - four_delta
def test_delta_with_time_unit_matches_pandas(es):
    """Time-unit Timedelta arithmetic matches the equivalent pd.Timedelta."""
    customer_id = 0
    sessions_df = to_pandas(es["sessions"])
    sessions_df = sessions_df[sessions_df["customer_id"] == customer_id]
    log_df = to_pandas(es["log"])
    log_df = log_df[log_df["session_id"].isin(sessions_df["id"])]
    all_times = log_df["datetime"].sort_values().tolist()
    # 4 observation delta
    value = 4
    unit = "h"
    delta = Timedelta(value, unit)
    neg_delta = -delta
    # first plus 4 obs is fifth
    assert all_times[0] + delta == all_times[0] + pd.Timedelta(value, unit)
    # using negative
    assert all_times[0] - neg_delta == all_times[0] + pd.Timedelta(value, unit)
    # fifth minus 4 obs is first
    assert all_times[4] - delta == all_times[4] - pd.Timedelta(value, unit)
    # using negative
    assert all_times[4] + neg_delta == all_times[4] - pd.Timedelta(value, unit)
def test_check_timedelta(es):
    """_check_timedelta parses every unit spelling: short, singular, and plural."""
    time_units = list(Timedelta._readable_units.keys())
    expanded_units = list(Timedelta._readable_units.values())
    exp_to_standard_unit = {e: t for e, t in zip(expanded_units, time_units)}
    singular_units = [u[:-1] for u in expanded_units]
    sing_to_standard_unit = {s: t for s, t in zip(singular_units, time_units)}
    to_standard_unit = {}
    to_standard_unit.update(exp_to_standard_unit)
    to_standard_unit.update(sing_to_standard_unit)
    full_units = singular_units + expanded_units + time_units + time_units
    # Both "2 unit" and "2unit" (no space, short units only) forms are checked
    strings = ["2 {}".format(u) for u in singular_units + expanded_units + time_units]
    strings += ["2{}".format(u) for u in time_units]
    for i, s in enumerate(strings):
        unit = full_units[i]
        standard_unit = unit
        if unit in to_standard_unit:
            standard_unit = to_standard_unit[unit]
        td = _check_timedelta(s)
        assert td.get_value(standard_unit) == 2
def test_check_pd_timedelta(es):
    """_check_timedelta accepts a pd.Timedelta and preserves its duration."""
    pdtd = pd.Timedelta(5, "m")
    td = _check_timedelta(pdtd)
    assert td.get_value("s") == 300
def test_string_timedelta_args():
    """String specs parse to the equivalent (value, unit) Timedelta."""
    cases = [
        ("1 second", 1, "second"),
        ("1 seconds", 1, "second"),
        ("10 days", 10, "days"),
        ("100 days", 100, "days"),
        ("1001 days", 1001, "days"),
        ("1001 weeks", 1001, "weeks"),
    ]
    for text, value, unit in cases:
        assert Timedelta(text) == Timedelta(value, unit)
def test_feature_takes_timedelta_string(es):
    """Feature's use_previous accepts a string and converts it to a Timedelta."""
    feature = Feature(
        Feature(es["log"].ww["id"]),
        parent_dataframe_name="customers",
        use_previous="1 day",
        primitive=Count,
    )
    assert feature.use_previous == Timedelta(1, "d")
def test_deltas_week(es):
    """A 1-week delta and a 7-day delta shift timestamps identically."""
    customer_id = 0
    sessions_df = to_pandas(es["sessions"])
    sessions_df = sessions_df[sessions_df["customer_id"] == customer_id]
    log_df = to_pandas(es["log"])
    log_df = log_df[log_df["session_id"].isin(sessions_df["id"])]
    all_times = log_df["datetime"].sort_values().tolist()
    delta_week = Timedelta(1, "w")
    delta_days = Timedelta(7, "d")
    assert all_times[0] + delta_days == all_times[0] + delta_week
def test_relative_year():
    """Year deltas use relativedelta so leap-day arithmetic is calendar-aware."""
    td_time = "1 years"
    td = _check_timedelta(td_time)
    assert td.get_value("Y") == 1
    assert isinstance(td.delta_obj, relativedelta)
    time = pd.to_datetime("2020-02-29")
    # Feb 29 + 1 year clamps to Feb 28 of the non-leap year
    assert time + td == pd.to_datetime("2021-02-28")
def test_serialization():
    """Timedeltas round-trip through get_arguments()/from_dictionary()."""
    times = [Timedelta(1, unit="w"), Timedelta(3, unit="d"), Timedelta(5, unit="o")]
    dictionaries = [
        {"value": 1, "unit": "w"},
        {"value": 3, "unit": "d"},
        {"value": 5, "unit": "o"},
    ]
    for td, expected in zip(times, dictionaries):
        assert expected == td.get_arguments()
    for expected, dictionary in zip(times, dictionaries):
        assert expected == Timedelta.from_dictionary(dictionary)
    # Test multiple temporal parameters separately since it is not deterministic
    mult_time = {"years": 4, "months": 3, "days": 2}
    mult_td = Timedelta(mult_time)
    # Serialize
    td_units = mult_td.get_arguments()["unit"]
    td_values = mult_td.get_arguments()["value"]
    arg_list = list(zip(td_values, td_units))
    assert (4, "Y") in arg_list
    assert (3, "mo") in arg_list
    assert (2, "d") in arg_list
    # Deserialize
    assert mult_td == Timedelta.from_dictionary(
        {"value": [4, 3, 2], "unit": ["Y", "mo", "d"]},
    )
def test_relative_month():
    """Month deltas use relativedelta so month-end arithmetic is calendar-aware."""
    td_time = "1 month"
    td = _check_timedelta(td_time)
    assert td.get_value("mo") == 1
    assert isinstance(td.delta_obj, relativedelta)
    time = pd.to_datetime("2020-01-31")
    # Jan 31 + 1 month clamps to the last day of February (leap year)
    assert time + td == pd.to_datetime("2020-02-29")
    td_time = "6 months"
    td = _check_timedelta(td_time)
    assert td.get_value("mo") == 6
    assert isinstance(td.delta_obj, relativedelta)
    time = pd.to_datetime("2020-01-31")
    assert time + td == pd.to_datetime("2020-07-31")
def test_has_multiple_units():
    """has_multiple_units distinguishes single-unit from multi-unit DateOffsets."""
    single_unit = pd.DateOffset(months=3)
    multiple_units = pd.DateOffset(months=3, years=3, days=5)
    single_td = _check_timedelta(single_unit)
    multiple_td = _check_timedelta(multiple_units)
    assert single_td.has_multiple_units() is False
    assert multiple_td.has_multiple_units() is True
def test_pd_dateoffset_to_timedelta():
    """pd.DateOffset inputs convert to Timedeltas that keep the original offset."""
    single_temporal = pd.DateOffset(months=3)
    single_td = _check_timedelta(single_temporal)
    assert single_td.get_value("mo") == 3
    assert single_td.delta_obj == pd.DateOffset(months=3)
    mult_temporal = pd.DateOffset(years=10, months=3, days=5)
    mult_td = _check_timedelta(mult_temporal)
    expected = {"Y": 10, "mo": 3, "d": 5}
    assert mult_td.get_value() == expected
    assert mult_td.delta_obj == mult_temporal
    # get_name() for multiple values is not deterministic
    assert len(mult_td.get_name()) == len("10 Years 3 Months 5 Days")
    # Special offsets like BDay are preserved as-is
    special_dateoffset = pd.offsets.BDay(100)
    special_td = _check_timedelta(special_dateoffset)
    assert special_td.get_value("businessdays") == 100
    assert special_td.delta_obj == special_dateoffset
def test_pd_dateoffset_to_timedelta_math():
    """DateOffset-backed Timedeltas support add/subtract with timestamps."""
    base = pd.to_datetime("2020-01-31")
    add = _check_timedelta(pd.DateOffset(months=2))
    res = base + add
    assert res == pd.to_datetime("2020-03-31")
    base_2 = pd.to_datetime("2020-01-31")
    add_2 = _check_timedelta(pd.DateOffset(months=2, days=3))
    res_2 = base_2 + add_2
    assert res_2 == pd.to_datetime("2020-04-03")
    base_3 = pd.to_datetime("2019-09-20")
    # Business-day offsets subtract correctly as well
    sub = _check_timedelta(pd.offsets.BDay(10))
    res_3 = base_3 - sub
    assert res_3 == pd.to_datetime("2019-09-06")
| 7,711 | 32.241379 | 86 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_plotting.py | import os
import re
import graphviz
import pandas as pd
import pytest
from featuretools import EntitySet
from featuretools.utils.gen_utils import Library
@pytest.fixture
def pd_simple():
    """EntitySet containing a single one-row pandas dataframe."""
    es = EntitySet("test")
    df = pd.DataFrame({"foo": [1]})
    es.add_dataframe(df, dataframe_name="test")
    return es
@pytest.fixture
def dd_simple():
    """EntitySet containing a single one-row Dask dataframe; skipped without Dask."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    es = EntitySet("test")
    df = pd.DataFrame({"foo": [1]})
    df = dd.from_pandas(df, npartitions=2)
    es.add_dataframe(df, dataframe_name="test")
    return es
@pytest.fixture
def spark_simple():
    """EntitySet containing a single one-row Spark dataframe; skipped without Spark."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    es = EntitySet("test")
    df = ps.DataFrame({"foo": [1]})
    es.add_dataframe(df, dataframe_name="test")
    return es
@pytest.fixture(params=["pd_simple", "dd_simple", "spark_simple"])
def simple_es(request):
    """Parametrized fixture yielding the pandas, Dask, and Spark simple EntitySets."""
    return request.getfixturevalue(request.param)
def test_returns_digraph_object(es):
    """EntitySet.plot returns a graphviz.Digraph."""
    graph = es.plot()
    assert isinstance(graph, graphviz.Digraph)
def test_saving_png_file(es, tmp_path):
    """plot(to_file=...) writes a PNG file to the given path."""
    output_path = str(tmp_path.joinpath("test1.png"))
    es.plot(to_file=output_path)
    assert os.path.isfile(output_path)
    os.remove(output_path)
def test_missing_file_extension(es):
    """plot raises a ValueError when the output path has no file extension."""
    output_path = "test1"
    with pytest.raises(ValueError) as excinfo:
        es.plot(to_file=output_path)
    assert str(excinfo.value).startswith("Please use a file extension")
def test_invalid_format(es):
    """plot raises a ValueError when the output extension is not a known format."""
    output_path = "test1.xzy"
    with pytest.raises(ValueError) as excinfo:
        es.plot(to_file=output_path)
    assert str(excinfo.value).startswith("Unknown format")
def test_multiple_rows(es):
    """Plot labels show per-dataframe row counts (omitted for Dask)."""
    plot_ = es.plot()
    result = re.findall(r"\((\d+\srows?)\)", plot_.source)
    expected = ["{} rows".format(str(i.shape[0])) for i in es.dataframes]
    if es.dataframe_type == Library.DASK:
        # Dask does not list number of rows in plot
        assert result == []
    else:
        assert result == expected
def test_single_row(simple_es):
    """Plot labels use the singular "1 row" for one-row dataframes (omitted for Dask)."""
    plot_ = simple_es.plot()
    result = re.findall(r"\((\d+\srows?)\)", plot_.source)
    expected = ["1 row"]
    if simple_es.dataframe_type == Library.DASK:
        # Dask does not list number of rows in plot
        assert result == []
    else:
        assert result == expected
| 2,404 | 23.793814 | 86 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_last_time_index.py | from datetime import datetime
import pandas as pd
import pytest
from woodwork.logical_types import Categorical, Datetime, Integer
from featuretools.entityset.entityset import LTI_COLUMN_NAME
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")
@pytest.fixture
def values_es(es):
    """EntitySet with a "values" dataframe normalized out of the log's value column."""
    es.normalize_dataframe(
        "log",
        "values",
        "value",
        make_time_index=True,
        new_dataframe_time_index="value_time",
    )
    return es
@pytest.fixture
def true_values_lti():
    """Expected last-time-index values for the "values" dataframe."""
    true_values_lti = pd.Series(
        [
            datetime(2011, 4, 10, 10, 41, 0),
            datetime(2011, 4, 9, 10, 31, 9),
            datetime(2011, 4, 9, 10, 31, 18),
            datetime(2011, 4, 9, 10, 31, 27),
            datetime(2011, 4, 10, 10, 40, 1),
            datetime(2011, 4, 10, 10, 41, 3),
            datetime(2011, 4, 9, 10, 30, 12),
            datetime(2011, 4, 10, 10, 41, 6),
            datetime(2011, 4, 9, 10, 30, 18),
            datetime(2011, 4, 9, 10, 30, 24),
            datetime(2011, 4, 10, 11, 10, 3),
        ],
    )
    return true_values_lti
@pytest.fixture
def true_sessions_lti():
    """Expected last-time-index values for the sessions dataframe."""
    sessions_lti = pd.Series(
        [
            datetime(2011, 4, 9, 10, 30, 24),
            datetime(2011, 4, 9, 10, 31, 27),
            datetime(2011, 4, 9, 10, 40, 0),
            datetime(2011, 4, 10, 10, 40, 1),
            datetime(2011, 4, 10, 10, 41, 6),
            datetime(2011, 4, 10, 11, 10, 3),
        ],
    )
    return sessions_lti
@pytest.fixture
def wishlist_df():
    """Second child dataframe of sessions, used to test multi-child last time indexes."""
    wishlist_df = pd.DataFrame(
        {
            "session_id": [0, 1, 2, 2, 3, 4, 5],
            "datetime": [
                datetime(2011, 4, 9, 10, 30, 15),
                datetime(2011, 4, 9, 10, 31, 30),
                datetime(2011, 4, 9, 10, 30, 30),
                datetime(2011, 4, 9, 10, 35, 30),
                datetime(2011, 4, 10, 10, 41, 0),
                datetime(2011, 4, 10, 10, 39, 59),
                datetime(2011, 4, 10, 11, 10, 2),
            ],
            "product_id": [
                "coke zero",
                "taco clock",
                "coke zero",
                "car",
                "toothpaste",
                "brown bag",
                "coke zero",
            ],
        },
    )
    return wishlist_df
@pytest.fixture
def extra_session_df(es):
    """Sessions dataframe with an extra session (id 6) that has no child rows."""
    row_values = {"customer_id": 2, "device_name": "PC", "device_type": 0, "id": 6}
    row = pd.DataFrame(row_values, index=pd.Index([6], name="id"))
    df = to_pandas(es["sessions"])
    df = pd.concat([df, row]).sort_index()
    if es.dataframe_type == Library.DASK:
        df = dd.from_pandas(df, npartitions=3)
    elif es.dataframe_type == Library.SPARK:
        # Spark can't handle object dtypes
        df = df.astype("string")
        df = ps.from_pandas(df)
    return df
class TestLastTimeIndex(object):
    """Tests for EntitySet.add_last_time_indexes across leaf, parent, and
    grandparent dataframes, including dataframes with multiple children and
    instances missing from some or all child dataframes."""

    def test_leaf(self, es):
        """Leaf dataframe's last time index equals its own time index."""
        es.add_last_time_indexes()
        log = es["log"]
        lti_name = log.ww.metadata.get("last_time_index")
        assert lti_name == LTI_COLUMN_NAME
        assert len(log[lti_name]) == 17
        log_df = to_pandas(log)
        for v1, v2 in zip(log_df[lti_name], log_df["datetime"]):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_leaf_no_time_index(self, es):
        """Leaf dataframe without a time index gets all-null last time index."""
        es.add_last_time_indexes()
        stores = es["stores"]
        true_lti = pd.Series([None for x in range(6)], dtype="datetime64[ns]")
        assert len(true_lti) == len(stores[LTI_COLUMN_NAME])
        stores_lti = to_pandas(stores[LTI_COLUMN_NAME])
        for v1, v2 in zip(stores_lti, true_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    # TODO: possible issue with either normalize_dataframe or add_last_time_indexes
    def test_parent(self, values_es, true_values_lti):
        """Parent with a time index where all instances appear in the child."""
        # test dataframe with time index and all instances in child dataframe
        if values_es.dataframe_type != Library.PANDAS:
            pytest.xfail(
                "possible issue with either normalize_dataframe or add_last_time_indexes",
            )
        values_es.add_last_time_indexes()
        values = values_es["values"]
        lti_name = values.ww.metadata.get("last_time_index")
        assert len(values[lti_name]) == 10
        sorted_lti = to_pandas(values[lti_name]).sort_index()
        for v1, v2 in zip(sorted_lti, true_values_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    # TODO: fails with Dask, tests needs to be reworked
    def test_parent_some_missing(self, values_es, true_values_lti):
        """Parent with a time index where some instances have no children."""
        # test dataframe with time index and not all instances have children
        if values_es.dataframe_type != Library.PANDAS:
            pytest.xfail("fails with Dask, tests needs to be reworked")
        values = values_es["values"]
        # add extra value instance with no children
        row_values = {
            "value": 21.0,
            "value_time": pd.Timestamp("2011-04-10 11:10:02"),
            "values_id": 21.0,
        }
        # make sure index doesn't have same name as column to suppress pandas warning
        row = pd.DataFrame(row_values, index=pd.Index([21]))
        df = pd.concat([values, row])
        df = df[["value", "value_time"]].sort_values(by="value")
        df.index.name = None
        values_es.replace_dataframe(dataframe_name="values", df=df)
        values_es.add_last_time_indexes()
        # lti value should default to instance's time index
        true_values_lti[10] = pd.Timestamp("2011-04-10 11:10:02")
        true_values_lti[11] = pd.Timestamp("2011-04-10 11:10:03")
        values = values_es["values"]
        lti_name = values.ww.metadata.get("last_time_index")
        assert len(values[lti_name]) == 11
        sorted_lti = values[lti_name].sort_index()
        for v1, v2 in zip(sorted_lti, true_values_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_parent_no_time_index(self, es, true_sessions_lti):
        """Parent without a time index inherits last times from its child."""
        # test dataframe without time index and all instances have children
        es.add_last_time_indexes()
        sessions = es["sessions"]
        lti_name = sessions.ww.metadata.get("last_time_index")
        assert len(sessions[lti_name]) == 6
        sorted_lti = to_pandas(sessions[lti_name]).sort_index()
        for v1, v2 in zip(sorted_lti, true_sessions_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_parent_no_time_index_missing(
        self,
        es,
        extra_session_df,
        true_sessions_lti,
    ):
        """Parent without a time index gets NaT for instances with no children."""
        # test dataframe without time index and not all instance have children
        # add session instance with no associated log instances
        es.replace_dataframe(dataframe_name="sessions", df=extra_session_df)
        es.add_last_time_indexes()
        # since sessions has no time index, default value is NaT
        true_sessions_lti[6] = pd.NaT
        sessions = es["sessions"]
        lti_name = sessions.ww.metadata.get("last_time_index")
        assert len(sessions[lti_name]) == 7
        sorted_lti = to_pandas(sessions[lti_name]).sort_index()
        for v1, v2 in zip(sorted_lti, true_sessions_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_multiple_children(self, es, wishlist_df, true_sessions_lti):
        """Two child dataframes: last time index is the max across both."""
        if es.dataframe_type == Library.SPARK:
            pytest.xfail("Cannot make index on a Spark DataFrame")
        # test all instances in both children
        if es.dataframe_type == Library.DASK:
            wishlist_df = dd.from_pandas(wishlist_df, npartitions=2)
        logical_types = {
            "session_id": Integer,
            "datetime": Datetime,
            "product_id": Categorical,
        }
        es.add_dataframe(
            dataframe_name="wishlist_log",
            dataframe=wishlist_df,
            index="id",
            make_index=True,
            time_index="datetime",
            logical_types=logical_types,
        )
        es.add_relationship("sessions", "id", "wishlist_log", "session_id")
        es.add_last_time_indexes()
        sessions = es["sessions"]
        # wishlist df has more recent events for two session ids
        true_sessions_lti[1] = pd.Timestamp("2011-4-9 10:31:30")
        true_sessions_lti[3] = pd.Timestamp("2011-4-10 10:41:00")
        lti_name = sessions.ww.metadata.get("last_time_index")
        assert len(sessions[lti_name]) == 6
        sorted_lti = to_pandas(sessions[lti_name]).sort_index()
        for v1, v2 in zip(sorted_lti, true_sessions_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_multiple_children_right_missing(self, es, wishlist_df, true_sessions_lti):
        """Two children where the second is missing one parent's instances."""
        if es.dataframe_type == Library.SPARK:
            pytest.xfail("Cannot make index on a Spark DataFrame")
        # test all instances in left child
        # drop wishlist instance related to id 3 so it's only in log
        wishlist_df.drop(4, inplace=True)
        if es.dataframe_type == Library.DASK:
            wishlist_df = dd.from_pandas(wishlist_df, npartitions=2)
        logical_types = {
            "session_id": Integer,
            "datetime": Datetime,
            "product_id": Categorical,
        }
        es.add_dataframe(
            dataframe_name="wishlist_log",
            dataframe=wishlist_df,
            index="id",
            make_index=True,
            time_index="datetime",
            logical_types=logical_types,
        )
        es.add_relationship("sessions", "id", "wishlist_log", "session_id")
        es.add_last_time_indexes()
        sessions = es["sessions"]
        # now only session id 1 has newer event in wishlist_log
        true_sessions_lti[1] = pd.Timestamp("2011-4-9 10:31:30")
        lti_name = sessions.ww.metadata.get("last_time_index")
        assert len(sessions[lti_name]) == 6
        sorted_lti = to_pandas(sessions[lti_name]).sort_index()
        for v1, v2 in zip(sorted_lti, true_sessions_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_multiple_children_left_missing(
        self,
        es,
        extra_session_df,
        wishlist_df,
        true_sessions_lti,
    ):
        """Two children where the first is missing one parent's instances."""
        if es.dataframe_type == Library.SPARK:
            pytest.xfail("Cannot make index on a Spark DataFrame")
        # add row to sessions so not all session instances are in log
        es.replace_dataframe(dataframe_name="sessions", df=extra_session_df)
        # add row to wishlist df so new session instance in in wishlist_log
        row_values = {
            "session_id": 6,
            "datetime": pd.Timestamp("2011-04-11 11:11:11"),
            "product_id": "toothpaste",
        }
        row = pd.DataFrame(row_values, index=pd.RangeIndex(start=7, stop=8))
        df = pd.concat([wishlist_df, row])
        if es.dataframe_type == Library.DASK:
            df = dd.from_pandas(df, npartitions=2)
        logical_types = {
            "session_id": Integer,
            "datetime": Datetime,
            "product_id": Categorical,
        }
        es.add_dataframe(
            dataframe_name="wishlist_log",
            dataframe=df,
            index="id",
            make_index=True,
            time_index="datetime",
            logical_types=logical_types,
        )
        es.add_relationship("sessions", "id", "wishlist_log", "session_id")
        es.add_last_time_indexes()
        # test all instances in right child
        sessions = es["sessions"]
        # now wishlist_log has newer events for 3 session ids
        true_sessions_lti[1] = pd.Timestamp("2011-4-9 10:31:30")
        true_sessions_lti[3] = pd.Timestamp("2011-4-10 10:41:00")
        true_sessions_lti[6] = pd.Timestamp("2011-04-11 11:11:11")
        lti_name = sessions.ww.metadata.get("last_time_index")
        assert len(sessions[lti_name]) == 7
        sorted_lti = to_pandas(sessions[lti_name]).sort_index()
        for v1, v2 in zip(sorted_lti, true_sessions_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_multiple_children_all_combined(
        self,
        es,
        extra_session_df,
        wishlist_df,
        true_sessions_lti,
    ):
        """Each child is missing some instances, but together they cover all."""
        if es.dataframe_type == Library.SPARK:
            pytest.xfail("Cannot make index on a Spark DataFrame")
        # add row to sessions so not all session instances are in log
        es.replace_dataframe(dataframe_name="sessions", df=extra_session_df)
        # add row to wishlist_log so extra session has child instance
        row_values = {
            "session_id": 6,
            "datetime": pd.Timestamp("2011-04-11 11:11:11"),
            "product_id": "toothpaste",
        }
        row = pd.DataFrame(row_values, index=pd.RangeIndex(start=7, stop=8))
        df = pd.concat([wishlist_df, row])
        # drop instance 4 so wishlist_log does not have session id 3 instance
        df.drop(4, inplace=True)
        if es.dataframe_type == Library.DASK:
            df = dd.from_pandas(df, npartitions=2)
        logical_types = {
            "session_id": Integer,
            "datetime": Datetime,
            "product_id": Categorical,
        }
        es.add_dataframe(
            dataframe_name="wishlist_log",
            dataframe=df,
            index="id",
            make_index=True,
            time_index="datetime",
            logical_types=logical_types,
        )
        es.add_relationship("sessions", "id", "wishlist_log", "session_id")
        es.add_last_time_indexes()
        # test some instances in right, some in left, all when combined
        sessions = es["sessions"]
        # wishlist has newer events for 2 sessions
        true_sessions_lti[1] = pd.Timestamp("2011-4-9 10:31:30")
        true_sessions_lti[6] = pd.Timestamp("2011-04-11 11:11:11")
        lti_name = sessions.ww.metadata.get("last_time_index")
        assert len(sessions[lti_name]) == 7
        sorted_lti = to_pandas(sessions[lti_name]).sort_index()
        for v1, v2 in zip(sorted_lti, true_sessions_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_multiple_children_both_missing(
        self,
        es,
        extra_session_df,
        wishlist_df,
        true_sessions_lti,
    ):
        """An instance missing from both children gets NaT."""
        if es.dataframe_type == Library.SPARK:
            pytest.xfail("Cannot make index on a Spark DataFrame")
        # test all instances in neither child
        sessions = es["sessions"]
        if es.dataframe_type == Library.DASK:
            wishlist_df = dd.from_pandas(wishlist_df, npartitions=2)
        logical_types = {
            "session_id": Integer,
            "datetime": Datetime,
            "product_id": Categorical,
        }
        # add row to sessions to create session with no events
        es.replace_dataframe(dataframe_name="sessions", df=extra_session_df)
        es.add_dataframe(
            dataframe_name="wishlist_log",
            dataframe=wishlist_df,
            index="id",
            make_index=True,
            time_index="datetime",
            logical_types=logical_types,
        )
        es.add_relationship("sessions", "id", "wishlist_log", "session_id")
        es.add_last_time_indexes()
        sessions = es["sessions"]
        # wishlist has 2 newer events and one is NaT
        true_sessions_lti[1] = pd.Timestamp("2011-4-9 10:31:30")
        true_sessions_lti[3] = pd.Timestamp("2011-4-10 10:41:00")
        true_sessions_lti[6] = pd.NaT
        lti_name = sessions.ww.metadata.get("last_time_index")
        assert len(sessions[lti_name]) == 7
        sorted_lti = to_pandas(sessions[lti_name]).sort_index()
        for v1, v2 in zip(sorted_lti, true_sessions_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2

    def test_grandparent(self, es):
        """Last time indexes propagate correctly through two generations."""
        # test sorting by time works correctly across several generations
        log = es["log"]
        # For one user, change a log event to be newer than the user's normal
        # last time index. This event should be from a different session than
        # the current last time index.
        df = to_pandas(log)
        df["datetime"][5] = pd.Timestamp("2011-4-09 10:40:01")
        df = (
            df.set_index("datetime", append=True)
            .sort_index(level=[1, 0], kind="mergesort")
            .reset_index("datetime", drop=False)
        )
        if es.dataframe_type == Library.DASK:
            df = dd.from_pandas(df, npartitions=2)
        if es.dataframe_type == Library.SPARK:
            df = ps.from_pandas(df)
        es.replace_dataframe(dataframe_name="log", df=df)
        es.add_last_time_indexes()
        customers = es["customers"]
        true_customers_lti = pd.Series(
            [
                datetime(2011, 4, 9, 10, 40, 1),
                datetime(2011, 4, 10, 10, 41, 6),
                datetime(2011, 4, 10, 11, 10, 3),
            ],
        )
        lti_name = customers.ww.metadata.get("last_time_index")
        assert len(customers[lti_name]) == 3
        sorted_lti = to_pandas(customers).sort_values("id")[lti_name]
        for v1, v2 in zip(sorted_lti, true_customers_lti):
            assert (pd.isnull(v1) and pd.isnull(v2)) or v1 == v2
| 17,328 | 36.266667 | 90 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_dask_es.py | import pandas as pd
import pytest
from woodwork.logical_types import (
Categorical,
Datetime,
Double,
Integer,
NaturalLanguage,
)
from featuretools.entityset import EntitySet
from featuretools.tests.testing_utils import get_df_tags
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
@pytest.mark.skipif("not dd")
def test_add_dataframe(pd_es):
    """Adding a Dask dataframe produces the same data as the pandas version."""
    dask_es = EntitySet(id="dask_es")
    log_dask = dd.from_pandas(pd_es["log"], npartitions=2)
    dask_es = dask_es.add_dataframe(
        dataframe_name="log_dask",
        dataframe=log_dask,
        index="id",
        time_index="datetime",
        logical_types=pd_es["log"].ww.logical_types,
        semantic_tags=get_df_tags(pd_es["log"]),
    )
    pd.testing.assert_frame_equal(
        pd_es["log"],
        dask_es["log_dask"].compute(),
        check_like=True,
    )
@pytest.mark.skipif("not dd")
def test_add_dataframe_with_non_numeric_index(pd_es, dask_es):
    """String-indexed dataframes add identically to pandas and Dask entitysets."""
    df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"], "values": [1, 12, -34, 27]})
    dask_df = dd.from_pandas(df, npartitions=2)
    pd_es.add_dataframe(
        dataframe_name="new_dataframe",
        dataframe=df,
        index="id",
        logical_types={"id": Categorical, "values": Integer},
    )
    dask_es.add_dataframe(
        dataframe_name="new_dataframe",
        dataframe=dask_df,
        index="id",
        logical_types={"id": Categorical, "values": Integer},
    )
    pd.testing.assert_frame_equal(
        pd_es["new_dataframe"].reset_index(drop=True),
        dask_es["new_dataframe"].compute(),
    )
@pytest.mark.skipif("not dd")
def test_create_entityset_with_mixed_dataframe_types(pd_es, dask_es):
    """Mixing pandas and Dask dataframes in one entityset raises a ValueError."""
    df = pd.DataFrame({"id": [0, 1, 2, 3], "values": [1, 12, -34, 27]})
    dask_df = dd.from_pandas(df, npartitions=2)
    err_msg = (
        "All dataframes must be of the same type. "
        "Cannot add dataframe of type {} to an entityset with existing dataframes "
        "of type {}"
    )
    # Test error is raised when trying to add Dask dataframe to entityset with existing pandas dataframes
    with pytest.raises(
        ValueError,
        match=err_msg.format(type(dask_df), type(pd_es.dataframes[0])),
    ):
        pd_es.add_dataframe(
            dataframe_name="new_dataframe",
            dataframe=dask_df,
            index="id",
        )
    # Test error is raised when trying to add pandas dataframe to entityset with existing dask dataframes
    with pytest.raises(
        ValueError,
        match=err_msg.format(type(df), type(dask_es.dataframes[0])),
    ):
        dask_es.add_dataframe(dataframe_name="new_dataframe", dataframe=df, index="id")
@pytest.mark.skipif("not dd")
def test_add_last_time_indexes():
    """Build identical sessions/transactions EntitySets on pandas and Dask,
    add last-time indexes to both, and verify the resulting last-time-index
    columns agree.

    Fix: the local holding the Dask last-time-index column name was called
    ``spark_lti_name`` — a copy-paste leftover from the Spark variant of this
    test — even though it is read from ``dask_es``. Renamed to
    ``dask_lti_name`` for consistency; behavior is unchanged.
    """
    pd_es = EntitySet(id="pd_es")
    dask_es = EntitySet(id="dask_es")
    sessions = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "user": [1, 2, 1, 3],
            "time": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
            "strings": ["I am a string", "23", "abcdef ghijk", ""],
        },
    )
    sessions_dask = dd.from_pandas(sessions, npartitions=2)
    sessions_logical_types = {
        "id": Integer,
        "user": Integer,
        "time": Datetime,
        "strings": NaturalLanguage,
    }
    transactions = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4, 5],
            "session_id": [0, 0, 1, 2, 2, 3],
            "amount": [1.23, 5.24, 123.52, 67.93, 40.34, 50.13],
            "time": [
                pd.to_datetime("2019-01-10 03:53"),
                pd.to_datetime("2019-01-10 04:12"),
                pd.to_datetime("2019-02-03 10:34"),
                pd.to_datetime("2019-01-01 12:35"),
                pd.to_datetime("2019-01-01 12:49"),
                pd.to_datetime("2017-08-25 04:53"),
            ],
        },
    )
    transactions_dask = dd.from_pandas(transactions, npartitions=2)
    transactions_logical_types = {
        "id": Integer,
        "session_id": Integer,
        "time": Datetime,
        "amount": Double,
    }
    pd_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=sessions,
        index="id",
        time_index="time",
    )
    dask_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=sessions_dask,
        index="id",
        time_index="time",
        logical_types=sessions_logical_types,
    )
    pd_es.add_dataframe(
        dataframe_name="transactions",
        dataframe=transactions,
        index="id",
        time_index="time",
    )
    dask_es.add_dataframe(
        dataframe_name="transactions",
        dataframe=transactions_dask,
        index="id",
        time_index="time",
        logical_types=transactions_logical_types,
    )
    pd_es = pd_es.add_relationship("sessions", "id", "transactions", "session_id")
    dask_es = dask_es.add_relationship("sessions", "id", "transactions", "session_id")
    assert "foreign_key" in pd_es["transactions"].ww.semantic_tags["session_id"]
    assert "foreign_key" in dask_es["transactions"].ww.semantic_tags["session_id"]
    # No last-time index exists until add_last_time_indexes() is called
    assert pd_es["sessions"].ww.metadata.get("last_time_index") is None
    assert dask_es["sessions"].ww.metadata.get("last_time_index") is None
    pd_es.add_last_time_indexes()
    dask_es.add_last_time_indexes()
    pd_lti_name = pd_es["sessions"].ww.metadata.get("last_time_index")
    dask_lti_name = dask_es["sessions"].ww.metadata.get("last_time_index")
    assert pd_lti_name == dask_lti_name
    pd.testing.assert_series_equal(
        pd_es["sessions"][pd_lti_name].sort_index(),
        dask_es["sessions"][dask_lti_name].compute().sort_index(),
        check_names=False,
    )
@pytest.mark.skipif("not dd")
def test_add_dataframe_with_make_index():
    """make_index=True on a Dask dataframe should create a sequential integer
    index column named by the ``index`` argument."""
    values = [1, 12, -23, 27]
    df = pd.DataFrame({"values": values})
    dask_df = dd.from_pandas(df, npartitions=2)
    dask_es = EntitySet(id="dask_es")
    logical_types = {"values": Integer}
    dask_es.add_dataframe(
        dataframe_name="new_dataframe",
        dataframe=dask_df,
        make_index=True,
        index="new_index",
        logical_types=logical_types,
    )
    expected_df = pd.DataFrame({"values": values, "new_index": range(len(values))})
    pd.testing.assert_frame_equal(expected_df, dask_es["new_dataframe"].compute())
@pytest.mark.skipif("not dd")
def test_dataframe_type_dask(dask_es):
    """A Dask-backed EntitySet reports its dataframe type as Library.DASK."""
    expected = Library.DASK
    assert dask_es.dataframe_type == expected
| 6,622 | 29.948598 | 105 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_es.py | import copy
import logging
import pickle
import re
from datetime import datetime
from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from woodwork.logical_types import (
URL,
Boolean,
Categorical,
CountryCode,
Datetime,
Double,
EmailAddress,
Integer,
LatLong,
NaturalLanguage,
Ordinal,
PostalCode,
SubRegionCode,
)
from featuretools import Relationship
from featuretools.demo import load_retail
from featuretools.entityset import EntitySet
from featuretools.entityset.entityset import LTI_COLUMN_NAME, WW_SCHEMA_KEY
from featuretools.tests.testing_utils import get_df_tags, to_pandas
from featuretools.utils.gen_utils import Library, import_or_none, is_instance
from featuretools.utils.spark_utils import pd_to_spark_clean
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")
def test_normalize_time_index_as_additional_column(es):
    """Moving the base time index via additional_columns must raise; the
    error suggests using copy_columns instead."""
    error_text = "Not moving signup_date as it is the base time index column. Perhaps, move the column to the copy_columns."
    with pytest.raises(ValueError, match=error_text):
        assert "signup_date" in es["customers"].columns
        es.normalize_dataframe(
            base_dataframe_name="customers",
            new_dataframe_name="cancellations",
            index="cancel_reason",
            make_time_index="signup_date",
            additional_columns=["signup_date"],
            copy_columns=[],
        )
def test_normalize_time_index_as_copy_column(es):
    """Copying the time index keeps it on the base dataframe and makes it the
    time index of the new dataframe too."""
    assert "signup_date" in es["customers"].columns
    es.normalize_dataframe(
        base_dataframe_name="customers",
        new_dataframe_name="cancellations",
        index="cancel_reason",
        make_time_index="signup_date",
        additional_columns=[],
        copy_columns=["signup_date"],
    )
    assert "signup_date" in es["customers"].columns
    assert es["customers"].ww.time_index == "signup_date"
    assert "signup_date" in es["cancellations"].columns
    assert es["cancellations"].ww.time_index == "signup_date"
def test_normalize_time_index_as_copy_column_new_time_index(es):
    """make_time_index=True creates a fresh first_<base>_time column on the
    new dataframe instead of carrying over the copied column."""
    assert "signup_date" in es["customers"].columns
    es.normalize_dataframe(
        base_dataframe_name="customers",
        new_dataframe_name="cancellations",
        index="cancel_reason",
        make_time_index=True,
        additional_columns=[],
        copy_columns=["signup_date"],
    )
    assert "signup_date" in es["customers"].columns
    assert es["customers"].ww.time_index == "signup_date"
    assert "first_customers_time" in es["cancellations"].columns
    assert "signup_date" not in es["cancellations"].columns
    assert es["cancellations"].ww.time_index == "first_customers_time"
def test_normalize_time_index_as_copy_column_no_time_index(es):
    """make_time_index=False copies the column but leaves the new dataframe
    without a time index."""
    assert "signup_date" in es["customers"].columns
    es.normalize_dataframe(
        base_dataframe_name="customers",
        new_dataframe_name="cancellations",
        index="cancel_reason",
        make_time_index=False,
        additional_columns=[],
        copy_columns=["signup_date"],
    )
    assert "signup_date" in es["customers"].columns
    assert es["customers"].ww.time_index == "signup_date"
    assert "signup_date" in es["cancellations"].columns
    assert es["cancellations"].ww.time_index is None
def test_cannot_re_add_relationships_that_already_exists(es):
    """Re-adding an existing relationship (by object or by names) warns and
    does not grow the relationship list."""
    warn_text = "Not adding duplicate relationship: " + str(es.relationships[0])
    before_len = len(es.relationships)
    rel = es.relationships[0]
    with pytest.warns(UserWarning, match=warn_text):
        es.add_relationship(relationship=rel)
    with pytest.warns(UserWarning, match=warn_text):
        es.add_relationship(
            rel._parent_dataframe_name,
            rel._parent_column_name,
            rel._child_dataframe_name,
            rel._child_column_name,
        )
    after_len = len(es.relationships)
    assert before_len == after_len
def test_add_relationships_convert_type(es):
    """Every relationship ties a parent index to a foreign-key child column
    of the same dtype."""
    for r in es.relationships:
        parent_df = es[r.parent_dataframe.ww.name]
        child_df = es[r.child_dataframe.ww.name]
        assert parent_df.ww.index == r._parent_column_name
        assert "foreign_key" in r.child_column.ww.semantic_tags
        assert str(parent_df[r._parent_column_name].dtype) == str(
            child_df[r._child_column_name].dtype,
        )
def test_add_relationship_diff_param_logical_types(es):
    """Linking a child Ordinal column whose order differs from the parent's
    warns and coerces the child logical type to match the parent."""
    ordinal_1 = Ordinal(order=[0, 1, 2, 3, 4, 5, 6])
    ordinal_2 = Ordinal(order=[0, 1, 2, 3, 4, 5])
    es["sessions"].ww.set_types(logical_types={"id": ordinal_1})
    log_2_df = es["log"].copy()
    log_logical_types = {
        "id": Integer,
        "session_id": ordinal_2,
        "product_id": Categorical(),
        "datetime": Datetime,
        "value": Double,
        "value_2": Double,
        "latlong": LatLong,
        "latlong2": LatLong,
        "zipcode": PostalCode,
        "countrycode": CountryCode,
        "subregioncode": SubRegionCode,
        "value_many_nans": Double,
        "priority_level": Ordinal(order=[0, 1, 2]),
        "purchased": Boolean,
        "comments": NaturalLanguage,
        "url": URL,
        "email_address": EmailAddress,
    }
    log_semantic_tags = {"session_id": "foreign_key", "product_id": "foreign_key"}
    assert set(log_logical_types) == set(log_2_df.columns)
    es.add_dataframe(
        dataframe_name="log2",
        dataframe=log_2_df,
        index="id",
        logical_types=log_logical_types,
        semantic_tags=log_semantic_tags,
        time_index="datetime",
    )
    assert "log2" in es.dataframe_dict
    assert es["log2"].ww.schema is not None
    assert isinstance(es["log2"].ww.logical_types["session_id"], Ordinal)
    assert isinstance(es["sessions"].ww.logical_types["id"], Ordinal)
    # The two Ordinal instances differ (different order parameter)
    assert (
        es["sessions"].ww.logical_types["id"]
        != es["log2"].ww.logical_types["session_id"]
    )
    warning_text = "Changing child logical type to match parent."
    with pytest.warns(UserWarning, match=warning_text):
        es.add_relationship("sessions", "id", "log2", "session_id")
    assert isinstance(es["log2"].ww.logical_types["product_id"], Categorical)
    assert isinstance(es["products"].ww.logical_types["id"], Categorical)
def test_add_relationship_different_logical_types_same_dtype(es):
    """A child column with a different logical type but compatible dtype
    (CountryCode vs Categorical) is coerced to the parent's type with a
    warning, and keeps its foreign_key tag."""
    log_2_df = es["log"].copy()
    log_logical_types = {
        "id": Integer,
        "session_id": Integer,
        "product_id": CountryCode,
        "datetime": Datetime,
        "value": Double,
        "value_2": Double,
        "latlong": LatLong,
        "latlong2": LatLong,
        "zipcode": PostalCode,
        "countrycode": CountryCode,
        "subregioncode": SubRegionCode,
        "value_many_nans": Double,
        "priority_level": Ordinal(order=[0, 1, 2]),
        "purchased": Boolean,
        "comments": NaturalLanguage,
        "url": URL,
        "email_address": EmailAddress,
    }
    log_semantic_tags = {"session_id": "foreign_key", "product_id": "foreign_key"}
    assert set(log_logical_types) == set(log_2_df.columns)
    es.add_dataframe(
        dataframe_name="log2",
        dataframe=log_2_df,
        index="id",
        logical_types=log_logical_types,
        semantic_tags=log_semantic_tags,
        time_index="datetime",
    )
    assert "log2" in es.dataframe_dict
    assert es["log2"].ww.schema is not None
    assert isinstance(es["log2"].ww.logical_types["product_id"], CountryCode)
    assert isinstance(es["products"].ww.logical_types["id"], Categorical)
    warning_text = "Logical type CountryCode for child column product_id does not match parent column id logical type Categorical. Changing child logical type to match parent."
    with pytest.warns(UserWarning, match=warning_text):
        es.add_relationship("products", "id", "log2", "product_id")
    assert isinstance(es["log2"].ww.logical_types["product_id"], Categorical)
    assert isinstance(es["products"].ww.logical_types["id"], Categorical)
    assert "foreign_key" in es["log2"].ww.semantic_tags["product_id"]
def test_add_relationship_different_compatible_dtypes(es):
    """A child column with a different but convertible dtype (Datetime vs
    Integer parent index) is coerced to the parent's logical type with a
    warning."""
    log_2_df = es["log"].copy()
    log_logical_types = {
        "id": Integer,
        "session_id": Datetime,
        "product_id": Categorical,
        "datetime": Datetime,
        "value": Double,
        "value_2": Double,
        "latlong": LatLong,
        "latlong2": LatLong,
        "zipcode": PostalCode,
        "countrycode": CountryCode,
        "subregioncode": SubRegionCode,
        "value_many_nans": Double,
        "priority_level": Ordinal(order=[0, 1, 2]),
        "purchased": Boolean,
        "comments": NaturalLanguage,
        "url": URL,
        "email_address": EmailAddress,
    }
    log_semantic_tags = {"session_id": "foreign_key", "product_id": "foreign_key"}
    assert set(log_logical_types) == set(log_2_df.columns)
    es.add_dataframe(
        dataframe_name="log2",
        dataframe=log_2_df,
        index="id",
        logical_types=log_logical_types,
        semantic_tags=log_semantic_tags,
        time_index="datetime",
    )
    assert "log2" in es.dataframe_dict
    assert es["log2"].ww.schema is not None
    assert isinstance(es["log2"].ww.logical_types["session_id"], Datetime)
    assert isinstance(es["customers"].ww.logical_types["id"], Integer)
    warning_text = "Logical type Datetime for child column session_id does not match parent column id logical type Integer. Changing child logical type to match parent."
    with pytest.warns(UserWarning, match=warning_text):
        es.add_relationship("customers", "id", "log2", "session_id")
    assert isinstance(es["log2"].ww.logical_types["session_id"], Integer)
    assert isinstance(es["customers"].ww.logical_types["id"], Integer)
def test_add_relationship_errors_child_v_index(es):
    """Using a dataframe's own index as the child column of a relationship
    must raise a ValueError."""
    new_df = es["log"].ww.copy()
    new_df.ww._schema.name = "log2"
    es.add_dataframe(dataframe=new_df)
    to_match = "Unable to add relationship because child column 'id' in 'log2' is also its index"
    with pytest.raises(ValueError, match=to_match):
        es.add_relationship("log", "id", "log2", "id")
def test_add_relationship_empty_child_convert_dtype(es):
    """Adding a relationship to an empty child dataframe converts the child
    column dtype (string -> int64) to match the parent index."""
    relationship = Relationship(es, "sessions", "id", "log", "session_id")
    empty_log_df = pd.DataFrame(columns=es["log"].columns)
    if es.dataframe_type == Library.DASK:
        empty_log_df = dd.from_pandas(empty_log_df, npartitions=2)
    elif es.dataframe_type == Library.SPARK:
        empty_log_df = ps.from_pandas(empty_log_df)
    es.add_dataframe(empty_log_df, "log")
    assert len(es["log"]) == 0
    # session_id will be Unknown logical type with dtype string
    assert es["log"]["session_id"].dtype == "string"
    es.relationships.remove(relationship)
    assert relationship not in es.relationships
    es.add_relationship(relationship=relationship)
    assert es["log"]["session_id"].dtype == "int64"
def test_add_relationship_with_relationship_object(es):
    """A Relationship object passed via the keyword argument is registered."""
    rel = Relationship(es, "sessions", "id", "log", "session_id")
    es.add_relationship(relationship=rel)
    assert rel in es.relationships
def test_add_relationships_with_relationship_object(es):
    """add_relationships accepts a list of Relationship objects."""
    rel = Relationship(es, "sessions", "id", "log", "session_id")
    es.add_relationships([rel])
    assert rel in es.relationships
def test_add_relationship_error(es):
    """Supplying both name arguments and a Relationship object is ambiguous
    and must raise a ValueError."""
    relationship = Relationship(es, "sessions", "id", "log", "session_id")
    error_message = (
        "Cannot specify dataframe and column name values and also supply a Relationship"
    )
    with pytest.raises(ValueError, match=error_message):
        es.add_relationship(parent_dataframe_name="sessions", relationship=relationship)
def test_query_by_values_returns_rows_in_given_order():
    """query_by_values groups rows by the order of the requested values
    ("b" before "a" here yields ids grouped accordingly after time sort)."""
    data = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5],
            "value": ["a", "c", "b", "a", "a"],
            "time": [1000, 2000, 3000, 4000, 5000],
        },
    )
    es = EntitySet()
    es = es.add_dataframe(
        dataframe=data,
        dataframe_name="test",
        index="id",
        time_index="time",
        logical_types={"value": "Categorical"},
    )
    query = es.query_by_values("test", ["b", "a"], column_name="value")
    assert np.array_equal(query["id"], [1, 3, 4, 5])
def test_query_by_values_secondary_time_index(es):
    """Secondary-time-index columns must be null for instances whose
    secondary time is after the query cutoff."""
    end = np.datetime64(datetime(2011, 10, 1))
    all_instances = [0, 1, 2]
    result = es.query_by_values("customers", all_instances, time_last=end)
    result = to_pandas(result, index="id")
    for col in ["cancel_date", "cancel_reason"]:
        nulls = result.loc[all_instances][col].isnull() == [False, True, True]
        assert nulls.all(), "Some instance has data it shouldn't for column %s" % col
def test_query_by_id(es):
    """Querying the log dataframe by a single-element id list returns that row."""
    result = to_pandas(es.query_by_values("log", instance_vals=[0]))
    assert result["id"].values[0] == 0
def test_query_by_single_value(es):
    """A bare scalar (not wrapped in a list) is accepted as instance_vals."""
    result = to_pandas(es.query_by_values("log", instance_vals=0))
    assert result["id"].values[0] == 0
def test_query_by_df(es):
    """A DataFrame passed as instance_vals is queried by its id column."""
    lookup = pd.DataFrame({"id": [1, 3], "vals": [0, 1]})
    result = to_pandas(es.query_by_values("log", instance_vals=lookup))
    assert np.array_equal(result["id"], [1, 3])
def test_query_by_id_with_time(es):
    """time_last filters out instances whose time index is after the cutoff."""
    df = es.query_by_values(
        dataframe_name="log",
        instance_vals=[0, 1, 2, 3, 4],
        time_last=datetime(2011, 4, 9, 10, 30, 2 * 6),
    )
    df = to_pandas(df)
    if es.dataframe_type == Library.SPARK:
        # Spark doesn't maintain order
        df = df.sort_values("id")
    assert list(df["id"].values) == [0, 1, 2]
def test_query_by_column_with_time(es):
    """Querying by a non-index column (session_id) with a time cutoff returns
    the expected ids and values."""
    df = es.query_by_values(
        dataframe_name="log",
        instance_vals=[0, 1, 2],
        column_name="session_id",
        time_last=datetime(2011, 4, 9, 10, 50, 0),
    )
    df = to_pandas(df)
    true_values = [i * 5 for i in range(5)] + [i * 1 for i in range(4)] + [0]
    if es.dataframe_type == Library.SPARK:
        # Spark doesn't maintain order
        df = df.sort_values("id")
    assert list(df["id"].values) == list(range(10))
    assert list(df["value"].values) == true_values
def test_query_by_column_with_no_lti_and_training_window(es):
    """Using training_window without last_time_index set should warn and still
    return the window-filtered rows."""
    match = (
        "Using training_window but last_time_index is not set for dataframe customers"
    )
    with pytest.warns(UserWarning, match=match):
        df = es.query_by_values(
            dataframe_name="customers",
            instance_vals=[0, 1, 2],
            column_name="cohort",
            time_last=datetime(2011, 4, 11),
            training_window="3d",
        )
    df = to_pandas(df)
    assert list(df["id"].values) == [1]
    assert list(df["age"].values) == [25]
def test_query_by_column_with_lti_and_training_window(es):
    """With last_time_index set, the same training_window query keeps all
    three customers."""
    es.add_last_time_indexes()
    df = es.query_by_values(
        dataframe_name="customers",
        instance_vals=[0, 1, 2],
        column_name="cohort",
        time_last=datetime(2011, 4, 11),
        training_window="3d",
    )
    # Account for different ordering between pandas and dask/spark
    df = to_pandas(df).reset_index(drop=True).sort_values("id")
    assert list(df["id"].values) == [0, 1, 2]
    assert list(df["age"].values) == [33, 25, 56]
def test_query_by_indexed_column(es):
    """Querying by a secondary-indexed column (product_id) returns all
    matching rows."""
    df = es.query_by_values(
        dataframe_name="log",
        instance_vals=["taco clock"],
        column_name="product_id",
    )
    # Account for different ordering between pandas and dask/spark
    df = to_pandas(df).reset_index(drop=True).sort_values("id")
    assert list(df["id"].values) == [15, 16]
@pytest.fixture
def pd_df():
    """Small pandas frame with an integer id and a categorical-like column."""
    data = {"id": [0, 1, 2], "category": ["a", "b", "c"]}
    return pd.DataFrame(data)
@pytest.fixture
def dd_df(pd_df):
    """Dask version of pd_df; skips the test when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_df, npartitions=2)
@pytest.fixture
def spark_df(pd_df):
    """Spark version of pd_df; skips the test when Spark is unavailable."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_df)
@pytest.fixture(params=["pd_df", "dd_df", "spark_df"])
def df(request):
    """Parametrized fixture yielding the pandas, Dask, and Spark variants."""
    return request.getfixturevalue(request.param)
def test_check_columns_and_dataframe(df):
    """Matching logical_types are stored on the added dataframe and produce
    the expected semantic tags."""
    # matches
    logical_types = {"id": Integer, "category": Categorical}
    es = EntitySet(id="test")
    es.add_dataframe(
        df,
        dataframe_name="test_dataframe",
        index="id",
        logical_types=logical_types,
    )
    assert isinstance(
        es.dataframe_dict["test_dataframe"].ww.logical_types["category"],
        Categorical,
    )
    assert es.dataframe_dict["test_dataframe"].ww.semantic_tags["category"] == {
        "category",
    }
def test_make_index_any_location(df):
    """A made index lands first in pandas and last in Dask/Spark dataframes,
    and is recorded as the Woodwork index either way."""
    logical_types = {"id": Integer, "category": Categorical}
    es = EntitySet(id="test")
    es.add_dataframe(
        dataframe_name="test_dataframe",
        index="id1",
        make_index=True,
        logical_types=logical_types,
        dataframe=df,
    )
    if es.dataframe_type != Library.PANDAS:
        assert es.dataframe_dict["test_dataframe"].columns[-1] == "id1"
    else:
        assert es.dataframe_dict["test_dataframe"].columns[0] == "id1"
    assert es.dataframe_dict["test_dataframe"].ww.index == "id1"
def test_replace_dataframe_and_create_index(es):
    """replace_dataframe re-creates a made index when the replacement frame
    lacks the index column."""
    df = pd.DataFrame({"ints": [3, 4, 5], "category": ["a", "b", "a"]})
    final_df = df.copy()
    final_df["id"] = [0, 1, 2]
    if es.dataframe_type == Library.DASK:
        df = dd.from_pandas(df, npartitions=2)
    elif es.dataframe_type == Library.SPARK:
        df = ps.from_pandas(df)
    needs_idx_df = df.copy()
    logical_types = {"ints": Integer, "category": Categorical}
    es.add_dataframe(
        dataframe=df,
        dataframe_name="test_df",
        index="id",
        make_index=True,
        logical_types=logical_types,
    )
    assert es["test_df"].ww.index == "id"
    # DataFrame that needs the index column added
    assert "id" not in needs_idx_df.columns
    es.replace_dataframe("test_df", needs_idx_df)
    assert es["test_df"].ww.index == "id"
    df = to_pandas(es["test_df"]).sort_values(by="id")
    assert all(df["id"] == final_df["id"])
    assert all(df["ints"] == final_df["ints"])
def test_replace_dataframe_created_index_present(es):
    """replace_dataframe keeps an index column that is already present in the
    replacement frame (id 0 replaced by 100)."""
    df = pd.DataFrame({"ints": [3, 4, 5], "category": ["a", "b", "a"]})
    if es.dataframe_type == Library.DASK:
        df = dd.from_pandas(df, npartitions=2)
    elif es.dataframe_type == Library.SPARK:
        df = ps.from_pandas(df)
    logical_types = {"ints": Integer, "category": Categorical}
    es.add_dataframe(
        dataframe=df,
        dataframe_name="test_df",
        index="id",
        make_index=True,
        logical_types=logical_types,
    )
    # DataFrame that already has the index column
    has_idx_df = es["test_df"].replace({0: 100})
    if es.dataframe_type == Library.PANDAS:
        has_idx_df.set_index("id", drop=False, inplace=True)
    assert "id" in has_idx_df.columns
    es.replace_dataframe("test_df", has_idx_df)
    assert es["test_df"].ww.index == "id"
    df = to_pandas(es["test_df"]).sort_values(by="ints")
    assert all(df["id"] == [100, 1, 2])
def test_index_any_location(df):
    """An existing column can serve as the index regardless of its position."""
    logical_types = {"id": Integer, "category": Categorical}
    es = EntitySet(id="test")
    es.add_dataframe(
        dataframe_name="test_dataframe",
        index="category",
        logical_types=logical_types,
        dataframe=df,
    )
    assert es.dataframe_dict["test_dataframe"].columns[1] == "category"
    assert es.dataframe_dict["test_dataframe"].ww.index == "category"
def test_extra_column_type(df):
    """logical_types naming a column missing from the dataframe raises a
    LookupError."""
    # more columns
    logical_types = {"id": Integer, "category": Categorical, "category2": Categorical}
    error_text = re.escape(
        "logical_types contains columns that are not present in dataframe: ['category2']",
    )
    with pytest.raises(LookupError, match=error_text):
        es = EntitySet(id="test")
        es.add_dataframe(
            dataframe_name="test_dataframe",
            index="id",
            logical_types=logical_types,
            dataframe=df,
        )
def test_add_parent_not_index_column(es):
    """Using a non-index parent column in a relationship raises AttributeError."""
    expected_msg = "Parent column 'language' is not the index of dataframe régions"
    with pytest.raises(AttributeError, match=expected_msg):
        es.add_relationship("régions", "language", "customers", "région_id")
@pytest.fixture
def pd_df2():
    """Frame with an int column and a string column, and no obvious index."""
    data = {"category": [1, 2, 3], "category2": ["1", "2", "3"]}
    return pd.DataFrame(data)
@pytest.fixture
def dd_df2(pd_df2):
    """Dask version of pd_df2; skips the test when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_df2, npartitions=2)
@pytest.fixture
def spark_df2(pd_df2):
    """Spark version of pd_df2; skips the test when Spark is unavailable."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_df2)
@pytest.fixture(params=["pd_df2", "dd_df2", "spark_df2"])
def df2(request):
    """Parametrized fixture yielding the pandas, Dask, and Spark variants."""
    return request.getfixturevalue(request.param)
def test_none_index(df2):
    """Without an index: a pre-initialized Woodwork frame raises, while a
    plain frame warns and falls back to the first column as index."""
    es = EntitySet(id="test")
    copy_df = df2.copy()
    copy_df.ww.init(name="test_dataframe")
    error_msg = "Cannot add Woodwork DataFrame to EntitySet without index"
    with pytest.raises(ValueError, match=error_msg):
        es.add_dataframe(dataframe=copy_df)
    warn_text = (
        "Using first column as index. To change this, specify the index parameter"
    )
    with pytest.warns(UserWarning, match=warn_text):
        es.add_dataframe(
            dataframe_name="test_dataframe",
            logical_types={"category": "Categorical"},
            dataframe=df2,
        )
    assert es["test_dataframe"].ww.index == "category"
    assert es["test_dataframe"].ww.semantic_tags["category"] == {"index"}
    assert isinstance(es["test_dataframe"].ww.logical_types["category"], Categorical)
@pytest.fixture
def pd_df3():
    """Single-column frame with no candidate index column."""
    return pd.DataFrame({"category": list(range(1, 4))})
@pytest.fixture
def dd_df3(pd_df3):
    """Dask version of pd_df3; skips the test when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_df3, npartitions=2)
@pytest.fixture
def spark_df3(pd_df3):
    """Spark version of pd_df3; skips the test when Spark is unavailable."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_df3)
@pytest.fixture(params=["pd_df3", "dd_df3", "spark_df3"])
def df3(request):
    """Parametrized fixture yielding the pandas, Dask, and Spark variants."""
    return request.getfixturevalue(request.param)
def test_unknown_index(df3):
    """Naming a non-existent index column warns and creates a new integer
    column with that name."""
    warn_text = "index id not found in dataframe, creating new integer column"
    es = EntitySet(id="test")
    with pytest.warns(UserWarning, match=warn_text):
        es.add_dataframe(
            dataframe_name="test_dataframe",
            dataframe=df3,
            index="id",
            logical_types={"category": "Categorical"},
        )
    assert es["test_dataframe"].ww.index == "id"
    assert list(to_pandas(es["test_dataframe"]["id"], sort_index=True)) == list(
        range(3),
    )
def test_doesnt_remake_index(df):
    """make_index=True with an already-present column name raises RuntimeError."""
    logical_types = {"id": "Integer", "category": "Categorical"}
    error_text = "Cannot make index: column with name id already present"
    with pytest.raises(RuntimeError, match=error_text):
        es = EntitySet(id="test")
        es.add_dataframe(
            dataframe_name="test_dataframe",
            index="id",
            make_index=True,
            dataframe=df,
            logical_types=logical_types,
        )
def test_bad_time_index_column(df3):
    """Naming a time index column absent from the dataframe raises LookupError."""
    logical_types = {"category": "Categorical"}
    error_text = "Specified time index column `time` not found in dataframe"
    with pytest.raises(LookupError, match=error_text):
        es = EntitySet(id="test")
        es.add_dataframe(
            dataframe_name="test_dataframe",
            dataframe=df3,
            time_index="time",
            logical_types=logical_types,
        )
@pytest.fixture
def pd_df4():
    """Frame with numeric strings and a pre-set category dtype column, used
    for dtype-conversion tests."""
    df = pd.DataFrame(
        {
            "id": [0, 1, 2],
            "category": ["a", "b", "a"],
            "category_int": [1, 2, 3],
            "ints": ["1", "2", "3"],
            "floats": ["1", "2", "3.0"],
        },
    )
    df["category_int"] = df["category_int"].astype("category")
    return df
@pytest.fixture
def dd_df4(pd_df4):
    """Dask version of pd_df4; skips the test when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_df4, npartitions=2)
@pytest.fixture
def spark_df4(pd_df4):
    """Spark version of pd_df4 (after Spark-compatibility cleanup); skips
    the test when Spark is unavailable."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_to_spark_clean(pd_df4))
@pytest.fixture(params=["pd_df4", "dd_df4", "spark_df4"])
def df4(request):
    """Parametrized fixture yielding the pandas, Dask, and Spark variants."""
    return request.getfixturevalue(request.param)
def test_converts_dtype_on_init(df4):
    """Numeric-string columns are converted to their logical-type dtypes at
    init, and a pandas category dtype is inferred as Categorical."""
    logical_types = {"id": Integer, "ints": Integer, "floats": Double}
    if not isinstance(df4, pd.DataFrame):
        logical_types["category"] = Categorical
        logical_types["category_int"] = Categorical
    es = EntitySet(id="test")
    df4.ww.init(name="test_dataframe", index="id", logical_types=logical_types)
    es.add_dataframe(dataframe=df4)
    df = es["test_dataframe"]
    assert df["ints"].dtype.name == "int64"
    assert df["floats"].dtype.name == "float64"
    # this is infer from pandas dtype
    df = es["test_dataframe"]
    assert isinstance(df.ww.logical_types["category_int"], Categorical)
def test_converts_dtype_after_init(df4):
    """set_types after init converts the underlying dtype for each logical
    type change (Integer, Categorical, Ordinal, NaturalLanguage)."""
    category_dtype = "category"
    if ps and isinstance(df4, ps.DataFrame):
        # Spark has no pandas category dtype; categoricals are strings
        category_dtype = "string"
    df4["category"] = df4["category"].astype(category_dtype)
    if not isinstance(df4, pd.DataFrame):
        logical_types = {
            "id": Integer,
            "category": Categorical,
            "category_int": Categorical,
            "ints": Integer,
            "floats": Double,
        }
    else:
        logical_types = None
    es = EntitySet(id="test")
    es.add_dataframe(
        dataframe_name="test_dataframe",
        index="id",
        dataframe=df4,
        logical_types=logical_types,
    )
    df = es["test_dataframe"]
    df.ww.set_types(logical_types={"ints": "Integer"})
    assert isinstance(df.ww.logical_types["ints"], Integer)
    assert df["ints"].dtype == "int64"
    df.ww.set_types(logical_types={"ints": "Categorical"})
    assert isinstance(df.ww.logical_types["ints"], Categorical)
    assert df["ints"].dtype == category_dtype
    df.ww.set_types(logical_types={"ints": Ordinal(order=[1, 2, 3])})
    assert df.ww.logical_types["ints"] == Ordinal(order=[1, 2, 3])
    assert df["ints"].dtype == category_dtype
    df.ww.set_types(logical_types={"ints": "NaturalLanguage"})
    assert isinstance(df.ww.logical_types["ints"], NaturalLanguage)
    assert df["ints"].dtype == "string"
def test_warns_no_typing(df4):
    """Omitting logical_types warns for Dask/Spark (inference is expensive)
    but not for pandas; the dataframe is added either way."""
    es = EntitySet(id="test")
    if not isinstance(df4, pd.DataFrame):
        msg = "Performing type inference on Dask or Spark DataFrames may be computationally intensive. Specify logical types for each column to speed up EntitySet initialization."
        with pytest.warns(UserWarning, match=msg):
            es.add_dataframe(dataframe_name="test_dataframe", index="id", dataframe=df4)
    else:
        es.add_dataframe(dataframe_name="test_dataframe", index="id", dataframe=df4)
    assert "test_dataframe" in es.dataframe_dict
@pytest.fixture
def pd_datetime1():
    """Frame whose time column holds datetime strings ("%Y-%m-%d")."""
    times = pd.date_range("1/1/2011", periods=3, freq="H")
    time_strs = times.strftime("%Y-%m-%d")
    return pd.DataFrame({"id": [0, 1, 2], "time": time_strs})
@pytest.fixture
def dd_datetime1(pd_datetime1):
    """Dask version of pd_datetime1; skips when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_datetime1, npartitions=2)
@pytest.fixture
def spark_datetime1(pd_datetime1):
    """Spark version of pd_datetime1; skips when Spark is unavailable."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_datetime1)
@pytest.fixture(params=["pd_datetime1", "dd_datetime1", "spark_datetime1"])
def datetime1(request):
    """Parametrized fixture yielding the pandas, Dask, and Spark variants."""
    return request.getfixturevalue(request.param)
def test_converts_datetime(datetime1):
    """A string time column declared as Datetime is converted to Timestamps."""
    # string converts to datetime correctly
    # This test fails without defining logical types.
    # Entityset infers time column should be numeric type
    logical_types = {"id": Integer, "time": Datetime}
    es = EntitySet(id="test")
    es.add_dataframe(
        dataframe_name="test_dataframe",
        index="id",
        time_index="time",
        logical_types=logical_types,
        dataframe=datetime1,
    )
    pd_col = to_pandas(es["test_dataframe"]["time"])
    assert isinstance(es["test_dataframe"].ww.logical_types["time"], Datetime)
    assert type(pd_col[0]) == pd.Timestamp
@pytest.fixture
def pd_datetime2():
    """Frame with an ambiguous "%d-%m-%Y" date string in two columns, one to
    be parsed with an explicit format and one without."""
    datetime_format = "%d-%m-%Y"
    actual = pd.Timestamp("Jan 2, 2011")
    time_strs = [actual.strftime(datetime_format)] * 3
    return pd.DataFrame(
        {"id": [0, 1, 2], "time_format": time_strs, "time_no_format": time_strs},
    )
@pytest.fixture
def dd_datetime2(pd_datetime2):
    """Dask version of pd_datetime2; skips when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_datetime2, npartitions=2)
@pytest.fixture
def spark_datetime2(pd_datetime2):
    """Spark version of pd_datetime2; skips when Spark is unavailable."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_datetime2)
@pytest.fixture(params=["pd_datetime2", "dd_datetime2", "spark_datetime2"])
def datetime2(request):
    """Parametrized fixture yielding the pandas, Dask, and Spark variants."""
    return request.getfixturevalue(request.param)
def test_handles_datetime_format(datetime2):
    """An explicit Datetime format string parses day-first dates correctly;
    without it pandas misreads the ambiguous date."""
    # check if we load according to the format string
    # pass in an ambiguous date
    datetime_format = "%d-%m-%Y"
    actual = pd.Timestamp("Jan 2, 2011")
    logical_types = {
        "id": Integer,
        "time_format": (Datetime(datetime_format=datetime_format)),
        "time_no_format": Datetime,
    }
    es = EntitySet(id="test")
    es.add_dataframe(
        dataframe_name="test_dataframe",
        index="id",
        logical_types=logical_types,
        dataframe=datetime2,
    )
    col_format = to_pandas(es["test_dataframe"]["time_format"])
    col_no_format = to_pandas(es["test_dataframe"]["time_no_format"])
    # without formatting pandas gets it wrong
    assert (col_no_format != actual).all()
    # with formatting we correctly get jan2
    assert (col_format == actual).all()
def test_handles_datetime_mismatch():
    """Non-datetime strings in a declared time index raise a TypeError."""
    # can't convert arbitrary strings
    df = pd.DataFrame({"id": [0, 1, 2], "time": ["a", "b", "tomorrow"]})
    logical_types = {"id": Integer, "time": Datetime}
    error_text = "Time index column must contain datetime or numeric values"
    with pytest.raises(TypeError, match=error_text):
        es = EntitySet(id="test")
        es.add_dataframe(
            df,
            dataframe_name="test_dataframe",
            index="id",
            time_index="time",
            logical_types=logical_types,
        )
def test_dataframe_init(es):
    """Adding a dataframe preserves shape, index, time index, column set,
    dtypes, and id values across pandas/Dask/Spark backends."""
    df = pd.DataFrame(
        {
            "id": ["0", "1", "2"],
            "time": [datetime(2011, 4, 9, 10, 31, 3 * i) for i in range(3)],
            "category": ["a", "b", "a"],
            "number": [4, 5, 6],
        },
    )
    if es.dataframe_type == Library.DASK:
        df = dd.from_pandas(df, npartitions=2)
    elif es.dataframe_type == Library.SPARK:
        df = ps.from_pandas(df)
    logical_types = {"id": Categorical, "time": Datetime}
    if not isinstance(df, pd.DataFrame):
        extra_logical_types = {
            "category": Categorical,
            "number": Integer,
        }
        logical_types.update(extra_logical_types)
    es.add_dataframe(
        df.copy(),
        dataframe_name="test_dataframe",
        index="id",
        time_index="time",
        logical_types=logical_types,
    )
    if is_instance(df, dd, "DataFrame"):
        # Dask row count requires a compute() call
        df_shape = (df.shape[0].compute(), df.shape[1])
    else:
        df_shape = df.shape
    if es.dataframe_type == Library.DASK:
        es_df_shape = (
            es["test_dataframe"].shape[0].compute(),
            es["test_dataframe"].shape[1],
        )
    else:
        es_df_shape = es["test_dataframe"].shape
    assert es_df_shape == df_shape
    assert es["test_dataframe"].ww.index == "id"
    assert es["test_dataframe"].ww.time_index == "time"
    assert set([v for v in es["test_dataframe"].ww.columns]) == set(df.columns)
    assert es["test_dataframe"]["time"].dtype == df["time"].dtype
    if es.dataframe_type == Library.SPARK:
        assert set(es["test_dataframe"]["id"].to_list()) == set(df["id"].to_list())
    else:
        assert set(es["test_dataframe"]["id"]) == set(df["id"])
@pytest.fixture
def pd_bad_df():
    """Frame with a non-string (integer) column name, used for validation tests."""
    columns = {"a": [1, 2, 3], "b": [4, 5, 6], 3: ["a", "b", "c"]}
    return pd.DataFrame(columns)
@pytest.fixture
def dd_bad_df(pd_bad_df):
    """Dask version of pd_bad_df; skips when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_bad_df, npartitions=2)
@pytest.fixture(params=["pd_bad_df", "dd_bad_df"])
def bad_df(request):
    """Parametrized fixture yielding the pandas and Dask variants."""
    return request.getfixturevalue(request.param)
# Skip for Spark, automatically converts non-str column names to str
def test_nonstr_column_names(bad_df):
    """Non-string column names raise a ValueError (both with and without a
    pre-initialized Woodwork schema)."""
    if is_instance(bad_df, dd, "DataFrame"):
        pytest.xfail("Dask DataFrames cannot handle integer column names")
    es = EntitySet(id="Failure")
    error_text = r"All column names must be strings \(Columns \[3\] are not strings\)"
    with pytest.raises(ValueError, match=error_text):
        es.add_dataframe(dataframe_name="str_cols", dataframe=bad_df, index="a")
    bad_df.ww.init()
    with pytest.raises(ValueError, match=error_text):
        es.add_dataframe(dataframe_name="str_cols", dataframe=bad_df)
def test_sort_time_id():
    """A dataframe added with a time index is sorted by that time index,
    and the EntitySet stores a copy rather than the original frame."""
    transactions_df = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            # reversed date range, so the input is deliberately unsorted
            "transaction_time": pd.date_range(start="10:00", periods=6, freq="10s")[
                ::-1
            ],
        },
    )
    es = EntitySet(
        "test",
        dataframes={"t": (transactions_df.copy(), "id", "transaction_time")},
    )
    assert es["t"] is not transactions_df
    times = list(es["t"].transaction_time)
    assert times == sorted(list(transactions_df.transaction_time))
def test_already_sorted_parameter():
    """With already_sorted=True the EntitySet must keep the original row order
    even though the time index values are not actually sorted."""
    transactions_df = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            "transaction_time": [
                datetime(2014, 4, 6),
                datetime(2012, 4, 8),
                datetime(2012, 4, 8),
                datetime(2013, 4, 8),
                datetime(2015, 4, 8),
                datetime(2016, 4, 9),
            ],
        },
    )
    es = EntitySet(id="test")
    es.add_dataframe(
        transactions_df.copy(),
        dataframe_name="t",
        index="id",
        time_index="transaction_time",
        already_sorted=True,
    )
    assert es["t"] is not transactions_df
    times = list(es["t"].transaction_time)
    assert times == list(transactions_df.transaction_time)
def test_concat_not_inplace(es):
    """concat() returns a new EntitySet with a reset data description and
    leaves both operands untouched."""
    first_es = copy.deepcopy(es)
    for df in first_es.dataframes:
        # empty out every dataframe of the first copy; concat should restore the rows
        new_df = df.loc[[], :]
        first_es.replace_dataframe(df.ww.name, new_df)
    second_es = copy.deepcopy(es)
    # set the data description
    first_es.metadata
    new_es = first_es.concat(second_es)
    assert new_es == es
    assert new_es._data_description is None
    assert first_es._data_description is not None
def test_concat_inplace(es):
    """concat(..., inplace=True) merges into the receiver and clears its
    cached data description."""
    first_es = copy.deepcopy(es)
    second_es = copy.deepcopy(es)
    for df in first_es.dataframes:
        new_df = df.loc[[], :]
        first_es.replace_dataframe(df.ww.name, new_df)
    # set the data description
    es.metadata
    es.concat(first_es, inplace=True)
    assert second_es == es
    assert es._data_description is None
def test_concat_with_lti(es):
    """concat() recomputes last time indexes, including re-adding an LTI column
    that was removed from a parent dataframe with children."""
    first_es = copy.deepcopy(es)
    for df in first_es.dataframes:
        if first_es.dataframe_type == Library.SPARK:
            # Spark cannot compute last time indexes on an empty Dataframe
            new_df = df.head(1)
        else:
            new_df = df.loc[[], :]
        first_es.replace_dataframe(df.ww.name, new_df)
    second_es = copy.deepcopy(es)
    first_es.add_last_time_indexes()
    second_es.add_last_time_indexes()
    es.add_last_time_indexes()
    new_es = first_es.concat(second_es)
    assert new_es == es
    # drop the LTI column and metadata from "stores" in both operands
    first_es["stores"].ww.pop(LTI_COLUMN_NAME)
    first_es["stores"].ww.metadata.pop("last_time_index")
    second_es["stores"].ww.pop(LTI_COLUMN_NAME)
    second_es["stores"].ww.metadata.pop("last_time_index")
    assert not first_es.__eq__(es, deep=False)
    assert not second_es.__eq__(es, deep=False)
    assert LTI_COLUMN_NAME not in first_es["stores"]
    assert LTI_COLUMN_NAME not in second_es["stores"]
    new_es = first_es.concat(second_es)
    assert new_es.__eq__(es, deep=True)
    # stores will get last time index re-added because it has children that will get lti calculated
    assert LTI_COLUMN_NAME in new_es["stores"]
def test_concat_errors(es):
    """concat() raises a ValueError when the two entitysets differ structurally."""
    # entitysets are not equal
    copy_es = copy.deepcopy(es)
    copy_es["customers"].ww.pop("phone_number")
    error = (
        "Entitysets must have the same dataframes, relationships" ", and column names"
    )
    with pytest.raises(ValueError, match=error):
        es.concat(copy_es)
def test_concat_sort_index_with_time_index(pd_es):
    """For pandas frames with a time index, concat() sorts rows so the result
    is order-independent: A.concat(B) == B.concat(A)."""
    # only pandas dataframes sort on the index and time index
    es1 = copy.deepcopy(pd_es)
    es1.replace_dataframe(
        dataframe_name="customers",
        df=pd_es["customers"].loc[[0, 1], :],
        already_sorted=True,
    )
    es2 = copy.deepcopy(pd_es)
    es2.replace_dataframe(
        dataframe_name="customers",
        df=pd_es["customers"].loc[[2], :],
        already_sorted=True,
    )
    combined_es_order_1 = es1.concat(es2)
    combined_es_order_2 = es2.concat(es1)
    assert list(combined_es_order_1["customers"].index) == [2, 0, 1]
    assert list(combined_es_order_2["customers"].index) == [2, 0, 1]
    assert combined_es_order_1.__eq__(pd_es, deep=True)
    assert combined_es_order_2.__eq__(pd_es, deep=True)
    assert combined_es_order_2.__eq__(combined_es_order_1, deep=True)
def test_concat_sort_index_without_time_index(pd_es):
    """Without a time index concat() does not sort, so operand order determines
    row order and the two concat directions are only shallow-equal."""
    # Sorting is only performed on DataFrames with time indices
    es1 = copy.deepcopy(pd_es)
    es1.replace_dataframe(
        dataframe_name="products",
        df=pd_es["products"].iloc[[0, 1, 2], :],
        already_sorted=True,
    )
    es2 = copy.deepcopy(pd_es)
    es2.replace_dataframe(
        dataframe_name="products",
        df=pd_es["products"].iloc[[3, 4, 5], :],
        already_sorted=True,
    )
    combined_es_order_1 = es1.concat(es2)
    combined_es_order_2 = es2.concat(es1)
    # order matters when we don't sort
    assert list(combined_es_order_1["products"].index) == [
        "Haribo sugar-free gummy bears",
        "car",
        "toothpaste",
        "brown bag",
        "coke zero",
        "taco clock",
    ]
    assert list(combined_es_order_2["products"].index) == [
        "brown bag",
        "coke zero",
        "taco clock",
        "Haribo sugar-free gummy bears",
        "car",
        "toothpaste",
    ]
    assert combined_es_order_1.__eq__(pd_es, deep=True)
    assert not combined_es_order_2.__eq__(pd_es, deep=True)
    assert combined_es_order_2.__eq__(pd_es, deep=False)
    assert not combined_es_order_2.__eq__(combined_es_order_1, deep=True)
def test_concat_with_make_index(es):
    """Splitting an EntitySet (including a dataframe created with make_index)
    into overlapping row subsets and concatenating reproduces the original."""
    df = pd.DataFrame({"id": [0, 1, 2], "category": ["a", "b", "a"]})
    if es.dataframe_type == Library.DASK:
        df = dd.from_pandas(df, npartitions=2)
    elif es.dataframe_type == Library.SPARK:
        df = ps.from_pandas(df)
    logical_types = {"id": Categorical, "category": Categorical}
    es.add_dataframe(
        dataframe=df,
        dataframe_name="test_df",
        index="id1",
        make_index=True,
        logical_types=logical_types,
    )
    es_1 = copy.deepcopy(es)
    es_2 = copy.deepcopy(es)
    assert es.__eq__(es_1, deep=True)
    assert es.__eq__(es_2, deep=True)
    # map of what rows to take from es_1 and es_2 for each dataframe
    emap = {
        "log": [list(range(10)) + [14, 15, 16], list(range(10, 14)) + [15, 16]],
        "sessions": [[0, 1, 2], [1, 3, 4, 5]],
        "customers": [[0, 2], [1, 2]],
        "test_df": [[0, 1], [0, 2]],
    }
    for i, _es in enumerate([es_1, es_2]):
        for df_name, rows in emap.items():
            df = _es[df_name]
            _es.replace_dataframe(dataframe_name=df_name, df=df.loc[rows[i]])
    assert es.__eq__(es_1, deep=False)
    assert es.__eq__(es_2, deep=False)
    if es.dataframe_type == Library.PANDAS:
        assert not es.__eq__(es_1, deep=True)
        assert not es.__eq__(es_2, deep=True)
    old_es_1 = copy.deepcopy(es_1)
    old_es_2 = copy.deepcopy(es_2)
    es_3 = es_1.concat(es_2)
    # concat must not mutate its operands
    assert old_es_1.__eq__(es_1, deep=True)
    assert old_es_2.__eq__(es_2, deep=True)
    assert es_3.__eq__(es, deep=True)
@pytest.fixture
def pd_transactions_df():
    """Pandas transactions frame with a numeric time index column."""
    return pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            "card_id": [1, 2, 1, 3, 4, 5],
            "transaction_time": [10, 12, 13, 20, 21, 20],
            "fraud": [True, False, False, False, True, True],
        },
    )
@pytest.fixture
def dd_transactions_df(pd_transactions_df):
    """Dask version of pd_transactions_df; skipped when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_transactions_df, npartitions=3)
@pytest.fixture
def spark_transactions_df(pd_transactions_df):
    """Spark version of pd_transactions_df; skipped when Spark is not installed."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_transactions_df)
@pytest.fixture(
    params=["pd_transactions_df", "dd_transactions_df", "spark_transactions_df"],
)
def transactions_df(request):
    """Parametrized fixture yielding the transactions frame for each backend."""
    return request.getfixturevalue(request.param)
def test_set_time_type_on_init(transactions_df):
    """Constructing an EntitySet with a numeric time index sets time_type
    to "numeric"."""
    # create cards dataframe
    cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
    if is_instance(transactions_df, dd, "DataFrame"):
        cards_df = dd.from_pandas(cards_df, npartitions=3)
    if ps and isinstance(transactions_df, ps.DataFrame):
        cards_df = ps.from_pandas(cards_df)
    if not isinstance(transactions_df, pd.DataFrame):
        # non-pandas frames need explicit logical types
        cards_logical_types = {"id": Categorical}
        transactions_logical_types = {
            "id": Integer,
            "card_id": Categorical,
            "transaction_time": Integer,
            "fraud": Boolean,
        }
    else:
        cards_logical_types = None
        transactions_logical_types = None
    dataframes = {
        "cards": (cards_df, "id", None, cards_logical_types),
        "transactions": (
            transactions_df,
            "id",
            "transaction_time",
            transactions_logical_types,
        ),
    }
    relationships = [("cards", "id", "transactions", "card_id")]
    es = EntitySet("fraud", dataframes, relationships)
    # assert time_type is set
    assert es.time_type == "numeric"
def test_sets_time_when_adding_dataframe(transactions_df):
    """time_type is unset on an empty EntitySet, set by the first dataframe
    with a time index, and enforced for every dataframe added afterwards."""
    accounts_df = pd.DataFrame(
        {
            "id": [3, 4, 5],
            "signup_date": [
                datetime(2002, 5, 1),
                datetime(2006, 3, 20),
                datetime(2011, 11, 11),
            ],
        },
    )
    # a variant whose "time index" column holds plain strings (invalid)
    accounts_df_string = pd.DataFrame(
        {"id": [3, 4, 5], "signup_date": ["element", "exporting", "editable"]},
    )
    if is_instance(transactions_df, dd, "DataFrame"):
        accounts_df = dd.from_pandas(accounts_df, npartitions=2)
    if ps and isinstance(transactions_df, ps.DataFrame):
        accounts_df = ps.from_pandas(accounts_df)
    if not isinstance(transactions_df, pd.DataFrame):
        accounts_logical_types = {"id": Categorical, "signup_date": Datetime}
        transactions_logical_types = {
            "id": Integer,
            "card_id": Categorical,
            "transaction_time": Integer,
            "fraud": Boolean,
        }
    else:
        accounts_logical_types = None
        transactions_logical_types = None
    # create empty entityset
    es = EntitySet("fraud")
    # assert it's not set
    assert getattr(es, "time_type", None) is None
    # add dataframe
    es.add_dataframe(
        transactions_df,
        dataframe_name="transactions",
        index="id",
        time_index="transaction_time",
        logical_types=transactions_logical_types,
    )
    # assert time_type is set
    assert es.time_type == "numeric"
    # add another dataframe
    es.normalize_dataframe("transactions", "cards", "card_id", make_time_index=True)
    # assert time_type unchanged
    assert es.time_type == "numeric"
    # add wrong time type dataframe
    error_text = "accounts time index is Datetime type which differs from other entityset time indexes"
    with pytest.raises(TypeError, match=error_text):
        es.add_dataframe(
            accounts_df,
            dataframe_name="accounts",
            index="id",
            time_index="signup_date",
            logical_types=accounts_logical_types,
        )
    # add non time type as time index, only valid for pandas
    if isinstance(transactions_df, pd.DataFrame):
        error_text = "Time index column must contain datetime or numeric values"
        with pytest.raises(TypeError, match=error_text):
            es.add_dataframe(
                accounts_df_string,
                dataframe_name="accounts",
                index="id",
                time_index="signup_date",
            )
def test_secondary_time_index_no_primary_time_index(es):
    """Setting a secondary time index on a dataframe without a primary
    time index raises and leaves the dataframe unchanged."""
    es["products"].ww.set_types(logical_types={"rating": "Datetime"})
    assert es["products"].ww.time_index is None
    error = (
        "Cannot set secondary time index on a DataFrame that has no primary time index."
    )
    with pytest.raises(ValueError, match=error):
        es.set_secondary_time_index("products", {"rating": ["url"]})
    assert "secondary_time_index" not in es["products"].ww.metadata
    assert es["products"].ww.time_index is None
def test_set_non_valid_time_index_type(es):
    """A non-datetime, non-numeric column cannot become a time index."""
    error_text = "Time index column must be a Datetime or numeric column."
    with pytest.raises(TypeError, match=error_text):
        es["log"].ww.set_time_index("purchased")
def test_checks_time_type_setting_secondary_time_index(es):
    """Secondary time indexes must match the EntitySet's time type — checked
    for both a Datetime-typed and a numeric-typed EntitySet."""
    # entityset is timestamp time type
    assert es.time_type == Datetime
    # add secondary index that is timestamp type
    new_2nd_ti = {
        "upgrade_date": ["upgrade_date", "favorite_quote"],
        "cancel_date": ["cancel_date", "cancel_reason"],
    }
    es.set_secondary_time_index("customers", new_2nd_ti)
    assert es.time_type == Datetime
    # add secondary index that is numeric type
    new_2nd_ti = {"age": ["age", "loves_ice_cream"]}
    error_text = "customers time index is numeric type which differs from other entityset time indexes"
    with pytest.raises(TypeError, match=error_text):
        es.set_secondary_time_index("customers", new_2nd_ti)
    # add secondary index that is non-time type
    new_2nd_ti = {"favorite_quote": ["favorite_quote", "loves_ice_cream"]}
    error_text = "customers time index not recognized as numeric or datetime"
    with pytest.raises(TypeError, match=error_text):
        es.set_secondary_time_index("customers", new_2nd_ti)
    # add mismatched pair of secondary time indexes
    new_2nd_ti = {
        "upgrade_date": ["upgrade_date", "favorite_quote"],
        "age": ["age", "loves_ice_cream"],
    }
    error_text = "customers time index is numeric type which differs from other entityset time indexes"
    with pytest.raises(TypeError, match=error_text):
        es.set_secondary_time_index("customers", new_2nd_ti)
    # create entityset with numeric time type
    cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
    transactions_df = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            "card_id": [1, 2, 1, 3, 4, 5],
            "transaction_time": [10, 12, 13, 20, 21, 20],
            "fraud_decision_time": [11, 14, 15, 21, 22, 21],
            "transaction_city": ["City A"] * 6,
            "transaction_date": [datetime(1989, 2, i) for i in range(1, 7)],
            "fraud": [True, False, False, False, True, True],
        },
    )
    dataframes = {
        "cards": (cards_df, "id"),
        "transactions": (transactions_df, "id", "transaction_time"),
    }
    relationships = [("cards", "id", "transactions", "card_id")]
    card_es = EntitySet("fraud", dataframes, relationships)
    assert card_es.time_type == "numeric"
    # add secondary index that is numeric time type
    new_2nd_ti = {"fraud_decision_time": ["fraud_decision_time", "fraud"]}
    card_es.set_secondary_time_index("transactions", new_2nd_ti)
    assert card_es.time_type == "numeric"
    # add secondary index that is timestamp type
    new_2nd_ti = {"transaction_date": ["transaction_date", "fraud"]}
    error_text = "transactions time index is Datetime type which differs from other entityset time indexes"
    with pytest.raises(TypeError, match=error_text):
        card_es.set_secondary_time_index("transactions", new_2nd_ti)
    # add secondary index that is non-time type
    new_2nd_ti = {"transaction_city": ["transaction_city", "fraud"]}
    error_text = "transactions time index not recognized as numeric or datetime"
    with pytest.raises(TypeError, match=error_text):
        card_es.set_secondary_time_index("transactions", new_2nd_ti)
    # add mixed secondary time indexes
    new_2nd_ti = {
        "transaction_city": ["transaction_city", "fraud"],
        "fraud_decision_time": ["fraud_decision_time", "fraud"],
    }
    with pytest.raises(TypeError, match=error_text):
        card_es.set_secondary_time_index("transactions", new_2nd_ti)
    # add bool secondary time index
    error_text = "transactions time index not recognized as numeric or datetime"
    with pytest.raises(TypeError, match=error_text):
        card_es.set_secondary_time_index("transactions", {"fraud": ["fraud"]})
def test_normalize_dataframe(es):
    """normalize_dataframe validates list arguments, creates the new parent
    dataframe with a relationship, and moves additional_columns out of the base."""
    error_text = "'additional_columns' must be a list, but received type.*"
    with pytest.raises(TypeError, match=error_text):
        es.normalize_dataframe(
            "sessions",
            "device_types",
            "device_type",
            additional_columns="log",
        )
    error_text = "'copy_columns' must be a list, but received type.*"
    with pytest.raises(TypeError, match=error_text):
        es.normalize_dataframe(
            "sessions",
            "device_types",
            "device_type",
            copy_columns="log",
        )
    es.normalize_dataframe(
        "sessions",
        "device_types",
        "device_type",
        additional_columns=["device_name"],
        make_time_index=False,
    )
    assert len(es.get_forward_relationships("sessions")) == 2
    assert (
        es.get_forward_relationships("sessions")[1].parent_dataframe.ww.name
        == "device_types"
    )
    # "device_name" moved to the new dataframe; the index column lives in both
    assert "device_name" in es["device_types"].columns
    assert "device_name" not in es["sessions"].columns
    assert "device_type" in es["device_types"].columns
def test_normalize_dataframe_add_index_as_column(es):
    """The new index column cannot also appear in additional_columns or
    copy_columns."""
    error_text = "Not adding device_type as both index and column in additional_columns"
    with pytest.raises(ValueError, match=error_text):
        es.normalize_dataframe(
            "sessions",
            "device_types",
            "device_type",
            additional_columns=["device_name", "device_type"],
            make_time_index=False,
        )
    error_text = "Not adding device_type as both index and column in copy_columns"
    with pytest.raises(ValueError, match=error_text):
        es.normalize_dataframe(
            "sessions",
            "device_types",
            "device_type",
            copy_columns=["device_name", "device_type"],
            make_time_index=False,
        )
def test_normalize_dataframe_new_time_index_in_base_dataframe_error_check(es):
    """make_time_index given as a string must name a column of the base dataframe."""
    error_text = "'make_time_index' must be a column in the base dataframe"
    with pytest.raises(ValueError, match=error_text):
        es.normalize_dataframe(
            base_dataframe_name="customers",
            new_dataframe_name="cancellations",
            index="cancel_reason",
            make_time_index="non-existent",
        )
def test_normalize_dataframe_new_time_index_in_column_list_error_check(es):
    """A string make_time_index must also be listed in additional_columns or
    copy_columns."""
    error_text = (
        "'make_time_index' must be specified in 'additional_columns' or 'copy_columns'"
    )
    with pytest.raises(ValueError, match=error_text):
        es.normalize_dataframe(
            base_dataframe_name="customers",
            new_dataframe_name="cancellations",
            index="cancel_reason",
            make_time_index="cancel_date",
        )
def test_normalize_dataframe_new_time_index_copy_success_check(es):
    """make_time_index listed in copy_columns is accepted (no exception)."""
    es.normalize_dataframe(
        base_dataframe_name="customers",
        new_dataframe_name="cancellations",
        index="cancel_reason",
        make_time_index="cancel_date",
        additional_columns=[],
        copy_columns=["cancel_date"],
    )
def test_normalize_dataframe_new_time_index_additional_success_check(es):
    """make_time_index listed in additional_columns is accepted (no exception)."""
    es.normalize_dataframe(
        base_dataframe_name="customers",
        new_dataframe_name="cancellations",
        index="cancel_reason",
        make_time_index="cancel_date",
        additional_columns=["cancel_date"],
        copy_columns=[],
    )
@pytest.fixture
def pd_normalize_es():
    """EntitySet with one pandas dataframe (no time index) for normalize tests."""
    df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "A": [5, 4, 2, 3],
            "time": [
                datetime(2020, 6, 3),
                (datetime(2020, 3, 12)),
                datetime(2020, 5, 1),
                datetime(2020, 4, 22),
            ],
        },
    )
    es = EntitySet("es")
    return es.add_dataframe(dataframe_name="data", dataframe=df, index="id")
@pytest.fixture
def dd_normalize_es(pd_normalize_es):
    """Dask version of pd_normalize_es, reusing its Woodwork schema."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    es = EntitySet(id=pd_normalize_es.id)
    dd_df = dd.from_pandas(pd_normalize_es["data"], npartitions=2)
    dd_df.ww.init(schema=pd_normalize_es["data"].ww.schema)
    es.add_dataframe(dataframe=dd_df)
    return es
@pytest.fixture
def spark_normalize_es(pd_normalize_es):
    """Spark version of pd_normalize_es, reusing its Woodwork schema."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    es = EntitySet(id=pd_normalize_es.id)
    spark_df = ps.from_pandas(pd_normalize_es["data"])
    spark_df.ww.init(schema=pd_normalize_es["data"].ww.schema)
    es.add_dataframe(dataframe=spark_df)
    return es
@pytest.fixture(params=["pd_normalize_es", "dd_normalize_es", "spark_normalize_es"])
def normalize_es(request):
    """Parametrized fixture yielding the normalize-test EntitySet for each backend."""
    return request.getfixturevalue(request.param)
def test_normalize_time_index_from_none(normalize_es):
    """normalize_dataframe can establish a time index on the new dataframe
    even when the base dataframe has none."""
    assert normalize_es["data"].ww.time_index is None
    normalize_es.normalize_dataframe(
        base_dataframe_name="data",
        new_dataframe_name="normalized",
        index="A",
        make_time_index="time",
        copy_columns=["time"],
    )
    assert normalize_es["normalized"].ww.time_index == "time"
    df = normalize_es["normalized"]
    # only pandas sorts by time index
    if isinstance(df, pd.DataFrame):
        assert df["time"].is_monotonic_increasing
# NOTE(review): "dupicate" in the test name is a typo for "duplicate"; renaming
# would change the pytest node ID, so it is only flagged here.
def test_raise_error_if_dupicate_additional_columns_passed(es):
    """Duplicate entries in additional_columns raise a ValueError."""
    error_text = (
        "'additional_columns' contains duplicate columns. All columns must be unique."
    )
    with pytest.raises(ValueError, match=error_text):
        es.normalize_dataframe(
            "sessions",
            "device_types",
            "device_type",
            additional_columns=["device_name", "device_name"],
        )
# NOTE(review): "dupicate" in the test name is a typo for "duplicate"; renaming
# would change the pytest node ID, so it is only flagged here.
def test_raise_error_if_dupicate_copy_columns_passed(es):
    """Duplicate entries in copy_columns raise a ValueError."""
    error_text = (
        "'copy_columns' contains duplicate columns. All columns must be unique."
    )
    with pytest.raises(ValueError, match=error_text):
        es.normalize_dataframe(
            "sessions",
            "device_types",
            "device_type",
            copy_columns=["device_name", "device_name"],
        )
def test_normalize_dataframe_copies_logical_types(es):
    """Ordinal logical types (including their order) are carried over to the
    new dataframe for both moved and copied columns."""
    es["log"].ww.set_types(
        logical_types={
            "value": Ordinal(
                order=[0.0, 1.0, 2.0, 3.0, 5.0, 7.0, 10.0, 14.0, 15.0, 20.0],
            ),
        },
    )
    assert isinstance(es["log"].ww.logical_types["value"], Ordinal)
    assert len(es["log"].ww.logical_types["value"].order) == 10
    assert isinstance(es["log"].ww.logical_types["priority_level"], Ordinal)
    assert len(es["log"].ww.logical_types["priority_level"].order) == 3
    es.normalize_dataframe(
        "log",
        "values_2",
        "value_2",
        additional_columns=["priority_level"],
        copy_columns=["value"],
        make_time_index=False,
    )
    assert len(es.get_forward_relationships("log")) == 3
    assert es.get_forward_relationships("log")[2].parent_dataframe.ww.name == "values_2"
    # "priority_level" is moved, "value" is copied (stays in "log" too)
    assert "priority_level" in es["values_2"].columns
    assert "value" in es["values_2"].columns
    assert "priority_level" not in es["log"].columns
    assert "value" in es["log"].columns
    assert "value_2" in es["values_2"].columns
    assert isinstance(es["values_2"].ww.logical_types["priority_level"], Ordinal)
    assert len(es["values_2"].ww.logical_types["priority_level"].order) == 3
    assert isinstance(es["values_2"].ww.logical_types["value"], Ordinal)
    assert len(es["values_2"].ww.logical_types["value"].order) == 10
# sorting not supported in Dask, Spark
def test_make_time_index_keeps_original_sorting():
    """normalize_dataframe with make_time_index=True must not reorder the rows
    of the base dataframe."""
    trips = {
        "trip_id": [999 - i for i in range(1000)],
        "flight_time": [datetime(1997, 4, 1) for i in range(1000)],
        "flight_id": [1 for i in range(350)] + [2 for i in range(650)],
    }
    order = [i for i in range(1000)]
    df = pd.DataFrame.from_dict(trips)
    es = EntitySet("flights")
    es.add_dataframe(
        dataframe=df,
        dataframe_name="trips",
        index="trip_id",
        time_index="flight_time",
    )
    assert (es["trips"]["trip_id"] == order).all()
    es.normalize_dataframe(
        base_dataframe_name="trips",
        new_dataframe_name="flights",
        index="flight_id",
        make_time_index=True,
    )
    assert (es["trips"]["trip_id"] == order).all()
def test_normalize_dataframe_new_time_index(es):
    """A custom new_dataframe_time_index name is used, added as a column,
    and sorted on the new dataframe."""
    new_time_index = "value_time"
    es.normalize_dataframe(
        "log",
        "values",
        "value",
        make_time_index=True,
        new_dataframe_time_index=new_time_index,
    )
    assert es["values"].ww.time_index == new_time_index
    assert new_time_index in es["values"].columns
    assert len(es["values"].columns) == 2
    df = to_pandas(es["values"], sort_index=True)
    assert df[new_time_index].is_monotonic_increasing
def test_normalize_dataframe_same_index(es):
    """The new dataframe's index cannot equal the base dataframe's index."""
    transactions_df = pd.DataFrame(
        {
            "id": [1, 2, 3],
            "transaction_time": pd.date_range(start="10:00", periods=3, freq="10s"),
            "first_df_time": [1, 2, 3],
        },
    )
    es = EntitySet("example")
    es.add_dataframe(
        dataframe_name="df",
        index="id",
        time_index="transaction_time",
        dataframe=transactions_df,
    )
    error_text = "'index' must be different from the index column of the base dataframe"
    with pytest.raises(ValueError, match=error_text):
        es.normalize_dataframe(
            base_dataframe_name="df",
            new_dataframe_name="new_dataframe",
            index="id",
            make_time_index=True,
        )
def test_secondary_time_index(es):
    """normalize_dataframe can create a secondary time index on the new
    dataframe, recorded in Woodwork metadata with its dependent columns."""
    es.normalize_dataframe(
        "log",
        "values",
        "value",
        make_time_index=True,
        make_secondary_time_index={"datetime": ["comments"]},
        new_dataframe_time_index="value_time",
        new_dataframe_secondary_time_index="second_ti",
    )
    assert isinstance(es["values"].ww.logical_types["second_ti"], Datetime)
    assert es["values"].ww.semantic_tags["second_ti"] == set()
    assert es["values"].ww.metadata["secondary_time_index"] == {
        "second_ti": ["comments", "second_ti"],
    }
def test_sizeof(es):
    """EntitySet.__sizeof__ equals the sum of its dataframes' sizes
    (after last time indexes have been added)."""
    es.add_last_time_indexes()
    expected_size = sum(df.__sizeof__() for df in es.dataframes)
    assert es.__sizeof__() == expected_size
def test_construct_without_id():
    """An EntitySet created with no arguments has a None id."""
    es = EntitySet()
    assert es.id is None
def test_repr_without_id():
    """repr of an empty EntitySet shows a None id and no relationships."""
    expected = "Entityset: None\n DataFrames:\n Relationships:\n No relationships"
    assert repr(EntitySet()) == expected
def test_getitem_without_id():
    """Indexing an empty EntitySet raises a KeyError naming the missing dataframe."""
    es = EntitySet()
    error_text = "DataFrame test does not exist in entity set"
    with pytest.raises(KeyError, match=error_text):
        es["test"]
def test_metadata_without_id():
    """The metadata copy of an id-less EntitySet also has a None id."""
    assert EntitySet().metadata.id is None
@pytest.fixture
def pd_datetime3():
    """Small pandas frame with string-typed integer values for datetime tests."""
    data = {"id": [0, 1, 2], "ints": ["1", "2", "1"]}
    return pd.DataFrame(data)
@pytest.fixture
def dd_datetime3(pd_datetime3):
    """Dask version of pd_datetime3; skipped when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_datetime3, npartitions=2)
@pytest.fixture
def spark_datetime3(pd_datetime3):
    """Spark version of pd_datetime3; skipped when Spark is not installed."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_datetime3)
@pytest.fixture(params=["pd_datetime3", "dd_datetime3", "spark_datetime3"])
def datetime3(request):
    """Parametrized fixture yielding the datetime3 frame for each backend."""
    return request.getfixturevalue(request.param)
def test_datetime64_conversion(datetime3):
    """A datetime64 column (tz-aware for pandas/Dask) can be set as the
    time index after the dataframe is added."""
    df = datetime3
    df["time"] = pd.Timestamp.now()
    if ps and isinstance(df, ps.DataFrame):
        # Spark does not support tz-aware datetimes; use plain datetime64
        df["time"] = df["time"].astype(np.datetime64)
    else:
        df["time"] = df["time"].dt.tz_localize("UTC")
    if not isinstance(df, pd.DataFrame):
        logical_types = {"id": Integer, "ints": Integer, "time": Datetime}
    else:
        logical_types = None
    es = EntitySet(id="test")
    es.add_dataframe(
        dataframe_name="test_dataframe",
        index="id",
        dataframe=df,
        logical_types=logical_types,
    )
    es["test_dataframe"].ww.set_time_index("time")
    assert es["test_dataframe"].ww.time_index == "time"
@pytest.fixture
def pd_index_df():
    """Pandas frame with both a datetime and a numeric time-like column."""
    return pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            "transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
            "first_dataframe_time": [1, 2, 3, 5, 6, 6],
        },
    )
@pytest.fixture
def dd_index_df(pd_index_df):
    """Dask version of pd_index_df; skipped when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_index_df, npartitions=3)
@pytest.fixture
def spark_index_df(pd_index_df):
    """Spark version of pd_index_df; skipped when Spark is not installed."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_index_df)
@pytest.fixture(params=["pd_index_df", "dd_index_df", "spark_index_df"])
def index_df(request):
    """Parametrized fixture yielding the index-test frame for each backend."""
    return request.getfixturevalue(request.param)
def test_same_index_values(index_df):
    """The index column cannot double as the time index, both when adding a
    dataframe and when normalizing one."""
    if not isinstance(index_df, pd.DataFrame):
        logical_types = {
            "id": Integer,
            "transaction_time": Datetime,
            "first_dataframe_time": Integer,
        }
    else:
        logical_types = None
    es = EntitySet("example")
    error_text = (
        '"id" is already set as the index. An index cannot also be the time index.'
    )
    with pytest.raises(ValueError, match=error_text):
        es.add_dataframe(
            dataframe_name="dataframe",
            index="id",
            time_index="id",
            dataframe=index_df,
            logical_types=logical_types,
        )
    es.add_dataframe(
        dataframe_name="dataframe",
        index="id",
        time_index="transaction_time",
        dataframe=index_df,
        logical_types=logical_types,
    )
    error_text = "time_index and index cannot be the same value, first_dataframe_time"
    with pytest.raises(ValueError, match=error_text):
        es.normalize_dataframe(
            base_dataframe_name="dataframe",
            new_dataframe_name="new_dataframe",
            index="first_dataframe_time",
            make_time_index=True,
        )
def test_use_time_index(index_df):
    """Manually tagging a column as 'time_index' is rejected; the time_index
    parameter must be used instead."""
    if not isinstance(index_df, pd.DataFrame):
        bad_ltypes = {
            "id": Integer,
            "transaction_time": Datetime,
            "first_dataframe_time": Integer,
        }
        bad_semantic_tags = {"transaction_time": "time_index"}
        logical_types = {
            "id": Integer,
            "transaction_time": Datetime,
            "first_dataframe_time": Integer,
        }
    else:
        bad_ltypes = {"transaction_time": Datetime}
        bad_semantic_tags = {"transaction_time": "time_index"}
        logical_types = None
    es = EntitySet()
    error_text = re.escape(
        "Cannot add 'time_index' tag directly for column transaction_time. To set a column as the time index, use DataFrame.ww.set_time_index() instead.",
    )
    with pytest.raises(ValueError, match=error_text):
        es.add_dataframe(
            dataframe_name="dataframe",
            index="id",
            logical_types=bad_ltypes,
            semantic_tags=bad_semantic_tags,
            dataframe=index_df,
        )
    # the supported path: pass time_index explicitly
    es.add_dataframe(
        dataframe_name="dataframe",
        index="id",
        time_index="transaction_time",
        logical_types=logical_types,
        dataframe=index_df,
    )
def test_normalize_with_datetime_time_index(es):
    """Datetime logical types are preserved on columns copied during
    normalize_dataframe."""
    es.normalize_dataframe(
        base_dataframe_name="customers",
        new_dataframe_name="cancel_reason",
        index="cancel_reason",
        make_time_index=False,
        copy_columns=["signup_date", "upgrade_date"],
    )
    assert isinstance(es["cancel_reason"].ww.logical_types["signup_date"], Datetime)
    assert isinstance(es["cancel_reason"].ww.logical_types["upgrade_date"], Datetime)
def test_normalize_with_numeric_time_index(int_es):
    """Numeric semantic tags are preserved on columns copied during
    normalize_dataframe in a numeric-time-type EntitySet."""
    int_es.normalize_dataframe(
        base_dataframe_name="customers",
        new_dataframe_name="cancel_reason",
        index="cancel_reason",
        make_time_index=False,
        copy_columns=["signup_date", "upgrade_date"],
    )
    assert int_es["cancel_reason"].ww.semantic_tags["signup_date"] == {"numeric"}
def test_normalize_with_invalid_time_index(es):
    """Using a non-time column as make_time_index raises a TypeError."""
    if es.dataframe_type == Library.DASK:
        pytest.skip(
            "Woodwork raises different error with Dask. Remove this skip once WW is updated.",
        )
    error_text = "Time index column must contain datetime or numeric values"
    with pytest.raises(TypeError, match=error_text):
        es.normalize_dataframe(
            base_dataframe_name="customers",
            new_dataframe_name="cancel_reason",
            index="cancel_reason",
            copy_columns=["upgrade_date", "favorite_quote"],
            make_time_index="favorite_quote",
        )
def test_entityset_init():
    """Constructing an EntitySet from the dataframes/relationships dict form
    is equivalent to building it incrementally with add_dataframe/add_relationship."""
    cards_df = pd.DataFrame({"id": [1, 2, 3, 4, 5]})
    transactions_df = pd.DataFrame(
        {
            "id": [1, 2, 3, 4, 5, 6],
            "card_id": [1, 2, 1, 3, 4, 5],
            "transaction_time": [10, 12, 13, 20, 21, 20],
            "upgrade_date": [51, 23, 45, 12, 22, 53],
            "fraud": [True, False, False, False, True, True],
        },
    )
    logical_types = {"fraud": "boolean", "card_id": "integer"}
    # tuple form: (dataframe, index, time_index, logical_types, semantic_tags, make_index)
    dataframes = {
        "cards": (cards_df.copy(), "id", None, {"id": "Integer"}),
        "transactions": (
            transactions_df.copy(),
            "id",
            "transaction_time",
            logical_types,
            None,
            False,
        ),
    }
    relationships = [("cards", "id", "transactions", "card_id")]
    es = EntitySet(id="fraud_data", dataframes=dataframes, relationships=relationships)
    assert es["transactions"].ww.index == "id"
    assert es["transactions"].ww.time_index == "transaction_time"
    es_copy = EntitySet(id="fraud_data")
    es_copy.add_dataframe(dataframe_name="cards", dataframe=cards_df.copy(), index="id")
    es_copy.add_dataframe(
        dataframe_name="transactions",
        dataframe=transactions_df.copy(),
        index="id",
        logical_types=logical_types,
        make_index=False,
        time_index="transaction_time",
    )
    es_copy.add_relationship("cards", "id", "transactions", "card_id")
    assert es["cards"].ww == es_copy["cards"].ww
    assert es["transactions"].ww == es_copy["transactions"].ww
def test_add_interesting_values_specified_vals(es):
    """Explicitly supplied interesting values are stored verbatim in each
    column's Woodwork metadata."""
    product_vals = ["coke zero", "taco clock"]
    country_vals = ["AL", "US"]
    interesting_values = {
        "product_id": product_vals,
        "countrycode": country_vals,
    }
    es.add_interesting_values(dataframe_name="log", values=interesting_values)
    assert es["log"].ww["product_id"].ww.metadata["interesting_values"] == product_vals
    assert es["log"].ww["countrycode"].ww.metadata["interesting_values"] == country_vals
def test_add_interesting_values_vals_specified_without_dataframe_name(es):
    """Supplying explicit values without a dataframe_name raises a ValueError."""
    interesting_values = {
        "countrycode": ["AL", "US"],
    }
    error_msg = "dataframe_name must be specified if values are provided"
    with pytest.raises(ValueError, match=error_msg):
        es.add_interesting_values(values=interesting_values)
def test_add_interesting_values_single_dataframe(pd_es):
    """Inferred interesting values for one dataframe match the expected
    per-column lists; all other columns get none."""
    pd_es.add_interesting_values(dataframe_name="log")
    expected_vals = {
        "zipcode": ["02116", "02116-3899", "12345-6789", "1234567890", "0"],
        "countrycode": ["US", "AL", "ALB", "USA"],
        "subregioncode": ["US-AZ", "US-MT", "ZM-06", "UG-219"],
        "priority_level": [0, 1, 2],
    }
    for col in pd_es["log"].columns:
        if col in expected_vals:
            assert (
                pd_es["log"].ww.columns[col].metadata.get("interesting_values")
                == expected_vals[col]
            )
        else:
            assert (
                pd_es["log"].ww.columns[col].metadata.get("interesting_values") is None
            )
def test_add_interesting_values_multiple_dataframes(pd_es):
    """Infer interesting values across all dataframes and check that only the
    expected columns per dataframe receive them.

    Fix: "stores" previously mapped to ``{}`` (an empty *dict*), not an empty
    set like every other entry; the ``col in expected_cols`` membership test
    only worked by accident of dict key lookup. Use ``set()`` for consistency.
    """
    pd_es.add_interesting_values()
    # Columns expected to receive inferred interesting values, per dataframe.
    expected_cols_with_vals = {
        "régions": {"language"},
        "stores": set(),  # no qualifying columns
        "products": {"department"},
        "customers": {"cancel_reason", "engagement_level"},
        "sessions": {"device_type", "device_name"},
        "log": {"zipcode", "countrycode", "subregioncode", "priority_level"},
        "cohorts": {"cohort_name"},
    }
    for df_id, df in pd_es.dataframe_dict.items():
        expected_cols = expected_cols_with_vals[df_id]
        for col in df.columns:
            if col in expected_cols:
                assert df.ww.columns[col].metadata.get("interesting_values") is not None
            else:
                assert df.ww.columns[col].metadata.get("interesting_values") is None
def test_add_interesting_values_verbose_output(caplog):
    """Verbose mode logs a message for each value marked as interesting.

    Fix: restore ``logger.propagate`` in a ``finally`` block so that a failure
    inside ``add_interesting_values`` cannot leave propagation enabled and
    leak log records into unrelated tests.
    """
    es = load_retail(nrows=200)
    es["order_products"].ww.set_types({"quantity": "Categorical"})
    es["orders"].ww.set_types({"country": "Categorical"})
    logger = logging.getLogger("featuretools")
    logger_es = logging.getLogger("featuretools.entityset")
    # temporarily allow records to propagate so caplog can capture them
    logger.propagate = True
    logger_es.propagate = True
    try:
        es.add_interesting_values(verbose=True, max_values=10)
    finally:
        logger.propagate = False
        logger_es.propagate = False
    assert (
        "Column country: Marking United Kingdom as an interesting value" in caplog.text
    )
    assert "Column quantity: Marking 6 as an interesting value" in caplog.text
def test_entityset_equality(es):
first_es = EntitySet()
second_es = EntitySet()
assert first_es == second_es
first_es.add_dataframe(
dataframe_name="customers",
dataframe=es["customers"].copy(),
index="id",
time_index="signup_date",
logical_types=es["customers"].ww.logical_types,
semantic_tags=get_df_tags(es["customers"]),
)
assert first_es != second_es
second_es.add_dataframe(
dataframe_name="sessions",
dataframe=es["sessions"].copy(),
index="id",
logical_types=es["sessions"].ww.logical_types,
semantic_tags=get_df_tags(es["sessions"]),
)
assert first_es != second_es
first_es.add_dataframe(
dataframe_name="sessions",
dataframe=es["sessions"].copy(),
index="id",
logical_types=es["sessions"].ww.logical_types,
semantic_tags=get_df_tags(es["sessions"]),
)
second_es.add_dataframe(
dataframe_name="customers",
dataframe=es["customers"].copy(),
index="id",
time_index="signup_date",
logical_types=es["customers"].ww.logical_types,
semantic_tags=get_df_tags(es["customers"]),
)
assert first_es == second_es
first_es.add_relationship("customers", "id", "sessions", "customer_id")
assert first_es != second_es
assert second_es != first_es
second_es.add_relationship("customers", "id", "sessions", "customer_id")
assert first_es == second_es
def test_entityset_dataframe_dict_and_relationship_equality(es):
    """Equality depends on dataframe count, dataframe identity, and relationships."""
    first_es = EntitySet()
    second_es = EntitySet()
    first_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=es["sessions"].copy(),
        index="id",
        logical_types=es["sessions"].ww.logical_types,
        semantic_tags=get_df_tags(es["sessions"]),
    )
    # Tests if two entity sets are not equal if they have a different
    # number of dataframes attached.
    # first_es has 1 dataframe, second_es has 0 dataframes attached.
    assert first_es != second_es
    second_es.add_dataframe(
        dataframe_name="customers",
        dataframe=es["customers"].copy(),
        index="id",
        logical_types=es["customers"].ww.logical_types,
        semantic_tags=get_df_tags(es["customers"]),
    )
    # Tests if two entity sets are not equal if they have a different
    # dataframes attached.
    # first_es has the sessions dataframe attached,
    # second_es has the customers dataframe attached.
    assert first_es != second_es
    first_es.add_dataframe(
        dataframe_name="customers",
        dataframe=es["customers"].copy(),
        index="id",
        logical_types=es["customers"].ww.logical_types,
        semantic_tags=get_df_tags(es["customers"]),
    )
    first_es.add_dataframe(
        dataframe_name="stores",
        dataframe=es["stores"].copy(),
        index="id",
        logical_types=es["stores"].ww.logical_types,
        semantic_tags=get_df_tags(es["stores"]),
    )
    first_es.add_dataframe(
        dataframe_name="régions",
        dataframe=es["régions"].copy(),
        index="id",
        logical_types=es["régions"].ww.logical_types,
        semantic_tags=get_df_tags(es["régions"]),
    )
    second_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=es["sessions"].copy(),
        index="id",
        logical_types=es["sessions"].ww.logical_types,
        semantic_tags=get_df_tags(es["sessions"]),
    )
    second_es.add_dataframe(
        dataframe_name="stores",
        dataframe=es["stores"].copy(),
        index="id",
        logical_types=es["stores"].ww.logical_types,
        semantic_tags=get_df_tags(es["stores"]),
    )
    second_es.add_dataframe(
        dataframe_name="régions",
        dataframe=es["régions"].copy(),
        index="id",
        logical_types=es["régions"].ww.logical_types,
        semantic_tags=get_df_tags(es["régions"]),
    )
    # Now the two entity sets should be equal,
    # since they have the same dataframes.
    assert first_es == second_es
    first_es.add_relationship("customers", "id", "sessions", "customer_id")
    second_es.add_relationship("régions", "id", "stores", "région_id")
    # Test if two entity sets are not equal
    # if they have different relationships.
    assert first_es != second_es
def test_entityset_id_equality():
    """Otherwise-empty EntitySets compare equal exactly when their ids match."""
    es_a = EntitySet(id="first")
    es_a_twin = EntitySet(id="first")
    es_b = EntitySet(id="second")
    assert es_a != es_b
    assert es_a == es_a_twin
def test_entityset_time_type_equality():
    """time_type must match (by value) for two empty EntitySets to be equal."""
    left = EntitySet()
    right = EntitySet()
    assert left == right
    # Differ: one side has a time type, the other does not.
    left.time_type = "numeric"
    assert left != right
    # Differ: mismatched time types ("numeric" vs Datetime).
    right.time_type = Datetime
    assert left != right
    # Match again once both are "numeric".
    right.time_type = "numeric"
    assert left == right
def test_entityset_deep_equality(es):
    """deep=True additionally compares Woodwork metadata and dataframe contents."""
    first_es = EntitySet()
    second_es = EntitySet()
    first_es.add_dataframe(
        dataframe_name="customers",
        dataframe=es["customers"].copy(),
        index="id",
        time_index="signup_date",
        logical_types=es["customers"].ww.logical_types,
        semantic_tags=get_df_tags(es["customers"]),
    )
    first_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=es["sessions"].copy(),
        index="id",
        logical_types=es["sessions"].ww.logical_types,
        semantic_tags=get_df_tags(es["sessions"]),
    )
    second_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=es["sessions"].copy(),
        index="id",
        logical_types=es["sessions"].ww.logical_types,
        semantic_tags=get_df_tags(es["sessions"]),
    )
    second_es.add_dataframe(
        dataframe_name="customers",
        dataframe=es["customers"].copy(),
        index="id",
        time_index="signup_date",
        logical_types=es["customers"].ww.logical_types,
        semantic_tags=get_df_tags(es["customers"]),
    )
    assert first_es.__eq__(second_es, deep=False)
    assert first_es.__eq__(second_es, deep=True)
    # Woodwork metadata only gets included in deep equality check
    first_es["sessions"].ww.metadata["created_by"] = "user0"
    assert first_es.__eq__(second_es, deep=False)
    assert not first_es.__eq__(second_es, deep=True)
    second_es["sessions"].ww.metadata["created_by"] = "user0"
    assert first_es.__eq__(second_es, deep=False)
    assert first_es.__eq__(second_es, deep=True)
    updated_df = first_es["customers"].loc[[2, 0], :]
    first_es.replace_dataframe("customers", updated_df)
    assert first_es.__eq__(second_es, deep=False)
    # Uses woodwork equality which only looks at df content for pandas
    if isinstance(updated_df, pd.DataFrame):
        assert not first_es.__eq__(second_es, deep=True)
    else:
        assert first_es.__eq__(second_es, deep=True)
@pytest.fixture(params=["make_es", "dask_es_to_copy"])
def es_to_copy(request):
    """Parametrized fixture yielding a pandas- or Dask-backed EntitySet to copy."""
    return request.getfixturevalue(request.param)
@pytest.fixture
def dask_es_to_copy(make_es):
    """Rebuild ``make_es`` on Dask dataframes; skipped when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    es = EntitySet(id=make_es.id)
    for df in make_es.dataframes:
        # Preserve each dataframe's Woodwork schema across the Dask conversion.
        dd_df = dd.from_pandas(df.reset_index(drop=True), npartitions=4)
        dd_df.ww.init(schema=df.ww.schema)
        es.add_dataframe(dd_df)
    for rel in make_es.relationships:
        es.add_relationship(
            rel.parent_dataframe.ww.name,
            rel._parent_column_name,
            rel.child_dataframe.ww.name,
            rel._child_column_name,
        )
    return es
def test_deepcopy_entityset(es_to_copy):
    """copy.deepcopy produces an equal EntitySet that shares no objects."""
    # Uses make_es since the es fixture uses deepcopy
    copied_es = copy.deepcopy(es_to_copy)
    assert copied_es == es_to_copy
    assert copied_es is not es_to_copy
    for df_name in es_to_copy.dataframe_dict.keys():
        original_df = es_to_copy[df_name]
        new_df = copied_es[df_name]
        # Schemas compare equal but are distinct objects; data is identical.
        assert new_df.ww.schema == original_df.ww.schema
        assert new_df.ww._schema is not original_df.ww._schema
        pd.testing.assert_frame_equal(to_pandas(new_df), to_pandas(original_df))
        assert new_df is not original_df
def test_deepcopy_entityset_woodwork_changes(es):
    """Woodwork mutations on the copy do not leak back to the original."""
    copied_es = copy.deepcopy(es)
    assert copied_es == es
    assert copied_es is not es
    copied_es["products"].ww.add_semantic_tags({"id": "new_tag"})
    assert copied_es["products"].ww.semantic_tags["id"] == {"index", "new_tag"}
    assert es["products"].ww.semantic_tags["id"] == {"index"}
    assert copied_es != es
def test_deepcopy_entityset_featuretools_changes(es):
    """Featuretools-level mutations on the copy do not leak back to the original."""
    copied_es = copy.deepcopy(es)
    assert copied_es == es
    assert copied_es is not es
    copied_es.set_secondary_time_index(
        "customers",
        {"upgrade_date": ["engagement_level"]},
    )
    assert copied_es["customers"].ww.metadata["secondary_time_index"] == {
        "upgrade_date": ["engagement_level", "upgrade_date"],
    }
    assert es["customers"].ww.metadata["secondary_time_index"] == {
        "cancel_date": ["cancel_reason", "cancel_date"],
    }
def test_dataframe_type_empty_es():
    """An EntitySet with no dataframes reports no dataframe_type."""
    es = EntitySet("test")
    assert es.dataframe_type is None
def test_dataframe_type_pandas_es(pd_es):
    """A pandas-backed EntitySet reports the PANDAS library."""
    assert pd_es.dataframe_type == Library.PANDAS
def test_es__getstate__key_unique(es):
    """The private pickling key must not collide with an existing attribute."""
    assert not hasattr(es, WW_SCHEMA_KEY)
def test_pd_es_pickling(pd_es):
    """Pickle round-trip preserves deep equality and leaves no helper key behind."""
    pkl = pickle.dumps(pd_es)
    unpickled = pickle.loads(pkl)
    assert pd_es.__eq__(unpickled, deep=True)
    assert not hasattr(unpickled, WW_SCHEMA_KEY)
def test_empty_es_pickling():
    """An empty EntitySet also survives a pickle round-trip."""
    es = EntitySet(id="empty")
    pkl = pickle.dumps(es)
    unpickled = pickle.loads(pkl)
    assert es.__eq__(unpickled, deep=True)
@patch("featuretools.entityset.entityset.EntitySet.add_dataframe")
def test_setitem(add_dataframe):
    """``es[name] = df`` delegates to add_dataframe with the given name."""
    es = EntitySet()
    df = pd.DataFrame()
    es["new_df"] = df
    assert add_dataframe.called
    add_dataframe.assert_called_with(dataframe=df, dataframe_name="new_df")
def test_latlong_nan_normalization(latlong_df):
    """Missing LatLong values are normalized to (nan, nan) tuples at ES creation."""
    latlong_df.ww.init(
        name="latLong",
        index="idx",
        logical_types={"latLong": "LatLong"},
    )
    dataframes = {"latLong": (latlong_df,)}
    relationships = []
    es = EntitySet("latlong-test", dataframes, relationships)
    normalized_df = to_pandas(es["latLong"], sort_index=True)
    expected_df = pd.DataFrame(
        {"idx": [0, 1, 2], "latLong": [(np.nan, np.nan), (1, 2), (np.nan, np.nan)]},
    )
    pd.testing.assert_frame_equal(normalized_df, expected_df)
def test_latlong_nan_normalization_add_dataframe(latlong_df):
    """Same normalization applies when the dataframe is added via add_dataframe."""
    latlong_df.ww.init(
        name="latLong",
        index="idx",
        logical_types={"latLong": "LatLong"},
    )
    es = EntitySet("latlong-test")
    es.add_dataframe(latlong_df)
    normalized_df = to_pandas(es["latLong"], sort_index=True)
    expected_df = pd.DataFrame(
        {"idx": [0, 1, 2], "latLong": [(np.nan, np.nan), (1, 2), (np.nan, np.nan)]},
    )
    pd.testing.assert_frame_equal(normalized_df, expected_df)
| 82,748 | 31.61687 | 179 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_es_metadata.py | import pandas as pd
import pytest
from featuretools import EntitySet
from featuretools.tests.testing_utils import backward_path, forward_path
def test_cannot_re_add_relationships_that_already_exists(es):
    """Re-adding an existing relationship is a no-op (no duplicate is stored)."""
    before_len = len(es.relationships)
    es.add_relationship(relationship=es.relationships[0])
    after_len = len(es.relationships)
    assert before_len == after_len
def test_add_relationships_convert_type(es):
    """Child columns get the foreign_key tag and the parent's logical type."""
    for r in es.relationships:
        assert r.parent_dataframe.ww.index == r._parent_column_name
        assert "foreign_key" in r.child_column.ww.semantic_tags
        assert r.child_column.ww.logical_type == r.parent_column.ww.logical_type
def test_get_forward_dataframes(es):
    """Direct forward (child->parent) neighbors of 'log' with their paths."""
    dataframes = es.get_forward_dataframes("log")
    path_to_sessions = forward_path(es, ["log", "sessions"])
    path_to_products = forward_path(es, ["log", "products"])
    assert list(dataframes) == [
        ("sessions", path_to_sessions),
        ("products", path_to_products),
    ]
def test_get_backward_dataframes(es):
    """Direct backward (parent->child) neighbors of 'customers'."""
    dataframes = es.get_backward_dataframes("customers")
    path_to_sessions = backward_path(es, ["customers", "sessions"])
    assert list(dataframes) == [("sessions", path_to_sessions)]
def test_get_forward_dataframes_deep(es):
    """deep=True traverses transitively through every forward relationship."""
    dataframes = es.get_forward_dataframes("log", deep=True)
    path_to_sessions = forward_path(es, ["log", "sessions"])
    path_to_products = forward_path(es, ["log", "products"])
    path_to_customers = forward_path(es, ["log", "sessions", "customers"])
    path_to_regions = forward_path(es, ["log", "sessions", "customers", "régions"])
    path_to_cohorts = forward_path(es, ["log", "sessions", "customers", "cohorts"])
    assert list(dataframes) == [
        ("sessions", path_to_sessions),
        ("customers", path_to_customers),
        ("cohorts", path_to_cohorts),
        ("régions", path_to_regions),
        ("products", path_to_products),
    ]
def test_get_backward_dataframes_deep(es):
    """deep=True traverses transitively through every backward relationship."""
    dataframes = es.get_backward_dataframes("customers", deep=True)
    path_to_log = backward_path(es, ["customers", "sessions", "log"])
    path_to_sessions = backward_path(es, ["customers", "sessions"])
    assert list(dataframes) == [("sessions", path_to_sessions), ("log", path_to_log)]
def test_get_forward_relationships(es):
    """Forward relationships list the parent/child dataframe names in order."""
    relationships = es.get_forward_relationships("log")
    assert len(relationships) == 2
    assert relationships[0]._parent_dataframe_name == "sessions"
    assert relationships[0]._child_dataframe_name == "log"
    assert relationships[1]._parent_dataframe_name == "products"
    assert relationships[1]._child_dataframe_name == "log"
    relationships = es.get_forward_relationships("sessions")
    assert len(relationships) == 1
    assert relationships[0]._parent_dataframe_name == "customers"
    assert relationships[0]._child_dataframe_name == "sessions"
def test_get_backward_relationships(es):
    """Backward relationships mirror the forward view from the parent side."""
    relationships = es.get_backward_relationships("sessions")
    assert len(relationships) == 1
    assert relationships[0]._parent_dataframe_name == "sessions"
    assert relationships[0]._child_dataframe_name == "log"
    relationships = es.get_backward_relationships("customers")
    assert len(relationships) == 1
    assert relationships[0]._parent_dataframe_name == "customers"
    assert relationships[0]._child_dataframe_name == "sessions"
def test_find_forward_paths(es):
    """A single forward path exists from log to customers (via sessions)."""
    paths = list(es.find_forward_paths("log", "customers"))
    assert len(paths) == 1
    path = paths[0]
    assert len(path) == 2
    assert path[0]._child_dataframe_name == "log"
    assert path[0]._parent_dataframe_name == "sessions"
    assert path[1]._child_dataframe_name == "sessions"
    assert path[1]._parent_dataframe_name == "customers"
def test_find_forward_paths_multiple_paths(diamond_es):
    """A diamond-shaped schema yields two distinct forward paths."""
    paths = list(diamond_es.find_forward_paths("transactions", "regions"))
    assert len(paths) == 2
    path1, path2 = paths
    r1, r2 = path1
    assert r1._child_dataframe_name == "transactions"
    assert r1._parent_dataframe_name == "stores"
    assert r2._child_dataframe_name == "stores"
    assert r2._parent_dataframe_name == "regions"
    r1, r2 = path2
    assert r1._child_dataframe_name == "transactions"
    assert r1._parent_dataframe_name == "customers"
    assert r2._child_dataframe_name == "customers"
    assert r2._parent_dataframe_name == "regions"
def test_find_forward_paths_multiple_relationships(games_es):
    """Two relationships between the same pair of dataframes give two paths."""
    paths = list(games_es.find_forward_paths("games", "teams"))
    assert len(paths) == 2
    path1, path2 = paths
    assert len(path1) == 1
    assert len(path2) == 1
    r1 = path1[0]
    r2 = path2[0]
    assert r1._child_dataframe_name == "games"
    assert r2._child_dataframe_name == "games"
    assert r1._parent_dataframe_name == "teams"
    assert r2._parent_dataframe_name == "teams"
    assert r1._child_column_name == "home_team_id"
    assert r2._child_column_name == "away_team_id"
    assert r1._parent_column_name == "id"
    assert r2._parent_column_name == "id"
@pytest.fixture
def pd_employee_df():
    """Minimal self-referencing dataframe (employee -> manager)."""
    return pd.DataFrame({"id": [0], "manager_id": [0]})
@pytest.fixture
def dd_employee_df(pd_employee_df):
    """Dask variant of the employee dataframe; skipped when Dask is unavailable."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_employee_df, npartitions=2)
@pytest.fixture
def spark_employee_df(pd_employee_df):
    """Spark variant of the employee dataframe; skipped when Spark is unavailable."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_employee_df)
@pytest.fixture(params=["pd_employee_df", "dd_employee_df", "spark_employee_df"])
def employee_df(request):
    """Parametrized fixture covering all three dataframe backends."""
    return request.getfixturevalue(request.param)
def test_find_forward_paths_ignores_loops(employee_df):
    """Self-relationships do not cause infinite traversal; only [] is returned."""
    dataframes = {"employees": (employee_df, "id")}
    relationships = [("employees", "id", "employees", "manager_id")]
    es = EntitySet(dataframes=dataframes, relationships=relationships)
    paths = list(es.find_forward_paths("employees", "employees"))
    assert len(paths) == 1
    assert paths[0] == []
def test_find_backward_paths(es):
    """A single backward path exists from customers to log (via sessions)."""
    paths = list(es.find_backward_paths("customers", "log"))
    assert len(paths) == 1
    path = paths[0]
    assert len(path) == 2
    assert path[0]._child_dataframe_name == "sessions"
    assert path[0]._parent_dataframe_name == "customers"
    assert path[1]._child_dataframe_name == "log"
    assert path[1]._parent_dataframe_name == "sessions"
def test_find_backward_paths_multiple_paths(diamond_es):
    """A diamond-shaped schema yields two distinct backward paths."""
    paths = list(diamond_es.find_backward_paths("regions", "transactions"))
    assert len(paths) == 2
    path1, path2 = paths
    r1, r2 = path1
    assert r1._child_dataframe_name == "stores"
    assert r1._parent_dataframe_name == "regions"
    assert r2._child_dataframe_name == "transactions"
    assert r2._parent_dataframe_name == "stores"
    r1, r2 = path2
    assert r1._child_dataframe_name == "customers"
    assert r1._parent_dataframe_name == "regions"
    assert r2._child_dataframe_name == "transactions"
    assert r2._parent_dataframe_name == "customers"
def test_find_backward_paths_multiple_relationships(games_es):
    """Two relationships between the same pair of dataframes give two paths."""
    paths = list(games_es.find_backward_paths("teams", "games"))
    assert len(paths) == 2
    path1, path2 = paths
    assert len(path1) == 1
    assert len(path2) == 1
    r1 = path1[0]
    r2 = path2[0]
    assert r1._child_dataframe_name == "games"
    assert r2._child_dataframe_name == "games"
    assert r1._parent_dataframe_name == "teams"
    assert r2._parent_dataframe_name == "teams"
    assert r1._child_column_name == "home_team_id"
    assert r2._child_column_name == "away_team_id"
    assert r1._parent_column_name == "id"
    assert r2._parent_column_name == "id"
def test_has_unique_path(diamond_es):
    """Uniqueness of a forward path depends on the starting dataframe."""
    assert diamond_es.has_unique_forward_path("customers", "regions")
    assert not diamond_es.has_unique_forward_path("transactions", "regions")
def test_raise_key_error_missing_dataframe(es):
    """Indexing an unknown dataframe name raises KeyError (with/without an ES id)."""
    error_text = "DataFrame testing does not exist in ecommerce"
    with pytest.raises(KeyError, match=error_text):
        es["testing"]
    es_without_id = EntitySet()
    error_text = "DataFrame testing does not exist in entity set"
    with pytest.raises(KeyError, match=error_text):
        es_without_id["testing"]
def test_add_parent_not_index_column(es):
    """A relationship's parent column must be the parent dataframe's index."""
    error_text = "Parent column 'language' is not the index of dataframe régions"
    with pytest.raises(AttributeError, match=error_text):
        es.add_relationship("régions", "language", "customers", "région_id")
| 8,550 | 33.902041 | 86 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_relationship.py | from featuretools.entityset.relationship import Relationship, RelationshipPath
def test_relationship_path(es):
    """RelationshipPath supports indexing and iteration over (direction, rel) pairs."""
    log_to_sessions = Relationship(es, "sessions", "id", "log", "session_id")
    sessions_to_customers = Relationship(
        es,
        "customers",
        "id",
        "sessions",
        "customer_id",
    )
    path_list = [
        (True, log_to_sessions),
        (True, sessions_to_customers),
        (False, sessions_to_customers),
    ]
    path = RelationshipPath(path_list)
    for i, edge in enumerate(path_list):
        assert path[i] == edge
    assert [edge for edge in path] == path_list
def test_relationship_path_name(es):
    """Path names are built from the dataframes reached in traversal order."""
    assert RelationshipPath([]).name == ""
    log_to_sessions = Relationship(es, "sessions", "id", "log", "session_id")
    sessions_to_customers = Relationship(
        es,
        "customers",
        "id",
        "sessions",
        "customer_id",
    )
    forward_path = [(True, log_to_sessions), (True, sessions_to_customers)]
    assert RelationshipPath(forward_path).name == "sessions.customers"
    backward_path = [(False, sessions_to_customers), (False, log_to_sessions)]
    assert RelationshipPath(backward_path).name == "sessions.log"
    mixed_path = [(True, log_to_sessions), (False, log_to_sessions)]
    assert RelationshipPath(mixed_path).name == "sessions.log"
def test_relationship_path_dataframes(es):
    """dataframes() yields every dataframe visited, start included, in order."""
    assert list(RelationshipPath([]).dataframes()) == []
    log_to_sessions = Relationship(es, "sessions", "id", "log", "session_id")
    sessions_to_customers = Relationship(
        es,
        "customers",
        "id",
        "sessions",
        "customer_id",
    )
    forward_path = [(True, log_to_sessions), (True, sessions_to_customers)]
    assert list(RelationshipPath(forward_path).dataframes()) == [
        "log",
        "sessions",
        "customers",
    ]
    backward_path = [(False, sessions_to_customers), (False, log_to_sessions)]
    assert list(RelationshipPath(backward_path).dataframes()) == [
        "customers",
        "sessions",
        "log",
    ]
    mixed_path = [(True, log_to_sessions), (False, log_to_sessions)]
    assert list(RelationshipPath(mixed_path).dataframes()) == ["log", "sessions", "log"]
def test_names_when_multiple_relationships_between_dataframes(games_es):
    """Names are disambiguated with the child column when several rels exist."""
    relationship = Relationship(games_es, "teams", "id", "games", "home_team_id")
    assert relationship.child_name == "games[home_team_id]"
    assert relationship.parent_name == "teams[home_team_id]"
def test_names_when_no_other_relationship_between_dataframes(home_games_es):
    """Plain dataframe names are used when the relationship is unambiguous."""
    relationship = Relationship(home_games_es, "teams", "id", "games", "home_team_id")
    assert relationship.child_name == "games"
    assert relationship.parent_name == "teams"
def test_relationship_serialization(es):
    """to_dictionary/from_dictionary round-trip a Relationship exactly."""
    relationship = Relationship(es, "sessions", "id", "log", "session_id")
    dictionary = {
        "parent_dataframe_name": "sessions",
        "parent_column_name": "id",
        "child_dataframe_name": "log",
        "child_column_name": "session_id",
    }
    assert relationship.to_dictionary() == dictionary
    assert Relationship.from_dictionary(dictionary, es) == relationship
| 3,214 | 30.831683 | 88 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_spark_es.py | import pandas as pd
import pytest
from woodwork.logical_types import Datetime, Double, Integer, NaturalLanguage
from featuretools.entityset import EntitySet
from featuretools.tests.testing_utils import get_df_tags
from featuretools.utils.gen_utils import Library, import_or_none
from featuretools.utils.spark_utils import pd_to_spark_clean
ps = import_or_none("pyspark.pandas")
@pytest.mark.skipif("not ps")
def test_add_dataframe_from_spark_df(pd_es):
    """A Spark dataframe round-trips through add_dataframe without data loss."""
    cleaned_df = pd_to_spark_clean(pd_es["log"])
    log_spark = ps.from_pandas(cleaned_df)
    spark_es = EntitySet(id="spark_es")
    spark_es = spark_es.add_dataframe(
        dataframe_name="log_spark",
        dataframe=log_spark,
        index="id",
        time_index="datetime",
        logical_types=pd_es["log"].ww.logical_types,
        semantic_tags=get_df_tags(pd_es["log"]),
    )
    pd.testing.assert_frame_equal(
        cleaned_df,
        spark_es["log_spark"].to_pandas(),
        check_like=True,
    )
@pytest.mark.skipif("not ps")
def test_add_dataframe_with_non_numeric_index(pd_es, spark_es):
    """String indexes are handled identically by pandas and Spark backends."""
    df = pd.DataFrame(
        {
            "id": pd.Series(["A_1", "A_2", "C", "D"], dtype="string"),
            "values": [1, 12, -34, 27],
        },
    )
    spark_df = ps.from_pandas(df)
    pd_es.add_dataframe(
        dataframe_name="new_dataframe",
        dataframe=df,
        index="id",
        logical_types={"id": NaturalLanguage, "values": Integer},
    )
    spark_es.add_dataframe(
        dataframe_name="new_dataframe",
        dataframe=spark_df,
        index="id",
        logical_types={"id": NaturalLanguage, "values": Integer},
    )
    pd.testing.assert_frame_equal(
        pd_es["new_dataframe"].reset_index(drop=True),
        spark_es["new_dataframe"].to_pandas(),
    )
@pytest.mark.skipif("not ps")
def test_create_entityset_with_mixed_dataframe_types(pd_es, spark_es):
    """Mixing pandas and Spark dataframes in one EntitySet raises ValueError."""
    df = pd.DataFrame({"id": [0, 1, 2, 3], "values": [1, 12, -34, 27]})
    spark_df = ps.from_pandas(df)
    err_msg = (
        "All dataframes must be of the same type. "
        "Cannot add dataframe of type {} to an entityset with existing dataframes "
        "of type {}"
    )
    # Test error is raised when trying to add Spark dataframe to entitset with existing pandas dataframes
    with pytest.raises(
        ValueError,
        match=err_msg.format(type(spark_df), type(pd_es.dataframes[0])),
    ):
        pd_es.add_dataframe(
            dataframe_name="new_dataframe",
            dataframe=spark_df,
            index="id",
        )
    # Test error is raised when trying to add pandas dataframe to entitset with existing ps dataframes
    with pytest.raises(
        ValueError,
        match=err_msg.format(type(df), type(spark_es.dataframes[0])),
    ):
        spark_es.add_dataframe(dataframe_name="new_dataframe", dataframe=df, index="id")
@pytest.mark.skipif("not ps")
def test_add_last_time_indexes():
    """add_last_time_indexes produces identical results for pandas and Spark.

    Builds the same two-dataframe schema (sessions <- transactions) on both
    backends and compares the resulting last-time-index series.
    """
    pd_es = EntitySet(id="pd_es")
    spark_es = EntitySet(id="spark_es")
    sessions = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "user": [1, 2, 1, 3],
            "time": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
            "strings": ["I am a string", "23", "abcdef ghijk", ""],
        },
    )
    sessions_spark = ps.from_pandas(sessions)
    # Spark requires explicit logical types (no type inference here).
    sessions_logical_types = {
        "id": Integer,
        "user": Integer,
        "strings": NaturalLanguage,
        "time": Datetime,
    }
    transactions = pd.DataFrame(
        {
            "id": [0, 1, 2, 3, 4, 5],
            "session_id": [0, 0, 1, 2, 2, 3],
            "amount": [1.23, 5.24, 123.52, 67.93, 40.34, 50.13],
            "time": [
                pd.to_datetime("2019-01-10 03:53"),
                pd.to_datetime("2019-01-10 04:12"),
                pd.to_datetime("2019-02-03 10:34"),
                pd.to_datetime("2019-01-01 12:35"),
                pd.to_datetime("2019-01-01 12:49"),
                pd.to_datetime("2017-08-25 04:53"),
            ],
        },
    )
    transactions_spark = ps.from_pandas(transactions)
    transactions_logical_types = {
        "id": Integer,
        "session_id": Integer,
        "amount": Double,
        "time": Datetime,
    }
    pd_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=sessions,
        index="id",
        time_index="time",
    )
    spark_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=sessions_spark,
        index="id",
        time_index="time",
        logical_types=sessions_logical_types,
    )
    pd_es.add_dataframe(
        dataframe_name="transactions",
        dataframe=transactions,
        index="id",
        time_index="time",
    )
    spark_es.add_dataframe(
        dataframe_name="transactions",
        dataframe=transactions_spark,
        index="id",
        time_index="time",
        logical_types=transactions_logical_types,
    )
    pd_es = pd_es.add_relationship("sessions", "id", "transactions", "session_id")
    spark_es = spark_es.add_relationship("sessions", "id", "transactions", "session_id")
    assert "foreign_key" in pd_es["transactions"].ww.semantic_tags["session_id"]
    assert "foreign_key" in spark_es["transactions"].ww.semantic_tags["session_id"]
    # No last-time index exists until add_last_time_indexes is called.
    assert pd_es["sessions"].ww.metadata.get("last_time_index") is None
    assert spark_es["sessions"].ww.metadata.get("last_time_index") is None
    pd_es.add_last_time_indexes()
    spark_es.add_last_time_indexes()
    pd_lti_name = pd_es["sessions"].ww.metadata.get("last_time_index")
    spark_lti_name = spark_es["sessions"].ww.metadata.get("last_time_index")
    assert pd_lti_name == spark_lti_name
    pd.testing.assert_series_equal(
        pd_es["sessions"][pd_lti_name].sort_index(),
        spark_es["sessions"][spark_lti_name].to_pandas().sort_index(),
        check_names=False,
    )
@pytest.mark.skipif("not ps")
def test_add_dataframe_with_make_index():
    """make_index=True appends a sequential index column to a Spark dataframe."""
    values = [1, 12, -23, 27]
    df = pd.DataFrame({"values": values})
    spark_df = ps.from_pandas(df)
    spark_es = EntitySet(id="spark_es")
    ltypes = {"values": "Integer"}
    spark_es.add_dataframe(
        dataframe_name="new_dataframe",
        dataframe=spark_df,
        make_index=True,
        index="new_index",
        logical_types=ltypes,
    )
    expected_df = pd.DataFrame({"values": values, "new_index": range(len(values))})
    pd.testing.assert_frame_equal(expected_df, spark_es["new_dataframe"].to_pandas())
@pytest.mark.skipif("not ps")
def test_dataframe_type_spark(spark_es):
    """A Spark-backed EntitySet reports the SPARK library."""
    assert spark_es.dataframe_type == Library.SPARK
| 6,736 | 30.481308 | 105 | py |
featuretools | featuretools-main/featuretools/tests/entityset_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/entityset_tests/test_serialization.py | import json
import logging
import os
import tempfile
from unittest.mock import patch
from urllib.request import urlretrieve
import boto3
import pandas as pd
import pytest
import woodwork.type_sys.type_system as ww_type_system
from woodwork.logical_types import Datetime, LogicalType, Ordinal
from woodwork.serializers.serializer_base import typing_info_to_dict
from woodwork.type_sys.utils import list_logical_types
from featuretools.entityset import EntitySet, deserialize, serialize
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library
from featuretools.version import ENTITYSET_SCHEMA_VERSION
# S3/HTTP locations and filenames shared by the serialization round-trip tests.
BUCKET_NAME = "test-bucket"
WRITE_KEY_NAME = "test-key"
TEST_S3_URL = "s3://{}/{}".format(BUCKET_NAME, WRITE_KEY_NAME)
# Archive name is pinned to the current entityset schema version.
TEST_FILE = "test_serialization_data_entityset_schema_{}_2022_09_02.tar".format(
    ENTITYSET_SCHEMA_VERSION,
)
S3_URL = "s3://featuretools-static/" + TEST_FILE
URL = "https://featuretools-static.s3.amazonaws.com/" + TEST_FILE
TEST_KEY = "test_access_key_es"
def test_entityset_description(es):
    """An EntitySet's description round-trips back to an equal (metadata) ES."""
    description = serialize.entityset_to_description(es)
    _es = deserialize.description_to_entityset(description)
    assert es.metadata.__eq__(_es, deep=True)
def test_all_ww_logical_types():
    """Every registered Woodwork logical type survives a description round-trip."""
    logical_types = list_logical_types()["type_string"].to_list()
    dataframe = pd.DataFrame(columns=logical_types)
    es = EntitySet()
    ltype_dict = {ltype: ltype for ltype in logical_types}
    # Ordinal needs an explicit order argument to be instantiable.
    ltype_dict["ordinal"] = Ordinal(order=[])
    es.add_dataframe(
        dataframe=dataframe,
        dataframe_name="all_types",
        index="integer",
        logical_types=ltype_dict,
    )
    description = serialize.entityset_to_description(es)
    _es = deserialize.description_to_entityset(description)
    assert es.__eq__(_es, deep=True)
def test_with_custom_ww_logical_type():
    """User-registered logical types are preserved through serialization."""
    class CustomLogicalType(LogicalType):
        pass
    ww_type_system.add_type(CustomLogicalType)
    columns = ["integer", "natural_language", "custom_logical_type"]
    dataframe = pd.DataFrame(columns=columns)
    es = EntitySet()
    ltype_dict = {
        "integer": "integer",
        "natural_language": "natural_language",
        "custom_logical_type": CustomLogicalType,
    }
    es.add_dataframe(
        dataframe=dataframe,
        dataframe_name="custom_type",
        index="integer",
        logical_types=ltype_dict,
    )
    description = serialize.entityset_to_description(es)
    _es = deserialize.description_to_entityset(description)
    assert isinstance(
        _es["custom_type"].ww.logical_types["custom_logical_type"],
        CustomLogicalType,
    )
    assert es.__eq__(_es, deep=True)
def test_serialize_invalid_formats(es, tmp_path):
    """An unsupported serialization format raises a ValueError listing the options."""
    error_text = "must be one of the following formats: {}"
    error_text = error_text.format(", ".join(serialize.FORMATS))
    with pytest.raises(ValueError, match=error_text):
        serialize.write_data_description(es, path=str(tmp_path), format="")
def test_empty_dataframe(es):
    """empty_dataframe reproduces each dataframe's columns with no rows."""
    for df in es.dataframes:
        description = typing_info_to_dict(df)
        dataframe = deserialize.empty_dataframe(description)
        assert dataframe.empty
        assert all(dataframe.columns == df.columns)
def test_to_csv(es, tmp_path):
    """CSV round-trip preserves deep equality and LatLong tuple/list values."""
    es.to_csv(str(tmp_path), encoding="utf-8", engine="python")
    new_es = deserialize.read_entityset(str(tmp_path))
    assert es.__eq__(new_es, deep=True)
    df = to_pandas(es["log"], index="id")
    new_df = to_pandas(new_es["log"], index="id")
    assert type(df["latlong"][0]) in (tuple, list)
    assert type(new_df["latlong"][0]) in (tuple, list)
# Dask/Spark don't support auto setting of interesting values with es.add_interesting_values()
def test_to_csv_interesting_values(pd_es, tmp_path):
    """Auto-computed interesting values survive a CSV round-trip."""
    pd_es.add_interesting_values()
    pd_es.to_csv(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    assert pd_es.__eq__(new_es, deep=True)
def test_to_csv_manual_interesting_values(es, tmp_path):
    """Manually supplied interesting values survive a CSV round-trip."""
    es.add_interesting_values(
        dataframe_name="log",
        values={"product_id": ["coke_zero"]},
    )
    es.to_csv(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    assert es.__eq__(new_es, deep=True)
    assert new_es["log"].ww["product_id"].ww.metadata["interesting_values"] == [
        "coke_zero",
    ]
# Dask/Spark do not support to_pickle
def test_to_pickle(pd_es, tmp_path):
    """Pickle round-trip preserves deep equality and LatLong tuple values.

    Uses ``isinstance`` instead of ``type(...) ==`` for the tuple checks
    (avoids the E721 type-comparison anti-pattern); intent is identical since
    the latlong entries are plain tuples.
    """
    pd_es.to_pickle(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    assert pd_es.__eq__(new_es, deep=True)
    assert isinstance(pd_es["log"]["latlong"][0], tuple)
    assert isinstance(new_es["log"]["latlong"][0], tuple)
def test_to_pickle_errors_dask(dask_es, tmp_path):
    """Pickling a Dask-backed EntitySet raises an informative ValueError."""
    msg = "DataFrame type not compatible with pickle serialization. Please serialize to another format."
    with pytest.raises(ValueError, match=msg):
        dask_es.to_pickle(str(tmp_path))
def test_to_pickle_errors_spark(spark_es, tmp_path):
    """Pickling a Spark-backed EntitySet raises an informative ValueError."""
    msg = "DataFrame type not compatible with pickle serialization. Please serialize to another format."
    with pytest.raises(ValueError, match=msg):
        spark_es.to_pickle(str(tmp_path))
# Dask/Spark do not support to_pickle
def test_to_pickle_interesting_values(pd_es, tmp_path):
    """Auto-computed interesting values survive a pickle round-trip."""
    pd_es.add_interesting_values()
    pd_es.to_pickle(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    assert pd_es.__eq__(new_es, deep=True)
# Dask/Spark do not support to_pickle
def test_to_pickle_manual_interesting_values(pd_es, tmp_path):
    """Manually supplied interesting values survive a pickle round-trip."""
    pd_es.add_interesting_values(
        dataframe_name="log",
        values={"product_id": ["coke_zero"]},
    )
    pd_es.to_pickle(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    assert pd_es.__eq__(new_es, deep=True)
    assert new_es["log"].ww["product_id"].ww.metadata["interesting_values"] == [
        "coke_zero",
    ]
def test_to_parquet(es, tmp_path):
es.to_parquet(str(tmp_path))
new_es = deserialize.read_entityset(str(tmp_path))
assert es.__eq__(new_es, deep=True)
df = to_pandas(es["log"])
new_df = to_pandas(new_es["log"])
assert type(df["latlong"][0]) in (tuple, list)
assert type(new_df["latlong"][0]) in (tuple, list)
def test_to_parquet_manual_interesting_values(es, tmp_path):
    """Manually supplied interesting values survive a parquet round-trip."""
    es.add_interesting_values(
        dataframe_name="log",
        values={"product_id": ["coke_zero"]},
    )
    es.to_parquet(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    assert es.__eq__(new_es, deep=True)
    assert new_es["log"].ww["product_id"].ww.metadata["interesting_values"] == [
        "coke_zero",
    ]
# Dask/Spark don't support auto setting of interesting values with es.add_interesting_values()
def test_to_parquet_interesting_values(pd_es, tmp_path):
    """Auto-discovered interesting values survive a parquet round-trip."""
    pd_es.add_interesting_values()
    pd_es.to_parquet(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    assert pd_es.__eq__(new_es, deep=True)
def test_to_parquet_with_lti(tmp_path, pd_mock_customer):
    """An entityset that carries last time indexes round-trips through parquet."""
    es = pd_mock_customer
    es.to_parquet(str(tmp_path))
    new_es = deserialize.read_entityset(str(tmp_path))
    assert es.__eq__(new_es, deep=True)
def test_to_pickle_id_none(tmp_path):
    """An entityset created without an id still pickles and deserializes equal."""
    original = EntitySet()
    original.to_pickle(str(tmp_path))
    restored = deserialize.read_entityset(str(tmp_path))
    assert original.__eq__(restored, deep=True)
# TODO: Fix Moto tests needing to explicitly set permissions for objects
@pytest.fixture
def s3_client():
    """Yield a mocked boto3 S3 resource; restores os.environ afterwards."""
    # Snapshot the environment because moto mutates AWS-related variables
    _environ = os.environ.copy()
    from moto import mock_s3

    with mock_s3():
        s3 = boto3.resource("s3")
        yield s3
    os.environ.clear()
    os.environ.update(_environ)
@pytest.fixture
def s3_bucket(s3_client, region="us-east-2"):
    """Yield a public read/write test bucket in the mocked S3 resource."""
    location = {"LocationConstraint": region}
    s3_client.create_bucket(
        Bucket=BUCKET_NAME,
        ACL="public-read-write",
        CreateBucketConfiguration=location,
    )
    s3_bucket = s3_client.Bucket(BUCKET_NAME)
    yield s3_bucket
def make_public(s3_client, s3_bucket):
    """Grant public read/write access to the single object stored in the test bucket."""
    key = next(iter(s3_bucket.objects.all())).key
    s3_client.ObjectAcl(BUCKET_NAME, key).put(ACL="public-read-write")
# TODO: tmp file disappears after deserialize step, cannot check equality with Dask, Spark
@pytest.mark.parametrize("profile_name", [None, False])
def test_serialize_s3_csv(es, s3_client, s3_bucket, profile_name):
    """CSV round-trip through mocked S3 preserves the entityset (pandas only)."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "tmp file disappears after deserialize step, cannot check equality with Dask",
        )
    es.to_csv(TEST_S3_URL, encoding="utf-8", engine="python", profile_name=profile_name)
    make_public(s3_client, s3_bucket)
    new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=profile_name)
    assert es.__eq__(new_es, deep=True)
# Dask and Spark do not support to_pickle
@pytest.mark.parametrize("profile_name", [None, False])
def test_serialize_s3_pickle(pd_es, s3_client, s3_bucket, profile_name):
    """Pickle round-trip through mocked S3 preserves the entityset."""
    pd_es.to_pickle(TEST_S3_URL, profile_name=profile_name)
    make_public(s3_client, s3_bucket)
    new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=profile_name)
    assert pd_es.__eq__(new_es, deep=True)
# TODO: tmp file disappears after deserialize step, cannot check equality with Dask, Spark
@pytest.mark.parametrize("profile_name", [None, False])
def test_serialize_s3_parquet(es, s3_client, s3_bucket, profile_name):
    """Parquet round-trip through mocked S3 preserves the entityset (pandas only)."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "tmp file disappears after deserialize step, cannot check equality with Dask or Spark",
        )
    es.to_parquet(TEST_S3_URL, profile_name=profile_name)
    make_public(s3_client, s3_bucket)
    new_es = deserialize.read_entityset(TEST_S3_URL, profile_name=profile_name)
    assert es.__eq__(new_es, deep=True)
def test_s3_test_profile(es, s3_client, s3_bucket, setup_test_profile):
    """Serialization honors a named AWS profile ('test') when reading and writing S3 (pandas only)."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "tmp file disappears after deserialize step, cannot check equality with Dask",
        )
    es.to_csv(TEST_S3_URL, encoding="utf-8", engine="python", profile_name="test")
    make_public(s3_client, s3_bucket)
    new_es = deserialize.read_entityset(TEST_S3_URL, profile_name="test")
    assert es.__eq__(new_es, deep=True)
def test_serialize_url_csv(es):
    """Writing an entityset directly to a URL is rejected."""
    with pytest.raises(ValueError, match="Writing to URLs is not supported"):
        es.to_csv(URL, encoding="utf-8", engine="python")
def test_serialize_subdirs_not_removed(es, tmp_path):
    """Writing a data description into a directory leaves unrelated subdirectories intact
    while overwriting a pre-existing description file."""
    write_path = tmp_path.joinpath("test")
    write_path.mkdir()
    test_dir = write_path.joinpath("test_dir")
    test_dir.mkdir()
    description_path = write_path.joinpath("data_description.json")
    # Pre-seed a description file with sentinel content to confirm it is overwritten
    with open(description_path, "w") as f:
        json.dump("__SAMPLE_TEXT__", f)
    if es.dataframe_type == Library.SPARK:
        compression = "none"
    else:
        compression = None
    serialize.write_data_description(
        es,
        path=str(write_path),
        index="1",
        sep="\t",
        encoding="utf-8",
        compression=compression,
    )
    # Unrelated subdirectory survives the write
    assert os.path.exists(str(test_dir))
    # Sentinel content was replaced by the real description
    with open(description_path, "r") as f:
        assert "__SAMPLE_TEXT__" not in json.load(f)
def test_deserialize_local_tar(es):
    """A downloaded tar archive of an entityset deserializes equal to the fixture."""
    with tempfile.TemporaryDirectory() as tmp_path:
        temp_tar_filepath = os.path.join(tmp_path, TEST_FILE)
        urlretrieve(URL, filename=temp_tar_filepath)
        new_es = deserialize.read_entityset(temp_tar_filepath)
        assert es.__eq__(new_es, deep=True)
def test_deserialize_url_csv(es):
    """An entityset archived at a URL deserializes equal to the fixture."""
    loaded = deserialize.read_entityset(URL)
    assert es.__eq__(loaded, deep=True)
def test_deserialize_s3_csv(es):
    """An entityset archived on S3 deserializes equal to the fixture."""
    loaded = deserialize.read_entityset(S3_URL, profile_name=False)
    assert es.__eq__(loaded, deep=True)
def test_operations_invalidate_metadata(es):
    """Every mutating EntitySet operation clears the cached data description,
    and accessing .metadata regenerates it lazily."""
    new_es = EntitySet(id="test")
    # test metadata gets created on access
    assert new_es._data_description is None
    assert new_es.metadata is not None  # generated after access
    assert new_es._data_description is not None

    # Non-pandas frames must carry explicit logical types when re-added
    if not isinstance(es["customers"], pd.DataFrame):
        customers_ltypes = es["customers"].ww.logical_types
        customers_ltypes["signup_date"] = Datetime
    else:
        customers_ltypes = None
    new_es.add_dataframe(
        es["customers"],
        "customers",
        index=es["customers"].index,
        logical_types=customers_ltypes,
    )
    if not isinstance(es["sessions"], pd.DataFrame):
        sessions_ltypes = es["sessions"].ww.logical_types
    else:
        sessions_ltypes = None
    new_es.add_dataframe(
        es["sessions"],
        "sessions",
        index=es["sessions"].index,
        logical_types=sessions_ltypes,
    )

    # add_dataframe invalidates the cache
    assert new_es._data_description is None
    assert new_es.metadata is not None
    assert new_es._data_description is not None

    # add_relationship invalidates the cache
    new_es = new_es.add_relationship("customers", "id", "sessions", "customer_id")
    assert new_es._data_description is None
    assert new_es.metadata is not None
    assert new_es._data_description is not None

    # normalize_dataframe invalidates the cache
    new_es = new_es.normalize_dataframe("customers", "cohort", "cohort")
    assert new_es._data_description is None
    assert new_es.metadata is not None
    assert new_es._data_description is not None

    # add_last_time_indexes invalidates the cache
    new_es.add_last_time_indexes()
    assert new_es._data_description is None
    assert new_es.metadata is not None
    assert new_es._data_description is not None

    # automatically adding interesting values not supported in Dask or Spark
    if new_es.dataframe_type == Library.PANDAS:
        new_es.add_interesting_values()
        assert new_es._data_description is None
        assert new_es.metadata is not None
        assert new_es._data_description is not None
def test_reset_metadata(es):
    """reset_data_description drops the cached metadata description."""
    assert es.metadata is not None
    assert es._data_description is not None
    es.reset_data_description()
    assert es._data_description is None
@patch("featuretools.utils.schema_utils.ENTITYSET_SCHEMA_VERSION", "1.1.1")
@pytest.mark.parametrize(
    "hardcoded_schema_version, warns",
    [("2.1.1", True), ("1.2.1", True), ("1.1.2", True), ("1.0.2", False)],
)
def test_later_schema_version(es, caplog, hardcoded_schema_version, warns):
    """Deserializing a description whose schema version is newer than the
    supported version (patched to 1.1.1) emits a warning; older-or-equal does not."""

    def test_version(version, warns):
        if warns:
            warning_text = (
                "The schema version of the saved entityset"
                "(%s) is greater than the latest supported (%s). "
                "You may need to upgrade featuretools. Attempting to load entityset ..."
                % (version, "1.1.1")
            )
        else:
            warning_text = None
        _check_schema_version(version, es, warning_text, caplog, "warn")

    test_version(hardcoded_schema_version, warns)
@patch("featuretools.utils.schema_utils.ENTITYSET_SCHEMA_VERSION", "1.1.1")
@pytest.mark.parametrize(
    "hardcoded_schema_version, warns",
    [("0.1.1", True), ("1.0.1", False), ("1.1.0", False)],
)
def test_earlier_schema_version(
    es,
    caplog,
    monkeypatch,
    hardcoded_schema_version,
    warns,
):
    """Deserializing a description whose major schema version predates the
    supported version (patched to 1.1.1) logs a message; same-major does not."""

    def test_version(version, warns):
        if warns:
            warning_text = (
                "The schema version of the saved entityset"
                "(%s) is no longer supported by this version "
                "of featuretools. Attempting to load entityset ..." % version
            )
        else:
            warning_text = None
        _check_schema_version(version, es, warning_text, caplog, "log")

    test_version(hardcoded_schema_version, warns)
def _check_schema_version(version, es, warning_text, caplog, warning_type=None):
    """Build a serialized description for `es` with the given schema `version`,
    deserialize it, and assert the expected warning/log behavior.

    warning_type: "warn" checks pytest-captured UserWarnings; "log" checks
    caplog output from the featuretools logger. warning_text of None means
    no message is expected.
    """
    dataframes = {
        dataframe.ww.name: typing_info_to_dict(dataframe) for dataframe in es.dataframes
    }
    relationships = [relationship.to_dictionary() for relationship in es.relationships]
    dictionary = {
        "schema_version": version,
        "id": es.id,
        "dataframes": dataframes,
        "relationships": relationships,
        "data_type": es.dataframe_type,
    }

    if warning_type == "warn" and warning_text:
        with pytest.warns(UserWarning) as record:
            deserialize.description_to_entityset(dictionary)
        assert record[0].message.args[0] == warning_text
    elif warning_type == "log":
        # Temporarily enable propagation so caplog can capture the message
        logger = logging.getLogger("featuretools")
        logger.propagate = True
        deserialize.description_to_entityset(dictionary)
        if warning_text:
            assert warning_text in caplog.text
        else:
            assert not len(caplog.text)
        logger.propagate = False
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from woodwork.exceptions import TypeConversionError
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
NaturalLanguage,
)
from featuretools.entityset.entityset import LTI_COLUMN_NAME, EntitySet
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library, import_or_none, is_instance
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")
def test_empty_es():
    """A freshly constructed EntitySet has an id but no dataframes, relationships, or time type."""
    empty = EntitySet("es")
    assert empty.id == "es"
    assert empty.dataframe_dict == {}
    assert empty.relationships == []
    assert empty.time_type is None
@pytest.fixture
def pd_df():
    """Three-row pandas frame with an integer id and a categorical column."""
    data = {"id": [0, 1, 2], "category": ["a", "b", "c"]}
    return pd.DataFrame(data).astype({"category": "category"})
@pytest.fixture
def dd_df(pd_df):
    """Dask version of pd_df; skips the test when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_df, npartitions=2)
@pytest.fixture
def spark_df(pd_df):
    """Spark version of pd_df; skips the test when Spark is not installed."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_df)
# Parametrized fixture: runs each consuming test against pandas, Dask, and Spark frames
@pytest.fixture(params=["pd_df", "dd_df", "spark_df"])
def df(request):
    return request.getfixturevalue(request.param)
def test_init_es_with_dataframe(df):
    """Constructing an EntitySet from a bare dataframe initializes Woodwork and infers types."""
    es = EntitySet("es", dataframes={"table": (df, "id")})
    assert es.id == "es"
    assert len(es.dataframe_dict) == 1
    # The entityset stores the same dataframe object, now Woodwork-initialized
    assert es["table"] is df
    assert es["table"].ww.schema is not None
    assert isinstance(es["table"].ww.logical_types["id"], Integer)
    assert isinstance(es["table"].ww.logical_types["category"], Categorical)
def test_init_es_with_woodwork_table_same_name(df):
    """A pre-initialized Woodwork dataframe keeps its existing schema when added at init."""
    df.ww.init(index="id", name="table")
    es = EntitySet("es", dataframes={"table": (df,)})
    assert es.id == "es"
    assert len(es.dataframe_dict) == 1
    assert es["table"] is df

    assert es["table"].ww.schema is not None
    assert es["table"].ww.index == "id"
    assert es["table"].ww.time_index is None
    assert isinstance(es["table"].ww.logical_types["id"], Integer)
    assert isinstance(es["table"].ww.logical_types["category"], Categorical)
def test_init_es_with_woodwork_table_diff_name_error(df):
    """A dictionary key that disagrees with the Woodwork table name raises at init."""
    df.ww.init(index="id", name="table")
    error = "Naming conflict in dataframes dictionary: dictionary key 'diff_name' does not match dataframe name 'table'"
    with pytest.raises(ValueError, match=error):
        EntitySet("es", dataframes={"diff_name": (df,)})
def test_init_es_with_dataframe_and_params(df):
    """Logical types and semantic tags supplied at init override inference."""
    # Logical types may be given as strings or as Woodwork type classes
    logical_types = {"id": "NaturalLanguage", "category": NaturalLanguage}
    semantic_tags = {"category": "new_tag"}
    es = EntitySet(
        "es",
        dataframes={"table": (df, "id", None, logical_types, semantic_tags)},
    )
    assert es.id == "es"
    assert len(es.dataframe_dict) == 1
    assert es["table"] is df

    assert es["table"].ww.schema is not None
    assert es["table"].ww.index == "id"
    assert es["table"].ww.time_index is None

    assert isinstance(es["table"].ww.logical_types["id"], NaturalLanguage)
    assert isinstance(es["table"].ww.logical_types["category"], NaturalLanguage)

    # The index tag is applied automatically; custom tags are preserved
    assert es["table"].ww.semantic_tags["id"] == {"index"}
    assert es["table"].ww.semantic_tags["category"] == {"new_tag"}
def test_init_es_with_multiple_dataframes(pd_df):
    """Init accepts a mix of Woodwork-initialized and bare dataframes."""
    second_df = pd.DataFrame({"id": [0, 1, 2, 3], "first_table_id": [1, 2, 2, 1]})

    pd_df.ww.init(name="first_table", index="id")

    es = EntitySet(
        "es",
        dataframes={
            "first_table": (pd_df,),
            "second_table": (
                second_df,
                "id",
                None,
                None,
                {"first_table_id": "foreign_key"},
            ),
        },
    )

    assert len(es.dataframe_dict) == 2
    assert es["first_table"].ww.schema is not None
    assert es["second_table"].ww.schema is not None
def test_add_dataframe_to_es(df):
    """Adding a bare dataframe with params yields the same schema as adding a
    pre-initialized Woodwork copy."""
    es1 = EntitySet("es")
    assert es1.dataframe_dict == {}
    es1.add_dataframe(
        df,
        dataframe_name="table",
        index="id",
        semantic_tags={"category": "new_tag"},
    )
    assert len(es1.dataframe_dict) == 1

    # ww.copy carries the Woodwork typing information over to the new frame
    copy_df = df.ww.copy()
    es2 = EntitySet("es")
    assert es2.dataframe_dict == {}
    es2.add_dataframe(copy_df)
    assert len(es2.dataframe_dict) == 1

    assert es1["table"].ww == es2["table"].ww
def test_change_es_dataframe_schema(df):
    """Woodwork schema changes made through the ww accessor are visible on the entityset."""
    df.ww.init(index="id", name="table")
    es = EntitySet("es", dataframes={"table": (df,)})

    assert es["table"].ww.index == "id"

    es["table"].ww.set_index("category")
    assert es["table"].ww.index == "category"
def test_init_es_with_relationships(pd_df):
    """Relationships supplied at init are created and tag both endpoint columns."""
    second_df = pd.DataFrame({"id": [0, 1, 2, 3], "first_table_id": [1, 2, 2, 1]})

    pd_df.ww.init(name="first_table", index="id")
    second_df.ww.init(name="second_table", index="id")

    es = EntitySet(
        "es",
        dataframes={"first_table": (pd_df,), "second_table": (second_df,)},
        relationships=[("first_table", "id", "second_table", "first_table_id")],
    )

    assert len(es.relationships) == 1

    forward_dataframes = [name for name, _ in es.get_forward_dataframes("second_table")]
    assert forward_dataframes[0] == "first_table"

    # The relationship marks the child column as foreign_key and the parent as index
    relationship = es.relationships[0]
    assert "foreign_key" in relationship.child_column.ww.semantic_tags
    assert "index" in relationship.parent_column.ww.semantic_tags
@pytest.fixture
def dates_df():
    """9-row frame mixing numeric columns and date-string columns, used to
    exercise time-index type checks and secondary time indexes."""
    data = {
        "backwards_order": [8, 7, 6, 5, 4, 3, 2, 1, 0],
        "dates_backwards": [
            "2020-09-09",
            "2020-09-08",
            "2020-09-07",
            "2020-09-06",
            "2020-09-05",
            "2020-09-04",
            "2020-09-03",
            "2020-09-02",
            "2020-09-01",
        ],
        "random_order": [7, 6, 8, 0, 2, 4, 3, 1, 5],
        "repeating_dates": [
            "2020-08-01",
            "2019-08-01",
            "2020-08-01",
            "2012-08-01",
            "2019-08-01",
            "2019-08-01",
            "2019-08-01",
            "2013-08-01",
            "2019-08-01",
        ],
        "special": [7, 8, 0, 1, 4, 2, 6, 3, 5],
        "special_dates": [
            "2020-08-01",
            "2019-08-01",
            "2020-08-01",
            "2012-08-01",
            "2019-08-01",
            "2019-08-01",
            "2019-08-01",
            "2013-08-01",
            "2019-08-01",
        ],
    }
    return pd.DataFrame(data)
def test_add_secondary_time_index(dates_df):
    """A secondary time index is stored in metadata with the index column appended
    to its own column list."""
    dates_df.ww.init(
        name="dates_table",
        index="backwards_order",
        time_index="dates_backwards",
    )
    es = EntitySet("es")

    es.add_dataframe(
        dates_df,
        secondary_time_index={"repeating_dates": ["random_order", "special"]},
    )
    assert dates_df.ww.metadata["secondary_time_index"] == {
        "repeating_dates": ["random_order", "special", "repeating_dates"],
    }
def test_time_type_check_order(dates_df):
    """Adding a dataframe whose secondary time index type conflicts with the primary
    time index fails before any metadata is written."""
    dates_df.ww.init(
        name="dates_table",
        index="backwards_order",
        time_index="random_order",
    )
    es = EntitySet("es")

    # Primary time index is numeric but the secondary one is a Datetime column
    error = "dates_table time index is Datetime type which differs from other entityset time indexes"
    with pytest.raises(TypeError, match=error):
        es.add_dataframe(
            dates_df,
            secondary_time_index={"repeating_dates": ["random_order", "special"]},
        )
    # Failure must not leave partial metadata behind
    assert "secondary_time_index" not in dates_df.ww.metadata
def test_add_time_index_through_woodwork_different_type(dates_df):
    """Changing a dataframe's time index via Woodwork to a different time type makes
    the entityset's uniform-time-type check fail afterwards."""
    dates_df.ww.init(
        name="dates_table",
        index="backwards_order",
        time_index="dates_backwards",
    )
    es = EntitySet("es")

    es.add_dataframe(
        dates_df,
        secondary_time_index={"repeating_dates": ["random_order", "special"]},
    )
    assert dates_df.ww.metadata["secondary_time_index"] == {
        "repeating_dates": ["random_order", "special", "repeating_dates"],
    }
    # Entityset time type was locked in as Datetime by the first dataframe
    assert es.time_type == Datetime
    assert es._check_uniform_time_index(es["dates_table"]) is None

    # Swap the time index to a numeric column behind the entityset's back
    dates_df.ww.set_time_index("random_order")
    assert dates_df.ww.time_index == "random_order"

    error = "dates_table time index is numeric type which differs from other entityset time indexes"
    with pytest.raises(TypeError, match=error):
        es._check_uniform_time_index(es["dates_table"])
def test_init_with_mismatched_time_types(dates_df):
    """A dataframe with a numeric time index cannot join an entityset whose
    time type is already Datetime."""
    dates_df.ww.init(
        name="dates_table",
        index="backwards_order",
        time_index="repeating_dates",
    )
    es = EntitySet("es")
    es.add_dataframe(dates_df, secondary_time_index={"special_dates": ["special"]})

    assert es.time_type == Datetime

    nums_df = pd.DataFrame({"id": [1, 2, 3], "times": [9, 8, 7]})
    nums_df.ww.init(name="numerics_table", index="id", time_index="times")

    error = "numerics_table time index is numeric type which differs from other entityset time indexes"
    with pytest.raises(TypeError, match=error):
        es.add_dataframe(nums_df)
def test_int_double_time_type(dates_df):
    """Integer and Double columns are both 'numeric' time types, so they can serve
    as primary and secondary time indexes on the same dataframe."""
    dates_df.ww.init(
        name="dates_table",
        index="backwards_order",
        time_index="random_order",
        logical_types={"random_order": "Integer", "special": "Double"},
    )
    es = EntitySet("es")
    # Both random_order and special are numeric, but they are different logical types
    es.add_dataframe(dates_df, secondary_time_index={"special": ["dates_backwards"]})

    assert isinstance(es["dates_table"].ww.logical_types["random_order"], Integer)
    assert isinstance(es["dates_table"].ww.logical_types["special"], Double)

    assert es["dates_table"].ww.time_index == "random_order"
    assert "special" in es["dates_table"].ww.metadata["secondary_time_index"]
def test_normalize_dataframe():
    """normalize_dataframe splits off a new parent dataframe and tags the child
    column as a foreign key."""
    df = pd.DataFrame(
        {
            "id": range(4),
            "full_name": [
                "Mr. John Doe",
                "Doe, Mrs. Jane",
                "James Brown",
                "Ms. Paige Turner",
            ],
            "email": [
                "john.smith@example.com",
                np.nan,
                "team@featuretools.com",
                "junk@example.com",
            ],
            "phone_number": [
                "5555555555",
                "555-555-5555",
                "1-(555)-555-5555",
                "555-555-5555",
            ],
            "age": pd.Series([33, None, 33, 57], dtype="Int64"),
            "signup_date": [pd.to_datetime("2020-09-01")] * 4,
            "is_registered": pd.Series([True, False, True, None], dtype="boolean"),
        },
    )
    df.ww.init(name="first_table", index="id", time_index="signup_date")
    es = EntitySet("es")
    es.add_dataframe(df)
    es.normalize_dataframe(
        "first_table",
        "second_table",
        "age",
        additional_columns=["phone_number", "full_name"],
        make_time_index=True,
    )
    assert len(es.dataframe_dict) == 2
    # Normalizing makes "age" the link to the new parent table
    assert "foreign_key" in es["first_table"].ww.semantic_tags["age"]
def test_replace_dataframe():
    """replace_dataframe swaps in new data while preserving the Woodwork schema."""
    df = pd.DataFrame(
        {
            "id": range(4),
            "full_name": [
                "Mr. John Doe",
                "Doe, Mrs. Jane",
                "James Brown",
                "Ms. Paige Turner",
            ],
            "email": [
                "john.smith@example.com",
                np.nan,
                "team@featuretools.com",
                "junk@example.com",
            ],
            "phone_number": [
                "5555555555",
                "555-555-5555",
                "1-(555)-555-5555",
                "555-555-5555",
            ],
            "age": pd.Series([33, None, 33, 57], dtype="Int64"),
            "signup_date": [pd.to_datetime("2020-09-01")] * 4,
            "is_registered": pd.Series([True, False, True, None], dtype="boolean"),
        },
    )
    df.ww.init(name="table", index="id")
    es = EntitySet("es")
    es.add_dataframe(df)

    original_schema = es["table"].ww.schema

    # Replace with a 2-row subset of the same data
    new_df = df.iloc[2:]
    es.replace_dataframe("table", new_df)

    assert len(es["table"]) == 2
    assert es["table"].ww.schema == original_schema
def test_add_last_time_index(es):
    """add_last_time_indexes appends the LTI column with the expected metadata,
    tag, and Datetime logical type."""
    es.add_last_time_indexes(["products"])

    assert "last_time_index" in es["products"].ww.metadata
    assert es["products"].ww.metadata["last_time_index"] == LTI_COLUMN_NAME
    assert LTI_COLUMN_NAME in es["products"]
    assert "last_time_index" in es["products"].ww.semantic_tags[LTI_COLUMN_NAME]
    assert isinstance(es["products"].ww.logical_types[LTI_COLUMN_NAME], Datetime)
def test_add_last_time_non_numeric_index(pd_es, spark_es, dask_es):
    """Last time indexes agree across pandas/Dask/Spark for a non-numeric index."""
    # Confirm that add_last_time_index works for indices that aren't numeric
    # since numeric underlying indices can accidentally match the Woodwork index
    pd_es.add_last_time_indexes(["products"])
    dask_es.add_last_time_indexes(["products"])
    spark_es.add_last_time_indexes(["products"])

    assert list(to_pandas(pd_es["products"][LTI_COLUMN_NAME]).sort_index()) == list(
        to_pandas(dask_es["products"][LTI_COLUMN_NAME]).sort_index(),
    )
    assert list(to_pandas(pd_es["products"][LTI_COLUMN_NAME]).sort_index()) == list(
        to_pandas(spark_es["products"]).sort_values("id")[LTI_COLUMN_NAME],
    )
    assert pd_es["products"].ww.schema == dask_es["products"].ww.schema
    assert pd_es["products"].ww.schema == spark_es["products"].ww.schema
def test_lti_already_has_last_time_column_name(es):
    """A pre-existing column named like the LTI column blocks add_last_time_indexes."""
    # Rename an existing column to collide with the reserved LTI name
    col = es["customers"].ww.pop("loves_ice_cream")
    col.name = LTI_COLUMN_NAME
    es["customers"].ww[LTI_COLUMN_NAME] = col
    assert LTI_COLUMN_NAME in es["customers"].columns
    assert isinstance(es["customers"].ww.logical_types[LTI_COLUMN_NAME], Boolean)

    error = (
        "Cannot add a last time index on DataFrame with an existing "
        f"'{LTI_COLUMN_NAME}' column. Please rename '{LTI_COLUMN_NAME}'."
    )
    with pytest.raises(ValueError, match=error):
        es.add_last_time_indexes(["customers"])
def test_numeric_es_last_time_index_logical_type(int_es):
    """On a numeric-time entityset the LTI column is Double and passes the
    uniform time type check."""
    assert int_es.time_type == "numeric"
    int_es.add_last_time_indexes()

    for df in int_es.dataframes:
        assert isinstance(df.ww.logical_types[LTI_COLUMN_NAME], Double)
        int_es._check_uniform_time_index(df, LTI_COLUMN_NAME)
def test_datetime_es_last_time_index_logical_type(es):
    """On a datetime-time entityset the LTI column is Datetime and passes the
    uniform time type check."""
    assert es.time_type == Datetime
    es.add_last_time_indexes()

    for df in es.dataframes:
        assert isinstance(df.ww.logical_types[LTI_COLUMN_NAME], Datetime)
        es._check_uniform_time_index(df, LTI_COLUMN_NAME)
def test_dataframe_without_name(es):
    """Adding an uninitialized dataframe without a name parameter raises."""
    new_es = EntitySet()
    # .copy() on the plain frame drops Woodwork typing information
    new_df = es["sessions"].copy()
    assert new_df.ww.schema is None

    error = "Cannot add dataframe to EntitySet without a name. Please provide a value for the dataframe_name parameter."
    with pytest.raises(ValueError, match=error):
        new_es.add_dataframe(new_df)
def test_dataframe_with_name_parameter(es):
    """The dataframe_name parameter names an uninitialized dataframe on add."""
    new_es = EntitySet()
    new_df = es["sessions"][["id"]]
    assert new_df.ww.schema is None

    new_es.add_dataframe(
        new_df,
        dataframe_name="df_name",
        index="id",
        logical_types={"id": "Integer"},
    )
    assert new_es["df_name"].ww.name == "df_name"
def test_woodwork_dataframe_without_name_errors(es):
    """A Woodwork-initialized dataframe whose schema name is None cannot be added."""
    new_es = EntitySet()
    new_df = es["sessions"].ww.copy()
    # Force an unnamed schema to trigger the error path
    new_df.ww._schema.name = None
    assert new_df.ww.name is None

    error = "Cannot add a Woodwork DataFrame to EntitySet without a name"
    with pytest.raises(ValueError, match=error):
        new_es.add_dataframe(new_df)
def test_woodwork_dataframe_with_name(es):
    """A Woodwork dataframe's own schema name is used when none is passed."""
    new_es = EntitySet()
    new_df = es["sessions"].ww.copy()
    new_df.ww._schema.name = "df_name"
    assert new_df.ww.name == "df_name"

    new_es.add_dataframe(new_df)
    assert new_es["df_name"].ww.name == "df_name"
def test_woodwork_dataframe_ignore_conflicting_name_parameter_warning(es):
    """A dataframe_name that conflicts with the Woodwork name is ignored with a warning."""
    new_es = EntitySet()
    new_df = es["sessions"].ww.copy()
    new_df.ww._schema.name = "df_name"
    assert new_df.ww.name == "df_name"

    warning = "A Woodwork-initialized DataFrame was provided, so the following parameters were ignored: dataframe_name"
    with pytest.warns(UserWarning, match=warning):
        new_es.add_dataframe(new_df, dataframe_name="conflicting_name")
    # The Woodwork name wins over the conflicting parameter
    assert new_es["df_name"].ww.name == "df_name"
def test_woodwork_dataframe_same_name_parameter(es):
    """Passing a dataframe_name identical to the Woodwork name is accepted silently."""
    new_es = EntitySet()
    new_df = es["sessions"].ww.copy()
    new_df.ww._schema.name = "df_name"
    assert new_df.ww.name == "df_name"

    new_es.add_dataframe(new_df, dataframe_name="df_name")
    assert new_es["df_name"].ww.name == "df_name"
def test_extra_woodwork_params(es):
    """Typing parameters passed alongside an already-initialized Woodwork dataframe
    are ignored (with a warning) and do not alter the existing schema."""
    new_es = EntitySet()

    sessions_df = es["sessions"].ww.copy()
    assert sessions_df.ww.index == "id"
    assert sessions_df.ww.time_index is None
    assert isinstance(sessions_df.ww.logical_types["id"], Integer)
    warning_msg = (
        "A Woodwork-initialized DataFrame was provided, so the following parameters were ignored: "
        "index, time_index, logical_types, make_index, semantic_tags, already_sorted"
    )
    with pytest.warns(UserWarning, match=warning_msg):
        new_es.add_dataframe(
            dataframe_name="sessions",
            dataframe=sessions_df,
            index="filepath",
            time_index="customer_id",
            logical_types={"id": Categorical},
            make_index=True,
            already_sorted=True,
            semantic_tags={"id": "new_tag"},
        )
    # Original Woodwork typing is untouched by the ignored parameters
    assert sessions_df.ww.index == "id"
    assert sessions_df.ww.time_index is None
    assert isinstance(sessions_df.ww.logical_types["id"], Integer)
    assert "new_tag" not in sessions_df.ww.semantic_tags
def test_replace_dataframe_errors(es):
    """replace_dataframe rejects new data that drops or adds columns."""
    df = es["customers"].copy()
    if ps and isinstance(df, ps.DataFrame):
        df["new"] = [1, 2, 3]
    else:
        df["new"] = pd.Series([1, 2, 3])

    # Missing a column the schema expects
    error_text = "New dataframe is missing new cohort column"
    with pytest.raises(ValueError, match=error_text):
        es.replace_dataframe(dataframe_name="customers", df=df.drop(columns=["cohort"]))

    # Extra column beyond the schema ("new" makes it 16 vs the expected 15)
    error_text = "New dataframe contains 16 columns, expecting 15"
    with pytest.raises(ValueError, match=error_text):
        es.replace_dataframe(dataframe_name="customers", df=df)
def test_replace_dataframe_already_sorted(es):
    """already_sorted controls whether replace_dataframe re-sorts by the time index;
    frames without a time index are never sorted, and only pandas sorts at all."""
    # test already_sorted on dataframe without time index
    df = es["sessions"].copy()
    updated_id = to_pandas(df["id"])
    # Swap two ids so a sort (if it happened) would be observable
    updated_id.iloc[1] = 2
    updated_id.iloc[2] = 1

    df = df.set_index("id", drop=False)
    df.index.name = None
    assert es["sessions"].ww.time_index is None

    if ps and isinstance(df, ps.DataFrame):
        df["id"] = updated_id.to_list()
        df = df.sort_index()
    elif is_instance(df, dd, "DataFrame"):
        df["id"] = updated_id

    es.replace_dataframe(dataframe_name="sessions", df=df.copy(), already_sorted=False)
    sessions_df = to_pandas(es["sessions"])
    assert sessions_df["id"].iloc[1] == 2  # no sorting since time index not defined
    es.replace_dataframe(dataframe_name="sessions", df=df.copy(), already_sorted=True)
    sessions_df = to_pandas(es["sessions"])
    assert sessions_df["id"].iloc[1] == 2

    # test already_sorted on dataframe with time index
    df = es["customers"].copy()
    updated_signup = to_pandas(df["signup_date"])
    # Make row 0's signup_date later than the others so sorting changes row order
    updated_signup.iloc[0] = datetime(2011, 4, 11)
    assert es["customers"].ww.time_index == "signup_date"

    if ps and isinstance(df, ps.DataFrame):
        df["signup_date"] = updated_signup.to_list()
        df = df.sort_index()
    else:
        df["signup_date"] = updated_signup

    es.replace_dataframe(dataframe_name="customers", df=df.copy(), already_sorted=True)
    customers_df = to_pandas(es["customers"])
    assert customers_df["id"].iloc[0] == 2

    # only pandas allows for sorting:
    es.replace_dataframe(dataframe_name="customers", df=df.copy(), already_sorted=False)
    updated_customers = to_pandas(es["customers"])
    if isinstance(df, pd.DataFrame):
        assert updated_customers["id"].iloc[0] == 0
    else:
        assert updated_customers["id"].iloc[0] == 2
def test_replace_dataframe_invalid_schema(es):
    """replace_dataframe surfaces Woodwork schema violations (duplicate index values)."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Invalid schema checks able to be caught by Woodwork only relevant for Pandas",
        )
    df = es["customers"].copy()
    df["id"] = pd.Series([1, 1, 1])

    error_text = "Index column must be unique"
    with pytest.raises(IndexError, match=error_text):
        es.replace_dataframe(dataframe_name="customers", df=df)
def test_replace_dataframe_mismatched_index(es):
    """replace_dataframe realigns the underlying pandas index with the Woodwork index column."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Only pandas checks whether underlying index matches the Woodwork index",
        )

    df = es["customers"].copy()
    df["id"] = pd.Series([99, 88, 77])

    es.replace_dataframe(dataframe_name="customers", df=df)
    # After replacement the column values and underlying index match
    assert all([77, 99, 88] == es["customers"]["id"])
    assert all([77, 99, 88] == (es["customers"]["id"]).index)
def test_replace_dataframe_different_dtypes(es):
    """replace_dataframe coerces compatible dtypes back to the schema dtype
    and errors on incompatible ones (pandas only at replace time)."""
    # Compatible: float64 "age" is converted back to the schema's int64
    float_dtype_df = es["customers"].copy()
    float_dtype_df = float_dtype_df.astype({"age": "float64"})

    es.replace_dataframe(dataframe_name="customers", df=float_dtype_df)

    assert es["customers"]["age"].dtype == "int64"
    assert isinstance(es["customers"].ww.logical_types["age"], Integer)

    # Incompatible: strings cannot be converted to Integer
    incompatible_dtype_df = es["customers"].copy()
    incompatible_list = ["hi", "bye", "bye"]
    if ps and isinstance(incompatible_dtype_df, ps.DataFrame):
        incompatible_dtype_df["age"] = incompatible_list
    else:
        incompatible_dtype_df["age"] = pd.Series(incompatible_list)

    if isinstance(es["customers"], pd.DataFrame):
        # Dask and Spark do not error on invalid type conversion until compute
        error_msg = "Error converting datatype for age from type object to type int64. Please confirm the underlying data is consistent with logical type Integer."
        with pytest.raises(TypeConversionError, match=error_msg):
            es.replace_dataframe(dataframe_name="customers", df=incompatible_dtype_df)
@pytest.fixture()
def latlong_df_pandas():
    """Pandas frame holding latlong data in several raw representations,
    indexed (unnamed) by the string_tuple column."""
    data = {
        "tuples": pd.Series([(1, 2), (3, 4)]),
        "string_tuple": pd.Series(["(1, 2)", "(3, 4)"]),
        "bracketless_string_tuple": pd.Series(["1, 2", "3, 4"]),
        "list_strings": pd.Series([["1", "2"], ["3", "4"]]),
        "combo_tuple_types": pd.Series(["[1, 2]", "(3, 4)"]),
    }
    latlong_df = pd.DataFrame(data)
    latlong_df = latlong_df.set_index("string_tuple", drop=False)
    latlong_df.index.name = None
    return latlong_df
@pytest.fixture()
def latlong_df_dask(latlong_df_pandas):
    """Dask version of latlong_df_pandas; skips when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(latlong_df_pandas, npartitions=2)
@pytest.fixture()
def latlong_df_spark(latlong_df_pandas):
    """Spark version of latlong_df_pandas; tuples become lists since Spark
    has no tuple cell type. Skips when Spark is not installed."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(
        latlong_df_pandas.applymap(
            lambda tup: list(tup) if isinstance(tup, tuple) else tup,
        ),
    )
# Parametrized fixture: runs each consuming test against pandas, Dask, and Spark frames
@pytest.fixture(params=["latlong_df_pandas", "latlong_df_dask", "latlong_df_spark"])
def latlong_df(request):
    return request.getfixturevalue(request.param)
def test_replace_dataframe_data_transformation(latlong_df):
    """replace_dataframe reapplies LatLong parsing to raw string/list values,
    converting them to tuples (lists on Spark)."""
    initial_df = latlong_df.copy()
    initial_df.ww.init(
        name="latlongs",
        index="string_tuple",
        logical_types={col_name: "LatLong" for col_name in initial_df.columns},
    )
    es = EntitySet()
    es.add_dataframe(dataframe=initial_df)
    df = to_pandas(es["latlongs"])
    expected_val = (1, 2)
    if ps and isinstance(es["latlongs"], ps.DataFrame):
        # Spark represents latlongs as lists, not tuples
        expected_val = [1, 2]
    for col in latlong_df.columns:
        series = df[col]
        assert series.iloc[0] == expected_val

    # Replacing with the raw (unparsed) data re-runs the same transformation
    es.replace_dataframe("latlongs", latlong_df)
    df = to_pandas(es["latlongs"])
    expected_val = (3, 4)
    if ps and isinstance(es["latlongs"], ps.DataFrame):
        expected_val = [3, 4]
    for col in latlong_df.columns:
        series = df[col]
        assert series.iloc[-1] == expected_val
def test_replace_dataframe_column_order(es):
    """replace_dataframe restores the schema's column order even when the new
    dataframe's columns are shuffled."""
    original_column_order = es["customers"].columns.copy()

    df = es["customers"].copy()
    # Move "cohort" to the end so the incoming order differs from the schema's
    col = df.pop("cohort")
    df[col.name] = col
    assert not df.columns.equals(original_column_order)
    assert set(df.columns) == set(original_column_order)

    es.replace_dataframe(dataframe_name="customers", df=df)

    assert es["customers"].columns.equals(original_column_order)
def test_replace_dataframe_different_woodwork_initialized(es):
    """Typing info on the replacement dataframe is discarded (with a warning)
    in favor of the entityset's existing schema."""
    df = es["customers"].copy()
    if ps and isinstance(df, ps.DataFrame):
        df["age"] = [1, 2, 3]
    else:
        df["age"] = pd.Series([1, 2, 3])

    # Initialize Woodwork on the new DataFrame and change the schema so it won't match the original DataFrame's schema
    df.ww.init(schema=es["customers"].ww.schema)
    df.ww.set_types(
        logical_types={"id": "NaturalLanguage", "cancel_date": "NaturalLanguage"},
    )
    assert df["id"].dtype == "string"
    assert df["cancel_date"].dtype == "string"

    assert es["customers"]["id"].dtype == "int64"
    assert es["customers"]["cancel_date"].dtype == "datetime64[ns]"

    original_schema = es["customers"].ww.schema

    warning = "Woodwork typing information on new dataframe will be replaced with existing typing information from customers"
    with pytest.warns(UserWarning, match=warning):
        es.replace_dataframe("customers", df, already_sorted=True)

    # New data is present, but dtypes come from the original schema
    actual = to_pandas(es["customers"]["age"]).sort_values()
    assert all(actual == [1, 2, 3])
    assert es["customers"].ww._schema == original_schema

    assert es["customers"]["id"].dtype == "int64"
    assert es["customers"]["cancel_date"].dtype == "datetime64[ns]"
@pytest.mark.skipif("not dd")
def test_replace_dataframe_different_dataframe_types():
    """Replacing a Dask-backed dataframe with a pandas one is rejected."""
    dask_es = EntitySet(id="dask_es")
    sessions = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "user": [1, 2, 1, 3],
            "time": [
                pd.to_datetime("2019-01-10"),
                pd.to_datetime("2019-02-03"),
                pd.to_datetime("2019-01-01"),
                pd.to_datetime("2017-08-25"),
            ],
            "strings": ["I am a string", "23", "abcdef ghijk", ""],
        },
    )
    sessions_dask = dd.from_pandas(sessions, npartitions=2)
    sessions_logical_types = {
        "id": Integer,
        "user": Integer,
        "time": Datetime,
        "strings": NaturalLanguage,
    }
    sessions_semantic_tags = {"user": "foreign_key"}

    dask_es.add_dataframe(
        dataframe_name="sessions",
        dataframe=sessions_dask,
        index="id",
        time_index="time",
        logical_types=sessions_logical_types,
        semantic_tags=sessions_semantic_tags,
    )

    with pytest.raises(TypeError, match="Incorrect DataFrame type used"):
        dask_es.replace_dataframe("sessions", sessions)
def test_replace_dataframe_and_min_last_time_index(es):
    """Replacing a dataframe with recalculate_last_time_indexes=True shifts
    dependent last time indexes along with the new time index values."""
    es.add_last_time_indexes(["products"])
    original_time_index = es["log"]["datetime"].copy()
    original_last_time_index = es["products"][LTI_COLUMN_NAME].copy()
    # Spark series cannot be shifted directly; round-trip through pandas.
    if ps and isinstance(original_time_index, ps.Series):
        new_time_index = ps.from_pandas(
            original_time_index.to_pandas() + pd.Timedelta(days=1),
        )
        expected_last_time_index = ps.from_pandas(
            original_last_time_index.to_pandas() + pd.Timedelta(days=1),
        )
    else:
        new_time_index = original_time_index + pd.Timedelta(days=1)
        expected_last_time_index = original_last_time_index + pd.Timedelta(days=1)
    new_dataframe = es["log"].copy()
    new_dataframe["datetime"] = new_time_index
    # Drop the LTI column so replace_dataframe is forced to recompute it.
    new_dataframe.pop(LTI_COLUMN_NAME)
    es.replace_dataframe("log", new_dataframe, recalculate_last_time_indexes=True)
    # Spark reorders indices during last time index, so we sort to confirm individual values are the same
    pd.testing.assert_series_equal(
        to_pandas(es["products"][LTI_COLUMN_NAME]).sort_index(),
        to_pandas(expected_last_time_index).sort_index(),
    )
    pd.testing.assert_series_equal(
        to_pandas(es["log"][LTI_COLUMN_NAME]).sort_index(),
        to_pandas(new_time_index).sort_index(),
        check_names=False,
    )
def test_replace_dataframe_dont_recalculate_last_time_index_present(es):
    """With recalculate_last_time_indexes=False, replacing a dataframe must
    leave the previously computed last time index values untouched even
    though the time index column itself was shifted."""
    es.add_last_time_indexes()
    time_index_before = es["customers"]["signup_date"].copy()
    lti_before = es["customers"][LTI_COLUMN_NAME].copy()
    # Spark series cannot be shifted in place; round-trip through pandas.
    if ps and isinstance(time_index_before, ps.Series):
        shifted_time_index = ps.from_pandas(
            time_index_before.to_pandas() + pd.Timedelta(days=10),
        )
    else:
        shifted_time_index = time_index_before + pd.Timedelta(days=10)
    replacement = es["customers"].copy()
    replacement["signup_date"] = shifted_time_index
    es.replace_dataframe(
        "customers",
        replacement,
        recalculate_last_time_indexes=False,
    )
    # The LTI column must be exactly what it was before the swap.
    pd.testing.assert_series_equal(
        to_pandas(es["customers"][LTI_COLUMN_NAME], sort_index=True),
        to_pandas(lti_before, sort_index=True),
    )
def test_replace_dataframe_dont_recalculate_last_time_index_not_present(es):
es.add_last_time_indexes()
original_lti_name = es["customers"].ww.metadata.get("last_time_index")
assert original_lti_name is not None
original_time_index = es["customers"]["signup_date"].copy()
if ps and isinstance(original_time_index, ps.Series):
new_time_index = ps.from_pandas(
original_time_index.to_pandas() + pd.Timedelta(days=10),
)
else:
new_time_index = original_time_index + pd.Timedelta(days=10)
new_dataframe = es["customers"].copy()
new_dataframe["signup_date"] = new_time_index
new_dataframe.pop(LTI_COLUMN_NAME)
es.replace_dataframe(
"customers",
new_dataframe,
recalculate_last_time_indexes=False,
)
assert "last_time_index" not in es["customers"].ww.metadata
assert original_lti_name not in es["customers"].columns
def test_replace_dataframe_recalculate_last_time_index_not_present(es):
es.add_last_time_indexes()
original_time_index = es["log"]["datetime"].copy()
if ps and isinstance(original_time_index, ps.Series):
new_time_index = ps.from_pandas(
original_time_index.to_pandas() + pd.Timedelta(days=10),
)
else:
new_time_index = original_time_index + pd.Timedelta(days=10)
new_dataframe = es["log"].copy()
new_dataframe["datetime"] = new_time_index
new_dataframe.pop(LTI_COLUMN_NAME)
es.replace_dataframe("log", new_dataframe, recalculate_last_time_indexes=True)
pd.testing.assert_series_equal(
to_pandas(es["log"]["datetime"]).sort_index(),
to_pandas(new_time_index).sort_index(),
check_names=False,
)
pd.testing.assert_series_equal(
to_pandas(es["log"][LTI_COLUMN_NAME]).sort_index(),
to_pandas(new_time_index).sort_index(),
check_names=False,
)
def test_replace_dataframe_recalculate_last_time_index_present(es):
es.add_last_time_indexes()
original_time_index = es["log"]["datetime"].copy()
if ps and isinstance(original_time_index, ps.Series):
new_time_index = ps.from_pandas(
original_time_index.to_pandas() + pd.Timedelta(days=10),
)
else:
new_time_index = original_time_index + pd.Timedelta(days=10)
new_dataframe = es["log"].copy()
new_dataframe["datetime"] = new_time_index
assert LTI_COLUMN_NAME in new_dataframe.columns
es.replace_dataframe("log", new_dataframe, recalculate_last_time_indexes=True)
pd.testing.assert_series_equal(
to_pandas(es["log"]["datetime"]).sort_index(),
to_pandas(new_time_index).sort_index(),
check_names=False,
)
pd.testing.assert_series_equal(
to_pandas(es["log"][LTI_COLUMN_NAME]).sort_index(),
to_pandas(new_time_index).sort_index(),
check_names=False,
)
def test_normalize_dataframe_loses_column_metadata(es):
es["log"].ww.columns["value"].metadata["interesting_values"] = [0.0, 1.0]
es["log"].ww.columns["priority_level"].metadata["interesting_values"] = [1]
es["log"].ww.columns["value"].description = "a value column"
es["log"].ww.columns["priority_level"].description = "a priority level column"
assert "interesting_values" in es["log"].ww.columns["priority_level"].metadata
assert "interesting_values" in es["log"].ww.columns["value"].metadata
assert es["log"].ww.columns["value"].description == "a value column"
assert (
es["log"].ww.columns["priority_level"].description == "a priority level column"
)
es.normalize_dataframe(
"log",
"values_2",
"value_2",
additional_columns=["priority_level"],
copy_columns=["value"],
make_time_index=False,
)
# Metadata in the original dataframe and the new dataframe are maintained
assert "interesting_values" in es["log"].ww.columns["value"].metadata
assert "interesting_values" in es["values_2"].ww.columns["value"].metadata
assert "interesting_values" in es["values_2"].ww.columns["priority_level"].metadata
assert es["log"].ww.columns["value"].description == "a value column"
assert es["values_2"].ww.columns["value"].description == "a value column"
assert (
es["values_2"].ww.columns["priority_level"].description
== "a priority level column"
)
def test_normalize_ww_init():
    """normalize_dataframe preserves Woodwork names on both the source
    dataframe and the newly created normalized dataframe."""
    es = EntitySet()
    data = {
        "id": [1, 2, 3, 4],
        "col": ["a", "b", "c", "d"],
        "df2_id": [1, 1, 2, 2],
        "df2_col": [True, False, True, True],
    }
    df = pd.DataFrame(data)
    df.ww.init(index="id", name="test_name")
    es.add_dataframe(dataframe=df)
    # The name supplied at ww.init time is used as the EntitySet key.
    for frame_name in ("test_name",):
        assert es[frame_name].ww.name == frame_name
        assert es[frame_name].ww.schema.name == frame_name
    es.normalize_dataframe(
        "test_name",
        "new_df",
        "df2_id",
        additional_columns=["df2_col"],
    )
    # Both the original and the normalized dataframe keep consistent names.
    for frame_name in ("test_name", "new_df"):
        assert es[frame_name].ww.name == frame_name
        assert es[frame_name].ww.schema.name == frame_name
| 34,935 | 31.498605 | 163 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_identity_features.py | from featuretools import IdentityFeature
from featuretools.primitives.utils import PrimitivesDeserializer
def test_relationship_path(es):
    """An identity feature lives on its own dataframe, so its relationship
    path contains no hops."""
    feat = IdentityFeature(es["log"].ww["value"])
    assert len(feat.relationship_path) == 0
def test_serialization(es):
    """An IdentityFeature round-trips through its dictionary representation."""
    value = IdentityFeature(es["log"].ww["value"])
    dictionary = {
        "name": "value",
        "column_name": "value",
        "dataframe_name": "log",
    }
    assert dictionary == value.get_arguments()
    # NOTE(review): PrimitivesDeserializer is passed as the class itself, not
    # an instance -- presumably from_dictionary accepts either; confirm.
    assert value == IdentityFeature.from_dictionary(
        dictionary,
        es,
        {},
        PrimitivesDeserializer,
    )
| 616 | 22.730769 | 64 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_primitive_base.py | from datetime import datetime
import numpy as np
import pandas as pd
from pytest import raises
from featuretools.primitives import Haversine, IsIn, IsNull, Max, TimeSinceLast
from featuretools.primitives.base import TransformPrimitive
def test_call_agg():
    """An aggregation primitive instance is directly callable on an iterable
    and stays callable across repeated invocations."""
    primitive = Max()
    # Invoke twice on purpose: the second call must reuse the instance cleanly.
    for _ in range(2):
        assert primitive(range(6)) == 5
def test_call_trans():
    """A transform primitive called directly returns an element-wise Series
    result, stable across repeated calls."""
    primitive = IsNull()
    expected = pd.Series([False] * 6)
    for _ in range(2):
        assert expected.equals(primitive(range(6)))
def test_uses_calc_time():
    """Primitives that use the calculation time accept a ``time`` keyword
    serving as the reference point for the computation."""
    primitive = TimeSinceLast()
    primitive_h = TimeSinceLast(unit="hours")
    datetimes = pd.Series([datetime(2015, 6, 6), datetime(2015, 6, 7)])
    # One day between the latest timestamp (06-07) and the cutoff (06-08),
    # expressed in seconds (default unit) and in hours.
    answer = 86400.0
    answer_h = 24.0
    assert answer == primitive(datetimes, time=datetime(2015, 6, 8))
    assert answer_h == primitive_h(datetimes, time=datetime(2015, 6, 8))
def test_call_multiple_args():
    """A primitive taking two inputs can be called with both positionally."""
    primitive = Haversine()
    points_a = [(42.4, -71.1), (40.0, -122.4)]
    points_b = [(40.0, -122.4), (41.2, -96.75)]
    expected = [2631.231, 1343.289]
    # Repeat to confirm the second call behaves identically to the first.
    for _ in range(2):
        rounded = np.round(primitive(points_a, points_b), 3)
        assert rounded.tolist() == expected
def test_get_function_called_once():
    """Calling a primitive multiple times must fetch its function only once."""
    class TestPrimitive(TransformPrimitive):
        def __init__(self):
            # Tracks how many times get_function is invoked.
            self.get_function_call_count = 0
        def get_function(self):
            self.get_function_call_count += 1
            def test(x):
                return x
            return test
    primitive = TestPrimitive()
    for _ in range(2):
        primitive(range(6))
    # The second call must reuse the function obtained on the first call.
    assert primitive.get_function_call_count == 1
def test_multiple_arg_string():
    """get_args_string renders only arguments that differ from their
    defaults, in signature order, each prefixed with ', '."""
    class FakePrimitive(TransformPrimitive):
        def __init__(self, bool=True, int=0, float=None):
            self.bool = bool
            self.int = int
            self.float = float
    prim = FakePrimitive(bool=True, int=4, float=0.1)
    # bool matches its default, so only int and float appear in the string.
    assert prim.get_args_string() == ", int=4, float=0.1"
def test_single_args_string():
    """A single non-default argument is rendered as ', name=value'."""
    prim = IsIn([1, 2, 3])
    assert prim.get_args_string() == ", list_of_outputs=[1, 2, 3]"
def test_args_string_default():
    """When every argument keeps its default, the args string is empty."""
    prim = IsIn()
    assert prim.get_args_string() == ""
def test_args_string_mixed():
    """Arguments explicitly passed but equal to their defaults are
    suppressed; only genuinely changed arguments appear."""
    class FakePrimitive(TransformPrimitive):
        def __init__(self, bool=True, int=0, float=None):
            self.bool = bool
            self.int = int
            self.float = float
    prim = FakePrimitive(bool=False, int=0)
    # int equals its default (0), so only bool shows up.
    assert prim.get_args_string() == ", bool=False"
def test_args_string_undefined():
    """A primitive whose __init__ takes no arguments yields an empty string."""
    prim = Max()
    assert prim.get_args_string() == ""
def test_args_string_error():
    """get_args_string requires each __init__ argument to be stored as an
    instance attribute; a primitive that drops them raises AssertionError."""
    class Primitive(TransformPrimitive):
        def __init__(self, bool=True, int=0, float=None):
            pass
    with raises(AssertionError, match="must be attribute"):
        Primitive(bool=True, int=4, float=0.1).get_args_string()
| 2,873 | 25.127273 | 79 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_feature_base.py | import os.path
import re
import pytest
from pympler.asizeof import asizeof
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Integer
from featuretools import Feature, config, feature_base
from featuretools.feature_base import IdentityFeature
from featuretools.primitives import (
Count,
Diff,
Last,
Mode,
Negate,
NMostCommon,
NumUnique,
Sum,
TransformPrimitive,
)
from featuretools.synthesis.deep_feature_synthesis import can_stack_primitive_on_inputs
from featuretools.tests.testing_utils import check_rename
def test_copy_features_does_not_copy_entityset(es):
agg = Feature(
es["log"].ww["value"],
parent_dataframe_name="sessions",
primitive=Sum,
)
agg_where = Feature(
es["log"].ww["value"],
parent_dataframe_name="sessions",
where=IdentityFeature(es["log"].ww["value"]) == 2,
primitive=Sum,
)
agg_use_previous = Feature(
es["log"].ww["value"],
parent_dataframe_name="sessions",
use_previous="4 days",
primitive=Sum,
)
agg_use_previous_where = Feature(
es["log"].ww["value"],
parent_dataframe_name="sessions",
where=IdentityFeature(es["log"].ww["value"]) == 2,
use_previous="4 days",
primitive=Sum,
)
features = [agg, agg_where, agg_use_previous, agg_use_previous_where]
in_memory_size = asizeof(locals())
copied = [f.copy() for f in features]
new_in_memory_size = asizeof(locals())
assert new_in_memory_size < 2 * in_memory_size
def test_get_dependencies(es):
    """get_dependencies returns direct parents when deep=False, the full
    ancestor chain when deep=True, and prunes subtrees rooted at ignored
    features."""
    f = Feature(es["log"].ww["value"])
    agg1 = Feature(f, parent_dataframe_name="sessions", primitive=Sum)
    agg2 = Feature(agg1, parent_dataframe_name="customers", primitive=Sum)
    d1 = Feature(agg2, "sessions")
    shallow = d1.get_dependencies(deep=False, ignored=None)
    deep = d1.get_dependencies(deep=True, ignored=None)
    ignored = set([agg1.unique_name()])
    deep_ignored = d1.get_dependencies(deep=True, ignored=ignored)
    assert [s.unique_name() for s in shallow] == [agg2.unique_name()]
    assert [d.unique_name() for d in deep] == [
        agg2.unique_name(),
        agg1.unique_name(),
        f.unique_name(),
    ]
    # Ignoring agg1 also drops its own dependency f from the traversal.
    assert [d.unique_name() for d in deep_ignored] == [agg2.unique_name()]
def test_get_depth(es):
    """get_depth counts stacked feature layers, optionally stopping early.

    Make sure this works if we pass in two of the same feature (f and g are
    equivalent). This came up when a user supplied duplicates in the
    seed_features of DFS.
    """
    f = Feature(es["log"].ww["value"])
    g = Feature(es["log"].ww["value"])
    agg1 = Feature(f, parent_dataframe_name="sessions", primitive=Last)
    agg2 = Feature(agg1, parent_dataframe_name="customers", primitive=Last)
    d1 = Feature(agg2, "sessions")
    d2 = Feature(d1, "log")
    assert d2.get_depth() == 4
    assert d2.get_depth(stop_at=[f, g]) == 4
    # Fix: the agg1 assertion was duplicated verbatim; each stop point is
    # now checked exactly once.
    assert d2.get_depth(stop_at=[f, g, agg1]) == 3
    assert d2.get_depth(stop_at=[f, g, agg2]) == 2
    assert d2.get_depth(stop_at=[f, g, d1]) == 1
    assert d2.get_depth(stop_at=[f, g, d2]) == 0
def test_squared(es):
    """Multiplying a feature by itself produces two base features that share
    the same unique name."""
    feat = Feature(es["log"].ww["value"])
    product = feat * feat
    bases = product.base_features
    assert len(bases) == 2
    assert bases[0].unique_name() == bases[1].unique_name()
def test_return_type_inference(es):
    """Mode's output column schema matches the schema of the aggregated
    input column."""
    mode = Feature(
        es["log"].ww["priority_level"],
        parent_dataframe_name="customers",
        primitive=Mode,
    )
    assert (
        mode.column_schema
        == IdentityFeature(es["log"].ww["priority_level"]).column_schema
    )
def test_return_type_inference_direct_feature(es):
mode = Feature(
es["log"].ww["priority_level"],
parent_dataframe_name="customers",
primitive=Mode,
)
mode_session = Feature(mode, "sessions")
assert (
mode_session.column_schema
== IdentityFeature(es["log"].ww["priority_level"]).column_schema
)
def test_return_type_inference_index(es):
last = Feature(
es["log"].ww["id"],
parent_dataframe_name="customers",
primitive=Last,
)
assert "index" not in last.column_schema.semantic_tags
assert isinstance(last.column_schema.logical_type, Integer)
def test_return_type_inference_datetime_time_index(es):
last = Feature(
es["log"].ww["datetime"],
parent_dataframe_name="customers",
primitive=Last,
)
assert isinstance(last.column_schema.logical_type, Datetime)
def test_return_type_inference_numeric_time_index(int_es):
last = Feature(
int_es["log"].ww["datetime"],
parent_dataframe_name="customers",
primitive=Last,
)
assert "numeric" in last.column_schema.semantic_tags
def test_return_type_inference_id(es):
# direct features should keep foreign key tag
direct_id_feature = Feature(es["sessions"].ww["customer_id"], "log")
assert "foreign_key" in direct_id_feature.column_schema.semantic_tags
# aggregations of foreign key types should get converted
last_feat = Feature(
es["log"].ww["session_id"],
parent_dataframe_name="customers",
primitive=Last,
)
assert "foreign_key" not in last_feat.column_schema.semantic_tags
assert isinstance(last_feat.column_schema.logical_type, Integer)
# also test direct feature of aggregation
last_direct = Feature(last_feat, "sessions")
assert "foreign_key" not in last_direct.column_schema.semantic_tags
assert isinstance(last_direct.column_schema.logical_type, Integer)
def test_set_data_path(es):
    """Primitive.get_filepath resolves against the configurable
    ``primitive_data_folder`` and reflects config changes immediately.

    Mutates global config, so it restores the original value at the end.
    """
    key = "primitive_data_folder"
    # Don't change orig_path
    orig_path = config.get(key)
    new_path = "/example/new/directory"
    filename = "test.csv"
    # Test that default path works
    sum_prim = Sum()
    assert sum_prim.get_filepath(filename) == os.path.join(orig_path, filename)
    # Test that new path works
    config.set({key: new_path})
    assert sum_prim.get_filepath(filename) == os.path.join(new_path, filename)
    # Test that new path with trailing / works
    new_path += "/"
    config.set({key: new_path})
    assert sum_prim.get_filepath(filename) == os.path.join(new_path, filename)
    # Test that the path is correct on newly defined feature
    sum_prim2 = Sum()
    assert sum_prim2.get_filepath(filename) == os.path.join(new_path, filename)
    # Ensure path was reset
    config.set({key: orig_path})
    assert config.get(key) == orig_path
def test_to_dictionary_direct(es):
actual = Feature(
IdentityFeature(es["sessions"].ww["customer_id"]),
"log",
).to_dictionary()
expected = {
"type": "DirectFeature",
"dependencies": ["sessions: customer_id"],
"arguments": {
"name": "sessions.customer_id",
"base_feature": "sessions: customer_id",
"relationship": {
"parent_dataframe_name": "sessions",
"child_dataframe_name": "log",
"parent_column_name": "id",
"child_column_name": "session_id",
},
},
}
assert expected == actual
def test_to_dictionary_identity(es):
actual = Feature(es["sessions"].ww["customer_id"]).to_dictionary()
expected = {
"type": "IdentityFeature",
"dependencies": [],
"arguments": {
"name": "customer_id",
"column_name": "customer_id",
"dataframe_name": "sessions",
},
}
assert expected == actual
def test_to_dictionary_agg(es):
primitive = Sum()
actual = Feature(
es["customers"].ww["age"],
primitive=primitive,
parent_dataframe_name="cohorts",
).to_dictionary()
expected = {
"type": "AggregationFeature",
"dependencies": ["customers: age"],
"arguments": {
"name": "SUM(customers.age)",
"base_features": ["customers: age"],
"relationship_path": [
{
"parent_dataframe_name": "cohorts",
"child_dataframe_name": "customers",
"parent_column_name": "cohort",
"child_column_name": "cohort",
},
],
"primitive": primitive,
"where": None,
"use_previous": None,
},
}
assert expected == actual
def test_to_dictionary_where(es):
primitive = Sum()
actual = Feature(
es["log"].ww["value"],
parent_dataframe_name="sessions",
where=IdentityFeature(es["log"].ww["value"]) == 2,
primitive=primitive,
).to_dictionary()
expected = {
"type": "AggregationFeature",
"dependencies": ["log: value", "log: value = 2"],
"arguments": {
"name": "SUM(log.value WHERE value = 2)",
"base_features": ["log: value"],
"relationship_path": [
{
"parent_dataframe_name": "sessions",
"child_dataframe_name": "log",
"parent_column_name": "id",
"child_column_name": "session_id",
},
],
"primitive": primitive,
"where": "log: value = 2",
"use_previous": None,
},
}
assert expected == actual
def test_to_dictionary_trans(es):
primitive = Negate()
trans_feature = Feature(es["customers"].ww["age"], primitive=primitive)
expected = {
"type": "TransformFeature",
"dependencies": ["customers: age"],
"arguments": {
"name": "-(age)",
"base_features": ["customers: age"],
"primitive": primitive,
},
}
assert expected == trans_feature.to_dictionary()
def test_to_dictionary_groupby_trans(es):
primitive = Negate()
id_feat = Feature(es["log"].ww["product_id"])
groupby_feature = Feature(
es["log"].ww["value"],
primitive=primitive,
groupby=id_feat,
)
expected = {
"type": "GroupByTransformFeature",
"dependencies": ["log: value", "log: product_id"],
"arguments": {
"name": "-(value) by product_id",
"base_features": ["log: value"],
"primitive": primitive,
"groupby": "log: product_id",
},
}
assert expected == groupby_feature.to_dictionary()
def test_to_dictionary_multi_slice(es):
slice_feature = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="customers",
primitive=NMostCommon(n=2),
)[0]
expected = {
"type": "FeatureOutputSlice",
"dependencies": ["customers: N_MOST_COMMON(log.product_id, n=2)"],
"arguments": {
"name": "N_MOST_COMMON(log.product_id, n=2)[0]",
"base_feature": "customers: N_MOST_COMMON(log.product_id, n=2)",
"n": 0,
},
}
assert expected == slice_feature.to_dictionary()
def test_multi_output_base_error_agg(es):
three_common = NMostCommon(3)
tc = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="sessions",
primitive=three_common,
)
error_text = "Cannot stack on whole multi-output feature."
with pytest.raises(ValueError, match=error_text):
Feature(tc, parent_dataframe_name="customers", primitive=NumUnique)
def test_multi_output_base_error_trans(es):
class TestTime(TransformPrimitive):
name = "test_time"
input_types = [ColumnSchema(logical_type=Datetime)]
return_type = ColumnSchema(semantic_tags={"numeric"})
number_output_features = 6
tc = Feature(es["customers"].ww["birthday"], primitive=TestTime)
error_text = "Cannot stack on whole multi-output feature."
with pytest.raises(ValueError, match=error_text):
Feature(tc, primitive=Diff)
def test_multi_output_attributes(es):
tc = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="sessions",
primitive=NMostCommon,
)
assert tc.generate_name() == "N_MOST_COMMON(log.product_id)"
assert tc.number_output_features == 3
assert tc.base_features == ["<Feature: product_id>"]
assert tc[0].generate_name() == "N_MOST_COMMON(log.product_id)[0]"
assert tc[0].number_output_features == 1
assert tc[0].base_features == [tc]
assert tc.relationship_path == tc[0].relationship_path
def test_multi_output_index_error(es):
error_text = "can only access slice of multi-output feature"
three_common = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="sessions",
primitive=NMostCommon,
)
with pytest.raises(AssertionError, match=error_text):
single = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="sessions",
primitive=NumUnique,
)
single[0]
error_text = "Cannot get item from slice of multi output feature"
with pytest.raises(ValueError, match=error_text):
three_common[0][0]
error_text = "index is higher than the number of outputs"
with pytest.raises(AssertionError, match=error_text):
three_common[10]
def test_rename(es):
    """Renaming a single-output aggregation feature updates both its name
    and its feature-name list."""
    count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    check_rename(count_feat, "session_test", ["session_test"])
def test_rename_multioutput(es):
feat = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="customers",
primitive=NMostCommon(n=2),
)
new_name = "session_test"
new_names = ["session_test[0]", "session_test[1]"]
check_rename(feat, new_name, new_names)
def test_rename_featureoutputslice(es):
multi_output_feat = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="customers",
primitive=NMostCommon(n=2),
)
feat = feature_base.FeatureOutputSlice(multi_output_feat, 0)
new_name = "session_test"
new_names = ["session_test"]
check_rename(feat, new_name, new_names)
def test_set_feature_names_wrong_number_of_names(es):
feat = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="customers",
primitive=NMostCommon(n=2),
)
new_names = ["col1"]
error_msg = re.escape(
"Number of names provided must match the number of output features: 1 name(s) provided, 2 expected.",
)
with pytest.raises(ValueError, match=error_msg):
feat.set_feature_names(new_names)
def test_set_feature_names_not_unique(es):
feat = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="customers",
primitive=NMostCommon(n=2),
)
new_names = ["col1", "col1"]
error_msg = "Provided output feature names must be unique."
with pytest.raises(ValueError, match=error_msg):
feat.set_feature_names(new_names)
def test_set_feature_names_error_on_single_output_feature(es):
feat = Feature(es["sessions"].ww["device_name"], "log")
new_names = ["sessions_device"]
error_msg = "The set_feature_names can only be used on features that have more than one output column."
with pytest.raises(ValueError, match=error_msg):
feat.set_feature_names(new_names)
def test_set_feature_names_transform_feature(es):
class MultiCumulative(TransformPrimitive):
name = "multi_cum_sum"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
number_output_features = 3
feat = Feature(es["log"].ww["value"], primitive=MultiCumulative)
new_names = ["cumulative_sum", "cumulative_max", "cumulative_min"]
feat.set_feature_names(new_names)
assert feat.get_feature_names() == new_names
def test_set_feature_names_aggregation_feature(es):
feat = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="customers",
primitive=NMostCommon(n=2),
)
new_names = ["agg_col_1", "second_agg_col"]
feat.set_feature_names(new_names)
assert feat.get_feature_names() == new_names
def test_renaming_resets_feature_output_names_to_default(es):
feat = Feature(
es["log"].ww["product_id"],
parent_dataframe_name="customers",
primitive=NMostCommon(n=2),
)
new_names = ["renamed1", "renamed2"]
feat.set_feature_names(new_names)
assert feat.get_feature_names() == new_names
feat = feat.rename("new_feature_name")
assert feat.get_feature_names() == ["new_feature_name[0]", "new_feature_name[1]"]
def test_base_of_and_stack_on_heuristic(es, test_aggregation_primitive):
child = Feature(
es["sessions"].ww["id"],
parent_dataframe_name="customers",
primitive=Count,
)
test_aggregation_primitive.stack_on = []
child.primitive.base_of = []
assert not can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = []
child.primitive.base_of = None
assert can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = []
child.primitive.base_of = [test_aggregation_primitive]
assert can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = None
child.primitive.base_of = []
assert can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = None
child.primitive.base_of = None
assert can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = None
child.primitive.base_of = [test_aggregation_primitive]
assert can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = [type(child.primitive)]
child.primitive.base_of = []
assert can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = [type(child.primitive)]
child.primitive.base_of = None
assert can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = [type(child.primitive)]
child.primitive.base_of = [test_aggregation_primitive]
assert can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on = None
child.primitive.base_of = None
child.primitive.base_of_exclude = [test_aggregation_primitive]
assert not can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
test_aggregation_primitive.stack_on_exclude = [Count]
assert not can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
child.primitive.number_output_features = 2
test_aggregation_primitive.stack_on_exclude = []
test_aggregation_primitive.stack_on = []
child.primitive.base_of = []
assert not can_stack_primitive_on_inputs(test_aggregation_primitive(), [child])
def test_stack_on_self(es, test_transform_primitive):
# test stacks on self
child = Feature(
es["log"].ww["value"],
primitive=test_transform_primitive,
)
test_transform_primitive.stack_on = []
child.primitive.base_of = []
test_transform_primitive.stack_on_self = False
child.primitive.stack_on_self = False
assert not can_stack_primitive_on_inputs(test_transform_primitive(), [child])
test_transform_primitive.stack_on_self = True
assert can_stack_primitive_on_inputs(test_transform_primitive(), [child])
test_transform_primitive.stack_on = None
test_transform_primitive.stack_on_self = False
assert not can_stack_primitive_on_inputs(test_transform_primitive(), [child])
| 19,917 | 31.177706 | 109 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_overrides.py | from featuretools import Feature, calculate_feature_matrix
from featuretools.primitives import (
AddNumeric,
AddNumericScalar,
Count,
DivideByFeature,
DivideNumeric,
DivideNumericScalar,
Equal,
EqualScalar,
GreaterThan,
GreaterThanEqualTo,
GreaterThanEqualToScalar,
GreaterThanScalar,
LessThan,
LessThanEqualTo,
LessThanEqualToScalar,
LessThanScalar,
ModuloByFeature,
ModuloNumeric,
ModuloNumericScalar,
MultiplyNumeric,
MultiplyNumericScalar,
Negate,
NotEqual,
NotEqualScalar,
ScalarSubtractNumericFeature,
SubtractNumeric,
SubtractNumericScalar,
Sum,
)
from featuretools.tests.testing_utils import to_pandas
def test_overrides(es):
value = Feature(es["log"].ww["value"])
value2 = Feature(es["log"].ww["value_2"])
feats = [
AddNumeric,
SubtractNumeric,
MultiplyNumeric,
DivideNumeric,
ModuloNumeric,
GreaterThan,
LessThan,
Equal,
NotEqual,
GreaterThanEqualTo,
LessThanEqualTo,
]
assert Feature(value, primitive=Negate).unique_name() == (-value).unique_name()
compares = [(value, value), (value, value2)]
overrides = [
value + value,
value - value,
value * value,
value / value,
value % value,
value > value,
value < value,
value == value,
value != value,
value >= value,
value <= value,
value + value2,
value - value2,
value * value2,
value / value2,
value % value2,
value > value2,
value < value2,
value == value2,
value != value2,
value >= value2,
value <= value2,
]
for left, right in compares:
for feat in feats:
f = Feature([left, right], primitive=feat)
o = overrides.pop(0)
assert o.unique_name() == f.unique_name()
def test_override_boolean(es):
    """OR, AND, and ``~`` on boolean features compute the expected truth
    tables over the feature matrix."""
    count = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    count_lo = Feature(count, primitive=GreaterThanScalar(1))
    count_hi = Feature(count, primitive=LessThanScalar(10))
    # Expected rows for OR, AND, NOT(AND), one list per feature below.
    to_test = [[True, True, True], [True, True, False], [False, False, True]]
    features = []
    features.append(count_lo.OR(count_hi))
    features.append(count_lo.AND(count_hi))
    features.append(~(count_lo.AND(count_hi)))
    df = calculate_feature_matrix(
        entityset=es,
        features=features,
        instance_ids=[0, 1, 2],
    )
    df = to_pandas(df, index="id", sort_index=True)
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        assert v == test
def test_scalar_overrides(es):
value = Feature(es["log"].ww["value"])
feats = [
AddNumericScalar,
SubtractNumericScalar,
MultiplyNumericScalar,
DivideNumericScalar,
ModuloNumericScalar,
GreaterThanScalar,
LessThanScalar,
EqualScalar,
NotEqualScalar,
GreaterThanEqualToScalar,
LessThanEqualToScalar,
]
overrides = [
value + 2,
value - 2,
value * 2,
value / 2,
value % 2,
value > 2,
value < 2,
value == 2,
value != 2,
value >= 2,
value <= 2,
]
for feat in feats:
f = Feature(value, primitive=feat(2))
o = overrides.pop(0)
assert o.unique_name() == f.unique_name()
value2 = Feature(es["log"].ww["value_2"])
reverse_feats = [
AddNumericScalar,
ScalarSubtractNumericFeature,
MultiplyNumericScalar,
DivideByFeature,
ModuloByFeature,
GreaterThanScalar,
LessThanScalar,
EqualScalar,
NotEqualScalar,
GreaterThanEqualToScalar,
LessThanEqualToScalar,
]
reverse_overrides = [
2 + value2,
2 - value2,
2 * value2,
2 / value2,
2 % value2,
2 < value2,
2 > value2,
2 == value2,
2 != value2,
2 <= value2,
2 >= value2,
]
for feat in reverse_feats:
f = Feature(value2, primitive=feat(2))
o = reverse_overrides.pop(0)
assert o.unique_name() == f.unique_name()
def test_override_cmp_from_column(es):
    """The ``>`` operator on an identity feature builds a scalar-comparison
    feature whose computed column matches the expected booleans."""
    gt_one = Feature(es["log"].ww["value"]) > 1
    expected = [False, True, True]
    matrix = calculate_feature_matrix(
        entityset=es,
        features=[gt_one],
        instance_ids=[0, 1, 2],
    )
    matrix = to_pandas(matrix, index="id", sort_index=True)
    assert matrix[gt_one.get_name()].tolist() == expected
def test_override_cmp(es):
    """Comparison operators on features work against scalars and other
    features, producing boolean features with the expected values.

    Fix: ``ne_lo`` and ``ne_other`` were listed in ``features`` but
    ``to_test`` had no expected rows for them, so they were silently never
    verified. Their expected values are added (derived from the sibling
    assertions: session counts are [>1, >1, 1] and count never equals sum),
    and a length check guards against this drifting again.
    """
    count = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    _sum = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="sessions",
        primitive=Sum,
    )
    gt_lo = count > 1
    gt_other = count > _sum
    ge_lo = count >= 1
    ge_other = count >= _sum
    lt_hi = count < 10
    lt_other = count < _sum
    le_hi = count <= 10
    le_other = count <= _sum
    ne_lo = count != 1
    ne_other = count != _sum
    to_test = [
        [True, True, False],
        [False, False, True],
        [True, True, True],
        [False, False, True],
        [True, True, True],
        [True, True, False],
        [True, True, True],
        [True, True, False],
        # ne_lo: only the third session's count equals 1.
        [True, True, False],
        # ne_other: count differs from sum for every session.
        [True, True, True],
    ]
    features = [
        gt_lo,
        gt_other,
        ge_lo,
        ge_other,
        lt_hi,
        lt_other,
        le_hi,
        le_other,
        ne_lo,
        ne_other,
    ]
    df = calculate_feature_matrix(
        entityset=es,
        features=features,
        instance_ids=[0, 1, 2],
    )
    df = to_pandas(df, index="id", sort_index=True)
    # Every feature must have a corresponding expected row.
    assert len(to_test) == len(features)
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        assert v == test
| 6,106 | 22.579151 | 83 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_transform_features.py | from inspect import isclass
import numpy as np
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import (
Boolean,
BooleanNullable,
Categorical,
Datetime,
Double,
Integer,
IntegerNullable,
)
from featuretools import (
AggregationFeature,
EntitySet,
Feature,
IdentityFeature,
TransformFeature,
calculate_feature_matrix,
dfs,
primitives,
)
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator,
)
from featuretools.primitives import (
Absolute,
AddNumeric,
AddNumericScalar,
Age,
Count,
Day,
Diff,
DiffDatetime,
DivideByFeature,
DivideNumeric,
DivideNumericScalar,
Equal,
EqualScalar,
FileExtension,
First,
FullNameToFirstName,
FullNameToLastName,
FullNameToTitle,
GreaterThan,
GreaterThanEqualTo,
GreaterThanEqualToScalar,
GreaterThanScalar,
Haversine,
Hour,
IsIn,
IsNull,
Lag,
Latitude,
LessThan,
LessThanEqualTo,
LessThanEqualToScalar,
LessThanScalar,
Longitude,
Min,
Mode,
MultiplyBoolean,
MultiplyNumeric,
MultiplyNumericBoolean,
MultiplyNumericScalar,
Not,
NotEqual,
NotEqualScalar,
NumCharacters,
NumericLag,
NumWords,
Percentile,
ScalarSubtractNumericFeature,
SubtractNumeric,
SubtractNumericScalar,
Sum,
TimeSince,
TransformPrimitive,
get_transform_primitives,
)
from featuretools.synthesis.deep_feature_synthesis import match
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library, import_or_none
from featuretools.utils.spark_utils import pd_to_spark_clean
dd = import_or_none("dask.dataframe")
def test_init_and_name(es):
    """Smoke test: every registered transform primitive has a name and can be
    instantiated, named, and calculated against matching input features.

    Raises an Exception if some primitive never finds a matching input,
    which would mean it was silently skipped by this test.
    """
    log = es["log"]
    rating = Feature(IdentityFeature(es["products"].ww["rating"]), "log")
    log_features = [Feature(es["log"].ww[col]) for col in log.columns] + [
        Feature(rating, primitive=GreaterThanScalar(2.5)),
        Feature(rating, primitive=GreaterThanScalar(3.5)),
    ]
    # Add Timedelta feature
    # features.append(pd.Timestamp.now() - Feature(log['datetime']))
    customers_features = [
        Feature(es["customers"].ww[col]) for col in es["customers"].columns
    ]
    # check all transform primitives have a name
    for attribute_string in dir(primitives):
        attr = getattr(primitives, attribute_string)
        if isclass(attr):
            if issubclass(attr, TransformPrimitive) and attr != TransformPrimitive:
                assert getattr(attr, "name") is not None
    trans_primitives = get_transform_primitives().values()
    # If Dask EntitySet use only Dask compatible primitives
    if es.dataframe_type == Library.DASK:
        trans_primitives = [
            prim for prim in trans_primitives if Library.DASK in prim.compatibility
        ]
    if es.dataframe_type == Library.SPARK:
        trans_primitives = [
            prim for prim in trans_primitives if Library.SPARK in prim.compatibility
        ]
    for transform_prim in trans_primitives:
        # skip automated testing if a few special cases
        features_to_use = log_features
        if transform_prim in [NotEqual, Equal, FileExtension]:
            continue
        # These primitives need customer-level inputs (names, birthdays)
        # rather than log-level ones.
        if transform_prim in [
            Age,
            FullNameToFirstName,
            FullNameToLastName,
            FullNameToTitle,
        ]:
            features_to_use = customers_features
        # use the input_types matching function from DFS
        input_types = transform_prim.input_types
        if type(input_types[0]) == list:
            matching_inputs = match(input_types[0], features_to_use)
        else:
            matching_inputs = match(input_types, features_to_use)
        if len(matching_inputs) == 0:
            raise Exception("Transform Primitive %s not tested" % transform_prim.name)
        for prim in matching_inputs:
            instance = Feature(prim, primitive=transform_prim)
            # try to get name and calculate
            instance.get_name()
            calculate_feature_matrix([instance], entityset=es)
def test_relationship_path(es):
    """A transform feature carries an empty relationship path."""
    hour_feat = TransformFeature(Feature(es["log"].ww["datetime"]), Hour)
    assert not len(hour_feat.relationship_path)
def test_serialization(es):
    """A TransformFeature round-trips through its dictionary representation."""
    value = IdentityFeature(es["log"].ww["value"])
    primitive = MultiplyNumericScalar(value=2)
    value_x2 = TransformFeature(value, primitive)
    dictionary = {
        "name": value_x2.get_name(),
        "base_features": [value.unique_name()],
        "primitive": primitive,
    }
    assert dictionary == value_x2.get_arguments()
    assert value_x2 == TransformFeature.from_dictionary(
        dictionary,
        es,
        {value.unique_name(): value},
        primitive,
    )
def test_make_trans_feat(es):
    """Hour transform of the first log row's datetime evaluates to 10."""
    hour_feat = Feature(es["log"].ww["datetime"], primitive=Hour)
    calculator = FeatureSetCalculator(es, feature_set=FeatureSet([hour_feat]))
    result = to_pandas(calculator.run(np.array([0])))
    assert result[hour_feat.get_name()][0] == 10
@pytest.fixture
def pd_simple_es():
    """Pandas EntitySet with categorical, object, and datetime columns for
    the equality/inequality primitive tests."""
    df = pd.DataFrame(
        {
            "id": range(4),
            "value": pd.Categorical(["a", "c", "b", "d"]),
            "value2": pd.Categorical(["a", "b", "a", "d"]),
            "object": ["time1", "time2", "time3", "time4"],
            "datetime": pd.Series(
                [
                    pd.Timestamp("2001-01-01"),
                    pd.Timestamp("2001-01-02"),
                    pd.Timestamp("2001-01-03"),
                    pd.Timestamp("2001-01-04"),
                ],
            ),
        },
    )
    es = EntitySet("equal_test")
    es.add_dataframe(dataframe_name="values", dataframe=df, index="id")
    return es
@pytest.fixture
def dd_simple_es(pd_simple_es):
    """Dask copy of pd_simple_es; skipped when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    dataframes = {}
    for df in pd_simple_es.dataframes:
        dataframes[df.ww.name] = (
            dd.from_pandas(df.reset_index(drop=True), npartitions=4),
            df.ww.index,
            None,
            df.ww.logical_types,
        )
    relationships = [
        (
            rel.parent_name,
            rel._parent_column_name,
            rel.child_name,
            rel._child_column_name,
        )
        for rel in pd_simple_es.relationships
    ]
    return EntitySet(
        id=pd_simple_es.id,
        dataframes=dataframes,
        relationships=relationships,
    )
@pytest.fixture
def spark_simple_es(pd_simple_es):
    """Spark (pyspark.pandas) copy of pd_simple_es; skipped without Spark."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    dataframes = {}
    for df in pd_simple_es.dataframes:
        # Spark cannot ingest some pandas-specific dtypes; clean them first.
        cleaned_df = pd_to_spark_clean(df).reset_index(drop=True)
        dataframes[df.ww.name] = (
            ps.from_pandas(cleaned_df),
            df.ww.index,
            None,
            df.ww.logical_types,
        )
    relationships = [
        (
            rel.parent_name,
            rel._parent_column_name,
            rel.child_name,
            rel._child_column_name,
        )
        for rel in pd_simple_es.relationships
    ]
    return EntitySet(
        id=pd_simple_es.id,
        dataframes=dataframes,
        relationships=relationships,
    )
@pytest.fixture(params=["pd_simple_es", "dd_simple_es", "spark_simple_es"])
def simple_es(request):
    """Parametrized fixture yielding the simple EntitySet for each backend."""
    return request.getfixturevalue(request.param)
def test_equal_categorical(simple_es):
    """Equal works between two categorical columns with different category sets."""
    f1 = Feature(
        [
            IdentityFeature(simple_es["values"].ww["value"]),
            IdentityFeature(simple_es["values"].ww["value2"]),
        ],
        primitive=Equal,
    )
    df = calculate_feature_matrix(entityset=simple_es, features=[f1])
    if simple_es.dataframe_type != Library.SPARK:
        # Spark does not support categorical dtype
        assert set(simple_es["values"]["value"].cat.categories) != set(
            simple_es["values"]["value2"].cat.categories,
        )
    assert to_pandas(df, index="id", sort_index=True)["value = value2"].to_list() == [
        True,
        False,
        False,
        True,
    ]
def test_equal_different_dtypes(simple_es):
    """Equal between mismatched dtypes is always False, regardless of order."""
    f1 = Feature(
        [
            IdentityFeature(simple_es["values"].ww["object"]),
            IdentityFeature(simple_es["values"].ww["datetime"]),
        ],
        primitive=Equal,
    )
    f2 = Feature(
        [
            IdentityFeature(simple_es["values"].ww["datetime"]),
            IdentityFeature(simple_es["values"].ww["object"]),
        ],
        primitive=Equal,
    )
    # verify that equals works for different dtypes regardless of order
    df = calculate_feature_matrix(entityset=simple_es, features=[f1, f2])
    assert to_pandas(df, index="id", sort_index=True)[
        "object = datetime"
    ].to_list() == [False, False, False, False]
    assert to_pandas(df, index="id", sort_index=True)[
        "datetime = object"
    ].to_list() == [False, False, False, False]
def test_not_equal_categorical(simple_es):
    """NotEqual works between categorical columns with different category sets."""
    f1 = Feature(
        [
            IdentityFeature(simple_es["values"].ww["value"]),
            IdentityFeature(simple_es["values"].ww["value2"]),
        ],
        primitive=NotEqual,
    )
    df = calculate_feature_matrix(entityset=simple_es, features=[f1])
    if simple_es.dataframe_type != Library.SPARK:
        # Spark does not support categorical dtype
        assert set(simple_es["values"]["value"].cat.categories) != set(
            simple_es["values"]["value2"].cat.categories,
        )
    assert to_pandas(df, index="id", sort_index=True)["value != value2"].to_list() == [
        False,
        True,
        True,
        False,
    ]
def test_not_equal_different_dtypes(simple_es):
    """NotEqual between mismatched dtypes is always True, regardless of order."""
    f1 = Feature(
        [
            IdentityFeature(simple_es["values"].ww["object"]),
            IdentityFeature(simple_es["values"].ww["datetime"]),
        ],
        primitive=NotEqual,
    )
    f2 = Feature(
        [
            IdentityFeature(simple_es["values"].ww["datetime"]),
            IdentityFeature(simple_es["values"].ww["object"]),
        ],
        primitive=NotEqual,
    )
    # verify that equals works for different dtypes regardless of order
    df = calculate_feature_matrix(entityset=simple_es, features=[f1, f2])
    assert to_pandas(df, index="id", sort_index=True)[
        "object != datetime"
    ].to_list() == [True, True, True, True]
    assert to_pandas(df, index="id", sort_index=True)[
        "datetime != object"
    ].to_list() == [True, True, True, True]
def test_diff(pd_es):
    """Diff grouped by session vs. by customer restarts at each group boundary
    (first row of every group is NaN)."""
    value = Feature(pd_es["log"].ww["value"])
    customer_id_feat = Feature(pd_es["sessions"].ww["customer_id"], "log")
    diff1 = Feature(
        value,
        groupby=Feature(pd_es["log"].ww["session_id"]),
        primitive=Diff,
    )
    diff2 = Feature(value, groupby=customer_id_feat, primitive=Diff)
    feature_set = FeatureSet([diff1, diff2])
    calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
    df = calculator.run(np.array(range(15)))
    val1 = df[diff1.get_name()].tolist()
    val2 = df[diff2.get_name()].tolist()
    # Grouping by session: NaN wherever a new session starts.
    correct_vals1 = [
        np.nan,
        5,
        5,
        5,
        5,
        np.nan,
        1,
        1,
        1,
        np.nan,
        np.nan,
        5,
        np.nan,
        7,
        7,
    ]
    # Grouping by customer: fewer, larger groups, so fewer NaN restarts.
    correct_vals2 = [np.nan, 5, 5, 5, 5, -20, 1, 1, 1, -3, np.nan, 5, -5, 7, 7]
    np.testing.assert_equal(val1, correct_vals1)
    np.testing.assert_equal(val2, correct_vals2)
def test_diff_shift(pd_es):
    """Diff(periods=1) shifts the grouped differences by one extra row."""
    value = Feature(pd_es["log"].ww["value"])
    customer_id_feat = Feature(pd_es["sessions"].ww["customer_id"], "log")
    diff_periods = Feature(value, groupby=customer_id_feat, primitive=Diff(periods=1))
    feature_set = FeatureSet([diff_periods])
    calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
    df = calculator.run(np.array(range(15)))
    val3 = df[diff_periods.get_name()].tolist()
    correct_vals3 = [np.nan, np.nan, 5, 5, 5, 5, -20, 1, 1, 1, np.nan, np.nan, 5, -5, 7]
    np.testing.assert_equal(val3, correct_vals3)
def test_diff_single_value(pd_es):
    """Diff grouped by region yields the expected delta for instance 4."""
    diff_feat = Feature(
        pd_es["stores"].ww["num_square_feet"],
        groupby=Feature(pd_es["stores"].ww["région_id"]),
        primitive=Diff,
    )
    calculator = FeatureSetCalculator(pd_es, feature_set=FeatureSet([diff_feat]))
    result = calculator.run(np.array([4]))
    assert result[diff_feat.get_name()][4] == 6000.0
def test_diff_reordered(pd_es):
    """Diff results are correct even when instances are requested out of order."""
    sum_feat = Feature(
        pd_es["log"].ww["value"],
        parent_dataframe_name="sessions",
        primitive=Sum,
    )
    diff = Feature(sum_feat, primitive=Diff)
    feature_set = FeatureSet([diff])
    calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
    df = calculator.run(np.array([4, 2]))
    assert df[diff.get_name()][4] == 16
    assert df[diff.get_name()][2] == -6
def test_diff_single_value_is_nan(pd_es):
    """Diff of the only row in its group has no defined value (NaN)."""
    diff_feat = Feature(
        pd_es["stores"].ww["num_square_feet"],
        groupby=Feature(pd_es["stores"].ww["région_id"]),
        primitive=Diff,
    )
    calculator = FeatureSetCalculator(pd_es, feature_set=FeatureSet([diff_feat]))
    result = calculator.run(np.array([5]))
    assert len(result) == 1
    assert result[diff_feat.get_name()].dropna().empty
def test_diff_datetime(pd_es):
    """DiffDatetime produces Timedelta gaps between consecutive log datetimes."""
    diff = Feature(
        pd_es["log"].ww["datetime"],
        primitive=DiffDatetime,
    )
    feature_set = FeatureSet([diff])
    calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
    df = calculator.run(np.array(range(15)))
    vals = pd.Series(df[diff.get_name()].tolist())
    # First row has no predecessor, hence NaT.
    expected_vals = pd.Series(
        [
            pd.NaT,
            pd.Timedelta(seconds=6),
            pd.Timedelta(seconds=6),
            pd.Timedelta(seconds=6),
            pd.Timedelta(seconds=6),
            pd.Timedelta(seconds=36),
            pd.Timedelta(seconds=9),
            pd.Timedelta(seconds=9),
            pd.Timedelta(seconds=9),
            pd.Timedelta(minutes=8, seconds=33),
            pd.Timedelta(days=1),
            pd.Timedelta(seconds=1),
            pd.Timedelta(seconds=59),
            pd.Timedelta(seconds=3),
            pd.Timedelta(seconds=3),
        ],
    )
    pd.testing.assert_series_equal(vals, expected_vals)
def test_diff_datetime_shift(pd_es):
    """DiffDatetime(periods=1) shifts the Timedelta gaps by one extra row."""
    diff = Feature(
        pd_es["log"].ww["datetime"],
        primitive=DiffDatetime(periods=1),
    )
    feature_set = FeatureSet([diff])
    calculator = FeatureSetCalculator(pd_es, feature_set=feature_set)
    df = calculator.run(np.array(range(6)))
    vals = pd.Series(df[diff.get_name()].tolist())
    expected_vals = pd.Series(
        [
            pd.NaT,
            pd.NaT,
            pd.Timedelta(seconds=6),
            pd.Timedelta(seconds=6),
            pd.Timedelta(seconds=6),
            pd.Timedelta(seconds=6),
        ],
    )
    pd.testing.assert_series_equal(vals, expected_vals)
def test_compare_of_identity(es):
    """Scalar comparison primitives applied to an identity feature."""
    # (primitive, expected values for instances 0-3 with threshold 10)
    to_test = [
        (EqualScalar, [False, False, True, False]),
        (NotEqualScalar, [True, True, False, True]),
        (LessThanScalar, [True, True, False, False]),
        (LessThanEqualToScalar, [True, True, True, False]),
        (GreaterThanScalar, [False, False, False, True]),
        (GreaterThanEqualToScalar, [False, False, True, True]),
    ]
    features = []
    for test in to_test:
        features.append(Feature(es["log"].ww["value"], primitive=test[0](10)))
    df = to_pandas(
        calculate_feature_matrix(
            entityset=es,
            features=features,
            instance_ids=[0, 1, 2, 3],
        ),
        index="id",
        sort_index=True,
    )
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        assert v == test[1]
def test_compare_of_direct(es):
    """Scalar comparison primitives applied to a direct feature."""
    log_rating = Feature(es["products"].ww["rating"], "log")
    # (primitive, expected values for instances 0-3 with threshold 4.5)
    to_test = [
        (EqualScalar, [False, False, False, False]),
        (NotEqualScalar, [True, True, True, True]),
        (LessThanScalar, [False, False, False, True]),
        (LessThanEqualToScalar, [False, False, False, True]),
        (GreaterThanScalar, [True, True, True, False]),
        (GreaterThanEqualToScalar, [True, True, True, False]),
    ]
    features = []
    for test in to_test:
        features.append(Feature(log_rating, primitive=test[0](4.5)))
    df = calculate_feature_matrix(
        entityset=es,
        features=features,
        instance_ids=[0, 1, 2, 3],
    )
    df = to_pandas(df, index="id", sort_index=True)
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        assert v == test[1]
def test_compare_of_transform(es):
    """Scalar equality primitives applied to a transform (Day) feature."""
    day = Feature(es["log"].ww["datetime"], primitive=Day)
    to_test = [
        (EqualScalar, [False, True]),
        (NotEqualScalar, [True, False]),
    ]
    features = []
    for test in to_test:
        features.append(Feature(day, primitive=test[0](10)))
    df = calculate_feature_matrix(entityset=es, features=features, instance_ids=[0, 14])
    df = to_pandas(df, index="id", sort_index=True)
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        assert v == test[1]
def test_compare_of_agg(es):
    """Scalar comparison primitives applied to an aggregation (Count) feature."""
    count_logs = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    # (primitive, expected values for sessions 0-3 with threshold 2)
    to_test = [
        (EqualScalar, [False, False, False, True]),
        (NotEqualScalar, [True, True, True, False]),
        (LessThanScalar, [False, False, True, False]),
        (LessThanEqualToScalar, [False, False, True, True]),
        (GreaterThanScalar, [True, True, False, False]),
        (GreaterThanEqualToScalar, [True, True, False, True]),
    ]
    features = []
    for test in to_test:
        features.append(Feature(count_logs, primitive=test[0](2)))
    df = calculate_feature_matrix(
        entityset=es,
        features=features,
        instance_ids=[0, 1, 2, 3],
    )
    df = to_pandas(df, index="id", sort_index=True)
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        assert v == test[1]
def test_compare_all_nans(es):
    """Comparing an all-NaN aggregation (cutoff before any data) yields all False."""
    if es.dataframe_type != Library.PANDAS:
        # Mode is not supported on Dask/Spark, so use Min there instead.
        nan_feat = Feature(
            es["log"].ww["value"],
            parent_dataframe_name="sessions",
            primitive=Min,
        )
        compare = nan_feat == 0.0
    else:
        nan_feat = Feature(
            es["log"].ww["product_id"],
            parent_dataframe_name="sessions",
            primitive=Mode,
        )
        compare = nan_feat == "brown bag"
    # before all data
    time_last = pd.Timestamp("1/1/1993")
    df = calculate_feature_matrix(
        entityset=es,
        features=[nan_feat, compare],
        instance_ids=[0, 1, 2],
        cutoff_time=time_last,
    )
    df = to_pandas(df, index="id", sort_index=True)
    assert df[nan_feat.get_name()].dropna().shape[0] == 0
    assert not df[compare.get_name()].any()
def test_arithmetic_of_val(es):
    """Scalar arithmetic primitives on an identity feature, including
    division by zero producing inf/NaN."""
    # (primitive, expected values for instances 0-3 with scalar operand 2)
    to_test = [
        (AddNumericScalar, [2.0, 7.0, 12.0, 17.0]),
        (SubtractNumericScalar, [-2.0, 3.0, 8.0, 13.0]),
        (ScalarSubtractNumericFeature, [2.0, -3.0, -8.0, -13.0]),
        (MultiplyNumericScalar, [0, 10, 20, 30]),
        (DivideNumericScalar, [0, 2.5, 5, 7.5]),
        (DivideByFeature, [np.inf, 0.4, 0.2, 2 / 15.0]),
    ]
    features = []
    for test in to_test:
        features.append(Feature(es["log"].ww["value"], primitive=test[0](2)))
    # 0/0 is NaN; nonzero/0 is inf — checked separately below.
    features.append(Feature(es["log"].ww["value"]) / 0)
    df = calculate_feature_matrix(
        entityset=es,
        features=features,
        instance_ids=[0, 1, 2, 3],
    )
    df = to_pandas(df, index="id", sort_index=True)
    for f, test in zip(features, to_test):
        v = df[f.get_name()].tolist()
        assert v == test[1]
    test = [np.nan, np.inf, np.inf, np.inf]
    v = df[features[-1].get_name()].tolist()
    assert np.isnan(v[0])
    assert v[1:] == test[1:]
def test_arithmetic_two_vals_fails(es):
    """Building an AddNumeric feature from raw scalars raises an exception."""
    with pytest.raises(Exception, match="Not a feature"):
        Feature([2, 2], primitive=AddNumeric)
def test_arithmetic_of_identity(es):
    """Binary arithmetic primitives between two identity features; the
    DivideNumeric case is checked last because its first value is NaN (0/0)."""
    to_test = [
        (AddNumeric, [0.0, 7.0, 14.0, 21.0]),
        (SubtractNumeric, [0, 3, 6, 9]),
        (MultiplyNumeric, [0, 10, 40, 90]),
        (DivideNumeric, [np.nan, 2.5, 2.5, 2.5]),
    ]
    # SubtractNumeric not supported for Spark EntitySets
    if es.dataframe_type == Library.SPARK:
        to_test = to_test[:1] + to_test[2:]
    features = []
    for test in to_test:
        features.append(
            Feature(
                [
                    Feature(es["log"].ww["value"]),
                    Feature(es["log"].ww["value_2"]),
                ],
                primitive=test[0],
            ),
        )
    df = calculate_feature_matrix(
        entityset=es,
        features=features,
        instance_ids=[0, 1, 2, 3],
    )
    df = to_pandas(df, index="id", sort_index=True)
    for i, test in enumerate(to_test[:-1]):
        v = df[features[i].get_name()].tolist()
        assert v == test[1]
    # DivideNumeric: compare NaN first element explicitly, then the rest.
    i, test = -1, to_test[-1]
    v = df[features[i].get_name()].tolist()
    assert np.isnan(v[0])
    assert v[1:] == test[1][1:]
def test_arithmetic_of_direct(es):
    """Binary arithmetic primitives between two stacked direct features."""
    rating = Feature(es["products"].ww["rating"])
    log_rating = Feature(rating, "log")
    customer_age = Feature(es["customers"].ww["age"])
    session_age = Feature(customer_age, "sessions")
    log_age = Feature(session_age, "log")
    to_test = [
        (AddNumeric, [38, 37, 37.5, 37.5]),
        (SubtractNumeric, [28, 29, 28.5, 28.5]),
        (MultiplyNumeric, [165, 132, 148.5, 148.5]),
        (DivideNumeric, [6.6, 8.25, 22.0 / 3, 22.0 / 3]),
    ]
    # SubtractNumeric not supported for Spark EntitySets
    if es.dataframe_type == Library.SPARK:
        to_test = to_test[:1] + to_test[2:]
    features = []
    for test in to_test:
        features.append(Feature([log_age, log_rating], primitive=test[0]))
    df = calculate_feature_matrix(
        entityset=es,
        features=features,
        instance_ids=[0, 3, 5, 7],
    )
    df = to_pandas(df, index="id", sort_index=True)
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        assert v == test[1]
# Spark EntitySets do not support boolean multiplication
@pytest.fixture(params=["pd_boolean_mult_es", "dask_boolean_mult_es"])
def boolean_mult_es(request):
    """Parametrized fixture for boolean-multiplication tests (pandas and Dask only)."""
    return request.getfixturevalue(request.param)
@pytest.fixture
def pd_boolean_mult_es():
    """Pandas EntitySet with one boolean and one numeric (NaN-bearing) column."""
    es = EntitySet()
    df = pd.DataFrame(
        {
            "index": [0, 1, 2],
            "bool": pd.Series([True, False, True]),
            "numeric": [2, 3, np.nan],
        },
    )
    es.add_dataframe(
        dataframe_name="test",
        dataframe=df,
        index="index",
        logical_types={"numeric": Double},
    )
    return es
@pytest.fixture
def dask_boolean_mult_es(pd_boolean_mult_es):
    """Dask copy of pd_boolean_mult_es; skipped when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    dataframes = {}
    for df in pd_boolean_mult_es.dataframes:
        dataframes[df.ww.name] = (
            dd.from_pandas(df, npartitions=2),
            df.ww.index,
            None,
            df.ww.logical_types,
        )
    return EntitySet(id=pd_boolean_mult_es.id, dataframes=dataframes)
def test_boolean_multiply(boolean_mult_es):
    """Multiplying features: bool*bool acts as logical AND, otherwise
    it matches plain column multiplication."""
    es = boolean_mult_es
    to_test = [
        ("numeric", "numeric"),
        ("numeric", "bool"),
        ("bool", "numeric"),
        ("bool", "bool"),
    ]
    features = []
    for row in to_test:
        features.append(Feature(es["test"].ww[row[0]]) * Feature(es["test"].ww[row[1]]))
    fm = to_pandas(calculate_feature_matrix(entityset=es, features=features))
    df = to_pandas(es["test"])
    for row in to_test:
        col_name = "{} * {}".format(row[0], row[1])
        if row[0] == "bool" and row[1] == "bool":
            assert fm[col_name].equals((df[row[0]] & df[row[1]]).astype("boolean"))
        else:
            assert fm[col_name].equals(df[row[0]] * df[row[1]])
# TODO: rework test to be Dask and Spark compatible
def test_arithmetic_of_transform(es):
    """Binary arithmetic primitives between two Diff transform features."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Test uses Diff which is not supported in Dask or Spark")
    diff1 = Feature([Feature(es["log"].ww["value"])], primitive=Diff)
    diff2 = Feature([Feature(es["log"].ww["value_2"])], primitive=Diff)
    # First expected value is always NaN because Diff has no predecessor row.
    to_test = [
        (AddNumeric, [np.nan, 7.0, -7.0, 10.0]),
        (SubtractNumeric, [np.nan, 3.0, -3.0, 4.0]),
        (MultiplyNumeric, [np.nan, 10.0, 10.0, 21.0]),
        (DivideNumeric, [np.nan, 2.5, 2.5, 2.3333333333333335]),
    ]
    features = []
    for test in to_test:
        features.append(Feature([diff1, diff2], primitive=test[0]()))
    feature_set = FeatureSet(features)
    calculator = FeatureSetCalculator(es, feature_set=feature_set)
    df = calculator.run(np.array([0, 2, 12, 13]))
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        # Pop the NaN heads and compare them separately (NaN != NaN).
        assert np.isnan(v.pop(0))
        assert np.isnan(test[1].pop(0))
        assert v == test[1]
def test_not_feature(es):
    """The Not primitive inverts a boolean column."""
    negated = Feature(es["customers"].ww["loves_ice_cream"], primitive=Not)
    fm = to_pandas(
        calculate_feature_matrix(entityset=es, features=[negated], instance_ids=[0, 1]),
    )
    values = fm[negated.get_name()].values
    assert not values[0]
    assert values[1]
def test_arithmetic_of_agg(es):
    """Binary arithmetic primitives between two Count aggregation features."""
    customer_id_feat = Feature(es["customers"].ww["id"])
    store_id_feat = Feature(es["stores"].ww["id"])
    count_customer = Feature(
        customer_id_feat,
        parent_dataframe_name="régions",
        primitive=Count,
    )
    count_stores = Feature(
        store_id_feat,
        parent_dataframe_name="régions",
        primitive=Count,
    )
    # (primitive, expected values for regions ["United States", "Mexico"])
    to_test = [
        (AddNumeric, [6, 2]),
        (SubtractNumeric, [0, -2]),
        (MultiplyNumeric, [9, 0]),
        (DivideNumeric, [1, 0]),
    ]
    # Skip SubtractNumeric for Spark as it's unsupported
    if es.dataframe_type == Library.SPARK:
        to_test = to_test[:1] + to_test[2:]
    features = []
    for test in to_test:
        features.append(Feature([count_customer, count_stores], primitive=test[0]()))
    ids = ["United States", "Mexico"]
    df = calculate_feature_matrix(entityset=es, features=features, instance_ids=ids)
    df = to_pandas(df, index="id", sort_index=True)
    df = df.loc[ids]
    for i, test in enumerate(to_test):
        v = df[features[i].get_name()].tolist()
        assert v == test[1]
def test_latlong(pd_es):
    """Latitude/Longitude primitives extract the tuple components of a LatLong."""
    log_latlong_feat = Feature(pd_es["log"].ww["latlong"])
    latitude = Feature(log_latlong_feat, primitive=Latitude)
    longitude = Feature(log_latlong_feat, primitive=Longitude)
    features = [latitude, longitude]
    df = calculate_feature_matrix(
        entityset=pd_es,
        features=features,
        instance_ids=range(15),
    )
    latvalues = df[latitude.get_name()].values
    lonvalues = df[longitude.get_name()].values
    assert len(latvalues) == 15
    assert len(lonvalues) == 15
    real_lats = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
    real_lons = [0, 2, 4, 6, 8, 0, 1, 2, 3, 0, 0, 2, 0, 3, 6]
    for (
        i,
        v,
    ) in enumerate(real_lats):
        assert v == latvalues[i]
    for (
        i,
        v,
    ) in enumerate(real_lons):
        assert v == lonvalues[i]
def test_latlong_with_nan(pd_es):
    """Latitude/Longitude propagate NaN for missing or partial LatLong tuples."""
    df = pd_es["log"]
    # Use label-based .at assignment instead of chained indexing
    # (df["latlong"][0] = ...): chained assignment writes to a temporary
    # under pandas copy-on-write semantics and raises SettingWithCopyWarning.
    df.at[0, "latlong"] = np.nan
    df.at[1, "latlong"] = (10, np.nan)
    df.at[2, "latlong"] = (np.nan, 4)
    df.at[3, "latlong"] = (np.nan, np.nan)
    pd_es.replace_dataframe(dataframe_name="log", df=df)
    log_latlong_feat = Feature(pd_es["log"].ww["latlong"])
    latitude = Feature(log_latlong_feat, primitive=Latitude)
    longitude = Feature(log_latlong_feat, primitive=Longitude)
    features = [latitude, longitude]
    fm = calculate_feature_matrix(entityset=pd_es, features=features)
    latvalues = fm[latitude.get_name()].values
    lonvalues = fm[longitude.get_name()].values
    assert len(latvalues) == 17
    assert len(lonvalues) == 17
    real_lats = [
        np.nan,
        10,
        np.nan,
        np.nan,
        20,
        0,
        1,
        2,
        3,
        0,
        0,
        5,
        0,
        7,
        14,
        np.nan,
        np.nan,
    ]
    real_lons = [
        np.nan,
        np.nan,
        4,
        np.nan,
        8,
        0,
        1,
        2,
        3,
        0,
        0,
        2,
        0,
        3,
        6,
        np.nan,
        np.nan,
    ]
    assert np.allclose(latvalues, real_lats, atol=0.0001, equal_nan=True)
    assert np.allclose(lonvalues, real_lons, atol=0.0001, equal_nan=True)
def test_haversine(pd_es):
    """Haversine distance between two LatLong features, in miles (default)
    and kilometers; an unknown unit raises ValueError."""
    log_latlong_feat = Feature(pd_es["log"].ww["latlong"])
    log_latlong_feat2 = Feature(pd_es["log"].ww["latlong2"])
    haversine = Feature([log_latlong_feat, log_latlong_feat2], primitive=Haversine)
    features = [haversine]
    df = calculate_feature_matrix(
        entityset=pd_es,
        features=features,
        instance_ids=range(15),
    )
    values = df[haversine.get_name()].values
    # Expected distances in miles (the default unit).
    real = [
        0,
        525.318462,
        1045.32190304,
        1554.56176802,
        2047.3294327,
        0,
        138.16578931,
        276.20524822,
        413.99185444,
        0,
        0,
        525.318462,
        0,
        741.57941183,
        1467.52760175,
    ]
    assert len(values) == 15
    assert np.allclose(values, real, atol=0.0001)
    haversine = Feature(
        [log_latlong_feat, log_latlong_feat2],
        primitive=Haversine(unit="kilometers"),
    )
    features = [haversine]
    df = calculate_feature_matrix(
        entityset=pd_es,
        features=features,
        instance_ids=range(15),
    )
    values = df[haversine.get_name()].values
    real_km = [
        0,
        845.41812212,
        1682.2825471,
        2501.82467535,
        3294.85736668,
        0,
        222.35628593,
        444.50926278,
        666.25531268,
        0,
        0,
        845.41812212,
        0,
        1193.45638714,
        2361.75676089,
    ]
    assert len(values) == 15
    assert np.allclose(values, real_km, atol=0.0001)
    error_text = "Invalid unit inches provided. Must be one of"
    with pytest.raises(ValueError, match=error_text):
        Haversine(unit="inches")
def test_haversine_with_nan(pd_es):
    """Haversine yields NaN for rows whose LatLong contains missing values."""
    # Check some `nan` values
    df = pd_es["log"]
    # Use label-based .at assignment instead of chained indexing
    # (df["latlong"][0] = ...): chained assignment writes to a temporary
    # under pandas copy-on-write semantics and raises SettingWithCopyWarning.
    df.at[0, "latlong"] = np.nan
    df.at[1, "latlong"] = (10, np.nan)
    pd_es.replace_dataframe(dataframe_name="log", df=df)
    log_latlong_feat = Feature(pd_es["log"].ww["latlong"])
    log_latlong_feat2 = Feature(pd_es["log"].ww["latlong2"])
    haversine = Feature([log_latlong_feat, log_latlong_feat2], primitive=Haversine)
    features = [haversine]
    df = calculate_feature_matrix(entityset=pd_es, features=features)
    values = df[haversine.get_name()].values
    real = [
        np.nan,
        np.nan,
        1045.32190304,
        1554.56176802,
        2047.3294327,
        0,
        138.16578931,
        276.20524822,
        413.99185444,
        0,
        0,
        525.318462,
        0,
        741.57941183,
        1467.52760175,
        np.nan,
        np.nan,
    ]
    assert np.allclose(values, real, atol=0.0001, equal_nan=True)
    # Check all `nan` values
    df = pd_es["log"]
    df["latlong2"] = np.nan
    pd_es.replace_dataframe(dataframe_name="log", df=df)
    log_latlong_feat = Feature(pd_es["log"].ww["latlong"])
    log_latlong_feat2 = Feature(pd_es["log"].ww["latlong2"])
    haversine = Feature([log_latlong_feat, log_latlong_feat2], primitive=Haversine)
    features = [haversine]
    df = calculate_feature_matrix(entityset=pd_es, features=features)
    values = df[haversine.get_name()].values
    real = [np.nan] * pd_es["log"].shape[0]
    assert np.allclose(values, real, atol=0.0001, equal_nan=True)
def test_text_primitives(es):
    """NumWords and NumCharacters count the comments column correctly."""
    words = Feature(es["log"].ww["comments"], primitive=NumWords)
    chars = Feature(es["log"].ww["comments"], primitive=NumCharacters)
    features = [words, chars]
    df = to_pandas(
        calculate_feature_matrix(
            entityset=es,
            features=features,
            instance_ids=range(15),
        ),
        index="id",
        sort_index=True,
    )
    word_counts = [532, 3, 3, 653, 1306, 1305, 174, 173, 79, 246, 1253, 3, 3, 3, 3]
    char_counts = [
        3392,
        10,
        10,
        4116,
        7961,
        7580,
        992,
        957,
        437,
        1325,
        6322,
        10,
        10,
        10,
        10,
    ]
    word_values = df[words.get_name()].values
    char_values = df[chars.get_name()].values
    assert len(word_values) == 15
    for i, v in enumerate(word_values):
        assert v == word_counts[i]
    for i, v in enumerate(char_values):
        assert v == char_counts[i]
def test_isin_feat(es):
    """IsIn primitive flags rows whose product_id is in the given list."""
    isin = Feature(
        es["log"].ww["product_id"],
        primitive=IsIn(list_of_outputs=["toothpaste", "coke zero"]),
    )
    features = [isin]
    df = to_pandas(
        calculate_feature_matrix(
            entityset=es,
            features=features,
            instance_ids=range(8),
        ),
        index="id",
        sort_index=True,
    )
    true = [True, True, True, False, False, True, True, True]
    v = df[isin.get_name()].tolist()
    assert true == v
def test_isin_feat_other_syntax(es):
    """Feature.isin builds the same IsIn feature as the primitive form."""
    isin_feat = Feature(es["log"].ww["product_id"]).isin(["toothpaste", "coke zero"])
    fm = to_pandas(
        calculate_feature_matrix(
            entityset=es,
            features=[isin_feat],
            instance_ids=range(8),
        ),
        index="id",
        sort_index=True,
    )
    expected = [True, True, True, False, False, True, True, True]
    assert fm[isin_feat.get_name()].tolist() == expected
def test_isin_feat_other_syntax_int(es):
    """Feature.isin also works with a list of integer values."""
    isin_feat = Feature(es["log"].ww["value"]).isin([5, 10])
    fm = to_pandas(
        calculate_feature_matrix(
            entityset=es,
            features=[isin_feat],
            instance_ids=range(8),
        ),
        index="id",
        sort_index=True,
    )
    expected = [False, True, True, False, False, False, False, False]
    assert fm[isin_feat.get_name()].tolist() == expected
def _isin_feature_values(es, isin_feature):
    """Calculate the given IsIn feature over the first 8 log rows and
    return its values as a list (helper for test_isin_feat_custom)."""
    df = to_pandas(
        calculate_feature_matrix(
            entityset=es,
            features=[isin_feature],
            instance_ids=range(8),
        ),
        index="id",
        sort_index=True,
    )
    return df[isin_feature.get_name()].tolist()


def test_isin_feat_custom(es):
    """A user-defined IsIn transform primitive behaves like the built-in one,
    and both call syntaxes produce the same results."""

    class CustomIsIn(TransformPrimitive):
        name = "is_in"
        input_types = [ColumnSchema()]
        return_type = ColumnSchema(logical_type=Boolean)

        def __init__(self, list_of_outputs=None):
            self.list_of_outputs = list_of_outputs

        def get_function(self):
            def pd_is_in(array):
                return array.isin(self.list_of_outputs)

            return pd_is_in

    # Custom primitive form.
    isin = Feature(
        es["log"].ww["product_id"],
        primitive=CustomIsIn(list_of_outputs=["toothpaste", "coke zero"]),
    )
    expected_products = [True, True, True, False, False, True, True, True]
    assert _isin_feature_values(es, isin) == expected_products

    # Built-in .isin() syntax on the same column gives the same answer.
    isin = Feature(es["log"].ww["product_id"]).isin(["toothpaste", "coke zero"])
    assert _isin_feature_values(es, isin) == expected_products

    # .isin() with integer values.
    isin = Feature(es["log"].ww["value"]).isin([5, 10])
    expected_values = [False, True, True, False, False, False, False, False]
    assert _isin_feature_values(es, isin) == expected_values
def test_isnull_feat(pd_es):
    """IsNull on a grouped Diff flags the NaN row at the start of each group."""
    value = Feature(pd_es["log"].ww["value"])
    diff = Feature(
        value,
        groupby=Feature(pd_es["log"].ww["session_id"]),
        primitive=Diff,
    )
    isnull = Feature(diff, primitive=IsNull)
    features = [isnull]
    df = calculate_feature_matrix(
        entityset=pd_es,
        features=features,
        instance_ids=range(15),
    )
    # True wherever Diff restarted (first row of each session group).
    correct_vals = [
        True,
        False,
        False,
        False,
        False,
        True,
        False,
        False,
        False,
        True,
        True,
        False,
        True,
        False,
        False,
    ]
    values = df[isnull.get_name()].tolist()
    assert correct_vals == values
def test_percentile(pd_es):
    """Percentile matches pandas rank(pct=True) over the value column."""
    v = Feature(pd_es["log"].ww["value"])
    p = Feature(v, primitive=Percentile)
    feature_set = FeatureSet([p])
    calculator = FeatureSetCalculator(pd_es, feature_set)
    df = calculator.run(np.array(range(10, 17)))
    true = pd_es["log"][v.get_name()].rank(pct=True)
    true = true.loc[range(10, 17)]
    # Element-wise compare; NaN == NaN must also count as equal.
    for t, a in zip(true.values, df[p.get_name()].values):
        assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_dependent_percentile(pd_es):
    """Stacking Percentile on top of a Percentile-derived feature calculates."""
    v = Feature(pd_es["log"].ww["value"])
    p = Feature(v, primitive=Percentile)
    p2 = Feature(p - 1, primitive=Percentile)
    feature_set = FeatureSet([p, p2])
    calculator = FeatureSetCalculator(pd_es, feature_set)
    df = calculator.run(np.array(range(10, 17)))
    true = pd_es["log"][v.get_name()].rank(pct=True)
    true = true.loc[range(10, 17)]
    # NOTE(review): only p is asserted; p2 is calculated but its values are
    # never checked — consider asserting them too.
    for t, a in zip(true.values, df[p.get_name()].values):
        assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_agg_percentile(pd_es):
    """Sum of per-row percentiles aggregated to the session level."""
    v = Feature(pd_es["log"].ww["value"])
    p = Feature(v, primitive=Percentile)
    agg = Feature(p, parent_dataframe_name="sessions", primitive=Sum)
    feature_set = FeatureSet([agg])
    calculator = FeatureSetCalculator(pd_es, feature_set)
    df = calculator.run(np.array([0, 1]))
    log_vals = pd_es["log"][[v.get_name(), "session_id"]]
    log_vals["percentile"] = log_vals[v.get_name()].rank(pct=True)
    true_p = log_vals.groupby("session_id")["percentile"].sum()[[0, 1]]
    for t, a in zip(true_p.values, df[agg.get_name()].values):
        assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg_percentile(pd_es):
    """Percentile of a session-level Sum of per-row percentiles."""
    v = Feature(pd_es["log"].ww["value"])
    p = Feature(v, primitive=Percentile)
    agg = Feature(p, parent_dataframe_name="sessions", primitive=Sum)
    pagg = Feature(agg, primitive=Percentile)
    feature_set = FeatureSet([pagg])
    calculator = FeatureSetCalculator(pd_es, feature_set)
    df = calculator.run(np.array([0, 1]))
    log_vals = pd_es["log"][[v.get_name(), "session_id"]]
    log_vals["percentile"] = log_vals[v.get_name()].rank(pct=True)
    true_p = log_vals.groupby("session_id")["percentile"].sum().fillna(0)
    true_p = true_p.rank(pct=True)[[0, 1]]
    for t, a in zip(true_p.values, df[pagg.get_name()].values):
        assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_percentile_agg(pd_es):
    """Percentile applied on top of a session-level Sum aggregation."""
    v = Feature(pd_es["log"].ww["value"])
    agg = Feature(v, parent_dataframe_name="sessions", primitive=Sum)
    pagg = Feature(agg, primitive=Percentile)
    feature_set = FeatureSet([pagg])
    calculator = FeatureSetCalculator(pd_es, feature_set)
    df = calculator.run(np.array([0, 1]))
    log_vals = pd_es["log"][[v.get_name(), "session_id"]]
    true_p = log_vals.groupby("session_id")[v.get_name()].sum().fillna(0)
    true_p = true_p.rank(pct=True)[[0, 1]]
    for t, a in zip(true_p.values, df[pagg.get_name()].values):
        assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_direct_percentile(pd_es):
v = Feature(pd_es["customers"].ww["age"])
p = Feature(v, primitive=Percentile)
d = Feature(p, "sessions")
feature_set = FeatureSet([d])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
cust_vals = pd_es["customers"][[v.get_name()]]
cust_vals["percentile"] = cust_vals[v.get_name()].rank(pct=True)
true_p = cust_vals["percentile"].loc[[0, 0]]
for t, a in zip(true_p.values, df[d.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or t == a
def test_direct_agg_percentile(pd_es):
v = Feature(pd_es["log"].ww["value"])
p = Feature(v, primitive=Percentile)
agg = Feature(p, parent_dataframe_name="customers", primitive=Sum)
d = Feature(agg, "sessions")
feature_set = FeatureSet([d])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
log_vals = pd_es["log"][[v.get_name(), "session_id"]]
log_vals["percentile"] = log_vals[v.get_name()].rank(pct=True)
log_vals["customer_id"] = [0] * 10 + [1] * 5 + [2] * 2
true_p = log_vals.groupby("customer_id")["percentile"].sum().fillna(0)
true_p = true_p[[0, 0]]
for t, a in zip(true_p.values, df[d.get_name()].values):
assert (pd.isnull(t) and pd.isnull(a)) or round(t, 3) == round(a, 3)
def test_percentile_with_cutoff(pd_es):
v = Feature(pd_es["log"].ww["value"])
p = Feature(v, primitive=Percentile)
feature_set = FeatureSet([p])
calculator = FeatureSetCalculator(
pd_es,
feature_set,
pd.Timestamp("2011/04/09 10:30:13"),
)
df = calculator.run(np.array([2]))
assert df[p.get_name()].tolist()[0] == 1.0
def test_two_kinds_of_dependents(pd_es):
v = Feature(pd_es["log"].ww["value"])
product = Feature(pd_es["log"].ww["product_id"])
agg = Feature(
v,
parent_dataframe_name="customers",
where=product == "coke zero",
primitive=Sum,
)
p = Feature(agg, primitive=Percentile)
g = Feature(agg, primitive=Absolute)
agg2 = Feature(
v,
parent_dataframe_name="sessions",
where=product == "coke zero",
primitive=Sum,
)
agg3 = Feature(agg2, parent_dataframe_name="customers", primitive=Sum)
feature_set = FeatureSet([p, g, agg3])
calculator = FeatureSetCalculator(pd_es, feature_set)
df = calculator.run(np.array([0, 1]))
assert df[p.get_name()].tolist() == [2.0 / 3, 1.0]
assert df[g.get_name()].tolist() == [15, 26]
def test_get_filepath(es):
    # Verifies that a primitive can load a bundled data file via get_filepath
    # and that it works both through calculate_feature_matrix and dfs.
    class Mod4(TransformPrimitive):
        """Return base feature modulo 4"""

        name = "mod4"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

        def get_function(self):
            # Resolve the lookup table shipped alongside the primitive.
            filepath = self.get_filepath("featuretools_unit_test_example.csv")
            reference = pd.read_csv(filepath, header=None).squeeze("columns")

            def map_to_word(x):
                def _map(x):
                    if pd.isnull(x):
                        return x
                    return reference[int(x) % 4]

                return x.apply(_map)

            return map_to_word

    feat = Feature(es["log"].ww["value"], primitive=Mod4)
    df = calculate_feature_matrix(features=[feat], entityset=es, instance_ids=range(17))
    df = to_pandas(df, index="id")

    # Null input stays null; known values map through the reference table.
    assert pd.isnull(df["MOD4(value)"][15])
    assert df["MOD4(value)"][0] == 0
    assert df["MOD4(value)"][14] == 2

    fm, fl = dfs(
        entityset=es,
        target_dataframe_name="log",
        agg_primitives=[],
        trans_primitives=[Mod4],
    )
    fm = to_pandas(fm, index="id")
    assert fm["MOD4(value)"][0] == 0
    assert fm["MOD4(value)"][14] == 2
    assert pd.isnull(fm["MOD4(value)"][15])
def test_override_multi_feature_names(pd_es):
    # A multi-output primitive can override generate_names to fully control
    # the names of its output columns.
    def gen_custom_names(primitive, base_feature_names):
        return [
            "Above18(%s)" % base_feature_names,
            "Above21(%s)" % base_feature_names,
            "Above65(%s)" % base_feature_names,
        ]

    class IsGreater(TransformPrimitive):
        name = "is_greater"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 3

        def get_function(self):
            def is_greater(x):
                return x > 18, x > 21, x > 65

            return is_greater

        def generate_names(primitive, base_feature_names):
            # Delegate to the module-level helper so the expected names below
            # are produced by the exact same code path.
            return gen_custom_names(primitive, base_feature_names)

    fm, features = dfs(
        entityset=pd_es,
        target_dataframe_name="customers",
        instance_ids=[0, 1, 2],
        agg_primitives=[],
        trans_primitives=[IsGreater],
    )

    expected_names = gen_custom_names(IsGreater, ["age"])
    for name in expected_names:
        assert name in fm.columns
def test_time_since_primitive_matches_all_datetime_types(es):
    # TimeSince should be applied to every Datetime column on the target.
    if es.dataframe_type == Library.SPARK:
        pytest.xfail("TimeSince transform primitive is incompatible with Spark")
    fm, fl = dfs(
        target_dataframe_name="customers",
        entityset=es,
        trans_primitives=[TimeSince],
        agg_primitives=[],
        max_depth=1,
    )
    customers_datetime_cols = [
        id
        for id, t in es["customers"].ww.logical_types.items()
        if isinstance(t, Datetime)
    ]
    expected_names = [f"TIME_SINCE({v})" for v in customers_datetime_cols]
    for name in expected_names:
        assert name in fm.columns


def test_cfm_with_numeric_lag_and_non_nullable_column(pd_es):
    # NumericLag over a non-nullable Integer column: the first `periods` rows
    # must be null even though the input dtype cannot hold nulls.
    # fill nans so we can use non nullable numeric logical type in the EntitySet
    new_log = pd_es["log"].copy()
    new_log["value"] = new_log["value"].fillna(0)
    new_log.ww.init(
        logical_types={"value": "Integer", "product_id": "Categorical"},
        index="id",
        time_index="datetime",
        name="new_log",
    )
    pd_es.add_dataframe(new_log)
    rels = [
        ("sessions", "id", "new_log", "session_id"),
        ("products", "id", "new_log", "product_id"),
    ]
    pd_es = pd_es.add_relationships(rels)
    assert isinstance(pd_es["new_log"].ww.logical_types["value"], Integer)

    periods = 5
    lag_primitive = NumericLag(periods=periods)
    cutoff_times = pd_es["new_log"][["id", "datetime"]]
    fm, _ = dfs(
        target_dataframe_name="new_log",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[lag_primitive],
        cutoff_time=cutoff_times,
    )
    # Exactly the first `periods` rows are null — no more, no fewer.
    assert fm["NUMERIC_LAG(datetime, value, periods=5)"].head(periods).isnull().all()
    assert fm["NUMERIC_LAG(datetime, value, periods=5)"].isnull().sum() == periods

    assert "NUMERIC_LAG(datetime, value_2, periods=5)" in fm.columns
    assert "NUMERIC_LAG(datetime, products.rating, periods=5)" in fm.columns
    assert (
        fm["NUMERIC_LAG(datetime, products.rating, periods=5)"]
        .head(periods)
        .isnull()
        .all()
    )
def test_cfm_with_lag_and_non_nullable_columns(pd_es):
    """Lag must return a nullable logical type for every input type, because
    shifting always introduces nulls at the start of the series."""
    # fill nans so we can use non nullable numeric logical type in the EntitySet
    new_log = pd_es["log"].copy()
    new_log["value"] = new_log["value"].fillna(0)
    new_log["value_double"] = new_log["value"]
    new_log["purchased_with_nulls"] = new_log["purchased"]
    # Use .loc instead of chained indexing (new_log["col"][0:4] = None), which
    # raises SettingWithCopyWarning and is a silent no-op under pandas
    # copy-on-write (pandas >= 3.0 default).
    new_log.loc[new_log.index[0:4], "purchased_with_nulls"] = None
    new_log.ww.init(
        logical_types={
            "value": "Integer",
            "value_2": "IntegerNullable",
            "product_id": "Categorical",
            "value_double": "Double",
            "purchased_with_nulls": "BooleanNullable",
        },
        index="id",
        time_index="datetime",
        name="new_log",
    )
    pd_es.add_dataframe(new_log)
    rels = [
        ("sessions", "id", "new_log", "session_id"),
        ("products", "id", "new_log", "product_id"),
    ]
    pd_es = pd_es.add_relationships(rels)
    assert isinstance(pd_es["new_log"].ww.logical_types["value"], Integer)

    periods = 5
    lag_primitive = Lag(periods=periods)
    cutoff_times = pd_es["new_log"][["id", "datetime"]]
    fm, _ = dfs(
        target_dataframe_name="new_log",
        entityset=pd_es,
        agg_primitives=[],
        trans_primitives=[lag_primitive],
        cutoff_time=cutoff_times,
    )
    # Integer input -> IntegerNullable output with exactly `periods` nulls.
    assert fm["LAG(value, datetime, periods=5)"].head(periods).isnull().all()
    assert fm["LAG(value, datetime, periods=5)"].isnull().sum() == periods
    assert isinstance(
        fm.ww.schema.logical_types["LAG(value, datetime, periods=5)"],
        IntegerNullable,
    )
    # IntegerNullable input stays IntegerNullable.
    assert "LAG(value_2, datetime, periods=5)" in fm.columns
    assert fm["LAG(value_2, datetime, periods=5)"].head(periods).isnull().all()
    assert isinstance(
        fm.ww.schema.logical_types["LAG(value_2, datetime, periods=5)"],
        IntegerNullable,
    )
    # Categorical input stays Categorical (already supports nulls).
    assert "LAG(product_id, datetime, periods=5)" in fm.columns
    assert fm["LAG(product_id, datetime, periods=5)"].head(periods).isnull().all()
    assert isinstance(
        fm.ww.schema.logical_types["LAG(product_id, datetime, periods=5)"],
        Categorical,
    )
    # Double input stays Double (NaN-capable).
    assert "LAG(value_double, datetime, periods=5)" in fm.columns
    assert fm["LAG(value_double, datetime, periods=5)"].head(periods).isnull().all()
    assert isinstance(
        fm.ww.schema.logical_types["LAG(value_double, datetime, periods=5)"],
        Double,
    )
    # Boolean input -> BooleanNullable output.
    assert "LAG(purchased, datetime, periods=5)" in fm.columns
    assert fm["LAG(purchased, datetime, periods=5)"].head(periods).isnull().all()
    assert isinstance(
        fm.ww.schema.logical_types["LAG(purchased, datetime, periods=5)"],
        BooleanNullable,
    )
    # BooleanNullable input stays BooleanNullable.
    assert "LAG(purchased_with_nulls, datetime, periods=5)" in fm.columns
    assert (
        fm["LAG(purchased_with_nulls, datetime, periods=5)"]
        .head(periods)
        .isnull()
        .all()
    )
    assert isinstance(
        fm.ww.schema.logical_types["LAG(purchased_with_nulls, datetime, periods=5)"],
        BooleanNullable,
    )
def test_comparisons_with_ordinal_valid_inputs_that_dont_work_but_should(pd_es):
    # TODO: Remvoe this test once the correct behavior is implemented in CFM
    # The following test covers a scenario where an intermediate feature doesn't have the correct type
    # because Woodwork has not yet been initialized. This calculation should work and return valid True/False
    # values. This should be fixed in a future PR, but until a fix is implemented null values are returned to
    # prevent calculate_feature_matrix from raising an Error when calculating features generated by DFS.
    priority_level = Feature(pd_es["log"].ww["priority_level"])
    first_priority = AggregationFeature(
        priority_level,
        parent_dataframe_name="customers",
        primitive=First,
    )
    engagement = Feature(pd_es["customers"].ww["engagement_level"])
    # Ordinal-vs-ordinal comparisons against an aggregated ordinal currently
    # fall through to the null path described above.
    invalid_but_should_be_valid = [
        TransformFeature([engagement, first_priority], primitive=LessThan),
        TransformFeature([engagement, first_priority], primitive=LessThanEqualTo),
        TransformFeature([engagement, first_priority], primitive=GreaterThan),
        TransformFeature([engagement, first_priority], primitive=GreaterThanEqualTo),
    ]
    fm = calculate_feature_matrix(
        entityset=pd_es,
        features=invalid_but_should_be_valid,
    )
    feature_cols = [f.get_name() for f in invalid_but_should_be_valid]
    fm = to_pandas(fm)
    # Current (undesired but expected) behavior: all-null output columns.
    for col in feature_cols:
        assert fm[col].isnull().all()
def test_multiply_numeric_boolean():
    """MultiplyNumericBoolean masks a numeric series with a boolean series:
    True keeps the value, False zeroes it, and a null on either side
    propagates to the output."""
    test_cases = [
        {"val": 100, "mask": True, "expected": 100},
        {"val": 100, "mask": False, "expected": 0},
        {"val": 0, "mask": False, "expected": 0},
        {"val": 100, "mask": pd.NA, "expected": pd.NA},
        {"val": pd.NA, "mask": pd.NA, "expected": pd.NA},
        {"val": pd.NA, "mask": True, "expected": pd.NA},
        {"val": pd.NA, "mask": False, "expected": pd.NA},
    ]
    multiply_numeric_boolean = MultiplyNumericBoolean()
    # `case` instead of `input`, which shadowed the builtin of the same name.
    for case in test_cases:
        vals = pd.Series(case["val"]).astype("Int64")
        mask = pd.Series(case["mask"])
        actual = multiply_numeric_boolean(vals, mask).tolist()[0]
        expected = case["expected"]
        if pd.isnull(expected):
            assert pd.isnull(actual)
        else:
            assert actual == expected
def test_multiply_numeric_boolean_multiple_dtypes_no_nulls():
    """Every numeric/boolean dtype pairing gives the same masked result when
    there are no null values."""
    primitive = MultiplyNumericBoolean()
    numbers = pd.Series([1, 2, 3])
    flags = pd.Series([True, False, True])
    for num_dtype in ("float64", "int64", "Int64"):
        for bool_dtype in ("bool", "boolean"):
            result = primitive(numbers.astype(num_dtype), flags.astype(bool_dtype))
            expected = pd.Series([1, 0, 3])
            pd.testing.assert_series_equal(result, expected, check_dtype=False)
def test_multiply_numeric_boolean_multiple_dtypes_with_nulls():
    """With nulls present, NaN in the numeric input propagates and a null
    mask entry also yields null, for every supported numeric dtype."""
    primitive = MultiplyNumericBoolean()
    numbers = pd.Series([np.nan, 2, 3])
    flags = pd.Series([True, False, pd.NA], dtype="boolean")
    for num_dtype in ("float64", "Int64"):
        result = primitive(numbers.astype(num_dtype), flags)
        expected = pd.Series([np.nan, 0, np.nan])
        pd.testing.assert_series_equal(result, expected, check_dtype=False)
def test_feature_multiplication(es):
    # The `*` operator on features must dispatch to the right primitive based
    # on the operand types, for both non-nullable and nullable logical types.
    numeric_ft = Feature(es["customers"].ww["age"])
    boolean_ft = Feature(es["customers"].ww["loves_ice_cream"])
    mult_numeric = numeric_ft * numeric_ft
    mult_boolean = boolean_ft * boolean_ft
    mult_numeric_boolean = numeric_ft * boolean_ft
    mult_numeric_boolean2 = boolean_ft * numeric_ft

    assert issubclass(type(mult_numeric.primitive), MultiplyNumeric)
    assert issubclass(type(mult_boolean.primitive), MultiplyBoolean)
    assert issubclass(type(mult_numeric_boolean.primitive), MultiplyNumericBoolean)
    assert issubclass(type(mult_numeric_boolean2.primitive), MultiplyNumericBoolean)

    # Test with nullable types
    es["customers"].ww.set_types(
        logical_types={"age": "IntegerNullable", "loves_ice_cream": "BooleanNullable"},
    )
    numeric_ft = Feature(es["customers"].ww["age"])
    boolean_ft = Feature(es["customers"].ww["loves_ice_cream"])
    mult_numeric = numeric_ft * numeric_ft
    mult_boolean = boolean_ft * boolean_ft
    mult_numeric_boolean = numeric_ft * boolean_ft
    mult_numeric_boolean2 = boolean_ft * numeric_ft

    assert issubclass(type(mult_numeric.primitive), MultiplyNumeric)
    assert issubclass(type(mult_boolean.primitive), MultiplyBoolean)
    assert issubclass(type(mult_numeric_boolean.primitive), MultiplyNumericBoolean)
    assert issubclass(type(mult_numeric_boolean2.primitive), MultiplyNumericBoolean)
| 55,375 | 29.594475 | 109 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_rolling_primitive_utils.py | from unittest.mock import patch
import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import (
RollingCount,
RollingMax,
RollingMean,
RollingMin,
RollingSTD,
RollingTrend,
)
from featuretools.primitives.standard.transform.time_series.utils import (
_get_rolled_series_without_gap,
apply_roll_with_offset_gap,
roll_series_with_gap,
)
from featuretools.tests.primitive_tests.utils import get_number_from_offset
def test_get_rolled_series_without_gap(window_series_pd):
    # Data is daily, so number of rows should be number of days not included in the gap
    assert len(_get_rolled_series_without_gap(window_series_pd, "11D")) == 9
    assert len(_get_rolled_series_without_gap(window_series_pd, "0D")) == 20
    assert len(_get_rolled_series_without_gap(window_series_pd, "48H")) == 18
    assert len(_get_rolled_series_without_gap(window_series_pd, "4H")) == 19


def test_get_rolled_series_without_gap_not_uniform(window_series_pd):
    # Irregularly spaced rows: the gap is measured in time, not in row count.
    non_uniform_series = window_series_pd.iloc[[0, 2, 5, 6, 8, 9]]
    assert len(_get_rolled_series_without_gap(non_uniform_series, "10D")) == 0
    assert len(_get_rolled_series_without_gap(non_uniform_series, "0D")) == 6
    assert len(_get_rolled_series_without_gap(non_uniform_series, "48H")) == 4
    assert len(_get_rolled_series_without_gap(non_uniform_series, "4H")) == 5
    assert len(_get_rolled_series_without_gap(non_uniform_series, "4D")) == 3
    assert len(_get_rolled_series_without_gap(non_uniform_series, "4D2H")) == 2


def test_get_rolled_series_without_gap_empty_series(window_series_pd):
    # An empty series stays empty regardless of the gap.
    empty_series = pd.Series([], dtype="object")
    assert len(_get_rolled_series_without_gap(empty_series, "1D")) == 0
    assert len(_get_rolled_series_without_gap(empty_series, "0D")) == 0


def test_get_rolled_series_without_gap_large_bound(window_series_pd):
    # A gap larger than the series' span removes every row.
    assert len(_get_rolled_series_without_gap(window_series_pd, "100D")) == 0
    assert (
        len(
            _get_rolled_series_without_gap(
                window_series_pd.iloc[[0, 2, 5, 6, 8, 9]],
                "20D",
            ),
        )
        == 0
    )
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (3, 2),
        (3, 4),  # gap larger than window
        (2, 0),  # gap explicitly set to 0
        ("3d", "2d"),  # using offset aliases
        ("3d", "4d"),
        ("4d", "0d"),
    ],
)
def test_roll_series_with_gap(window_length, gap, window_series_pd):
    # The series values are range(20), so the window min/max directly encode
    # the window's start/end index — making the expected window easy to check.
    rolling_max = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=gap,
        min_periods=1,
    ).max()
    rolling_min = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=gap,
        min_periods=1,
    ).min()
    assert len(rolling_max) == len(window_series_pd)
    assert len(rolling_min) == len(window_series_pd)

    gap_num = get_number_from_offset(gap)
    window_length_num = get_number_from_offset(window_length)
    for i in range(len(window_series_pd)):
        start_idx = i - gap_num - window_length_num + 1

        if isinstance(gap, str):
            # No gap functionality is happening, so gap isn't taken account in the end index
            # it's like the gap is 0; it includes the row itself
            end_idx = i
        else:
            end_idx = i - gap_num

        # If start and end are negative, they're entirely before
        if start_idx < 0 and end_idx < 0:
            assert pd.isnull(rolling_max.iloc[i])
            assert pd.isnull(rolling_min.iloc[i])
            continue

        if start_idx < 0:
            start_idx = 0

        # Because the row values are a range from 0 to 20, the rolling min will be the start index
        # and the rolling max will be the end idx
        assert rolling_min.iloc[i] == start_idx
        assert rolling_max.iloc[i] == end_idx


@pytest.mark.parametrize("window_length", [3, "3d"])
def test_roll_series_with_no_gap(window_length, window_series_pd):
    # With gap=0 the helper must match pandas' plain rolling exactly.
    actual_rolling = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=0,
        min_periods=1,
    ).mean()
    expected_rolling = window_series_pd.rolling(window_length, min_periods=1).mean()
    pd.testing.assert_series_equal(actual_rolling, expected_rolling)
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (6, 2),
        (6, 0),  # No gap - changes early values
        ("6d", "0d"),  # Uses offset aliases
        ("6d", "2d"),
    ],
)
def test_roll_series_with_gap_early_values(window_length, gap, window_series_pd):
    # Checks how the start of the series (before a full window exists) is
    # handled, both with min_periods=1 and min_periods=window_length.
    gap_num = get_number_from_offset(gap)
    window_length_num = get_number_from_offset(window_length)

    # Default min periods is 1 - will include all
    default_partial_values = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=gap,
        min_periods=1,
    ).count()
    num_empty_aggregates = len(default_partial_values.loc[default_partial_values == 0])
    num_partial_aggregates = len(
        (default_partial_values.loc[default_partial_values != 0]).loc[
            default_partial_values < window_length_num
        ],
    )

    assert num_partial_aggregates == window_length_num - 1
    if isinstance(gap, str):
        # gap isn't handled, so we'll always at least include the row itself
        assert num_empty_aggregates == 0
    else:
        assert num_empty_aggregates == gap_num

    # Make min periods the size of the window
    no_partial_values = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=gap,
        min_periods=window_length_num,
    ).count()
    num_null_aggregates = len(no_partial_values.loc[pd.isna(no_partial_values)])
    num_partial_aggregates = len(
        no_partial_values.loc[no_partial_values < window_length_num],
    )

    # because we shift, gap is included as nan values in the series.
    # Count treats nans in a window as values that don't get counted,
    # so the gap rows get included in the count for whether a window has "min periods".
    # This is different than max, for example, which does not count nans in a window as values towards "min periods"
    assert num_null_aggregates == window_length_num - 1
    if isinstance(gap, str):
        # gap isn't handled, so we'll never have any partial aggregates
        assert num_partial_aggregates == 0
    else:
        assert num_partial_aggregates == gap_num
def test_roll_series_with_gap_nullable_types(window_series_pd):
    """Rolling max over a gapped window must not depend on whether the input
    dtype is nullable (Int64) or not (int64)."""
    window_length = 3
    gap = 2
    min_periods = 1
    results = []
    # Because we're inserting nans, confirm that nullability of the dtype
    # doesn't have an impact on the results.
    for dtype in ("Int64", "int64"):
        rolled = roll_series_with_gap(
            window_series_pd.astype(dtype),
            window_length,
            gap=gap,
            min_periods=min_periods,
        )
        results.append(rolled.max())
    pd.testing.assert_series_equal(results[0], results[1])
def test_roll_series_with_gap_nullable_types_with_nans(window_series_pd):
    # Same nullability check as above, but with real nulls in the data, plus
    # an explicit expected sequence for the gapped rolling max.
    window_length = 3
    gap = 2
    min_periods = 1
    nullable_floats = window_series_pd.astype("float64").replace(
        {1: np.nan, 3: np.nan},
    )
    nullable_ints = nullable_floats.astype("Int64")

    nullable_ints_rolling_max = roll_series_with_gap(
        nullable_ints,
        window_length,
        gap=gap,
        min_periods=min_periods,
    ).max()
    nullable_floats_rolling_max = roll_series_with_gap(
        nullable_floats,
        window_length,
        gap=gap,
        min_periods=min_periods,
    ).max()
    pd.testing.assert_series_equal(
        nullable_ints_rolling_max,
        nullable_floats_rolling_max,
    )

    # First two rows fall entirely in the gap; nans at positions 1 and 3 make
    # the max repeat (0, 0, 2, 2) before the regular shifted range resumes.
    expected_early_values = [np.nan, np.nan, 0, 0, 2, 2, 4] + list(
        range(7 - gap, len(window_series_pd) - gap),
    )
    for i in range(len(window_series_pd)):
        actual = nullable_floats_rolling_max.iloc[i]
        expected = expected_early_values[i]
        if pd.isnull(actual):
            assert pd.isnull(expected)
        else:
            assert actual == expected
@pytest.mark.parametrize(
    "window_length, gap",
    [
        ("3d", "2d"),
        ("3d", "4d"),
        ("4d", "0d"),
    ],
)
def test_apply_roll_with_offset_gap(window_length, gap, window_series_pd):
    # With apply_roll_with_offset_gap the offset gap IS honored (unlike the
    # raw roll_series_with_gap offset path tested elsewhere).
    def max_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, max, min_periods=1)

    rolling_max_obj = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=gap,
        min_periods=1,
    )
    rolling_max_series = rolling_max_obj.apply(max_wrapper)

    def min_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, min, min_periods=1)

    rolling_min_obj = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=gap,
        min_periods=1,
    )
    rolling_min_series = rolling_min_obj.apply(min_wrapper)

    assert len(rolling_max_series) == len(window_series_pd)
    assert len(rolling_min_series) == len(window_series_pd)

    gap_num = get_number_from_offset(gap)
    window_length_num = get_number_from_offset(window_length)
    for i in range(len(window_series_pd)):
        start_idx = i - gap_num - window_length_num + 1
        # Now that we have the _apply call, this acts as expected
        end_idx = i - gap_num

        # If start and end are negative, they're entirely before
        if start_idx < 0 and end_idx < 0:
            assert pd.isnull(rolling_max_series.iloc[i])
            assert pd.isnull(rolling_min_series.iloc[i])
            continue

        if start_idx < 0:
            start_idx = 0

        # Because the row values are a range from 0 to 20, the rolling min will be the start index
        # and the rolling max will be the end idx
        assert rolling_min_series.iloc[i] == start_idx
        assert rolling_max_series.iloc[i] == end_idx


@pytest.mark.parametrize(
    "min_periods",
    [1, 0, None],
)
def test_apply_roll_with_offset_gap_default_min_periods(min_periods, window_series_pd):
    # 1, 0 and None all behave as min_periods=1.
    window_length = "5d"
    window_length_num = 5
    gap = "3d"
    gap_num = 3

    def count_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, len, min_periods=min_periods)

    rolling_count_obj = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=gap,
        min_periods=min_periods,
    )
    rolling_count_series = rolling_count_obj.apply(count_wrapper)

    # gap essentially creates a rolling series that has no elements; which should be nan
    # to differentiate from when a window only has null values
    num_empty_aggregates = rolling_count_series.isna().sum()
    num_partial_aggregates = len(
        (rolling_count_series.loc[rolling_count_series != 0]).loc[
            rolling_count_series < window_length_num
        ],
    )

    assert num_empty_aggregates == gap_num
    assert num_partial_aggregates == window_length_num - 1
@pytest.mark.parametrize(
    "min_periods",
    [2, 3, 4, 5],
)
def test_apply_roll_with_offset_gap_min_periods(min_periods, window_series_pd):
    # Larger min_periods extends the run of nan aggregates at the start.
    window_length = "5d"
    window_length_num = 5
    gap = "3d"
    gap_num = 3

    def count_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, len, min_periods=min_periods)

    rolling_count_obj = roll_series_with_gap(
        window_series_pd,
        window_length,
        gap=gap,
        min_periods=min_periods,
    )
    rolling_count_series = rolling_count_obj.apply(count_wrapper)

    # gap essentially creates rolling series that have no elements; which should be nan
    # to differentiate from when a window only has null values
    num_empty_aggregates = rolling_count_series.isna().sum()
    num_partial_aggregates = len(
        (rolling_count_series.loc[rolling_count_series != 0]).loc[
            rolling_count_series < window_length_num
        ],
    )

    assert num_empty_aggregates == min_periods - 1 + gap_num
    assert num_partial_aggregates == window_length_num - min_periods


def test_apply_roll_with_offset_gap_non_uniform():
    window_length = "3d"
    gap = "3d"
    min_periods = 1

    # When the data isn't uniform, this impacts the number of values in each rolling window
    datetimes = (
        list(pd.date_range(start="2017-01-01", freq="1d", periods=7))
        + list(pd.date_range(start="2017-02-01", freq="2d", periods=7))
        + list(pd.date_range(start="2017-03-01", freq="1d", periods=7))
    )
    no_freq_series = pd.Series(range(len(datetimes)), index=datetimes)
    assert pd.infer_freq(no_freq_series.index) is None

    # Expected counts per segment: daily data refills the window after the
    # gap, while the 2-day-spaced segment only ever fits one row.
    expected_series = pd.Series(
        [None, None, None, 1, 2, 3, 3]
        + [None, None, 1, 1, 1, 1, 1]
        + [None, None, None, 1, 2, 3, 3],
        index=datetimes,
    )

    def count_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, len, min_periods=min_periods)

    rolling_count_obj = roll_series_with_gap(
        no_freq_series,
        window_length,
        gap=gap,
        min_periods=min_periods,
    )
    rolling_count_series = rolling_count_obj.apply(count_wrapper)

    pd.testing.assert_series_equal(rolling_count_series, expected_series)
def test_apply_roll_with_offset_data_frequency_higher_than_parameters_frequency():
    # Hourly data with day-denominated window/gap parameters: offsets are
    # converted to the data's frequency (1 day == 24 rows here).
    window_length = "5D"  # 120 hours
    window_length_num = 5
    # In order for min periods to be the length of the window, we multiply 24hours*5
    min_periods = window_length_num * 24

    datetimes = list(pd.date_range(start="2017-01-01", freq="1H", periods=200))
    high_frequency_series = pd.Series(range(200), index=datetimes)

    # Check without gap
    gap = "0d"
    gap_num = 0

    def max_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, max, min_periods=min_periods)

    rolling_max_obj = roll_series_with_gap(
        high_frequency_series,
        window_length,
        min_periods=min_periods,
        gap=gap,
    )
    rolling_max_series = rolling_max_obj.apply(max_wrapper)
    assert rolling_max_series.isna().sum() == (min_periods - 1) + gap_num

    # Check with small gap
    gap = "3H"
    gap_num = 3

    def max_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, max, min_periods=min_periods)

    rolling_max_obj = roll_series_with_gap(
        high_frequency_series,
        window_length,
        min_periods=min_periods,
        gap=gap,
    )
    rolling_max_series = rolling_max_obj.apply(max_wrapper)
    assert rolling_max_series.isna().sum() == (min_periods - 1) + gap_num

    # Check with large gap - in terms of days, so we'll multiply by 24hours for number of nans
    gap = "2D"
    gap_num = 2

    def max_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, max, min_periods=min_periods)

    rolling_max_obj = roll_series_with_gap(
        high_frequency_series,
        window_length,
        min_periods=min_periods,
        gap=gap,
    )
    rolling_max_series = rolling_max_obj.apply(max_wrapper)
    assert rolling_max_series.isna().sum() == (min_periods - 1) + (gap_num * 24)


def test_apply_roll_with_offset_data_min_periods_too_big(window_series_pd):
    # A min_periods that can never be reached makes every aggregate nan.
    window_length = "5D"
    gap = "2d"
    # Since the data has a daily frequency, there will only be, at most, 5 rows in the window
    min_periods = 6

    def max_wrapper(sub_s):
        return apply_roll_with_offset_gap(sub_s, gap, max, min_periods=min_periods)

    rolling_max_obj = roll_series_with_gap(
        window_series_pd,
        window_length,
        min_periods=min_periods,
        gap=gap,
    )
    rolling_max_series = rolling_max_obj.apply(max_wrapper)

    # The resulting series is comprised entirely of nans
    assert rolling_max_series.isna().sum() == len(window_series_pd)
def test_roll_series_with_gap_different_input_types_same_result_uniform(
    window_series_pd,
):
    # Offset inputs will only produce the same results as numeric inputs
    # when the data has a uniform frequency
    offset_gap = "2d"
    offset_window_length = "5d"
    int_gap = 2
    int_window_length = 5
    min_periods = 1

    # Rolling series' with matching input types
    expected_rolling_numeric = roll_series_with_gap(
        window_series_pd,
        window_length=int_window_length,
        gap=int_gap,
        min_periods=min_periods,
    ).max()

    def count_wrapper(sub_s):
        return apply_roll_with_offset_gap(
            sub_s,
            offset_gap,
            max,
            min_periods=min_periods,
        )

    rolling_count_obj = roll_series_with_gap(
        window_series_pd,
        window_length=offset_window_length,
        gap=offset_gap,
        min_periods=min_periods,
    )
    expected_rolling_offset = rolling_count_obj.apply(count_wrapper)

    # confirm that the offset and gap results are equal to one another
    pd.testing.assert_series_equal(expected_rolling_numeric, expected_rolling_offset)

    # Rolling series' with mismatched input types
    mismatched_numeric_gap = roll_series_with_gap(
        window_series_pd,
        window_length=offset_window_length,
        gap=int_gap,
        min_periods=min_periods,
    ).max()

    # Confirm the mismatched results also produce the same results
    pd.testing.assert_series_equal(expected_rolling_numeric, mismatched_numeric_gap)
def test_roll_series_with_gap_incorrect_types(window_series_pd):
    """A window length or gap that is neither an int nor an offset string
    must raise a TypeError."""
    error = "Window length must be either an offset string or an integer."
    with pytest.raises(TypeError, match=error):
        # Note: the original had a stray trailing comma after this call,
        # wrapping it in a one-element tuple; harmless but misleading.
        roll_series_with_gap(
            window_series_pd,
            window_length=4.2,
            gap=4,
            min_periods=1,
        )

    error = "Gap must be either an offset string or an integer."
    with pytest.raises(TypeError, match=error):
        roll_series_with_gap(window_series_pd, window_length=4, gap=4.2, min_periods=1)
def test_roll_series_with_gap_negative_inputs(window_series_pd):
    """Negative window length or gap values are rejected with a ValueError."""
    bad_inputs = [
        ({"window_length": -4, "gap": 4}, "Window length must be greater than zero."),
        ({"window_length": 4, "gap": -4}, "Gap must be greater than or equal to zero."),
    ]
    for kwargs, message in bad_inputs:
        with pytest.raises(ValueError, match=message):
            roll_series_with_gap(window_series_pd, min_periods=1, **kwargs)
def test_roll_series_with_non_offset_string_inputs(window_series_pd):
    # Strings that are not valid pandas offset aliases must be rejected.
    error = "Cannot roll series. The specified gap, test, is not a valid offset alias."
    with pytest.raises(ValueError, match=error):
        roll_series_with_gap(
            window_series_pd,
            window_length="4D",
            gap="test",
            min_periods=1,
        )

    error = "Cannot roll series. The specified window length, test, is not a valid offset alias."
    with pytest.raises(ValueError, match=error):
        roll_series_with_gap(
            window_series_pd,
            window_length="test",
            gap="7D",
            min_periods=1,
        )

    # Test mismatched types error
    error = (
        "Cannot roll series with offset gap, 2d, and numeric window length, 7. "
        "If an offset alias is used for gap, the window length must also be defined as an offset alias. "
        "Please either change gap to be numeric or change window length to be an offset alias."
    )
    with pytest.raises(TypeError, match=error):
        # The error surfaces when the rolling object is aggregated, hence .max().
        roll_series_with_gap(
            window_series_pd,
            window_length=7,
            gap="2d",
            min_periods=1,
        ).max()
@pytest.mark.parametrize(
    "primitive",
    [RollingCount, RollingMax, RollingMin, RollingMean, RollingSTD, RollingTrend],
)
@patch(
    "featuretools.primitives.standard.transform.time_series.utils.apply_roll_with_offset_gap",
)
def test_no_call_to_apply_roll_with_offset_gap_with_numeric(
    mock_apply_roll,
    primitive,
    window_series_pd,
):
    # The slower apply_roll_with_offset_gap path should only be taken when
    # BOTH window length and gap are offset strings.
    assert not mock_apply_roll.called

    # Numeric window length and numeric gap: fast path.
    fully_numeric_primitive = primitive(window_length=3, gap=1)
    primitive_func = fully_numeric_primitive.get_function()
    # RollingCount's function takes only the datetime index; the others also
    # take the value series.
    if isinstance(fully_numeric_primitive, RollingCount):
        pd.Series(primitive_func(window_series_pd.index))
    else:
        pd.Series(
            primitive_func(
                window_series_pd.index,
                pd.Series(window_series_pd.values),
            ),
        )
    assert not mock_apply_roll.called

    # Offset window length with a numeric gap: still the fast path.
    offset_window_primitive = primitive(window_length="3d", gap=1)
    primitive_func = offset_window_primitive.get_function()
    if isinstance(offset_window_primitive, RollingCount):
        pd.Series(primitive_func(window_series_pd.index))
    else:
        pd.Series(
            primitive_func(
                window_series_pd.index,
                pd.Series(window_series_pd.values),
            ),
        )
    assert not mock_apply_roll.called

    # Offset window length with the default gap: still the fast path.
    no_gap_specified_primitive = primitive(window_length="3d")
    primitive_func = no_gap_specified_primitive.get_function()
    if isinstance(no_gap_specified_primitive, RollingCount):
        pd.Series(primitive_func(window_series_pd.index))
    else:
        pd.Series(
            primitive_func(
                window_series_pd.index,
                pd.Series(window_series_pd.values),
            ),
        )
    assert not mock_apply_roll.called

    # Offset window length AND offset gap: the offset-gap helper is used.
    no_gap_specified_primitive = primitive(window_length="3d", gap="1d")
    primitive_func = no_gap_specified_primitive.get_function()
    if isinstance(no_gap_specified_primitive, RollingCount):
        pd.Series(primitive_func(window_series_pd.index))
    else:
        pd.Series(
            primitive_func(
                window_series_pd.index,
                pd.Series(window_series_pd.values),
            ),
        )
    assert mock_apply_roll.called
| 21,912 | 31.70597 | 116 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_dask_primitives.py | import pandas as pd
import pytest
from featuretools import calculate_feature_matrix, dfs, list_primitives
from featuretools.feature_base.cache import feature_cache
from featuretools.primitives import get_aggregation_primitives, get_transform_primitives
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library
# Names of primitives that cannot run on Dask dataframes; transform
# primitives come first, then aggregation primitives (registry order).
UNSUPPORTED = [
    primitive.name
    for registry in (get_transform_primitives(), get_aggregation_primitives())
    for primitive in registry.values()
    if Library.DASK not in primitive.compatibility
]
@pytest.fixture(autouse=True)
def reset_dfs_cache():
    """Disable and clear the DFS feature cache before every test in this
    module so cached features cannot leak between tests."""
    feature_cache.enabled = False
    feature_cache.clear_all()
def test_transform(pd_es, dask_es):
    """Run DFS with every Dask-compatible transform primitive against both the
    pandas and Dask entitysets and assert identical features and feature
    matrix values. Currently skipped pending an upstream Dask fix."""
    pytest.skip(
        "TODO: Dask issue with `series.eq`. Fix once Dask Issue #7957 is closed.",
    )
    primitives = list_primitives()
    trans_list = primitives[primitives["type"] == "transform"]["name"].tolist()
    # Filter out primitives whose compatibility excludes Dask.
    trans_primitives = [prim for prim in trans_list if prim not in UNSUPPORTED]
    agg_primitives = []
    cutoff_time = pd.Timestamp("2019-01-05 04:00")
    assert pd_es == dask_es
    # Run DFS using each dataframe as a target and confirm results match
    for df in pd_es.dataframes:
        features = dfs(
            entityset=pd_es,
            target_dataframe_name=df.ww.name,
            trans_primitives=trans_primitives,
            agg_primitives=agg_primitives,
            max_depth=2,
            features_only=True,
        )
        dask_features = dfs(
            entityset=dask_es,
            target_dataframe_name=df.ww.name,
            trans_primitives=trans_primitives,
            agg_primitives=agg_primitives,
            max_depth=2,
            features_only=True,
        )
        assert features == dask_features
        # Calculate feature matrix values to confirm output is the same between dask and pandas.
        # Not testing on all returned features due to long run times.
        fm = calculate_feature_matrix(
            features=features[:100],
            entityset=pd_es,
            cutoff_time=cutoff_time,
        )
        dask_fm = calculate_feature_matrix(
            features=dask_features[:100],
            entityset=dask_es,
            cutoff_time=cutoff_time,
        )
        # Categorical categories can be ordered differently, this makes sure they are the same
        dask_fm = dask_fm.astype(fm.dtypes)
        # Use the same columns and make sure both indexes are sorted the same
        dask_computed_fm = (
            dask_fm.compute().set_index(df.ww.index).loc[fm.index][fm.columns]
        )
        pd.testing.assert_frame_equal(fm, dask_computed_fm)
def test_aggregation(pd_es, dask_es):
    """Run DFS with every Dask-compatible aggregation primitive against both
    the pandas and Dask entitysets and assert the feature matrices match."""
    primitives = list_primitives()
    trans_primitives = []
    agg_list = primitives[primitives["type"] == "aggregation"]["name"].tolist()
    # Filter out primitives whose compatibility excludes Dask.
    agg_primitives = [prim for prim in agg_list if prim not in UNSUPPORTED]
    assert pd_es == dask_es
    # Run DFS using each dataframe as a target and confirm results match
    for df in pd_es.dataframes:
        fm, _ = dfs(
            entityset=pd_es,
            target_dataframe_name=df.ww.name,
            trans_primitives=trans_primitives,
            agg_primitives=agg_primitives,
            cutoff_time=pd.Timestamp("2019-01-05 04:00"),
            max_depth=2,
        )
        dask_fm, _ = dfs(
            entityset=dask_es,
            target_dataframe_name=df.ww.name,
            trans_primitives=trans_primitives,
            agg_primitives=agg_primitives,
            cutoff_time=pd.Timestamp("2019-01-05 04:00"),
            max_depth=2,
        )
        # Categorical categories can be ordered differently, this makes sure they
        # are the same, including the index column
        index_col = df.ww.index
        fm = fm.reset_index()
        dask_fm = dask_fm.astype(fm.dtypes)
        fm = fm.set_index(index_col)
        pd.testing.assert_frame_equal(
            fm.sort_index(),
            to_pandas(dask_fm, index=index_col, sort_index=True),
        )
| 4,094 | 32.292683 | 96 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_direct_features.py | import numpy as np
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator,
)
from featuretools.feature_base import DirectFeature, Feature, IdentityFeature
from featuretools.primitives import (
AggregationPrimitive,
Day,
Hour,
Minute,
Month,
NMostCommon,
Second,
TransformPrimitive,
Year,
)
from featuretools.primitives.utils import PrimitivesDeserializer
from featuretools.synthesis import dfs
from featuretools.tests.testing_utils import to_pandas
from featuretools.utils.gen_utils import Library
def test_direct_from_identity(es):
    """A DirectFeature built on an identity feature pulls the parent value
    down onto the child dataframe rows."""
    device_type = Feature(es["sessions"].ww["device_type"])
    direct = DirectFeature(base_feature=device_type, child_dataframe_name="log")
    calculator = FeatureSetCalculator(
        es,
        feature_set=FeatureSet([direct]),
        time_last=None,
    )
    result = to_pandas(calculator.run(np.array([0, 5])), index="id", sort_index=True)
    expected = ["0", "1"] if es.dataframe_type == Library.SPARK else [0, 1]
    assert result[direct.get_name()].tolist() == expected
def test_direct_from_column(es):
    """Building the DirectFeature from a woodwork column behaves exactly like
    building it from an identity feature."""
    device_type = Feature(es["sessions"].ww["device_type"])
    direct = DirectFeature(base_feature=device_type, child_dataframe_name="log")
    calculator = FeatureSetCalculator(
        es,
        feature_set=FeatureSet([direct]),
        time_last=None,
    )
    result = to_pandas(calculator.run(np.array([0, 5])), index="id", sort_index=True)
    expected = ["0", "1"] if es.dataframe_type == Library.SPARK else [0, 1]
    assert result[direct.get_name()].tolist() == expected
def test_direct_rename_multioutput(es):
    """Renaming a multi-output DirectFeature changes its name but keeps its
    base feature and target dataframe unchanged."""
    base = Feature(
        es["log"].ww["product_id"],
        parent_dataframe_name="customers",
        primitive=NMostCommon(n=2),
    )
    original = DirectFeature(base, "sessions")
    renamed = original.rename("session_test")
    assert original.unique_name() != renamed.unique_name()
    assert original.get_name() != renamed.get_name()
    original_base_name = original.base_features[0].generate_name()
    renamed_base_name = renamed.base_features[0].generate_name()
    assert original_base_name == renamed_base_name
    assert original.dataframe_name == renamed.dataframe_name
def test_direct_rename(es):
    """Renaming a DirectFeature changes its own name only; the base feature
    and target dataframe are untouched."""
    original = DirectFeature(
        base_feature=IdentityFeature(es["sessions"].ww["device_type"]),
        child_dataframe_name="log",
    )
    renamed = original.rename("session_test")
    assert original.unique_name() != renamed.unique_name()
    assert original.get_name() != renamed.get_name()
    original_base_name = original.base_features[0].generate_name()
    renamed_base_name = renamed.base_features[0].generate_name()
    assert original_base_name == renamed_base_name
    assert original.dataframe_name == renamed.dataframe_name
def test_direct_copy(games_es):
    """copy() preserves dataframe name, base features, and relationship path."""
    home_team_rel = next(
        rel
        for rel in games_es.relationships
        if rel._child_column_name == "home_team_id"
    )
    original = DirectFeature(
        IdentityFeature(games_es["teams"].ww["name"]),
        "games",
        relationship=home_team_rel,
    )
    duplicate = original.copy()
    assert duplicate.dataframe_name == original.dataframe_name
    assert duplicate.base_features == original.base_features
    assert duplicate.relationship_path == original.relationship_path
def test_direct_of_multi_output_transform_feat(es):
    """A DirectFeature over a multi-output transform primitive must yield the
    same values, slot for slot, as DirectFeatures over the equivalent
    single-output primitives (Year/Month/Day/Hour/Minute/Second)."""
    # TODO: Update to work with Dask and Spark
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Custom primitive is not compatible with Dask or Spark")
    class TestTime(TransformPrimitive):
        # Emits six numeric outputs, one per datetime component.
        name = "test_time"
        input_types = [ColumnSchema(logical_type=Datetime)]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 6
        def get_function(self):
            def test_f(x):
                times = pd.Series(x)
                units = ["year", "month", "day", "hour", "minute", "second"]
                return [times.apply(lambda x: getattr(x, unit)) for unit in units]
            return test_f
    base_feature = IdentityFeature(es["customers"].ww["signup_date"])
    join_time_split = Feature(base_feature, primitive=TestTime)
    alt_features = [
        Feature(base_feature, primitive=Year),
        Feature(base_feature, primitive=Month),
        Feature(base_feature, primitive=Day),
        Feature(base_feature, primitive=Hour),
        Feature(base_feature, primitive=Minute),
        Feature(base_feature, primitive=Second),
    ]
    fm, fl = dfs(
        entityset=es,
        target_dataframe_name="sessions",
        trans_primitives=[TestTime, Year, Month, Day, Hour, Minute, Second],
    )
    # Column names for the multi-output feature and the single-output features.
    subnames = DirectFeature(join_time_split, "sessions").get_feature_names()
    altnames = [DirectFeature(f, "sessions").get_name() for f in alt_features]
    # Each multi-output slot must equal the corresponding single-output column.
    for col1, col2 in zip(subnames, altnames):
        assert (fm[col1] == fm[col2]).all()
def test_direct_features_of_multi_output_agg_primitives(pd_es):
    """DFS must stack direct features on top of a multi-output aggregation
    primitive, and the resulting per-session values must match the expected
    top-3 product counts."""
    class ThreeMostCommonCat(AggregationPrimitive):
        # Returns the three most frequent categories, NaN-padded to length 3.
        name = "n_most_common_categorical"
        input_types = [ColumnSchema(semantic_tags={"category"})]
        return_type = ColumnSchema(semantic_tags={"category"})
        number_output_features = 3
        def get_function(self, agg_type="pandas"):
            def pd_top3(x):
                counts = x.value_counts()
                counts = counts[counts > 0]
                array = np.array(counts.index[:3])
                if len(array) < 3:
                    filler = np.full(3 - len(array), np.nan)
                    array = np.append(array, filler)
                return array
            return pd_top3
    fm, fl = dfs(
        entityset=pd_es,
        target_dataframe_name="log",
        agg_primitives=[ThreeMostCommonCat],
        trans_primitives=[],
        max_depth=3,
    )
    # At least one generated feature must have the primitive as its base.
    has_nmost_as_base = []
    for feature in fl:
        is_base = False
        if len(feature.base_features) > 0 and isinstance(
            feature.base_features[0].primitive,
            ThreeMostCommonCat,
        ):
            is_base = True
        has_nmost_as_base.append(is_base)
    assert any(has_nmost_as_base)
    # Expected per-session top-3 values; a set marks sessions with ties where
    # any member (or NaN padding) is acceptable in any slot.
    true_result_rows = []
    session_data = {
        0: ["coke zero", "car", np.nan],
        1: ["toothpaste", "brown bag", np.nan],
        2: ["brown bag", np.nan, np.nan],
        3: set(["Haribo sugar-free gummy bears", "coke zero", np.nan]),
        4: ["coke zero", np.nan, np.nan],
        5: ["taco clock", np.nan, np.nan],
    }
    # Repeat each session's expected row once per log row in that session.
    for i, count in enumerate([5, 4, 1, 2, 3, 2]):
        while count > 0:
            true_result_rows.append(session_data[i])
            count -= 1
    tempname = "sessions.N_MOST_COMMON_CATEGORICAL(log.product_id)[%s]"
    for i, row in enumerate(true_result_rows):
        for j in range(3):
            value = fm[tempname % (j)][i]
            if isinstance(row, set):
                assert pd.isnull(value) or value in row
            else:
                assert (pd.isnull(value) and pd.isnull(row[j])) or value == row[j]
def test_direct_with_invalid_init_args(diamond_es):
    """Construction fails when the relationship argument does not connect the
    child dataframe to the base feature's dataframe."""
    # Relationship whose child is not the requested child dataframe.
    region_rel = diamond_es.get_forward_relationships("customers")[0]
    with pytest.raises(
        AssertionError,
        match="child_dataframe must be the relationship child dataframe",
    ):
        DirectFeature(
            IdentityFeature(diamond_es["regions"].ww["name"]),
            "stores",
            relationship=region_rel,
        )

    # Relationship whose parent does not hold the base feature.
    store_rel = next(
        rel
        for rel in diamond_es.get_forward_relationships("transactions")
        if rel.parent_dataframe.ww.name == "stores"
    )
    with pytest.raises(
        AssertionError,
        match="Base feature must be defined on the relationship parent dataframe",
    ):
        DirectFeature(
            IdentityFeature(diamond_es["regions"].ww["name"]),
            "transactions",
            relationship=store_rel,
        )
def test_direct_with_multiple_possible_paths(games_es):
    """An ambiguous forward path must be resolved with an explicit
    relationship argument."""
    expected_error = (
        "There are multiple relationships to the base dataframe. "
        "You must specify a relationship."
    )
    with pytest.raises(RuntimeError, match=expected_error):
        DirectFeature(IdentityFeature(games_es["teams"].ww["name"]), "games")

    # Supplying the relationship resolves the ambiguity.
    home_team_rel = next(
        rel
        for rel in games_es.get_forward_relationships("games")
        if rel._child_column_name == "home_team_id"
    )
    direct = DirectFeature(
        IdentityFeature(games_es["teams"].ww["name"]),
        "games",
        relationship=home_team_rel,
    )
    assert direct.relationship_path_name() == "teams[home_team_id]"
    assert direct.get_name() == "teams[home_team_id].name"
def test_direct_with_single_possible_path(es):
    """With exactly one forward path, no relationship argument is required."""
    age_feature = IdentityFeature(es["customers"].ww["age"])
    direct = DirectFeature(age_feature, "sessions")
    assert direct.relationship_path_name() == "customers"
    assert direct.get_name() == "customers.age"
def test_direct_with_no_path(diamond_es):
    """Construction fails when no forward relationship links the child
    dataframe to the base feature's dataframe."""
    with pytest.raises(
        RuntimeError,
        match='No relationship from "regions" to "customers" found.',
    ):
        DirectFeature(IdentityFeature(diamond_es["customers"].ww["name"]), "regions")

    with pytest.raises(
        RuntimeError,
        match='No relationship from "customers" to "customers" found.',
    ):
        DirectFeature(IdentityFeature(diamond_es["customers"].ww["name"]), "customers")
def test_serialization(es):
    """get_arguments()/from_dictionary() round-trips a DirectFeature."""
    rating = IdentityFeature(es["products"].ww["rating"])
    direct = DirectFeature(rating, "log")
    log_to_products = next(
        rel
        for rel in es.get_forward_relationships("log")
        if rel.parent_dataframe.ww.name == "products"
    )
    expected_arguments = {
        "name": direct.get_name(),
        "base_feature": rating.unique_name(),
        "relationship": log_to_products.to_dictionary(),
    }
    assert expected_arguments == direct.get_arguments()
    restored = DirectFeature.from_dictionary(
        expected_arguments,
        es,
        {rating.unique_name(): rating},
        PrimitivesDeserializer(),
    )
    assert direct == restored
| 10,446 | 33.823333 | 87 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/utils.py | from inspect import signature
import pytest
from featuretools import (
FeatureBase,
calculate_feature_matrix,
dfs,
encode_features,
list_primitives,
load_features,
save_features,
)
from featuretools.primitives.base import AggregationPrimitive, PrimitiveBase
from featuretools.tests.testing_utils import make_ecommerce_entityset
PRIMITIVES = list_primitives()
def get_number_from_offset(offset):
    """Extract the numeric element of a potential offset string.

    Args:
        offset (int, str): If offset is an integer, that value is returned. If
            offset is a string, it's assumed to be an offset string of the
            format nD where n is an integer. The full run of leading digits is
            parsed, so "3d" returns 3 and "24H" returns 24.

    Note: The previous implementation returned only the first character
        (``int(offset[0])``), which silently truncated multi-digit offsets
        such as "24H" to 2. Any of the offset timespans (H for hourly, D for
        daily, etc.) can be used here; however, care should be taken by the
        user to remember what that timespan is when writing tests, as
        comparing 7 from 7D to 1 from 1W may not behave as expected.
    """
    if isinstance(offset, str):
        # Take the full leading digit run, not just the first character.
        end = 0
        while end < len(offset) and offset[end].isdigit():
            end += 1
        return int(offset[:end])
    else:
        return offset
class PrimitiveTestBase:
    """Shared sanity checks inherited by every per-primitive test class."""

    # Subclasses point this at the primitive class under test.
    primitive = None

    @pytest.fixture()
    def es(self):
        return make_ecommerce_entityset()

    def test_name_and_desc(self):
        """Primitive exposes a name, input types, and a docstring whose first
        word is one of the accepted imperative verbs."""
        assert self.primitive.name is not None
        assert self.primitive.__doc__ is not None
        summary = self.primitive.__doc__.splitlines()[0]
        leading_word = summary.split(" ", 1)[0]
        valid_verbs = [
            "Calculates",
            "Determines",
            "Transforms",
            "Computes",
            "Shifts",
            "Extracts",
            "Applies",
        ]
        assert any(verb in leading_word for verb in valid_verbs)
        assert self.primitive.input_types is not None

    def test_name_in_primitive_list(self):
        """Primitive is registered in the global primitive list."""
        assert PRIMITIVES.name.eq(self.primitive.name).any()

    def test_arg_init(self):
        """Every __init__ keyword argument is stored as an attribute."""
        instance = self.primitive()
        for name, param in signature(self.primitive.__init__).parameters.items():
            if param.default is not param.empty:
                assert hasattr(instance, name)

    def test_serialize(self, es, target_dataframe_name="log"):
        """Primitive-built features survive a save/load round trip."""
        check_serialize(primitive=self.primitive, es=es, target_dataframe_name="log")
def check_serialize(primitive, es, target_dataframe_name="log"):
    """Build features with *primitive* via DFS, verify the first feature using
    it survives a save/load round trip, and (for fast primitives) that the
    calculated feature matrix is unchanged by the round trip.

    Args:
        primitive: primitive class under test (aggregation or transform).
        es: entityset to run DFS against.
        target_dataframe_name (str): DFS target dataframe.
    """
    trans_primitives = []
    agg_primitives = []
    if issubclass(primitive, AggregationPrimitive):
        agg_primitives = [primitive]
    else:
        trans_primitives = [primitive]
    features = dfs(
        entityset=es,
        target_dataframe_name=target_dataframe_name,
        agg_primitives=agg_primitives,
        trans_primitives=trans_primitives,
        max_features=-1,
        max_depth=3,
        features_only=True,
        return_types="all",
    )
    # Find the FIRST feature built from `primitive`, either directly or as a
    # dependency. The previous version only broke out of the inner loop after
    # a dependency match, so later iterations could overwrite the first hit;
    # break out of both loops as soon as a match is found.
    feat_to_serialize = None
    for feature in features:
        if feature.primitive.__class__ == primitive:
            feat_to_serialize = feature
        else:
            for base_feature in feature.get_dependencies(deep=True):
                if base_feature.primitive.__class__ == primitive:
                    feat_to_serialize = base_feature
                    break
        if feat_to_serialize is not None:
            break
    assert feat_to_serialize is not None
    # Skip calculating feature matrix for long running primitives
    skip_primitives = ["elmo"]
    if primitive.name not in skip_primitives:
        df1 = calculate_feature_matrix([feat_to_serialize], entityset=es)
    new_feat = load_features(save_features([feat_to_serialize]))[0]
    assert isinstance(new_feat, FeatureBase)
    if primitive.name not in skip_primitives:
        df2 = calculate_feature_matrix([new_feat], entityset=es)
        assert df1.equals(df2)
def find_applicable_primitives(primitive):
    """Return (transforms, aggregations) that can stack on *primitive*'s output."""
    from featuretools.primitives.utils import (
        get_aggregation_primitives,
        get_transform_primitives,
    )

    transform_candidates = list(get_transform_primitives().values())
    aggregation_candidates = list(get_aggregation_primitives().values())
    return (
        find_stackable_primitives(transform_candidates, primitive),
        find_stackable_primitives(aggregation_candidates, primitive),
    )
def find_stackable_primitives(all_primitives, primitive):
    """Return the primitives whose sole input type equals *primitive*'s
    return type, preserving input order."""
    target = [primitive.return_type]
    return [candidate for candidate in all_primitives if candidate.input_types == target]
def valid_dfs(
    es,
    aggregations,
    transforms,
    feature_substrings,
    target_dataframe_name="log",
    multi_output=False,
    max_depth=3,
    max_features=-1,
    instance_ids=None,
):
    """Run DFS with the given primitives and verify that features matching
    ``feature_substrings`` are generated, calculable, and encodable.

    Args:
        es: entityset to run DFS against.
        aggregations / transforms: primitive lists forwarded to ``dfs``.
        feature_substrings: substring(s) that must appear in at least one
            feature name; primitive classes are accepted and converted to
            their upper-cased names.
        target_dataframe_name (str): DFS target dataframe.
        multi_output (bool): skip the column-count assertion for
            multi-output primitives.
        max_depth (int) / max_features (int): forwarded to ``dfs``.
        instance_ids: rows to calculate; defaults to [0, 1, 2, 3].
            (Was a mutable default argument ``[0, 1, 2, 3]``.)

    Raises:
        ValueError: if no generated feature name contains any substring.
    """
    if instance_ids is None:
        instance_ids = [0, 1, 2, 3]
    if not isinstance(feature_substrings, list):
        feature_substrings = [feature_substrings]
    # Accept primitive classes alongside plain strings. The previous check,
    # any(issubclass(x, PrimitiveBase) ...), raised TypeError for string
    # entries because issubclass requires a class as its first argument.
    feature_substrings = [
        x.name.upper() if isinstance(x, type) and issubclass(x, PrimitiveBase) else x
        for x in feature_substrings
    ]
    features = dfs(
        entityset=es,
        target_dataframe_name=target_dataframe_name,
        agg_primitives=aggregations,
        trans_primitives=transforms,
        max_features=max_features,
        max_depth=max_depth,
        features_only=True,
    )
    # Each matching feature is collected once, even if several substrings
    # match it (the old loop appended it once per matching substring).
    applicable_features = [
        feat
        for feat in features
        if any(sub in feat.get_name() for sub in feature_substrings)
    ]
    if len(applicable_features) == 0:
        raise ValueError(
            "No feature names with %s, verify the name attribute \
            is defined and/or generate_name() is defined to \
            return %s "
            % (feature_substrings, feature_substrings),
        )
    df = calculate_feature_matrix(
        entityset=es,
        features=applicable_features,
        instance_ids=instance_ids,
        n_jobs=1,
    )
    encode_features(df, applicable_features)
    # TODO: check the multi_output shape by checking
    # feature.number_output_features for each feature
    # and comparing it with the matrix shape
    if not multi_output:
        assert len(applicable_features) == df.shape[1]
    return
| 6,621 | 31.62069 | 107 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_feature_serialization.py | import os
import boto3
import pandas as pd
import pytest
from pympler.asizeof import asizeof
from smart_open import open
from woodwork.column_schema import ColumnSchema
from featuretools import (
AggregationFeature,
DirectFeature,
EntitySet,
Feature,
GroupByTransformFeature,
IdentityFeature,
TransformFeature,
dfs,
feature_base,
load_features,
primitives,
save_features,
)
from featuretools.feature_base import FeatureOutputSlice
from featuretools.feature_base.cache import feature_cache
from featuretools.feature_base.features_deserializer import FeaturesDeserializer
from featuretools.feature_base.features_serializer import FeaturesSerializer
from featuretools.primitives import (
Count,
CumSum,
Day,
DistanceToHoliday,
Haversine,
IsIn,
Max,
Mean,
Min,
Mode,
Month,
MultiplyNumericScalar,
Negate,
NMostCommon,
NumberOfCommonWords,
NumCharacters,
NumUnique,
NumWords,
PercentTrue,
Skew,
Std,
Sum,
TransformPrimitive,
Weekday,
Year,
)
from featuretools.primitives.base import AggregationPrimitive
from featuretools.tests.testing_utils import check_names
from featuretools.utils.gen_utils import Library
from featuretools.version import ENTITYSET_SCHEMA_VERSION, FEATURES_SCHEMA_VERSION
BUCKET_NAME = "test-bucket"
WRITE_KEY_NAME = "test-key"
TEST_S3_URL = "s3://{}/{}".format(BUCKET_NAME, WRITE_KEY_NAME)
TEST_FILE = "test_feature_serialization_feature_schema_{}_entityset_schema_{}_2022_12_28.json".format(
FEATURES_SCHEMA_VERSION,
ENTITYSET_SCHEMA_VERSION,
)
S3_URL = "s3://featuretools-static/" + TEST_FILE
URL = "https://featuretools-static.s3.amazonaws.com/" + TEST_FILE
TEST_CONFIG = "CheckConfigPassesOn"
TEST_KEY = "test_access_key_features"
@pytest.fixture(autouse=True)
def reset_dfs_cache():
    """Disable and clear the DFS feature cache before every test in this
    module so cached features cannot leak between tests."""
    feature_cache.enabled = False
    feature_cache.clear_all()
def assert_features(original, deserialized):
    """Pairwise-compare two feature lists for serialization equivalence."""
    for expected, actual in zip(original, deserialized):
        assert expected.unique_name() == actual.unique_name()
        assert expected.entityset == actual.entityset
        # Identity and direct features carry no primitive, so there is no
        # series library to compare for them.
        has_primitive = not isinstance(expected, (IdentityFeature, DirectFeature))
        if has_primitive:
            assert (
                expected.primitive.series_library == actual.primitive.series_library
            )
def pickle_features_test_helper(es_size, features_original, dir_path):
    """Round-trip ``features_original`` through the three save/load paths
    (path string, open file handle, in-memory string) and assert each
    deserialized copy matches the original.

    Args:
        es_size: byte size of the source entityset; serialized output must be
            strictly smaller.
        features_original: list of features to serialize.
        dir_path: directory to write the temporary feature file into.
    """
    filepath = os.path.join(dir_path, "test_feature")

    # Path-based save/load.
    save_features(features_original, filepath)
    features_deserializedA = load_features(filepath)
    assert os.path.getsize(filepath) < es_size
    os.remove(filepath)

    # File-handle-based save/load. The previous version leaked the read handle
    # via load_features(open(filepath)); use a context manager so the file is
    # always closed.
    with open(filepath, "w") as f:
        save_features(features_original, f)
    with open(filepath) as f:
        features_deserializedB = load_features(f)
    assert os.path.getsize(filepath) < es_size
    os.remove(filepath)

    # In-memory string save/load.
    features = save_features(features_original)
    features_deserializedC = load_features(features)
    assert asizeof(features) < es_size

    features_deserialized_options = [
        features_deserializedA,
        features_deserializedB,
        features_deserializedC,
    ]
    for features_deserialized in features_deserialized_options:
        assert_features(features_original, features_deserialized)
def test_pickle_features(es, tmp_path):
    """Round-trip features built on ``es`` through every save/load path."""
    feats = dfs(entityset=es, target_dataframe_name="sessions", features_only=True)
    pickle_features_test_helper(asizeof(es), feats, str(tmp_path))
def test_pickle_features_with_custom_primitive(pd_es, tmp_path):
    """Features built with a user-defined primitive survive serialization."""

    class NewMax(AggregationPrimitive):
        name = "new_max"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})

    feats = dfs(
        entityset=pd_es,
        target_dataframe_name="sessions",
        agg_primitives=["Last", "Mean", NewMax],
        features_only=True,
    )
    assert any(isinstance(feat.primitive, NewMax) for feat in feats)
    pickle_features_test_helper(asizeof(pd_es), feats, str(tmp_path))
def test_serialized_renamed_features(es):
    """For every feature type, renaming must survive a serialize/deserialize
    round trip (single-output features get the new name directly; multi-output
    features get indexed "[i]" suffixes)."""
    def serialize_name_unchanged(original):
        # Rename, then check names are intact both before and after the
        # serializer/deserializer round trip.
        new_name = "MyFeature"
        original_names = original.get_feature_names()
        renamed = original.rename(new_name)
        new_names = (
            [new_name]
            if len(original_names) == 1
            else [new_name + "[{}]".format(i) for i in range(len(original_names))]
        )
        check_names(renamed, new_name, new_names)
        serializer = FeaturesSerializer([renamed])
        serialized = serializer.to_dict()
        deserializer = FeaturesDeserializer(serialized)
        deserialized = deserializer.to_list()[0]
        check_names(deserialized, new_name, new_names)
    # One representative feature per feature type.
    identity_original = IdentityFeature(es["log"].ww["value"])
    assert identity_original.get_name() == "value"
    value = IdentityFeature(es["log"].ww["value"])
    primitive = primitives.Max()
    agg_original = AggregationFeature(value, "customers", primitive)
    assert agg_original.get_name() == "MAX(log.value)"
    direct_original = DirectFeature(
        IdentityFeature(es["customers"].ww["age"]),
        "sessions",
    )
    assert direct_original.get_name() == "customers.age"
    primitive = primitives.MultiplyNumericScalar(value=2)
    transform_original = TransformFeature(value, primitive)
    assert transform_original.get_name() == "value * 2"
    zipcode = IdentityFeature(es["log"].ww["zipcode"])
    primitive = CumSum()
    groupby_original = feature_base.GroupByTransformFeature(value, primitive, zipcode)
    assert groupby_original.get_name() == "CUM_SUM(value) by zipcode"
    multioutput_original = Feature(
        es["log"].ww["product_id"],
        parent_dataframe_name="customers",
        primitive=NMostCommon(n=2),
    )
    assert multioutput_original.get_name() == "N_MOST_COMMON(log.product_id, n=2)"
    featureslice_original = feature_base.FeatureOutputSlice(multioutput_original, 0)
    assert featureslice_original.get_name() == "N_MOST_COMMON(log.product_id, n=2)[0]"
    feature_type_list = [
        identity_original,
        agg_original,
        direct_original,
        transform_original,
        groupby_original,
        multioutput_original,
        featureslice_original,
    ]
    for feature_type in feature_type_list:
        serialize_name_unchanged(feature_type)
@pytest.fixture
def s3_client():
    """Yield a moto-mocked boto3 S3 resource, restoring os.environ afterwards
    (moto mutates AWS-related environment variables)."""
    # Snapshot the environment before moto touches it.
    _environ = os.environ.copy()
    from moto import mock_s3

    with mock_s3():
        s3 = boto3.resource("s3")
        yield s3
    # Restore the pre-test environment exactly.
    os.environ.clear()
    os.environ.update(_environ)
@pytest.fixture
def s3_bucket(s3_client, region="us-east-2"):
    """Create the shared public test bucket on the mocked S3 and yield it."""
    s3_client.create_bucket(
        Bucket=BUCKET_NAME,
        ACL="public-read-write",
        CreateBucketConfiguration={"LocationConstraint": region},
    )
    yield s3_client.Bucket(BUCKET_NAME)
def test_serialize_features_mock_s3(es, s3_client, s3_bucket):
    """Features can be saved to and loaded from a (mocked) S3 bucket."""
    saved = dfs(entityset=es, target_dataframe_name="sessions", features_only=True)
    save_features(saved, TEST_S3_URL)
    uploaded_key = next(iter(s3_bucket.objects.all())).key
    s3_client.ObjectAcl(BUCKET_NAME, uploaded_key).put(ACL="public-read-write")
    loaded = load_features(TEST_S3_URL)
    assert_features(saved, loaded)
def test_serialize_features_mock_anon_s3(es, s3_client, s3_bucket):
    """Features round-trip through a (mocked) S3 bucket with anonymous access
    (profile_name=False) on both save and load."""
    saved = dfs(entityset=es, target_dataframe_name="sessions", features_only=True)
    save_features(saved, TEST_S3_URL, profile_name=False)
    uploaded_key = next(iter(s3_bucket.objects.all())).key
    s3_client.ObjectAcl(BUCKET_NAME, uploaded_key).put(ACL="public-read-write")
    loaded = load_features(TEST_S3_URL, profile_name=False)
    assert_features(saved, loaded)
@pytest.mark.parametrize("profile_name", ["test", False])
def test_s3_test_profile(es, s3_client, s3_bucket, setup_test_profile, profile_name):
    """Features saved under the "test" AWS profile load back with either a
    matching profile name or anonymous access."""
    saved = dfs(entityset=es, target_dataframe_name="sessions", features_only=True)
    save_features(saved, TEST_S3_URL, profile_name="test")
    uploaded_key = next(iter(s3_bucket.objects.all())).key
    s3_client.ObjectAcl(BUCKET_NAME, uploaded_key).put(ACL="public-read-write")
    loaded = load_features(TEST_S3_URL, profile_name=profile_name)
    assert_features(saved, loaded)
@pytest.mark.parametrize("url,profile_name", [(S3_URL, False), (URL, None)])
def test_deserialize_features_s3(pd_es, url, profile_name):
    """Features deserialized from the published static S3/HTTP fixture must
    match features freshly built by DFS with the same primitive set."""
    agg_primitives = [
        Sum,
        Std,
        Max,
        Skew,
        Min,
        Mean,
        Count,
        PercentTrue,
        NumUnique,
        Mode,
    ]
    trans_primitives = [Day, Year, Month, Weekday, Haversine, NumWords, NumCharacters]
    features_original = dfs(
        target_dataframe_name="sessions",
        entityset=pd_es,
        features_only=True,
        agg_primitives=agg_primitives,
        trans_primitives=trans_primitives,
    )
    features_deserialized = load_features(url, profile_name=profile_name)
    assert_features(features_original, features_deserialized)
def test_serialize_url(es):
    """save_features refuses URL targets with a ValueError."""
    feats = dfs(entityset=es, target_dataframe_name="sessions", features_only=True)
    with pytest.raises(ValueError, match="Writing to URLs is not supported"):
        save_features(feats, URL)
def test_custom_feature_names_retained_during_serialization(pd_es, tmp_path):
    """Names assigned via set_feature_names on multi-output transform,
    groupby-transform, and aggregation features — and names derived from a
    sliced output — must survive a save/load round trip."""
    class MultiCumulative(TransformPrimitive):
        # Minimal three-output transform primitive used only for naming.
        name = "multi_cum_sum"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 3
    multi_output_trans_feat = Feature(
        pd_es["log"].ww["value"],
        primitive=MultiCumulative,
    )
    groupby_trans_feat = GroupByTransformFeature(
        pd_es["log"].ww["value"],
        primitive=MultiCumulative,
        groupby=pd_es["log"].ww["product_id"],
    )
    multi_output_agg_feat = Feature(
        pd_es["log"].ww["product_id"],
        parent_dataframe_name="customers",
        primitive=NMostCommon(n=2),
    )
    # NOTE(review): `slice` shadows the builtin of the same name locally.
    slice = FeatureOutputSlice(multi_output_trans_feat, 1)
    stacked_feat = Feature(slice, primitive=Negate)
    trans_names = ["cumulative_sum", "cumulative_max", "cumulative_min"]
    multi_output_trans_feat.set_feature_names(trans_names)
    groupby_trans_names = ["grouped_sum", "grouped_max", "grouped_min"]
    groupby_trans_feat.set_feature_names(groupby_trans_names)
    agg_names = ["first_most_common", "second_most_common"]
    multi_output_agg_feat.set_feature_names(agg_names)
    features = [
        multi_output_trans_feat,
        multi_output_agg_feat,
        groupby_trans_feat,
        stacked_feat,
    ]
    file = os.path.join(tmp_path, "features.json")
    save_features(features, file)
    deserialized_features = load_features(file)
    new_trans, new_agg, new_groupby, new_stacked = deserialized_features
    assert new_trans.get_feature_names() == trans_names
    assert new_agg.get_feature_names() == agg_names
    assert new_groupby.get_feature_names() == groupby_trans_names
    # The stacked feature's name derives from slot 1 ("cumulative_max").
    assert new_stacked.get_feature_names() == ["-(cumulative_max)"]
def test_deserializer_uses_common_primitive_instances_no_args(es, tmp_path):
    """After deserialization, features that share a no-argument primitive must
    share a single primitive instance, mirroring DFS behavior."""
    features = dfs(
        entityset=es,
        target_dataframe_name="products",
        features_only=True,
        agg_primitives=["sum"],
        trans_primitives=["is_null"],
    )
    is_null_features = [f for f in features if f.primitive.name == "is_null"]
    sum_features = [f for f in features if f.primitive.name == "sum"]
    # Make sure we have multiple features of each type
    assert len(is_null_features) > 1
    assert len(sum_features) > 1
    # DFS should use the same primitive instance for all features that share a primitive
    is_null_primitive = is_null_features[0].primitive
    sum_primitive = sum_features[0].primitive
    assert all([f.primitive is is_null_primitive for f in is_null_features])
    assert all([f.primitive is sum_primitive for f in sum_features])
    file = os.path.join(tmp_path, "features.json")
    save_features(features, file)
    deserialized_features = load_features(file)
    new_is_null_features = [
        f for f in deserialized_features if f.primitive.name == "is_null"
    ]
    new_sum_features = [f for f in deserialized_features if f.primitive.name == "sum"]
    # After deserialization all features that share a primitive should use the same primitive instance
    new_is_null_primitive = new_is_null_features[0].primitive
    new_sum_primitive = new_sum_features[0].primitive
    assert all([f.primitive is new_is_null_primitive for f in new_is_null_features])
    assert all([f.primitive is new_sum_primitive for f in new_sum_features])
def test_deserializer_uses_common_primitive_instances_with_args(es, tmp_path):
    """After deserialization, features built from the same configured
    primitive instance must again share one instance, and the instance must
    retain its constructor arguments (scalar, multi-arg, and list-arg)."""
    # Single argument
    scalar1 = MultiplyNumericScalar(value=1)
    scalar5 = MultiplyNumericScalar(value=5)
    features = dfs(
        entityset=es,
        target_dataframe_name="products",
        features_only=True,
        agg_primitives=["sum"],
        trans_primitives=[scalar1, scalar5],
    )
    scalar1_features = [
        f
        for f in features
        if f.primitive.name == "multiply_numeric_scalar" and " * 1" in f.get_name()
    ]
    scalar5_features = [
        f
        for f in features
        if f.primitive.name == "multiply_numeric_scalar" and " * 5" in f.get_name()
    ]
    # Make sure we have multiple features of each type
    assert len(scalar1_features) > 1
    assert len(scalar5_features) > 1
    # DFS should use the passed in primitive instance for all features
    assert all([f.primitive is scalar1 for f in scalar1_features])
    assert all([f.primitive is scalar5 for f in scalar5_features])
    file = os.path.join(tmp_path, "features.json")
    save_features(features, file)
    deserialized_features = load_features(file)
    new_scalar1_features = [
        f
        for f in deserialized_features
        if f.primitive.name == "multiply_numeric_scalar" and " * 1" in f.get_name()
    ]
    new_scalar5_features = [
        f
        for f in deserialized_features
        if f.primitive.name == "multiply_numeric_scalar" and " * 5" in f.get_name()
    ]
    # After deserialization all features that share a primitive should use the same primitive instance
    new_scalar1_primitive = new_scalar1_features[0].primitive
    new_scalar5_primitive = new_scalar5_features[0].primitive
    assert all([f.primitive is new_scalar1_primitive for f in new_scalar1_features])
    assert all([f.primitive is new_scalar5_primitive for f in new_scalar5_features])
    # Constructor arguments must survive the round trip.
    assert new_scalar1_primitive.value == 1
    assert new_scalar5_primitive.value == 5
    # Test primitive with multiple args - pandas only due to primitive compatibility
    if es.dataframe_type == Library.PANDAS:
        distance_to_holiday = DistanceToHoliday(
            holiday="Victoria Day",
            country="Canada",
        )
        features = dfs(
            entityset=es,
            target_dataframe_name="customers",
            features_only=True,
            agg_primitives=[],
            trans_primitives=[distance_to_holiday],
        )
        distance_features = [
            f for f in features if f.primitive.name == "distance_to_holiday"
        ]
        assert len(distance_features) > 1
        # DFS should use the passed in primitive instance for all features
        assert all([f.primitive is distance_to_holiday for f in distance_features])
        file = os.path.join(tmp_path, "distance_features.json")
        save_features(distance_features, file)
        new_distance_features = load_features(file)
        # After deserialization all features that share a primitive should use the same primitive instance
        new_distance_primitive = new_distance_features[0].primitive
        assert all(
            [f.primitive is new_distance_primitive for f in new_distance_features],
        )
        assert new_distance_primitive.holiday == "Victoria Day"
        assert new_distance_primitive.country == "Canada"
    # Test primitive with list arg
    is_in = IsIn(list_of_outputs=[5, True, "coke zero"])
    features = dfs(
        entityset=es,
        target_dataframe_name="customers",
        features_only=True,
        agg_primitives=[],
        trans_primitives=[is_in],
    )
    is_in_features = [f for f in features if f.primitive.name == "isin"]
    assert len(is_in_features) > 1
    # DFS should use the passed in primitive instance for all features
    assert all([f.primitive is is_in for f in is_in_features])
    file = os.path.join(tmp_path, "distance_features.json")
    save_features(is_in_features, file)
    new_is_in_features = load_features(file)
    # After deserialization all features that share a primitive should use the same primitive instance
    new_is_in_primitive = new_is_in_features[0].primitive
    assert all([f.primitive is new_is_in_primitive for f in new_is_in_features])
    assert new_is_in_primitive.list_of_outputs == [5, True, "coke zero"]
def test_can_serialize_word_set_for_number_of_common_words_feature(pd_es):
    """NumberOfCommonWords takes ``word_set`` as a set, which is not
    JSON-serializable; saving the feature must convert it to a list internally.
    """
    vocabulary = {"hello", "my"}
    frame = pd.DataFrame({"text": ["hello my name is hi"]})
    es = EntitySet()
    es.add_dataframe(dataframe_name="df", index="idx", dataframe=frame, make_index=True)
    primitive = NumberOfCommonWords(word_set=vocabulary)
    _, feature_defs = dfs(
        entityset=es,
        target_dataframe_name="df",
        trans_primitives=[primitive],
    )
    # Serialization should succeed despite the set-valued primitive argument.
    save_features([feature_defs[-1]])
| 18,248 | 32.794444 | 106 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_groupby_transform_primitives.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools import (
Feature,
GroupByTransformFeature,
IdentityFeature,
calculate_feature_matrix,
feature_base,
)
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator,
)
from featuretools.primitives import CumCount, CumMax, CumMean, CumMin, CumSum, Last
from featuretools.primitives.base import TransformPrimitive
from featuretools.synthesis import dfs
from featuretools.tests.testing_utils import feature_with_name
class TestCumCount:
    """Unit tests for the CumCount primitive's raw function on pandas groups."""

    primitive = CumCount

    def test_order(self):
        # Counts restart per group and follow appearance order within it.
        keys = pd.Series(["a", "b", "a"])
        expected = ([1, 2], [1])
        func = self.primitive().get_function()
        for (_, members), want in zip(keys.groupby(keys), expected):
            np.testing.assert_array_equal(func(members), want)

    def test_regular(self):
        keys = pd.Series(["a", "b", "a", "c", "d", "b"])
        expected = ([1, 2], [1, 2], [1], [1])
        func = self.primitive().get_function()
        for (_, members), want in zip(keys.groupby(keys), expected):
            np.testing.assert_array_equal(func(members), want)

    def test_discrete(self):
        keys = pd.Series(["a", "b", "a", "c", "d", "b"])
        expected = ([1, 2], [1, 2], [1], [1])
        func = self.primitive().get_function()
        for (_, members), want in zip(keys.groupby(keys), expected):
            np.testing.assert_array_equal(func(members), want)
class TestCumSum:
    """Unit tests for the CumSum primitive's raw function on pandas groups."""

    primitive = CumSum

    def test_order(self):
        values = pd.Series([1, 2, 2])
        keys = pd.Series(["a", "b", "a"])
        expected = ([1, 3], [2])
        func = self.primitive().get_function()
        for (_, chunk), want in zip(values.groupby(keys), expected):
            np.testing.assert_array_equal(func(chunk), want)

    def test_regular(self):
        values = pd.Series([101, 102, 103, 104, 105, 106])
        keys = pd.Series(["a", "b", "a", "c", "d", "b"])
        expected = ([101, 204], [102, 208], [104], [105])
        func = self.primitive().get_function()
        for (_, chunk), want in zip(values.groupby(keys), expected):
            np.testing.assert_array_equal(func(chunk), want)
class TestCumMean:
    """Unit tests for the CumMean primitive's raw function on pandas groups."""

    primitive = CumMean

    def test_order(self):
        values = pd.Series([1, 2, 2])
        keys = pd.Series(["a", "b", "a"])
        expected = ([1, 1.5], [2])
        func = self.primitive().get_function()
        for (_, chunk), want in zip(values.groupby(keys), expected):
            np.testing.assert_array_equal(func(chunk), want)

    def test_regular(self):
        values = pd.Series([101, 102, 103, 104, 105, 106])
        keys = pd.Series(["a", "b", "a", "c", "d", "b"])
        expected = ([101, 102], [102, 104], [104], [105])
        func = self.primitive().get_function()
        for (_, chunk), want in zip(values.groupby(keys), expected):
            np.testing.assert_array_equal(func(chunk), want)
class TestCumMax:
    """Unit tests for the CumMax primitive's raw function on pandas groups."""

    primitive = CumMax

    def test_order(self):
        values = pd.Series([1, 2, 2])
        keys = pd.Series(["a", "b", "a"])
        expected = ([1, 2], [2])
        func = self.primitive().get_function()
        for (_, chunk), want in zip(values.groupby(keys), expected):
            np.testing.assert_array_equal(func(chunk), want)

    def test_regular(self):
        values = pd.Series([101, 102, 103, 104, 105, 106])
        keys = pd.Series(["a", "b", "a", "c", "d", "b"])
        expected = ([101, 103], [102, 106], [104], [105])
        func = self.primitive().get_function()
        for (_, chunk), want in zip(values.groupby(keys), expected):
            np.testing.assert_array_equal(func(chunk), want)
class TestCumMin:
    """Unit tests for the CumMin primitive's raw function on pandas groups."""

    primitive = CumMin

    def test_order(self):
        values = pd.Series([1, 2, 2])
        keys = pd.Series(["a", "b", "a"])
        expected = ([1, 1], [2])
        func = self.primitive().get_function()
        for (_, chunk), want in zip(values.groupby(keys), expected):
            np.testing.assert_array_equal(func(chunk), want)

    def test_regular(self):
        values = pd.Series([101, 102, 103, 104, 105, 106, 100])
        keys = pd.Series(["a", "b", "a", "c", "d", "b", "a"])
        expected = ([101, 101, 100], [102, 102], [104], [105])
        func = self.primitive().get_function()
        for (_, chunk), want in zip(values.groupby(keys), expected):
            np.testing.assert_array_equal(func(chunk), want)
def test_cum_sum(pd_es):
    """CUM_SUM of log values grouped by the session's device type (a direct
    feature used as the groupby column)."""
    value = IdentityFeature(pd_es["log"].ww["value"])
    device_type = Feature(
        IdentityFeature(pd_es["sessions"].ww["device_type"]),
        dataframe_name="log",
    )
    cum_sum = Feature(value, groupby=device_type, primitive=CumSum)
    fm = calculate_feature_matrix(
        entityset=pd_es,
        features=[cum_sum],
        instance_ids=range(15),
    )
    actual = fm[cum_sum.get_name()].values
    assert len(actual) == 15
    expected = [0, 5, 15, 30, 50, 0, 1, 3, 6, 6, 50, 55, 55, 62, 76]
    for want, got in zip(expected, actual):
        assert want == got
def test_cum_min(pd_es):
    """CUM_MIN of log values grouped by session_id."""
    value = IdentityFeature(pd_es["log"].ww["value"])
    cum_min = Feature(
        value,
        groupby=IdentityFeature(pd_es["log"].ww["session_id"]),
        primitive=CumMin,
    )
    fm = calculate_feature_matrix(
        entityset=pd_es,
        features=[cum_min],
        instance_ids=range(15),
    )
    actual = fm[cum_min.get_name()].values
    assert len(actual) == 15
    # Every session's first value is 0, so the running minimum stays 0.
    expected = [0] * 15
    for want, got in zip(expected, actual):
        assert want == got
def test_cum_max(pd_es):
    """CUM_MAX of log values grouped by session_id."""
    value = IdentityFeature(pd_es["log"].ww["value"])
    cum_max = Feature(
        value,
        groupby=IdentityFeature(pd_es["log"].ww["session_id"]),
        primitive=CumMax,
    )
    fm = calculate_feature_matrix(
        entityset=pd_es,
        features=[cum_max],
        instance_ids=range(15),
    )
    actual = fm[cum_max.get_name()].values
    assert len(actual) == 15
    expected = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
    for want, got in zip(expected, actual):
        assert want == got
def test_cum_sum_group_on_nan(pd_es):
    """CumSum grouped on a column containing NaNs: rows whose group key is NaN
    must come back as NaN in the feature output."""
    log_value_feat = IdentityFeature(pd_es["log"].ww["value"])
    # Overwrite product_id so rows 11-14 carry a NaN group key.
    # NOTE(review): direct column assignment and the chained assignment below
    # mutate the entityset's dataframe in place — presumably fine for this
    # fixture, but would break under pandas copy-on-write; confirm.
    pd_es["log"]["product_id"] = (
        ["coke zero"] * 3
        + ["car"] * 2
        + ["toothpaste"] * 3
        + ["brown bag"] * 2
        + ["shoes"]
        + [np.nan] * 4
        + ["coke_zero"] * 2
    )
    pd_es["log"]["value"][16] = 10
    cum_sum = Feature(
        log_value_feat,
        groupby=IdentityFeature(pd_es["log"].ww["product_id"]),
        primitive=CumSum,
    )
    features = [cum_sum]
    df = calculate_feature_matrix(
        entityset=pd_es,
        features=features,
        instance_ids=range(17),
    )
    cvalues = df[cum_sum.get_name()].values
    assert len(cvalues) == 17
    # Indices 11-15 correspond to the NaN product_id rows above.
    cum_sum_values = [
        0,
        5,
        15,
        15,
        35,
        0,
        1,
        3,
        3,
        3,
        0,
        np.nan,
        np.nan,
        np.nan,
        np.nan,
        np.nan,
        10,
    ]
    assert len(cvalues) == len(cum_sum_values)
    # NaN != NaN, so NaN entries need an explicit isnan comparison.
    for i, v in enumerate(cum_sum_values):
        if np.isnan(v):
            assert np.isnan(cvalues[i])
        else:
            assert v == cvalues[i]
def test_cum_sum_numpy_group_on_nan(pd_es):
    """Same NaN-group scenario as test_cum_sum_group_on_nan, but with a custom
    primitive whose function returns a numpy array instead of a Series."""
    class CumSumNumpy(TransformPrimitive):
        """Returns the cumulative sum after grouping"""
        name = "cum_sum"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        uses_full_dataframe = True
        def get_function(self):
            def cum_sum(values):
                # .values strips the index, exercising the array code path
                return values.cumsum().values
            return cum_sum
    log_value_feat = IdentityFeature(pd_es["log"].ww["value"])
    # NOTE(review): in-place mutation of the entityset dataframe (including the
    # chained assignment below) — would break under pandas copy-on-write.
    pd_es["log"]["product_id"] = (
        ["coke zero"] * 3
        + ["car"] * 2
        + ["toothpaste"] * 3
        + ["brown bag"] * 2
        + ["shoes"]
        + [np.nan] * 4
        + ["coke_zero"] * 2
    )
    pd_es["log"]["value"][16] = 10
    cum_sum = Feature(
        log_value_feat,
        groupby=IdentityFeature(pd_es["log"].ww["product_id"]),
        primitive=CumSumNumpy,
    )
    assert cum_sum.get_name() == "CUM_SUM(value) by product_id"
    features = [cum_sum]
    df = calculate_feature_matrix(
        entityset=pd_es,
        features=features,
        instance_ids=range(17),
    )
    cvalues = df[cum_sum.get_name()].values
    assert len(cvalues) == 17
    # Indices 11-15 correspond to the NaN product_id rows above.
    cum_sum_values = [
        0,
        5,
        15,
        15,
        35,
        0,
        1,
        3,
        3,
        3,
        0,
        np.nan,
        np.nan,
        np.nan,
        np.nan,
        np.nan,
        10,
    ]
    assert len(cvalues) == len(cum_sum_values)
    # NaN != NaN, so NaN entries need an explicit isnan comparison.
    for i, v in enumerate(cum_sum_values):
        if np.isnan(v):
            assert np.isnan(cvalues[i])
        else:
            assert v == cvalues[i]
def test_cum_handles_uses_full_dataframe(pd_es):
    """Cumulative primitives set uses_full_dataframe, so the answer for an
    instance must not depend on which other instance ids were requested."""

    def assert_consistent(feature):
        calculator = FeatureSetCalculator(
            pd_es,
            feature_set=FeatureSet([feature]),
            time_last=None,
        )
        first = calculator.run(np.array([0, 1, 2]))
        second = calculator.run(np.array([2, 4]))
        # Instance 2 appears in both requests and must produce equal rows.
        assert (second.loc[2] == first.loc[2]).all()

    for prim in (CumSum, CumMean, CumMax, CumMin):
        assert_consistent(
            Feature(
                pd_es["log"].ww["value"],
                groupby=IdentityFeature(pd_es["log"].ww["session_id"]),
                primitive=prim,
            ),
        )
    assert_consistent(
        Feature(
            pd_es["log"].ww["product_id"],
            groupby=Feature(pd_es["log"].ww["product_id"]),
            primitive=CumCount,
        ),
    )
def test_cum_mean(pd_es):
    """CUM_MEAN of log values grouped by session_id."""
    value = IdentityFeature(pd_es["log"].ww["value"])
    cum_mean = Feature(
        value,
        groupby=IdentityFeature(pd_es["log"].ww["session_id"]),
        primitive=CumMean,
    )
    fm = calculate_feature_matrix(
        entityset=pd_es,
        features=[cum_mean],
        instance_ids=range(15),
    )
    actual = fm[cum_mean.get_name()].values
    assert len(actual) == 15
    expected = [0, 2.5, 5, 7.5, 10, 0, 0.5, 1, 1.5, 0, 0, 2.5, 0, 3.5, 7]
    for want, got in zip(expected, actual):
        assert want == got
def test_cum_count(pd_es):
    """CUM_COUNT of product_id grouped by itself counts occurrences so far."""
    product = IdentityFeature(pd_es["log"].ww["product_id"])
    cum_count = Feature(
        product,
        groupby=IdentityFeature(pd_es["log"].ww["product_id"]),
        primitive=CumCount,
    )
    fm = calculate_feature_matrix(
        entityset=pd_es,
        features=[cum_count],
        instance_ids=range(15),
    )
    actual = fm[cum_count.get_name()].values
    assert len(actual) == 15
    expected = [1, 2, 3, 1, 2, 1, 2, 3, 1, 2, 1, 4, 5, 6, 7]
    for want, got in zip(expected, actual):
        assert want == got
def test_rename(pd_es):
    """rename() yields a distinct copy with a new name but the same base
    features and dataframe."""
    original = Feature(
        IdentityFeature(pd_es["log"].ww["product_id"]),
        groupby=IdentityFeature(pd_es["log"].ww["product_id"]),
        primitive=CumCount,
    )
    renamed = original.rename("rename_test")
    assert original.unique_name() != renamed.unique_name()
    assert original.get_name() != renamed.get_name()
    for base_orig, base_copy in zip(original.base_features, renamed.base_features):
        assert base_orig.generate_name() == base_copy.generate_name()
    assert original.dataframe_name == renamed.dataframe_name
def test_groupby_no_data(pd_es):
    """Aggregating a groupby transform at a cutoff time before any data
    exists should yield null values, not an error."""
    cum_count = Feature(
        IdentityFeature(pd_es["log"].ww["product_id"]),
        groupby=IdentityFeature(pd_es["log"].ww["product_id"]),
        primitive=CumCount,
    )
    last = Feature(cum_count, parent_dataframe_name="customers", primitive=Last)
    fm = calculate_feature_matrix(
        entityset=pd_es,
        features=[last],
        cutoff_time=pd.Timestamp("2011-04-08"),
    )
    results = fm[last.get_name()].values
    assert len(results) == 2
    for value in results:
        assert pd.isnull(value)
def test_groupby_uses_calc_time(pd_es):
    """A groupby transform primitive with uses_calc_time=True must receive the
    cutoff time as its ``time`` keyword argument."""
    def projected_amount_left(amount, timestamp, time=None):
        # cumulative sum of amount, with timedelta * constant subtracted
        delta = time - timestamp
        delta_seconds = delta / np.timedelta64(1, "s")
        return amount.cumsum() - (delta_seconds)
    class ProjectedAmountRemaining(TransformPrimitive):
        name = "projected_amount_remaining"
        uses_calc_time = True
        input_types = [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        uses_full_dataframe = True
        def get_function(self):
            return projected_amount_left
    time_since_product = GroupByTransformFeature(
        [
            IdentityFeature(pd_es["log"].ww["value"]),
            IdentityFeature(pd_es["log"].ww["datetime"]),
        ],
        groupby=IdentityFeature(pd_es["log"].ww["product_id"]),
        primitive=ProjectedAmountRemaining,
    )
    df = calculate_feature_matrix(
        entityset=pd_es,
        features=[time_since_product],
        cutoff_time=pd.Timestamp("2011-04-10 11:10:30"),
    )
    # Expected values depend on the fixture's timestamps relative to the
    # cutoff; the trailing NaNs are rows with a NaN group key.
    answers = [
        -88830,
        -88819,
        -88803,
        -88797,
        -88771,
        -88770,
        -88760,
        -88749,
        -88740,
        -88227,
        -1830,
        -1809,
        -1750,
        -1740,
        -1723,
        np.nan,
        np.nan,
    ]
    # NaN != NaN, so null entries are compared via pd.isnull on both sides.
    for x, y in zip(df[time_since_product.get_name()], answers):
        assert (pd.isnull(x) and pd.isnull(y)) or x == y
def test_groupby_multi_output_stacking(pd_es):
    """Aggregations stacked on a multi-output groupby transform produce one
    stacked feature per output slice."""

    class TestTime(TransformPrimitive):
        name = "test_time"
        input_types = [ColumnSchema(logical_type=Datetime)]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 6

    feature_defs = dfs(
        entityset=pd_es,
        target_dataframe_name="sessions",
        features_only=True,
        agg_primitives=["sum"],
        groupby_trans_primitives=[TestTime],
        max_depth=4,
    )
    for slice_num in range(6):
        assert feature_with_name(
            feature_defs,
            "SUM(log.TEST_TIME(datetime)[%d] by product_id)" % slice_num,
        )
        assert (
            "customers.SUM(log.TEST_TIME(datetime)[%d] by session_id)" % slice_num
        ) in feature_defs
def test_serialization(pd_es):
    """A GroupByTransformFeature round-trips through its argument dictionary."""
    value = IdentityFeature(pd_es["log"].ww["value"])
    zipcode = IdentityFeature(pd_es["log"].ww["zipcode"])
    primitive = CumSum()
    groupby = feature_base.GroupByTransformFeature(value, primitive, zipcode)
    expected_args = {
        "name": "CUM_SUM(value) by zipcode",
        "base_features": [value.unique_name()],
        "primitive": primitive,
        "groupby": zipcode.unique_name(),
    }
    assert groupby.get_arguments() == expected_args
    dependencies = {
        value.unique_name(): value,
        zipcode.unique_name(): zipcode,
    }
    restored = feature_base.GroupByTransformFeature.from_dictionary(
        expected_args,
        pd_es,
        dependencies,
        primitive,
    )
    assert restored == groupby
def test_groupby_with_multioutput_primitive(pd_es):
    """A multi-output groupby transform must produce per-slice values equal to
    the corresponding single-output primitives, and must not perturb them."""
    class MultiCumSum(TransformPrimitive):
        name = "multi_cum_sum"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 3
        def get_function(self):
            def multi_cum_sum(x):
                # slice 0 = cumsum, slice 1 = cummax, slice 2 = cummin
                return x.cumsum(), x.cummax(), x.cummin()
            return multi_cum_sum
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="customers",
        trans_primitives=[],
        agg_primitives=[],
        groupby_trans_primitives=[MultiCumSum, CumSum, CumMax, CumMin],
    )
    # Calculate output in a separate DFS call to make sure the multi-output code
    # does not alter any values
    fm2, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="customers",
        trans_primitives=[],
        agg_primitives=[],
        groupby_trans_primitives=[CumSum, CumMax, CumMin],
    )
    # Single-output columns each slice of MultiCumSum should match, per slice.
    answer_cols = [
        ["CUM_SUM(age) by cohort", "CUM_SUM(age) by région_id"],
        ["CUM_MAX(age) by cohort", "CUM_MAX(age) by région_id"],
        ["CUM_MIN(age) by cohort", "CUM_MIN(age) by région_id"],
    ]
    for i in range(3):
        # Check that multi-output gives correct answers
        f = "MULTI_CUM_SUM(age)[%d] by cohort" % i
        assert f in fm.columns
        for x, y in zip(fm[f].values, fm[answer_cols[i][0]].values):
            assert x == y
        f = "MULTI_CUM_SUM(age)[%d] by région_id" % i
        assert f in fm.columns
        for x, y in zip(fm[f].values, fm[answer_cols[i][1]].values):
            assert x == y
        # Verify single output results are unchanged by inclusion of
        # multi-output primitive
        for x, y in zip(fm[answer_cols[i][0]], fm2[answer_cols[i][0]]):
            assert x == y
        for x, y in zip(fm[answer_cols[i][1]], fm2[answer_cols[i][1]]):
            assert x == y
def test_groupby_with_multioutput_primitive_custom_names(pd_es):
    """Multi-output groupby transform features should use the primitive's
    custom generate_names output as column names."""
    class MultiCumSum(TransformPrimitive):
        name = "multi_cum_sum"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 3
        def get_function(self):
            def multi_cum_sum(x):
                return x.cumsum(), x.cummax(), x.cummin()
            return multi_cum_sum
    # NOTE(review): as written here, generate_names sits at test-function scope
    # and is never attached to MultiCumSum, yet expected_names below assume the
    # custom names are used. Presumably this should be a method of MultiCumSum
    # (indentation may have been lost) — confirm against upstream.
    def generate_names(primitive, base_feature_names):
        return ["CUSTOM_SUM", "CUSTOM_MAX", "CUSTOM_MIN"]
    fm, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="customers",
        trans_primitives=[],
        agg_primitives=[],
        groupby_trans_primitives=[MultiCumSum, CumSum, CumMax, CumMin],
    )
    # Single-output columns the custom-named slices should match, per slice.
    answer_cols = [
        ["CUM_SUM(age) by cohort", "CUM_SUM(age) by région_id"],
        ["CUM_MAX(age) by cohort", "CUM_MAX(age) by région_id"],
        ["CUM_MIN(age) by cohort", "CUM_MIN(age) by région_id"],
    ]
    expected_names = [
        ["CUSTOM_SUM by cohort", "CUSTOM_SUM by région_id"],
        ["CUSTOM_MAX by cohort", "CUSTOM_MAX by région_id"],
        ["CUSTOM_MIN by cohort", "CUSTOM_MIN by région_id"],
    ]
    for i in range(3):
        f = expected_names[i][0]
        assert f in fm.columns
        for x, y in zip(fm[f].values, fm[answer_cols[i][0]].values):
            assert x == y
        f = expected_names[i][1]
        assert f in fm.columns
        for x, y in zip(fm[f].values, fm[answer_cols[i][1]].values):
            assert x == y
| 19,205 | 29.198113 | 85 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_feature_visualizer.py | import json
import os
import re
import graphviz
import pytest
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
FeatureOutputSlice,
GroupByTransformFeature,
IdentityFeature,
TransformFeature,
graph_feature,
)
from featuretools.primitives import Count, CumMax, Mode, NMostCommon, Year
@pytest.fixture
def simple_feat(es):
    """An identity feature over the log dataframe's id column."""
    feature = IdentityFeature(es["log"].ww["id"])
    return feature
@pytest.fixture
def trans_feat(es):
    """A Year transform over the customers' cancel_date column."""
    cancel_date = IdentityFeature(es["customers"].ww["cancel_date"])
    return TransformFeature(cancel_date, Year)
def test_returns_digraph_object(simple_feat):
    """graph_feature should return a graphviz Digraph."""
    result = graph_feature(simple_feat)
    assert isinstance(result, graphviz.Digraph)
def test_saving_png_file(simple_feat, tmp_path):
    """Passing to_file writes the rendered graph to disk."""
    destination = str(tmp_path.joinpath("test1.png"))
    graph_feature(simple_feat, to_file=destination)
    assert os.path.isfile(destination)
def test_missing_file_extension(simple_feat):
    """A to_file path without an extension should raise a ValueError."""
    with pytest.raises(ValueError, match="Please use a file extension"):
        graph_feature(simple_feat, to_file="test1")
def test_invalid_format(simple_feat):
    """A to_file path with an unsupported extension should raise a ValueError."""
    with pytest.raises(ValueError, match="Unknown format"):
        graph_feature(simple_feat, to_file="test1.xyz")
def test_transform(es, trans_feat):
    """The graph of a transform feature contains the primitive node, the input
    edge, the output edge, and a 3-row table for the target dataframe."""
    feat = trans_feat
    graph = graph_feature(feat).source
    feat_name = feat.get_name()
    # Node/edge identifiers as emitted by graph_feature's DOT source.
    prim_node = "0_{}_year".format(feat_name)
    dataframe_table = "\u2605 customers (target)"
    prim_edge = 'customers:cancel_date -> "{}"'.format(prim_node)
    feat_edge = '"{}" -> customers:"{}"'.format(prim_node, feat_name)
    graph_components = [feat_name, dataframe_table, prim_node, prim_edge, feat_edge]
    for component in graph_components:
        assert component in graph
    # Exactly one HTML table for the customers dataframe...
    matches = re.findall(r"customers \[label=<\n<TABLE.*?</TABLE>>", graph, re.DOTALL)
    assert len(matches) == 1
    # ...with header, input column, and output feature rows in that order.
    rows = re.findall(r"<TR.*?</TR>", matches[0], re.DOTALL)
    assert len(rows) == 3
    to_match = ["customers", "cancel_date", feat_name]
    for match, row in zip(to_match, rows):
        assert match in row
def test_html_symbols(es, tmp_path):
    """Feature names containing <, >, = must render without breaking the
    graph's HTML labels."""
    path_template = str(tmp_path.joinpath("test{}.png"))
    value = IdentityFeature(es["log"].ww["value"])
    comparisons = [value > 5, value < 5, value >= 5, value <= 5]
    for index, feat in enumerate(comparisons):
        destination = path_template.format(index)
        source = graph_feature(feat, to_file=destination).source
        assert os.path.isfile(destination)
        assert feat.get_name() in source
def test_groupby_transform(es):
    """The graph of a groupby transform has a distinct groupby node between
    the input columns and the primitive node."""
    feat = GroupByTransformFeature(
        IdentityFeature(es["customers"].ww["age"]),
        CumMax,
        IdentityFeature(es["customers"].ww["cohort"]),
    )
    graph = graph_feature(feat).source
    feat_name = feat.get_name()
    # Node/edge identifiers as emitted by graph_feature's DOT source.
    prim_node = "0_{}_cum_max".format(feat_name)
    groupby_node = "{}_groupby_customers--cohort".format(feat_name)
    dataframe_table = "\u2605 customers (target)"
    groupby_edge = 'customers:cohort -> "{}"'.format(groupby_node)
    groupby_input = 'customers:age -> "{}"'.format(groupby_node)
    prim_input = '"{}" -> "{}"'.format(groupby_node, prim_node)
    feat_edge = '"{}" -> customers:"{}"'.format(prim_node, feat_name)
    graph_components = [
        feat_name,
        prim_node,
        groupby_node,
        dataframe_table,
        groupby_edge,
        groupby_input,
        prim_input,
        feat_edge,
    ]
    for component in graph_components:
        assert component in graph
    # One customers table with header, two input columns, and the output row.
    matches = re.findall(r"customers \[label=<\n<TABLE.*?</TABLE>>", graph, re.DOTALL)
    assert len(matches) == 1
    rows = re.findall(r"<TR.*?</TR>", matches[0], re.DOTALL)
    assert len(rows) == 4
    assert dataframe_table in rows[0]
    assert feat_name in rows[-1]
    # age/cohort row order is unspecified; accept either.
    assert ("age" in rows[1] and "cohort" in rows[2]) or (
        "age" in rows[2] and "cohort" in rows[1]
    )
def test_groupby_transform_direct_groupby(es):
    """When the groupby column is itself a direct feature, the graph contains
    both the join node (for the direct feature) and the groupby node."""
    groupby = DirectFeature(
        IdentityFeature(es["cohorts"].ww["cohort_name"]),
        "customers",
    )
    feat = GroupByTransformFeature(
        IdentityFeature(es["customers"].ww["age"]),
        CumMax,
        groupby,
    )
    graph = graph_feature(feat).source
    groupby_name = groupby.get_name()
    feat_name = feat.get_name()
    # Node/edge identifiers as emitted by graph_feature's DOT source.
    join_node = "1_{}_join".format(groupby_name)
    prim_node = "0_{}_cum_max".format(feat_name)
    groupby_node = "{}_groupby_customers--{}".format(feat_name, groupby_name)
    customers_table = "\u2605 customers (target)"
    cohorts_table = "cohorts"
    join_groupby = '"{}" -> customers:cohort'.format(join_node)
    join_input = 'cohorts:cohort_name -> "{}"'.format(join_node)
    join_out_edge = '"{}" -> customers:"{}"'.format(join_node, groupby_name)
    groupby_edge = 'customers:"{}" -> "{}"'.format(groupby_name, groupby_node)
    groupby_input = 'customers:age -> "{}"'.format(groupby_node)
    prim_input = '"{}" -> "{}"'.format(groupby_node, prim_node)
    feat_edge = '"{}" -> customers:"{}"'.format(prim_node, feat_name)
    graph_components = [
        groupby_name,
        feat_name,
        join_node,
        prim_node,
        groupby_node,
        customers_table,
        cohorts_table,
        join_groupby,
        join_input,
        join_out_edge,
        groupby_edge,
        groupby_input,
        prim_input,
        feat_edge,
    ]
    for component in graph_components:
        assert component in graph
    # Each dataframe's HTML table must contain exactly the expected rows,
    # in any order (entries are removed from the list as they match).
    dataframes = {
        "cohorts": [cohorts_table, "cohort_name"],
        "customers": [customers_table, "cohort", "age", groupby_name, feat_name],
    }
    for dataframe in dataframes:
        regex = r"{} \[label=<\n<TABLE.*?</TABLE>>".format(dataframe)
        matches = re.findall(regex, graph, re.DOTALL)
        assert len(matches) == 1
        rows = re.findall(r"<TR.*?</TR>", matches[0], re.DOTALL)
        assert len(rows) == len(dataframes[dataframe])
        for row in rows:
            matched = False
            for i in dataframes[dataframe]:
                if i in row:
                    matched = True
                    dataframes[dataframe].remove(i)
                    break
            assert matched
def test_aggregation(es):
    """The graph of an aggregation feature routes the child column through a
    groupby node and the primitive node into the parent dataframe's table."""
    feat = AggregationFeature(IdentityFeature(es["log"].ww["id"]), "sessions", Count)
    graph = graph_feature(feat).source
    feat_name = feat.get_name()
    # Node/edge identifiers as emitted by graph_feature's DOT source.
    prim_node = "0_{}_count".format(feat_name)
    groupby_node = "{}_groupby_log--session_id".format(feat_name)
    sessions_table = "\u2605 sessions (target)"
    log_table = "log"
    groupby_edge = 'log:session_id -> "{}"'.format(groupby_node)
    groupby_input = 'log:id -> "{}"'.format(groupby_node)
    prim_input = '"{}" -> "{}"'.format(groupby_node, prim_node)
    feat_edge = '"{}" -> sessions:"{}"'.format(prim_node, feat_name)
    graph_components = [
        feat_name,
        prim_node,
        groupby_node,
        sessions_table,
        log_table,
        groupby_edge,
        groupby_input,
        prim_input,
        feat_edge,
    ]
    for component in graph_components:
        assert component in graph
    # Each dataframe's HTML table must contain exactly the expected rows,
    # in any order (entries are removed from the list as they match).
    dataframes = {
        "log": [log_table, "id", "session_id"],
        "sessions": [sessions_table, feat_name],
    }
    for dataframe in dataframes:
        regex = r"{} \[label=<\n<TABLE.*?</TABLE>>".format(dataframe)
        matches = re.findall(regex, graph, re.DOTALL)
        assert len(matches) == 1
        rows = re.findall(r"<TR.*?</TR>", matches[0], re.DOTALL)
        assert len(rows) == len(dataframes[dataframe])
        for row in rows:
            matched = False
            for i in dataframes[dataframe]:
                if i in row:
                    matched = True
                    dataframes[dataframe].remove(i)
                    break
            assert matched
def test_multioutput(es):
    """Graphing one slice of a multi-output feature: nodes are named after the
    full multi-output feature, while the output row uses the slice's name."""
    multioutput = AggregationFeature(
        IdentityFeature(es["log"].ww["zipcode"]),
        "sessions",
        NMostCommon,
    )
    feat = FeatureOutputSlice(multioutput, 0)
    graph = graph_feature(feat).source
    feat_name = feat.get_name()
    # Node names come from the parent multi-output feature, not the slice.
    prim_node = "0_{}_n_most_common".format(multioutput.get_name())
    groupby_node = "{}_groupby_log--session_id".format(multioutput.get_name())
    sessions_table = "\u2605 sessions (target)"
    log_table = "log"
    groupby_edge = 'log:session_id -> "{}"'.format(groupby_node)
    groupby_input = 'log:zipcode -> "{}"'.format(groupby_node)
    prim_input = '"{}" -> "{}"'.format(groupby_node, prim_node)
    feat_edge = '"{}" -> sessions:"{}"'.format(prim_node, feat_name)
    graph_components = [
        feat_name,
        prim_node,
        groupby_node,
        sessions_table,
        log_table,
        groupby_edge,
        groupby_input,
        prim_input,
        feat_edge,
    ]
    for component in graph_components:
        assert component in graph
    # Each dataframe's HTML table must contain exactly the expected rows,
    # in any order (entries are removed from the list as they match).
    dataframes = {
        "log": [log_table, "zipcode", "session_id"],
        "sessions": [sessions_table, feat_name],
    }
    for dataframe in dataframes:
        regex = r"{} \[label=<\n<TABLE.*?</TABLE>>".format(dataframe)
        matches = re.findall(regex, graph, re.DOTALL)
        assert len(matches) == 1
        rows = re.findall(r"<TR.*?</TR>", matches[0], re.DOTALL)
        assert len(rows) == len(dataframes[dataframe])
        for row in rows:
            matched = False
            for i in dataframes[dataframe]:
                if i in row:
                    matched = True
                    dataframes[dataframe].remove(i)
                    break
            assert matched
def test_direct(es):
    """The graph of a stacked direct feature (customers -> sessions -> log)
    contains one join node per hop and tables for all three dataframes."""
    d1 = DirectFeature(
        IdentityFeature(es["customers"].ww["engagement_level"]),
        "sessions",
    )
    d2 = DirectFeature(d1, "log")
    graph = graph_feature(d2).source
    d1_name = d1.get_name()
    d2_name = d2.get_name()
    # Node/edge identifiers as emitted by graph_feature's DOT source.
    prim_node1 = "1_{}_join".format(d1_name)
    prim_node2 = "0_{}_join".format(d2_name)
    log_table = "\u2605 log (target)"
    sessions_table = "sessions"
    customers_table = "customers"
    groupby_edge1 = '"{}" -> sessions:customer_id'.format(prim_node1)
    groupby_edge2 = '"{}" -> log:session_id'.format(prim_node2)
    groupby_input1 = 'customers:engagement_level -> "{}"'.format(prim_node1)
    groupby_input2 = 'sessions:"{}" -> "{}"'.format(d1_name, prim_node2)
    d1_edge = '"{}" -> sessions:"{}"'.format(prim_node1, d1_name)
    d2_edge = '"{}" -> log:"{}"'.format(prim_node2, d2_name)
    graph_components = [
        d1_name,
        d2_name,
        prim_node1,
        prim_node2,
        log_table,
        sessions_table,
        customers_table,
        groupby_edge1,
        groupby_edge2,
        groupby_input1,
        groupby_input2,
        d1_edge,
        d2_edge,
    ]
    for component in graph_components:
        assert component in graph
    # Each dataframe's HTML table must contain exactly the expected rows,
    # in any order (entries are removed from the list as they match).
    dataframes = {
        "customers": [customers_table, "engagement_level"],
        "sessions": [sessions_table, "customer_id", d1_name],
        "log": [log_table, "session_id", d2_name],
    }
    for dataframe in dataframes:
        regex = r"{} \[label=<\n<TABLE.*?</TABLE>>".format(dataframe)
        matches = re.findall(regex, graph, re.DOTALL)
        assert len(matches) == 1
        rows = re.findall(r"<TR.*?</TR>", matches[0], re.DOTALL)
        assert len(rows) == len(dataframes[dataframe])
        for row in rows:
            matched = False
            for i in dataframes[dataframe]:
                if i in row:
                    matched = True
                    dataframes[dataframe].remove(i)
                    break
            assert matched
def test_stacked(es, trans_feat):
    """A stacked feature (aggregation over a transform) orders its primitive
    nodes with step annotations: transform first, aggregation second."""
    stacked = AggregationFeature(trans_feat, "cohorts", Mode)
    graph = graph_feature(stacked).source
    feat_name = stacked.get_name()
    intermediate_name = trans_feat.get_name()
    # Node/edge identifiers as emitted by graph_feature's DOT source.
    agg_primitive = "0_{}_mode".format(feat_name)
    trans_primitive = "1_{}_year".format(intermediate_name)
    groupby_node = "{}_groupby_customers--cohort".format(feat_name)
    trans_prim_edge = 'customers:cancel_date -> "{}"'.format(trans_primitive)
    intermediate_edge = '"{}" -> customers:"{}"'.format(
        trans_primitive,
        intermediate_name,
    )
    groupby_edge = 'customers:cohort -> "{}"'.format(groupby_node)
    groupby_input = 'customers:"{}" -> "{}"'.format(intermediate_name, groupby_node)
    agg_input = '"{}" -> "{}"'.format(groupby_node, agg_primitive)
    feat_edge = '"{}" -> cohorts:"{}"'.format(agg_primitive, feat_name)
    graph_components = [
        feat_name,
        intermediate_name,
        agg_primitive,
        trans_primitive,
        groupby_node,
        trans_prim_edge,
        intermediate_edge,
        groupby_edge,
        groupby_input,
        agg_input,
        feat_edge,
    ]
    for component in graph_components:
        assert component in graph
    # Escape parentheses so the node names can be used inside a regex.
    agg_primitive = agg_primitive.replace("(", "\\(").replace(")", "\\)")
    agg_node = re.findall('"{}" \\[label.*'.format(agg_primitive), graph)
    assert len(agg_node) == 1
    assert "Step 2" in agg_node[0]
    trans_primitive = trans_primitive.replace("(", "\\(").replace(")", "\\)")
    trans_node = re.findall('"{}" \\[label.*'.format(trans_primitive), graph)
    assert len(trans_node) == 1
    assert "Step 1" in trans_node[0]
def test_description_auto_caption(trans_feat):
    """With description=True the generated description becomes the caption."""
    source = graph_feature(trans_feat, description=True).source
    assert 'label="The year of the \\"cancel_date\\"."' in source
def test_description_auto_caption_metadata(trans_feat, tmp_path):
    """Custom descriptions/templates apply whether passed directly as dicts
    or loaded from a JSON metadata file."""
    feature_descriptions = {"customers: cancel_date": "the date the customer cancelled"}
    primitive_templates = {"year": "the year that {} occurred"}
    expected_label = 'label="The year that the date the customer cancelled occurred."'
    # Passed directly as keyword arguments.
    direct_source = graph_feature(
        trans_feat,
        description=True,
        feature_descriptions=feature_descriptions,
        primitive_templates=primitive_templates,
    ).source
    assert expected_label in direct_source
    # Loaded from a JSON metadata file on disk.
    metadata_path = os.path.join(tmp_path, "description_metadata.json")
    with open(metadata_path, "w") as f:
        json.dump(
            {
                "feature_descriptions": feature_descriptions,
                "primitive_templates": primitive_templates,
            },
            f,
        )
    file_source = graph_feature(
        trans_feat,
        description=True,
        metadata_file=metadata_path,
    ).source
    assert expected_label in file_source
def test_description_custom_caption(trans_feat):
    """An explicit description string overrides the generated caption."""
    source = graph_feature(
        trans_feat,
        description="A custom feature description",
    ).source
    assert 'label="A custom feature description"' in source
| 14,927 | 31.311688 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_feature_utils.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double, Integer
from featuretools.feature_base.utils import is_valid_input
def test_is_valid_input():
    """is_valid_input accepts a candidate schema that is at least as specific
    as the template (logical type and semantic tags), and rejects mismatches
    or candidates less specific than the template."""
    accepted = [
        (ColumnSchema(), ColumnSchema()),
        (
            ColumnSchema(logical_type=Integer, semantic_tags={"index"}),
            ColumnSchema(logical_type=Integer, semantic_tags={"index"}),
        ),
        (
            ColumnSchema(logical_type=Integer, semantic_tags={"index", "numeric"}),
            ColumnSchema(semantic_tags={"index"}),
        ),
        (
            ColumnSchema(semantic_tags={"index"}),
            ColumnSchema(semantic_tags={"index"}),
        ),
        (
            ColumnSchema(logical_type=Integer, semantic_tags={"index"}),
            ColumnSchema(),
        ),
        (
            ColumnSchema(logical_type=Integer),
            ColumnSchema(logical_type=Integer),
        ),
        (
            ColumnSchema(logical_type=Integer, semantic_tags={"numeric"}),
            ColumnSchema(logical_type=Integer),
        ),
    ]
    rejected = [
        # Logical type mismatch.
        (
            ColumnSchema(logical_type=Integer, semantic_tags={"index"}),
            ColumnSchema(logical_type=Double, semantic_tags={"index"}),
        ),
        # Candidate lacks a tag or logical type the template requires.
        (
            ColumnSchema(logical_type=Integer, semantic_tags={}),
            ColumnSchema(logical_type=Integer, semantic_tags={"index"}),
        ),
        (
            ColumnSchema(),
            ColumnSchema(logical_type=Integer, semantic_tags={"index"}),
        ),
        (
            ColumnSchema(),
            ColumnSchema(logical_type=Integer),
        ),
        (
            ColumnSchema(),
            ColumnSchema(semantic_tags={"index"}),
        ),
    ]
    for candidate, template in accepted:
        assert is_valid_input(candidate=candidate, template=template)
    for candidate, template in rejected:
        assert not is_valid_input(candidate=candidate, template=template)
| 2,058 | 29.731343 | 80 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_feature_descriptions.py | import json
import os
import pytest
from woodwork.column_schema import ColumnSchema
from featuretools import describe_feature
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
GroupByTransformFeature,
IdentityFeature,
TransformFeature,
)
from featuretools.primitives import (
Absolute,
AggregationPrimitive,
CumMean,
EqualScalar,
Mean,
Mode,
NMostCommon,
NumUnique,
PercentTrue,
Sum,
TransformPrimitive,
)
def test_identity_description(es):
    """An identity feature is described simply by its column name."""
    feature = IdentityFeature(es["log"].ww["session_id"])
    assert describe_feature(feature) == 'The "session_id".'
def test_direct_description(es):
    """Direct features describe the value pulled in from a parent dataframe,
    and stacking adds one relationship phrase per hop."""
    direct_feat = DirectFeature(
        IdentityFeature(es["customers"].ww["loves_ice_cream"]),
        "sessions",
    )
    expected = (
        'The "loves_ice_cream" for the instance of "customers" associated '
        'with this instance of "sessions".'
    )
    assert describe_feature(direct_feat) == expected
    # A second DirectFeature layer walks one more relationship hop.
    nested_direct = DirectFeature(direct_feat, "log")
    nested_expected = (
        'The "loves_ice_cream" for the instance of "customers" '
        'associated with the instance of "sessions" associated with '
        'this instance of "log".'
    )
    assert describe_feature(nested_direct) == nested_expected
    # An aggregation pulled through a direct feature and re-aggregated.
    percent_true_feat = AggregationFeature(
        IdentityFeature(es["log"].ww["purchased"]),
        "sessions",
        PercentTrue,
    )
    direct_of_agg = DirectFeature(percent_true_feat, "log")
    mean_of_direct = AggregationFeature(direct_of_agg, "products", Mean)
    mean_expected = (
        "The average of the percentage of true values in "
        'the "purchased" of all instances of "log" for each "id" in "sessions" for '
        'the instance of "sessions" associated with this instance of "log" of all '
        'instances of "log" for each "id" in "products".'
    )
    assert describe_feature(mean_of_direct) == mean_expected
def test_transform_description(es):
    """Transform features use the transform primitive's phrasing."""
    abs_feat = TransformFeature(IdentityFeature(es["log"].ww["value"]), Absolute)
    assert describe_feature(abs_feat) == 'The absolute value of the "value".'
def test_groupby_transform_description(es):
    """Groupby transforms mention the grouping column in the description."""
    cum_mean_feat = GroupByTransformFeature(
        IdentityFeature(es["log"].ww["value"]),
        CumMean,
        IdentityFeature(es["log"].ww["session_id"]),
    )
    expected = 'The cumulative mean of the "value" for each "session_id".'
    assert describe_feature(cum_mean_feat) == expected
def test_aggregation_description(es):
    """Aggregations describe child rows rolled up to the parent dataframe."""
    mean_feat = AggregationFeature(
        IdentityFeature(es["log"].ww["value"]),
        "sessions",
        Mean,
    )
    base_description = 'The average of the "value" of all instances of "log" for each "id" in "sessions".'
    assert describe_feature(mean_feat) == base_description
    # A stacked aggregation embeds the inner description, stripped of its
    # leading "T" and trailing period.
    sum_of_mean = AggregationFeature(mean_feat, "customers", Sum)
    stacked_expected = (
        'The sum of t{} of all instances of "sessions" for each "id" '
        'in "customers".'.format(base_description[1:-1])
    )
    assert describe_feature(sum_of_mean) == stacked_expected
def test_aggregation_description_where(es):
    """A where clause adds a filtering phrase to the aggregation text."""
    is_us = TransformFeature(
        IdentityFeature(es["log"].ww["countrycode"]),
        EqualScalar("US"),
    )
    mean_where = AggregationFeature(
        IdentityFeature(es["log"].ww["value"]),
        "sessions",
        Mean,
        where=is_us,
    )
    expected = (
        'The average of the "value" of all instances of "log" where the '
        '"countrycode" is US for each "id" in "sessions".'
    )
    assert describe_feature(mean_where) == expected
def test_aggregation_description_use_previous(es):
    """use_previous replaces "all instances" with a rolling-window phrase."""
    windowed_mean = AggregationFeature(
        IdentityFeature(es["log"].ww["value"]),
        "sessions",
        Mean,
        use_previous="5d",
    )
    expected = 'The average of the "value" of the previous 5 days of "log" for each "id" in "sessions".'
    assert describe_feature(windowed_mean) == expected
def test_multioutput_description(es):
    """Multi-output features describe the whole feature and each slice.

    Covers three cases: NMostCommon's built-in templates, the generic
    fallback for a custom multi-output primitive with no template, and
    custom ``description_template`` lists (both the {nth_slice} form and
    one explicit entry per slice).
    """
    n_most_common = NMostCommon(2)
    n_most_common_feature = AggregationFeature(
        IdentityFeature(es["log"].ww["zipcode"]),
        "sessions",
        n_most_common,
    )
    # Indexing a multi-output feature yields a per-slice FeatureOutputSlice.
    first_most_common_slice = n_most_common_feature[0]
    second_most_common_slice = n_most_common_feature[1]
    n_most_common_base = 'The 2 most common values of the "zipcode" of all instances of "log" for each "id" in "sessions".'
    n_most_common_first = (
        'The most common value of the "zipcode" of all instances of "log" '
        'for each "id" in "sessions".'
    )
    n_most_common_second = (
        'The 2nd most common value of the "zipcode" of all instances of '
        '"log" for each "id" in "sessions".'
    )
    assert describe_feature(n_most_common_feature) == n_most_common_base
    assert describe_feature(first_most_common_slice) == n_most_common_first
    assert describe_feature(second_most_common_slice) == n_most_common_second
    class CustomMultiOutput(TransformPrimitive):
        # No description_template here: the generic fallback text is used.
        name = "custom_multioutput"
        input_types = [ColumnSchema(semantic_tags={"category"})]
        return_type = ColumnSchema(semantic_tags={"category"})
        number_output_features = 4
    custom_feat = TransformFeature(
        IdentityFeature(es["log"].ww["zipcode"]),
        CustomMultiOutput,
    )
    generic_base = 'The result of applying CUSTOM_MULTIOUTPUT to the "zipcode".'
    generic_first = 'The 1st output from applying CUSTOM_MULTIOUTPUT to the "zipcode".'
    generic_second = 'The 2nd output from applying CUSTOM_MULTIOUTPUT to the "zipcode".'
    assert describe_feature(custom_feat) == generic_base
    assert describe_feature(custom_feat[0]) == generic_first
    assert describe_feature(custom_feat[1]) == generic_second
    # A two-entry template: entry 0 describes the whole feature, entry 1 is
    # reused for every slice with {nth_slice} filled in (1st, 2nd, ...).
    CustomMultiOutput.description_template = [
        "the multioutput of {}",
        "the {nth_slice} multioutput part of {}",
    ]
    template_base = 'The multioutput of the "zipcode".'
    template_first_slice = 'The 1st multioutput part of the "zipcode".'
    template_second_slice = 'The 2nd multioutput part of the "zipcode".'
    template_third_slice = 'The 3rd multioutput part of the "zipcode".'
    template_fourth_slice = 'The 4th multioutput part of the "zipcode".'
    assert describe_feature(custom_feat) == template_base
    assert describe_feature(custom_feat[0]) == template_first_slice
    assert describe_feature(custom_feat[1]) == template_second_slice
    assert describe_feature(custom_feat[2]) == template_third_slice
    assert describe_feature(custom_feat[3]) == template_fourth_slice
    # A per-slice template list: one entry per slice, and indexing past the
    # template's entries raises IndexError.
    CustomMultiOutput.description_template = [
        "the multioutput of {}",
        "the primary multioutput part of {}",
        "the secondary multioutput part of {}",
    ]
    custom_base = 'The multioutput of the "zipcode".'
    custom_first_slice = 'The primary multioutput part of the "zipcode".'
    custom_second_slice = 'The secondary multioutput part of the "zipcode".'
    bad_slice_error = "Slice out of range of template"
    assert describe_feature(custom_feat) == custom_base
    assert describe_feature(custom_feat[0]) == custom_first_slice
    assert describe_feature(custom_feat[1]) == custom_second_slice
    with pytest.raises(IndexError, match=bad_slice_error):
        describe_feature(custom_feat[2])
def test_generic_description(es):
    """Primitives without a description template get generic fallback text.

    A primitive with no ``name`` attribute falls back to its class name;
    named primitives use their upper-cased ``name`` in the generic phrase.
    """
    class NoName(TransformPrimitive):
        # No ``name`` attribute -- the class name "NoName" is used instead.
        input_types = [ColumnSchema(semantic_tags={"category"})]
        output_type = ColumnSchema(semantic_tags={"category"})
        def generate_name(self, base_feature_names):
            return "%s(%s%s)" % (
                "NO_NAME",
                ", ".join(base_feature_names),
                self.get_args_string(),
            )
    class CustomAgg(AggregationPrimitive):
        name = "custom_aggregation"
        input_types = [ColumnSchema(semantic_tags={"category"})]
        output_type = ColumnSchema(semantic_tags={"category"})
    class CustomTrans(TransformPrimitive):
        name = "custom_transform"
        input_types = [ColumnSchema(semantic_tags={"category"})]
        output_type = ColumnSchema(semantic_tags={"category"})
    no_name = TransformFeature(IdentityFeature(es["log"].ww["zipcode"]), NoName)
    no_name_description = 'The result of applying NoName to the "zipcode".'
    assert describe_feature(no_name) == no_name_description
    custom_agg = AggregationFeature(
        IdentityFeature(es["log"].ww["zipcode"]),
        "customers",
        CustomAgg,
    )
    custom_agg_description = 'The result of applying CUSTOM_AGGREGATION to the "zipcode" of all instances of "log" for each "id" in "customers".'
    assert describe_feature(custom_agg) == custom_agg_description
    custom_trans = TransformFeature(
        IdentityFeature(es["log"].ww["zipcode"]),
        CustomTrans,
    )
    custom_trans_description = (
        'The result of applying CUSTOM_TRANSFORM to the "zipcode".'
    )
    assert describe_feature(custom_trans) == custom_trans_description
def test_column_description(es):
    """A column's Woodwork description overrides the default identity text."""
    column_description = "the name of the device used for each session"
    es["sessions"].ww.columns["device_name"].description = column_description
    identity_feat = IdentityFeature(es["sessions"].ww["device_name"])
    # The feature description is the column description, capitalized and
    # terminated with a period.
    expected = column_description[0].upper() + column_description[1:] + "."
    assert describe_feature(identity_feat) == expected
def test_metadata(es, tmp_path):
    """Custom descriptions and templates work passed directly or via a file.

    Checks three override mechanisms: ``feature_descriptions`` for identity
    features, ``primitive_templates`` for primitive phrasing, and a named
    custom description for a whole feature; then verifies the same overrides
    load from a JSON ``metadata_file``.
    """
    # Overrides keyed by "<dataframe>: <column>" for identity features.
    identity_feature_descriptions = {
        "sessions: device_name": "the name of the device used for each session",
        "customers: id": "the customer's id",
    }
    agg_feat = AggregationFeature(
        IdentityFeature(es["sessions"].ww["device_name"]),
        "customers",
        NumUnique,
    )
    agg_description = (
        "The number of unique elements in the name of the device used for each "
        'session of all instances of "sessions" for each customer\'s id.'
    )
    assert (
        describe_feature(agg_feat, feature_descriptions=identity_feature_descriptions)
        == agg_description
    )
    transform_feat = GroupByTransformFeature(
        IdentityFeature(es["log"].ww["value"]),
        CumMean,
        IdentityFeature(es["log"].ww["session_id"]),
    )
    transform_description = 'The running average of the "value" for each "session_id".'
    # Primitive template override keyed by primitive name.
    primitive_templates = {"cum_mean": "the running average of {}"}
    assert (
        describe_feature(transform_feat, primitive_templates=primitive_templates)
        == transform_description
    )
    custom_agg = AggregationFeature(
        IdentityFeature(es["log"].ww["zipcode"]),
        "sessions",
        Mode,
    )
    auto_description = 'The most frequently occurring value of the "zipcode" of all instances of "log" for each "id" in "sessions".'
    custom_agg_description = "the most frequently used zipcode"
    custom_feature_description = (
        custom_agg_description[0].upper() + custom_agg_description[1:] + "."
    )
    # A whole-feature override keyed by "<dataframe>: <feature name>".
    feature_description_dict = {"sessions: MODE(log.zipcode)": custom_agg_description}
    assert describe_feature(custom_agg) == auto_description
    assert (
        describe_feature(custom_agg, feature_descriptions=feature_description_dict)
        == custom_feature_description
    )
    # The same overrides should produce identical results when loaded from a
    # metadata JSON file instead of keyword arguments.
    metadata = {
        "feature_descriptions": {
            **identity_feature_descriptions,
            **feature_description_dict,
        },
        "primitive_templates": primitive_templates,
    }
    metadata_path = os.path.join(tmp_path, "description_metadata.json")
    with open(metadata_path, "w") as f:
        json.dump(metadata, f)
    assert describe_feature(agg_feat, metadata_file=metadata_path) == agg_description
    assert (
        describe_feature(transform_feat, metadata_file=metadata_path)
        == transform_description
    )
    assert (
        describe_feature(custom_agg, metadata_file=metadata_path)
        == custom_feature_description
    )
| 12,017 | 34.982036 | 145 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_features_deserializer.py | import logging
from unittest.mock import patch
import pandas as pd
import pytest
from featuretools import (
AggregationFeature,
Feature,
IdentityFeature,
TransformFeature,
__version__,
)
from featuretools.feature_base.features_deserializer import FeaturesDeserializer
from featuretools.primitives import (
Count,
Max,
MultiplyNumericScalar,
NMostCommon,
NumberOfCommonWords,
NumUnique,
)
from featuretools.primitives.utils import serialize_primitive
from featuretools.utils.schema_utils import FEATURES_SCHEMA_VERSION
def test_single_feature(es):
    """A serialized dictionary holding one identity feature round-trips."""
    feature = IdentityFeature(es["log"].ww["value"])
    name = feature.unique_name()
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [name],
        "feature_definitions": {name: feature.to_dictionary()},
        "primitive_definitions": {},
    }
    assert FeaturesDeserializer(dictionary).to_list() == [feature]
def test_multioutput_feature(es):
    """Multi-output features and their per-slice children deserialize in the
    same order they were serialized."""
    value = IdentityFeature(es["log"].ww["product_id"])
    threecommon = NMostCommon()
    num_unique = NumUnique()
    tc = Feature(value, parent_dataframe_name="sessions", primitive=threecommon)
    features = [tc, value]
    # For each of the three NMostCommon output slices, add both a NumUnique
    # aggregation built on the slice and the slice itself.
    for i in range(3):
        features.append(
            Feature(
                tc[i],
                parent_dataframe_name="customers",
                primitive=num_unique,
            ),
        )
        features.append(tc[i])
    flist = [feat.unique_name() for feat in features]
    fd = [feat.to_dictionary() for feat in features]
    fdict = dict(zip(flist, fd))
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": flist,
        "feature_definitions": fdict,
    }
    # Replace the inline primitive dictionaries with indexed references:
    # index 0 for the NMostCommon instance, index 1 for NumUnique. Features
    # at flist positions 2, 4, 6 are the three NumUnique aggregations.
    dictionary["primitive_definitions"] = {
        "0": serialize_primitive(threecommon),
        "1": serialize_primitive(num_unique),
    }
    dictionary["feature_definitions"][flist[0]]["arguments"]["primitive"] = "0"
    dictionary["feature_definitions"][flist[2]]["arguments"]["primitive"] = "1"
    dictionary["feature_definitions"][flist[4]]["arguments"]["primitive"] = "1"
    dictionary["feature_definitions"][flist[6]]["arguments"]["primitive"] = "1"
    deserializer = FeaturesDeserializer(dictionary).to_list()
    # Compare by unique name, since the slices are reconstructed objects.
    for i in range(len(features)):
        assert features[i].unique_name() == deserializer[i].unique_name()
def test_base_features_in_list(es):
    """Both a feature and its base deserialize when both are listed."""
    max_primitive = Max()
    value = IdentityFeature(es["log"].ww["value"])
    max_feat = AggregationFeature(value, "sessions", max_primitive)
    definitions = {
        max_feat.unique_name(): max_feat.to_dictionary(),
        value.unique_name(): value.to_dictionary(),
    }
    # Serialized features reference their primitive by string index.
    definitions[max_feat.unique_name()]["arguments"]["primitive"] = "0"
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [max_feat.unique_name(), value.unique_name()],
        "feature_definitions": definitions,
        "primitive_definitions": {"0": serialize_primitive(max_primitive)},
    }
    assert FeaturesDeserializer(dictionary).to_list() == [max_feat, value]
def test_base_features_not_in_list(es):
    """Only features named in feature_list come back, even though their base
    features appear in feature_definitions."""
    max_primitive = Max()
    mult_primitive = MultiplyNumericScalar(value=2)
    value = IdentityFeature(es["log"].ww["value"])
    value_x2 = TransformFeature(value, mult_primitive)
    max_feat = AggregationFeature(value_x2, "sessions", max_primitive)
    definitions = {
        max_feat.unique_name(): max_feat.to_dictionary(),
        value_x2.unique_name(): value_x2.to_dictionary(),
        value.unique_name(): value.to_dictionary(),
    }
    # Serialized features reference their primitives by string index.
    definitions[max_feat.unique_name()]["arguments"]["primitive"] = "0"
    definitions[value_x2.unique_name()]["arguments"]["primitive"] = "1"
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [max_feat.unique_name()],
        "feature_definitions": definitions,
        "primitive_definitions": {
            "0": serialize_primitive(max_primitive),
            "1": serialize_primitive(mult_primitive),
        },
    }
    assert FeaturesDeserializer(dictionary).to_list() == [max_feat]
@patch("featuretools.utils.schema_utils.FEATURES_SCHEMA_VERSION", "1.1.1")
@pytest.mark.parametrize(
"hardcoded_schema_version, warns",
[("2.1.1", True), ("1.2.1", True), ("1.1.2", True), ("1.0.2", False)],
)
def test_later_schema_version(es, caplog, hardcoded_schema_version, warns):
def test_version(version, warns):
if warns:
warning_text = (
"The schema version of the saved features"
"(%s) is greater than the latest supported (%s). "
"You may need to upgrade featuretools. Attempting to load features ..."
% (version, "1.1.1")
)
else:
warning_text = None
_check_schema_version(version, es, warning_text, caplog, "warn")
test_version(hardcoded_schema_version, warns)
@patch("featuretools.utils.schema_utils.FEATURES_SCHEMA_VERSION", "1.1.1")
@pytest.mark.parametrize(
"hardcoded_schema_version, warns",
[("0.1.1", True), ("1.0.1", False), ("1.1.0", False)],
)
def test_earlier_schema_version(es, caplog, hardcoded_schema_version, warns):
def test_version(version, warns):
if warns:
warning_text = (
"The schema version of the saved features"
"(%s) is no longer supported by this version "
"of featuretools. Attempting to load features ..." % version
)
else:
warning_text = None
_check_schema_version(version, es, warning_text, caplog, "log")
test_version(hardcoded_schema_version, warns)
def test_unknown_feature_type(es):
    """An unrecognized feature type raises RuntimeError when listing."""
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": ["feature_1"],
        "feature_definitions": {
            "feature_1": {"type": "FakeFeature", "dependencies": [], "arguments": {}},
        },
        "primitive_definitions": {},
    }
    loader = FeaturesDeserializer(dictionary)
    # The error is raised lazily, when the features are materialized.
    with pytest.raises(RuntimeError, match='Unrecognized feature type "FakeFeature"'):
        loader.to_list()
def test_unknown_primitive_type(es):
    """A primitive class missing from its module raises at construction."""
    value = IdentityFeature(es["log"].ww["value"])
    max_feat = AggregationFeature(value, "sessions", Max)
    primitive_dict = serialize_primitive(Max())
    # Corrupt the serialized primitive's class name.
    primitive_dict["type"] = "FakePrimitive"
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [max_feat.unique_name(), value.unique_name()],
        "feature_definitions": {
            max_feat.unique_name(): max_feat.to_dictionary(),
            value.unique_name(): value.to_dictionary(),
        },
        "primitive_definitions": {"0": primitive_dict},
    }
    expected_error = 'Primitive "FakePrimitive" in module "%s" not found' % Max.__module__
    with pytest.raises(RuntimeError) as excinfo:
        FeaturesDeserializer(dictionary)
    assert str(excinfo.value) == expected_error
def test_unknown_primitive_module(es):
    """A primitive whose module cannot be resolved raises at construction."""
    value = IdentityFeature(es["log"].ww["value"])
    max_feat = AggregationFeature(value, "sessions", Max)
    primitive_dict = serialize_primitive(Max())
    # Corrupt the serialized primitive's module path.
    primitive_dict["module"] = "fake.module"
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [max_feat.unique_name(), value.unique_name()],
        "feature_definitions": {
            max_feat.unique_name(): max_feat.to_dictionary(),
            value.unique_name(): value.to_dictionary(),
        },
        "primitive_definitions": {"0": primitive_dict},
    }
    with pytest.raises(RuntimeError) as excinfo:
        FeaturesDeserializer(dictionary)
    assert str(excinfo.value) == 'Primitive "Max" in module "fake.module" not found'
def test_feature_use_previous_pd_timedelta(es):
    """A Count feature with a pd.Timedelta use_previous round-trips."""
    value = IdentityFeature(es["log"].ww["id"])
    count_primitive = Count()
    count_feature = AggregationFeature(
        value,
        "customers",
        count_primitive,
        use_previous=pd.Timedelta(12, "W"),
    )
    definitions = {
        count_feature.unique_name(): count_feature.to_dictionary(),
        value.unique_name(): value.to_dictionary(),
    }
    # The serialized feature references its primitive by string index.
    definitions[count_feature.unique_name()]["arguments"]["primitive"] = "0"
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [count_feature.unique_name(), value.unique_name()],
        "feature_definitions": definitions,
        "primitive_definitions": {"0": serialize_primitive(count_primitive)},
    }
    assert FeaturesDeserializer(dictionary).to_list() == [count_feature, value]
def test_feature_use_previous_pd_dateoffset(es):
    """A Count feature with a pd.DateOffset use_previous round-trips.

    Covers both a single-component offset (months only) and a
    multi-component offset (months, days, minutes). The original test
    duplicated the serialize/deserialize round-trip verbatim for each
    offset; the shared logic is extracted into a local helper.
    """
    count_primitive = Count()
    def _assert_roundtrip(date_offset):
        # Build COUNT(log.id) over the previous `date_offset` per customer,
        # serialize it by hand, and check deserialization reproduces it.
        value = IdentityFeature(es["log"].ww["id"])
        count_feature = AggregationFeature(
            value,
            "customers",
            count_primitive,
            use_previous=date_offset,
        )
        dictionary = {
            "ft_version": __version__,
            "schema_version": FEATURES_SCHEMA_VERSION,
            "entityset": es.to_dictionary(),
            "feature_list": [count_feature.unique_name(), value.unique_name()],
            "feature_definitions": {
                count_feature.unique_name(): count_feature.to_dictionary(),
                value.unique_name(): value.to_dictionary(),
            },
        }
        # The primitive is stored once and referenced by string index.
        dictionary["primitive_definitions"] = {
            "0": serialize_primitive(count_primitive),
        }
        dictionary["feature_definitions"][count_feature.unique_name()]["arguments"][
            "primitive"
        ] = "0"
        deserializer = FeaturesDeserializer(dictionary)
        assert deserializer.to_list() == [count_feature, value]
    _assert_roundtrip(pd.DateOffset(months=3))
    _assert_roundtrip(pd.DateOffset(months=3, days=2, minutes=30))
def test_word_set_in_number_of_common_words_is_deserialized_back_into_a_set(es):
    """The word_set primitive argument comes back as a real set."""
    id_feat = IdentityFeature(es["log"].ww["comments"])
    number_of_common_words = NumberOfCommonWords(word_set={"hello", "my"})
    transform_feat = TransformFeature(id_feat, number_of_common_words)
    definitions = {
        id_feat.unique_name(): id_feat.to_dictionary(),
        transform_feat.unique_name(): transform_feat.to_dictionary(),
    }
    # The serialized feature references its primitive by string index.
    definitions[transform_feat.unique_name()]["arguments"]["primitive"] = "0"
    dictionary = {
        "ft_version": __version__,
        "schema_version": FEATURES_SCHEMA_VERSION,
        "entityset": es.to_dictionary(),
        "feature_list": [id_feat.unique_name(), transform_feat.unique_name()],
        "feature_definitions": definitions,
        "primitive_definitions": {"0": serialize_primitive(number_of_common_words)},
    }
    deserialized = FeaturesDeserializer(dictionary)
    word_set = deserialized.features_dict["primitive_definitions"]["0"]["arguments"][
        "word_set"
    ]
    assert isinstance(word_set, set)
def _check_schema_version(version, es, warning_text, caplog, warning_type=None):
    """Build a minimal features dict at ``version``, load it, check messaging.

    Args:
        version (str): schema version string embedded in the dictionary.
        es: entityset serialized into the dictionary.
        warning_text (str or None): expected warning/log message; None means
            no message is expected.
        caplog: pytest log-capture fixture (used when warning_type is "log").
        warning_type (str, optional): "warn" to expect a UserWarning, "log"
            to expect a message on the "featuretools" logger.
    """
    dictionary = {
        "ft_version": __version__,
        "schema_version": version,
        "entityset": es.to_dictionary(),
        "feature_list": [],
        "feature_definitions": {},
        "primitive_definitions": {},
    }
    if warning_type == "warn" and warning_text:
        with pytest.warns(UserWarning) as record:
            FeaturesDeserializer(dictionary)
        assert record[0].message.args[0] == warning_text
    elif warning_type == "log":
        logger = logging.getLogger("featuretools")
        # Temporarily enable propagation so caplog can see featuretools log
        # records. Restore it in a finally block so a failing assertion (or
        # an exception from the deserializer) cannot leak propagate=True
        # into subsequent tests -- the original code restored it only on
        # the success path.
        logger.propagate = True
        try:
            FeaturesDeserializer(dictionary)
            if warning_text:
                assert warning_text in caplog.text
            else:
                assert not len(caplog.text)
        finally:
            logger.propagate = False
| 13,583 | 33.920308 | 87 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_features_serializer.py | import pandas as pd
from featuretools import (
AggregationFeature,
Feature,
IdentityFeature,
TransformFeature,
__version__,
)
from featuretools.entityset.deserialize import description_to_entityset
from featuretools.feature_base.features_serializer import FeaturesSerializer
from featuretools.primitives import (
Count,
Max,
MultiplyNumericScalar,
NMostCommon,
NumUnique,
)
from featuretools.primitives.utils import serialize_primitive
from featuretools.version import FEATURES_SCHEMA_VERSION
def test_single_feature(es):
feature = IdentityFeature(es["log"].ww["value"])
serializer = FeaturesSerializer([feature])
expected = {
"ft_version": __version__,
"schema_version": FEATURES_SCHEMA_VERSION,
"entityset": es.to_dictionary(),
"feature_list": [feature.unique_name()],
"feature_definitions": {feature.unique_name(): feature.to_dictionary()},
"primitive_definitions": {},
}
_compare_feature_dicts(expected, serializer.to_dict())
def test_base_features_in_list(es):
value = IdentityFeature(es["log"].ww["value"])
max_feature = AggregationFeature(value, "sessions", Max)
features = [max_feature, value]
serializer = FeaturesSerializer(features)
expected = {
"ft_version": __version__,
"schema_version": FEATURES_SCHEMA_VERSION,
"entityset": es.to_dictionary(),
"feature_list": [max_feature.unique_name(), value.unique_name()],
"feature_definitions": {
max_feature.unique_name(): max_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
},
}
expected["primitive_definitions"] = {
"0": serialize_primitive(max_feature.primitive),
}
expected["feature_definitions"][max_feature.unique_name()]["arguments"][
"primitive"
] = "0"
actual = serializer.to_dict()
_compare_feature_dicts(expected, actual)
def test_multi_output_features(es):
product_id = IdentityFeature(es["log"].ww["product_id"])
threecommon = NMostCommon()
num_unique = NumUnique()
tc = Feature(product_id, parent_dataframe_name="sessions", primitive=threecommon)
features = [tc, product_id]
for i in range(3):
features.append(
Feature(
tc[i],
parent_dataframe_name="customers",
primitive=num_unique,
),
)
features.append(tc[i])
serializer = FeaturesSerializer(features)
flist = [feat.unique_name() for feat in features]
fd = [feat.to_dictionary() for feat in features]
fdict = dict(zip(flist, fd))
expected = {
"ft_version": __version__,
"schema_version": FEATURES_SCHEMA_VERSION,
"entityset": es.to_dictionary(),
"feature_list": flist,
"feature_definitions": fdict,
}
expected["primitive_definitions"] = {
"0": serialize_primitive(tc.primitive),
"1": serialize_primitive(features[2].primitive),
}
expected["feature_definitions"][flist[0]]["arguments"]["primitive"] = "0"
expected["feature_definitions"][flist[2]]["arguments"]["primitive"] = "1"
expected["feature_definitions"][flist[4]]["arguments"]["primitive"] = "1"
expected["feature_definitions"][flist[6]]["arguments"]["primitive"] = "1"
actual = serializer.to_dict()
_compare_feature_dicts(expected, actual)
def test_base_features_not_in_list(es):
max_primitive = Max()
mult_primitive = MultiplyNumericScalar(value=2)
value = IdentityFeature(es["log"].ww["value"])
value_x2 = TransformFeature(value, mult_primitive)
max_feature = AggregationFeature(value_x2, "sessions", max_primitive)
features = [max_feature]
serializer = FeaturesSerializer(features)
expected = {
"ft_version": __version__,
"schema_version": FEATURES_SCHEMA_VERSION,
"entityset": es.to_dictionary(),
"feature_list": [max_feature.unique_name()],
"feature_definitions": {
max_feature.unique_name(): max_feature.to_dictionary(),
value_x2.unique_name(): value_x2.to_dictionary(),
value.unique_name(): value.to_dictionary(),
},
}
expected["primitive_definitions"] = {
"0": serialize_primitive(max_feature.primitive),
"1": serialize_primitive(value_x2.primitive),
}
expected["feature_definitions"][max_feature.unique_name()]["arguments"][
"primitive"
] = "0"
expected["feature_definitions"][value_x2.unique_name()]["arguments"][
"primitive"
] = "1"
actual = serializer.to_dict()
_compare_feature_dicts(expected, actual)
def test_where_feature_dependency(es):
max_primitive = Max()
value = IdentityFeature(es["log"].ww["value"])
is_purchased = IdentityFeature(es["log"].ww["purchased"])
max_feature = AggregationFeature(
value,
"sessions",
max_primitive,
where=is_purchased,
)
features = [max_feature]
serializer = FeaturesSerializer(features)
expected = {
"ft_version": __version__,
"schema_version": FEATURES_SCHEMA_VERSION,
"entityset": es.to_dictionary(),
"feature_list": [max_feature.unique_name()],
"feature_definitions": {
max_feature.unique_name(): max_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
is_purchased.unique_name(): is_purchased.to_dictionary(),
},
}
expected["primitive_definitions"] = {
"0": serialize_primitive(max_feature.primitive),
}
expected["feature_definitions"][max_feature.unique_name()]["arguments"][
"primitive"
] = "0"
actual = serializer.to_dict()
_compare_feature_dicts(expected, actual)
def test_feature_use_previous_pd_timedelta(es):
value = IdentityFeature(es["log"].ww["id"])
td = pd.Timedelta(12, "W")
count_primitive = Count()
count_feature = AggregationFeature(
value,
"customers",
count_primitive,
use_previous=td,
)
features = [count_feature, value]
serializer = FeaturesSerializer(features)
expected = {
"ft_version": __version__,
"schema_version": FEATURES_SCHEMA_VERSION,
"entityset": es.to_dictionary(),
"feature_list": [count_feature.unique_name(), value.unique_name()],
"feature_definitions": {
count_feature.unique_name(): count_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
},
}
expected["primitive_definitions"] = {
"0": serialize_primitive(count_feature.primitive),
}
expected["feature_definitions"][count_feature.unique_name()]["arguments"][
"primitive"
] = "0"
actual = serializer.to_dict()
_compare_feature_dicts(expected, actual)
def test_feature_use_previous_pd_dateoffset(es):
value = IdentityFeature(es["log"].ww["id"])
do = pd.DateOffset(months=3)
count_primitive = Count()
count_feature = AggregationFeature(
value,
"customers",
count_primitive,
use_previous=do,
)
features = [count_feature, value]
serializer = FeaturesSerializer(features)
expected = {
"ft_version": __version__,
"schema_version": FEATURES_SCHEMA_VERSION,
"entityset": es.to_dictionary(),
"feature_list": [count_feature.unique_name(), value.unique_name()],
"feature_definitions": {
count_feature.unique_name(): count_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
},
}
expected["primitive_definitions"] = {
"0": serialize_primitive(count_feature.primitive),
}
expected["feature_definitions"][count_feature.unique_name()]["arguments"][
"primitive"
] = "0"
actual = serializer.to_dict()
_compare_feature_dicts(expected, actual)
value = IdentityFeature(es["log"].ww["id"])
do = pd.DateOffset(months=3, days=2, minutes=30)
count_feature = AggregationFeature(
value,
"customers",
count_primitive,
use_previous=do,
)
features = [count_feature, value]
serializer = FeaturesSerializer(features)
expected = {
"ft_version": __version__,
"schema_version": FEATURES_SCHEMA_VERSION,
"entityset": es.to_dictionary(),
"feature_list": [count_feature.unique_name(), value.unique_name()],
"feature_definitions": {
count_feature.unique_name(): count_feature.to_dictionary(),
value.unique_name(): value.to_dictionary(),
},
}
expected["primitive_definitions"] = {
"0": serialize_primitive(count_feature.primitive),
}
expected["feature_definitions"][count_feature.unique_name()]["arguments"][
"primitive"
] = "0"
actual = serializer.to_dict()
_compare_feature_dicts(expected, actual)
def _compare_feature_dicts(a_dict, b_dict):
# We can't compare entityset dictionaries because column lists are not
# guaranteed to be in the same order.
es_a = description_to_entityset(a_dict.pop("entityset"))
es_b = description_to_entityset(b_dict.pop("entityset"))
assert es_a == es_b
assert a_dict == b_dict
| 9,315 | 31.573427 | 85 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_agg_feats.py | from datetime import datetime
from inspect import isclass
from math import isnan
import numpy as np
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools import (
AggregationFeature,
Feature,
IdentityFeature,
Timedelta,
calculate_feature_matrix,
dfs,
primitives,
)
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base.cache import feature_cache
from featuretools.primitives import (
Count,
Max,
Mean,
Median,
NMostCommon,
NumTrue,
NumUnique,
Sum,
TimeSinceFirst,
TimeSinceLast,
get_aggregation_primitives,
)
from featuretools.primitives.base import AggregationPrimitive
from featuretools.synthesis.deep_feature_synthesis import DeepFeatureSynthesis, match
from featuretools.tests.testing_utils import backward_path, feature_with_name, to_pandas
from featuretools.utils.gen_utils import Library
@pytest.fixture(autouse=True)
def reset_dfs_cache():
    # Disable and clear the shared feature cache before every test in this
    # module so cached features from one test cannot leak into another.
    feature_cache.enabled = False
    feature_cache.clear_all()
def test_get_depth(es):
    """get_depth counts every stacked feature layer below the final one."""
    log_id_feat = IdentityFeature(es["log"].ww["id"])
    customer_id_feat = IdentityFeature(es["customers"].ww["id"])
    # Layers 1-2: count logs per session, then sum those counts per customer.
    count_logs = Feature(log_id_feat, parent_dataframe_name="sessions", primitive=Count)
    sum_count_logs = Feature(
        count_logs,
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    # Layer 3: boolean transform used below as a where clause.
    num_logs_greater_than_5 = sum_count_logs > 5
    # Layer 4: conditional count of customers per region.
    count_customers = Feature(
        customer_id_feat,
        parent_dataframe_name="régions",
        where=num_logs_greater_than_5,
        primitive=Count,
    )
    # Layer 5: bring the regional count back down to customers.
    num_customers_region = Feature(count_customers, dataframe_name="customers")
    assert num_customers_region.get_depth() == 5
def test_makes_count(es):
    """DFS with only the Count aggregation builds count features at each
    relationship level, alongside the direct/identity features.

    Note: the synthesis object is deliberately not named ``dfs`` -- the
    original local shadowed the ``dfs`` function imported at module level.
    """
    dfs_obj = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[Count],
        trans_primitives=[],
    )
    features = dfs_obj.build_features()
    assert feature_with_name(features, "device_type")
    assert feature_with_name(features, "customer_id")
    assert feature_with_name(features, "customers.région_id")
    assert feature_with_name(features, "customers.age")
    assert feature_with_name(features, "COUNT(log)")
    assert feature_with_name(features, "customers.COUNT(sessions)")
    assert feature_with_name(features, "customers.régions.language")
    assert feature_with_name(features, "customers.COUNT(log)")
def test_count_null(pd_es):
    """A custom Count primitive with count_null=True also counts null rows."""
    class Count(AggregationPrimitive):
        """Count aggregation that optionally includes null values.

        Shadows the imported Count primitive within this test only.
        """
        name = "count"
        input_types = [[ColumnSchema(semantic_tags={"foreign_key"})], [ColumnSchema()]]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        stack_on_self = False
        def __init__(self, count_null=True):
            self.count_null = count_null
        def get_function(self):
            def count_func(values):
                # Filling NaN first makes .count() include the null rows.
                if self.count_null:
                    values = values.fillna(0)
                return values.count()
            return count_func
        def generate_name(
            self,
            base_feature_names,
            relationship_path_name,
            parent_dataframe_name,
            where_str,
            use_prev_str,
        ):
            return "COUNT(%s%s%s)" % (relationship_path_name, where_str, use_prev_str)
    count_null = Feature(
        pd_es["log"].ww["value"],
        parent_dataframe_name="sessions",
        primitive=Count(count_null=True),
    )
    feature_matrix = calculate_feature_matrix([count_null], entityset=pd_es)
    # Expected per-session counts when nulls are included.
    values = [5, 4, 1, 2, 3, 2]
    assert (values == feature_matrix[count_null.get_name()]).all()
def test_check_input_types(es):
    """_check_input_types succeeds for plain and where-filtered aggregations."""
    session_count = Feature(
        es["sessions"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    # Plain stacked aggregation.
    plain_mean = Feature(session_count, parent_dataframe_name="régions", primitive=Mean)
    assert plain_mean._check_input_types()
    # Same aggregation gated by a boolean where-clause.
    filtered_mean = Feature(
        session_count,
        parent_dataframe_name="régions",
        where=session_count > 3,
        primitive=Mean,
    )
    assert filtered_mean._check_input_types()
def test_mean_nan(es):
    # With no NaNs present, skipna makes no difference.
    array = pd.Series([5, 5, 5, 5, 5])
    mean_func_nans_default = Mean().get_function()
    mean_func_nans_false = Mean(skipna=False).get_function()
    mean_func_nans_true = Mean(skipna=True).get_function()
    assert mean_func_nans_default(array) == 5
    assert mean_func_nans_false(array) == 5
    assert mean_func_nans_true(array) == 5
    # With NaNs present, skipna=False propagates NaN while the default
    # (and explicit skipna=True) ignore the missing values.
    array = pd.Series([5, np.nan, np.nan, np.nan, np.nan, 10])
    assert mean_func_nans_default(array) == 7.5
    assert isnan(mean_func_nans_false(array))
    assert mean_func_nans_true(array) == 7.5
    # An all-NaN input yields NaN for every configuration.
    array_nans = pd.Series([np.nan, np.nan, np.nan, np.nan])
    assert isnan(mean_func_nans_default(array_nans))
    assert isnan(mean_func_nans_false(array_nans))
    assert isnan(mean_func_nans_true(array_nans))
    # test naming
    # Only the non-default skipna=False appears in the generated feature name.
    default_feat = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Mean,
    )
    assert default_feat.get_name() == "MEAN(log.value)"
    ignore_nan_feat = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Mean(skipna=True),
    )
    assert ignore_nan_feat.get_name() == "MEAN(log.value)"
    include_nan_feat = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Mean(skipna=False),
    )
    assert include_nan_feat.get_name() == "MEAN(log.value, skipna=False)"
def test_init_and_name(es):
    """Smoke-test every aggregation primitive: each must have a name, match at
    least one column in the test EntitySet, produce a feature name, and
    calculate without error."""
    log = es["log"]
    # Add a BooleanNullable column so primitives with that input type get tested
    boolean_nullable = log.ww["purchased"]
    boolean_nullable = boolean_nullable.ww.set_logical_type("BooleanNullable")
    log.ww["boolean_nullable"] = boolean_nullable
    features = [Feature(es["log"].ww[col]) for col in log.columns]
    # check all primitives have name
    for attribute_string in dir(primitives):
        attr = getattr(primitives, attribute_string)
        if isclass(attr):
            if issubclass(attr, AggregationPrimitive) and attr != AggregationPrimitive:
                assert getattr(attr, "name") is not None
    agg_primitives = get_aggregation_primitives().values()
    # If Dask EntitySet use only Dask compatible primitives
    if es.dataframe_type == Library.DASK:
        agg_primitives = [
            prim for prim in agg_primitives if Library.DASK in prim.compatibility
        ]
    if es.dataframe_type == Library.SPARK:
        agg_primitives = [
            prim for prim in agg_primitives if Library.SPARK in prim.compatibility
        ]
    for agg_prim in agg_primitives:
        input_types = agg_prim.input_types
        # Normalize single input-type signatures into a list of signatures.
        if type(input_types[0]) != list:
            input_types = [input_types]
        # test each allowed input_types for this primitive
        for it in input_types:
            # use the input_types matching function from DFS
            matching_types = match(it, features)
            if len(matching_types) == 0:
                raise Exception("Agg Primitive %s not tested" % agg_prim.name)
            for t in matching_types:
                instance = Feature(
                    t,
                    parent_dataframe_name="sessions",
                    primitive=agg_prim,
                )
                # try to get name and calculate
                instance.get_name()
                calculate_feature_matrix([instance], entityset=es)
def test_invalid_init_args(diamond_es):
    """Invalid relationship paths passed to AggregationFeature must raise."""
    # Case 1: path starts at a parent other than the requested one.
    error_text = "parent_dataframe must match first relationship in path"
    with pytest.raises(AssertionError, match=error_text):
        path = backward_path(diamond_es, ["stores", "transactions"])
        AggregationFeature(
            IdentityFeature(diamond_es["transactions"].ww["amount"]),
            "customers",
            Mean,
            relationship_path=path,
        )
    # Case 2: path ends at a dataframe other than the base feature's.
    error_text = (
        "Base feature must be defined on the dataframe at the end of relationship_path"
    )
    with pytest.raises(AssertionError, match=error_text):
        path = backward_path(diamond_es, ["regions", "stores"])
        AggregationFeature(
            IdentityFeature(diamond_es["transactions"].ww["amount"]),
            "regions",
            Mean,
            relationship_path=path,
        )
    # Case 3: aggregation paths must contain only backward relationships.
    error_text = "All relationships in path must be backward"
    with pytest.raises(AssertionError, match=error_text):
        backward = backward_path(diamond_es, ["customers", "transactions"])
        forward = RelationshipPath([(True, r) for _, r in backward])
        path = RelationshipPath(list(forward) + list(backward))
        AggregationFeature(
            IdentityFeature(diamond_es["transactions"].ww["amount"]),
            "transactions",
            Mean,
            relationship_path=path,
        )
def test_init_with_multiple_possible_paths(diamond_es):
    """When several backward paths exist, the path must be given explicitly."""
    error_text = (
        "There are multiple possible paths to the base dataframe. "
        "You must specify a relationship path."
    )
    with pytest.raises(RuntimeError, match=error_text):
        AggregationFeature(
            IdentityFeature(diamond_es["transactions"].ww["amount"]),
            "regions",
            Mean,
        )
    # Does not raise if path specified.
    path = backward_path(diamond_es, ["regions", "customers", "transactions"])
    AggregationFeature(
        IdentityFeature(diamond_es["transactions"].ww["amount"]),
        "regions",
        Mean,
        relationship_path=path,
    )
def test_init_with_single_possible_path(diamond_es):
    # This uses diamond_es to test that a cycle elsewhere in the graph does
    # not prevent inference of the single backward path.
    amount = IdentityFeature(diamond_es["transactions"].ww["amount"])
    feat = AggregationFeature(amount, "customers", Mean)
    expected = backward_path(diamond_es, ["customers", "transactions"])
    assert feat.relationship_path == expected
def test_init_with_no_path(diamond_es):
    """Constructing an aggregation with no backward path must raise."""
    # Forward-only relationship: customers is a parent of transactions.
    error_text = 'No backward path from "transactions" to "customers" found.'
    with pytest.raises(RuntimeError, match=error_text):
        AggregationFeature(
            IdentityFeature(diamond_es["customers"].ww["name"]),
            "transactions",
            Count,
        )
    # A dataframe has no backward path to itself.
    error_text = 'No backward path from "transactions" to "transactions" found.'
    with pytest.raises(RuntimeError, match=error_text):
        AggregationFeature(
            IdentityFeature(diamond_es["transactions"].ww["amount"]),
            "transactions",
            Mean,
        )
def test_name_with_multiple_possible_paths(diamond_es):
    """The feature name must embed the relationship path that was chosen."""
    chosen_path = backward_path(diamond_es, ["regions", "customers", "transactions"])
    base = IdentityFeature(diamond_es["transactions"].ww["amount"])
    feat = AggregationFeature(
        base,
        "regions",
        Mean,
        relationship_path=chosen_path,
    )
    assert feat.get_name() == "MEAN(customers.transactions.amount)"
    assert feat.relationship_path_name() == "customers.transactions"
def test_copy(games_es):
    """copy() preserves dataframe, base features, path, and primitive."""
    home_games = next(
        r for r in games_es.relationships if r._child_column_name == "home_team_id"
    )
    original = AggregationFeature(
        IdentityFeature(games_es["games"].ww["home_team_score"]),
        "teams",
        relationship_path=RelationshipPath([(False, home_games)]),
        primitive=Mean,
    )
    duplicate = original.copy()
    for attr in ("dataframe_name", "base_features", "relationship_path", "primitive"):
        assert getattr(duplicate, attr) == getattr(original, attr)
def test_serialization(es):
    """Round-trip AggregationFeatures through get_arguments/from_dictionary."""
    # Simple Max aggregation with no where-clause or use_previous window.
    value = IdentityFeature(es["log"].ww["value"])
    primitive = Max()
    max1 = AggregationFeature(value, "customers", primitive)
    path = next(es.find_backward_paths("customers", "log"))
    dictionary = {
        "name": max1.get_name(),
        "base_features": [value.unique_name()],
        "relationship_path": [r.to_dictionary() for r in path],
        "primitive": primitive,
        "where": None,
        "use_previous": None,
    }
    assert dictionary == max1.get_arguments()
    deserialized = AggregationFeature.from_dictionary(
        dictionary,
        es,
        {value.unique_name(): value},
        primitive,
    )
    _assert_agg_feats_equal(max1, deserialized)
    # Same round-trip with both a where-clause and a use_previous window.
    is_purchased = IdentityFeature(es["log"].ww["purchased"])
    use_previous = Timedelta(3, "d")
    max2 = AggregationFeature(
        value,
        "customers",
        primitive,
        where=is_purchased,
        use_previous=use_previous,
    )
    dictionary = {
        "name": max2.get_name(),
        "base_features": [value.unique_name()],
        "relationship_path": [r.to_dictionary() for r in path],
        "primitive": primitive,
        "where": is_purchased.unique_name(),
        "use_previous": use_previous.get_arguments(),
    }
    assert dictionary == max2.get_arguments()
    # The where feature is an extra dependency that must be resolvable.
    dependencies = {
        value.unique_name(): value,
        is_purchased.unique_name(): is_purchased,
    }
    deserialized = AggregationFeature.from_dictionary(
        dictionary,
        es,
        dependencies,
        primitive,
    )
    _assert_agg_feats_equal(max2, deserialized)
def test_time_since_last(pd_es):
    """TimeSinceLast values (rounded to whole seconds) match known data."""
    feat = Feature(
        pd_es["log"].ww["datetime"],
        parent_dataframe_name="customers",
        primitive=TimeSinceLast,
    )
    fm = calculate_feature_matrix(
        [feat],
        entityset=pd_es,
        instance_ids=[0, 1, 2],
        cutoff_time=datetime(2015, 6, 8),
    )
    expected = [131376000.0, 131289534.0, 131287797.0]
    # Round to the nearest second before comparing to avoid sub-second noise.
    assert fm[feat.get_name()].round().values.tolist() == expected
def test_time_since_first(pd_es):
    """TimeSinceFirst values (rounded to whole seconds) match known data."""
    feat = Feature(
        pd_es["log"].ww["datetime"],
        parent_dataframe_name="customers",
        primitive=TimeSinceFirst,
    )
    fm = calculate_feature_matrix(
        [feat],
        entityset=pd_es,
        instance_ids=[0, 1, 2],
        cutoff_time=datetime(2015, 6, 8),
    )
    expected = [131376600.0, 131289600.0, 131287800.0]
    # Round to the nearest second before comparing to avoid sub-second noise.
    assert fm[feat.get_name()].round().values.tolist() == expected
def test_median(pd_es):
    """Median handles mostly-NaN columns; an all-NaN group yields NaN."""
    median_feat = Feature(
        pd_es["log"].ww["value_many_nans"],
        parent_dataframe_name="customers",
        primitive=Median,
    )
    fm = calculate_feature_matrix(
        [median_feat],
        entityset=pd_es,
        instance_ids=[0, 1, 2],
        cutoff_time=datetime(2015, 6, 8),
    )
    # assert_equal treats NaN == NaN as a match, unlike plain ==.
    np.testing.assert_equal(fm[median_feat.get_name()].values, [1, 3, np.nan])
def test_agg_same_method_name(es):
    """
    Pandas relies on the function name when calculating aggregations. This means if two
    primitives with the same function name are applied to the same column, pandas
    can't differentiate them. We have a work around to this based on the name property
    that we test here.
    """
    # TODO: Update to work with Dask and Spark
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Need to update to work with Dask and Spark EntitySets")
    # test with normally defined functions
    class Sum(AggregationPrimitive):
        name = "sum"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def get_function(self):
            # Both primitives return a function literally named
            # "custom_primitive" — the collision under test.
            def custom_primitive(x):
                return x.sum()
            return custom_primitive
    class Max(AggregationPrimitive):
        name = "max"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def get_function(self):
            def custom_primitive(x):
                return x.max()
            return custom_primitive
    f_sum = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    f_max = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Max,
    )
    fm = calculate_feature_matrix([f_sum, f_max], entityset=es)
    assert fm.columns.tolist() == [f_sum.get_name(), f_max.get_name()]
    # test with lambdas
    # Lambdas all share the function name "<lambda>" — the worst-case collision.
    class Sum(AggregationPrimitive):
        name = "sum"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def get_function(self):
            return lambda x: x.sum()
    class Max(AggregationPrimitive):
        name = "max"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def get_function(self):
            return lambda x: x.max()
    f_sum = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    f_max = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Max,
    )
    fm = calculate_feature_matrix([f_sum, f_max], entityset=es)
    assert fm.columns.tolist() == [f_sum.get_name(), f_max.get_name()]
def test_time_since_last_custom(pd_es):
    """A custom primitive with uses_calc_time=True receives the cutoff time."""
    class TimeSinceLast(AggregationPrimitive):
        name = "time_since_last"
        input_types = [
            ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        # Tells the calculator to pass the cutoff time as ``time``.
        uses_calc_time = True
        def get_function(self):
            def time_since_last(values, time):
                # NOTE(review): uses values.iloc[0] (the FIRST value), unlike
                # the built-in TimeSinceLast; the expected values below match
                # that choice (they equal the TimeSinceFirst results above).
                time_since = time - values.iloc[0]
                return time_since.total_seconds()
            return time_since_last
    f = Feature(
        pd_es["log"].ww["datetime"],
        parent_dataframe_name="customers",
        primitive=TimeSinceLast,
    )
    fm = calculate_feature_matrix(
        [f],
        entityset=pd_es,
        instance_ids=[0, 1, 2],
        cutoff_time=datetime(2015, 6, 8),
    )
    correct = [131376600, 131289600, 131287800]
    # note: must round to nearest second
    assert all(fm[f.get_name()].round().values == correct)
def test_custom_primitive_multiple_inputs(pd_es):
    """A custom primitive taking two input columns works in DFS, including
    as a where-primitive over interesting values."""
    class MeanSunday(AggregationPrimitive):
        name = "mean_sunday"
        input_types = [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(logical_type=Datetime),
        ]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def get_function(self):
            def mean_sunday(numeric, datetime):
                """
                Finds the mean of non-null values of a feature that occurred on Sundays
                """
                # weekday == 6 is Sunday.
                days = pd.DatetimeIndex(datetime).weekday.values
                df = pd.DataFrame({"numeric": numeric, "time": days})
                return df[df["time"] == 6]["numeric"].mean()
            return mean_sunday
    fm, features = dfs(
        entityset=pd_es,
        target_dataframe_name="sessions",
        agg_primitives=[MeanSunday],
        trans_primitives=[],
    )
    mean_sunday_value = pd.Series([None, None, None, 2.5, 7, None])
    iterator = zip(fm["MEAN_SUNDAY(log.value, datetime)"], mean_sunday_value)
    for x, y in iterator:
        assert (pd.isnull(x) and pd.isnull(y)) or (x == y)
    # Re-run with interesting values so the primitive is also applied inside
    # a WHERE clause.
    pd_es.add_interesting_values()
    mean_sunday_value_priority_0 = pd.Series([None, None, None, 2.5, 0, None])
    fm, features = dfs(
        entityset=pd_es,
        target_dataframe_name="sessions",
        agg_primitives=[MeanSunday],
        trans_primitives=[],
        where_primitives=[MeanSunday],
    )
    where_feat = "MEAN_SUNDAY(log.value, datetime WHERE priority_level = 0)"
    for x, y in zip(fm[where_feat], mean_sunday_value_priority_0):
        assert (pd.isnull(x) and pd.isnull(y)) or (x == y)
def test_custom_primitive_default_kwargs(es):
    """Constructor kwargs of a custom primitive are stored per-instance.

    Only the wiring of base features and the ``n`` argument is checked, so
    the primitive does not need a get_function implementation.
    """
    class SumNTimes(AggregationPrimitive):
        name = "sum_n_times"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def __init__(self, n=1):
            self.n = n
    sum_n_1_n = 1
    sum_n_1_base_f = Feature(es["log"].ww["value"])
    sum_n_1 = Feature(
        [sum_n_1_base_f],
        parent_dataframe_name="sessions",
        primitive=SumNTimes(n=sum_n_1_n),
    )
    sum_n_2_n = 2
    sum_n_2_base_f = Feature(es["log"].ww["value_2"])
    sum_n_2 = Feature(
        [sum_n_2_base_f],
        parent_dataframe_name="sessions",
        primitive=SumNTimes(n=sum_n_2_n),
    )
    # Each feature keeps its own base feature and its own n.
    assert sum_n_1_base_f == sum_n_1.base_features[0]
    assert sum_n_1_n == sum_n_1.primitive.n
    assert sum_n_2_base_f == sum_n_2.base_features[0]
    assert sum_n_2_n == sum_n_2.primitive.n
def test_makes_numtrue(es):
    """DFS with NumTrue builds NUM_TRUE features at both depths."""
    if es.dataframe_type == Library.SPARK:
        pytest.xfail("Spark EntitySets do not support NumTrue primitive")
    features = DeepFeatureSynthesis(
        target_dataframe_name="sessions",
        entityset=es,
        agg_primitives=[NumTrue],
        trans_primitives=[],
    ).build_features()
    for name in ("customers.NUM_TRUE(log.purchased)", "NUM_TRUE(log.purchased)"):
        assert feature_with_name(features, name)
def test_make_three_most_common(pd_es):
    """A custom multi-output primitive (3 outputs) produces one column per
    output slot, NaN-padded when fewer than 3 distinct values exist."""
    class NMostCommoner(AggregationPrimitive):
        name = "pd_top3"
        input_types = ([ColumnSchema(semantic_tags={"category"})],)
        return_type = None
        number_output_features = 3
        def get_function(self):
            def pd_top3(x):
                counts = x.value_counts()
                counts = counts[counts > 0]
                array = np.array(counts[:3].index)
                # Pad with NaN so exactly 3 outputs are always returned.
                if len(array) < 3:
                    filler = np.full(3 - len(array), np.nan)
                    array = np.append(array, filler)
                return array
            return pd_top3
    fm, features = dfs(
        entityset=pd_es,
        target_dataframe_name="customers",
        instance_ids=[0, 1, 2],
        agg_primitives=[NMostCommoner],
        trans_primitives=[],
    )
    df = fm[["PD_TOP3(log.product_id)[%s]" % i for i in range(3)]]
    assert set(df.iloc[0].values[:2]) == set(
        ["coke zero", "toothpaste"],
    )  # coke zero and toothpaste have same number of occurrences
    assert df.iloc[0].values[2] in [
        "car",
        "brown bag",
    ]  # so just check that the top two match
    assert (
        df.iloc[1]
        .reset_index(drop=True)
        .equals(pd.Series(["coke zero", "Haribo sugar-free gummy bears", np.nan]))
    )
    assert (
        df.iloc[2]
        .reset_index(drop=True)
        .equals(pd.Series(["taco clock", np.nan, np.nan]))
    )
def test_stacking_multi(pd_es):
    """Each output slot of a multi-output primitive can be stacked on."""
    threecommon = NMostCommon(3)
    tc = Feature(
        pd_es["log"].ww["product_id"],
        parent_dataframe_name="sessions",
        primitive=threecommon,
    )
    stacked = []
    # Index into each of the three output slots and aggregate it upward.
    for i in range(3):
        stacked.append(
            Feature(tc[i], parent_dataframe_name="customers", primitive=NumUnique),
        )
    fm = calculate_feature_matrix(stacked, entityset=pd_es, instance_ids=[0, 1, 2])
    correct_vals = [[3, 2, 1], [2, 1, 0], [0, 0, 0]]
    correct_vals1 = [[3, 1, 1], [2, 1, 0], [0, 0, 0]]
    # either of the above can be correct, and the outcome depends on the sorting of
    # two values in the initial n most common function, which changes arbitrarily.
    for i in range(3):
        f = "NUM_UNIQUE(sessions.N_MOST_COMMON(log.product_id)[%d])" % i
        cols = fm.columns
        assert f in cols
        assert (
            fm[cols[i]].tolist() == correct_vals[i]
            or fm[cols[i]].tolist() == correct_vals1[i]
        )
def test_use_previous_pd_dateoffset(es):
    """use_previous accepts a pandas DateOffset as the rolling window."""
    total_events_pd = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        # 47h + 60min == a 48-hour window before the cutoff time.
        use_previous=pd.DateOffset(hours=47, minutes=60),
        primitive=Count,
    )
    feature_matrix = calculate_feature_matrix(
        [total_events_pd],
        es,
        cutoff_time=pd.Timestamp("2011-04-11 10:31:30"),
        instance_ids=[0, 1, 2],
    )
    feature_matrix = to_pandas(feature_matrix, index="id", sort_index=True)
    col_name = list(feature_matrix.head().keys())[0]
    assert (feature_matrix[col_name] == [1, 5, 2]).all()
def _assert_agg_feats_equal(f1, f2):
assert f1.unique_name() == f2.unique_name()
assert f1.child_dataframe_name == f2.child_dataframe_name
assert f1.parent_dataframe_name == f2.parent_dataframe_name
assert f1.relationship_path == f2.relationship_path
assert f1.use_previous == f2.use_previous
def test_override_multi_feature_names(pd_es):
    """A primitive overriding generate_names controls its output column names."""
    def gen_custom_names(
        primitive,
        base_feature_names,
        relationship_path_name,
        parent_dataframe_name,
        where_str,
        use_prev_str,
    ):
        # Template is filled in two stages: .format now, %-substitution of the
        # output index below.
        base_string = "Custom_%s({}.{})".format(
            parent_dataframe_name,
            base_feature_names,
        )
        return [base_string % i for i in range(primitive.number_output_features)]
    class NMostCommoner(AggregationPrimitive):
        name = "pd_top3"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"category"})
        number_output_features = 3
        def generate_names(
            self,
            base_feature_names,
            relationship_path_name,
            parent_dataframe_name,
            where_str,
            use_prev_str,
        ):
            return gen_custom_names(
                self,
                base_feature_names,
                relationship_path_name,
                parent_dataframe_name,
                where_str,
                use_prev_str,
            )
    fm, features = dfs(
        entityset=pd_es,
        target_dataframe_name="products",
        instance_ids=[0, 1, 2],
        agg_primitives=[NMostCommoner],
        trans_primitives=[],
    )
    # Rebuild the expected names with the same generator and check they all
    # appear in the feature matrix.
    expected_names = []
    base_names = [["value"], ["value_2"], ["value_many_nans"]]
    for name in base_names:
        expected_names += gen_custom_names(
            NMostCommoner,
            name,
            None,
            "products",
            None,
            None,
        )
    for name in expected_names:
        assert name in fm.columns
| 26,399 | 30.768953 | 89 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_absolute_diff.py | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import AbsoluteDiff
class TestAbsoluteDiff:
    """Behavioral tests for the AbsoluteDiff transform primitive."""

    def _check(self, values, expected, **kwargs):
        # Run the primitive (built with kwargs) over values and compare.
        func = AbsoluteDiff(**kwargs).get_function()
        np.testing.assert_array_equal(func(values), expected)

    def test_nan(self):
        self._check(
            pd.Series([np.nan, 5, 10, 20, np.nan, 10, np.nan]),
            pd.Series([np.nan, np.nan, 5, 10, 0, 10, 0]),
        )

    def test_regular(self):
        self._check(
            pd.Series([2, 5, 15, 3, 9, 4.5]),
            pd.Series([np.nan, 3, 10, 12, 6, 4.5]),
        )

    def test_method(self):
        self._check(
            pd.Series([2, np.nan, 15, 3, np.nan, 4.5]),
            pd.Series([np.nan, 13, 0, 12, 1.5, 0]),
            method="backfill",
        )

    def test_limit(self):
        self._check(
            pd.Series([2, np.nan, np.nan, np.nan, 3.0, 4.5]),
            pd.Series([np.nan, 0, 0, np.nan, np.nan, 1.5]),
            limit=2,
        )

    def test_zero(self):
        self._check(
            pd.Series([2, 0, 0, 5, 0, -4]),
            pd.Series([np.nan, 2, 0, 5, 5, 4]),
        )

    def test_empty(self):
        self._check(pd.Series([]), pd.Series([]))

    def test_inf(self):
        self._check(
            pd.Series([0, np.inf, 0, 5, np.NINF, np.inf, np.NINF]),
            pd.Series([np.nan, np.inf, np.inf, 5, np.inf, np.inf, np.inf]),
        )

    def test_raises(self):
        # An unknown fill method is rejected at construction time.
        with pytest.raises(ValueError):
            AbsoluteDiff(method="invalid")
| 2,345 | 37.459016 | 79 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_primitive_utils.py | import os
import pytest
from featuretools import list_primitives, summarize_primitives
from featuretools.primitives import (
AddNumericScalar,
Age,
Count,
Day,
Diff,
GreaterThan,
Haversine,
IsFreeEmailDomain,
IsNull,
Last,
Max,
Mean,
Min,
Mode,
Month,
MultiplyBoolean,
NMostCommon,
NumCharacters,
NumericLag,
NumUnique,
NumWords,
PercentTrue,
Skew,
Std,
Sum,
Weekday,
Year,
get_aggregation_primitives,
get_default_aggregation_primitives,
get_default_transform_primitives,
get_transform_primitives,
)
from featuretools.primitives.base import PrimitiveBase
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.utils import (
_check_input_types,
_get_descriptions,
_get_summary_primitives,
_get_unique_input_types,
list_primitive_files,
load_primitive_from_file,
)
from featuretools.utils.gen_utils import Library
def test_list_primitives_order():
    """Every registered primitive appears in list_primitives() with a
    matching description, compatibility flag, input types, and return type."""
    df = list_primitives()
    all_primitives = get_transform_primitives()
    all_primitives.update(get_aggregation_primitives())
    for name, primitive in all_primitives.items():
        assert name in df["name"].values
        row = df.loc[df["name"] == name].iloc[0]
        actual_desc = _get_descriptions([primitive])[0]
        # Only compare when a description exists.
        if actual_desc:
            assert actual_desc == row["description"]
        assert row["dask_compatible"] == (Library.DASK in primitive.compatibility)
        assert row["valid_inputs"] == ", ".join(
            _get_unique_input_types(primitive.input_types),
        )
        expected_return_type = (
            str(primitive.return_type) if primitive.return_type is not None else None
        )
        assert row["return_type"] == expected_return_type
    # Both primitive families must be represented.
    types = df["type"].values
    assert "aggregation" in types
    assert "transform" in types
def test_valid_input_types():
    """_get_unique_input_types renders schemas as de-duplicated strings."""
    cases = (
        (Haversine, {"<ColumnSchema (Logical Type = LatLong)>"}),
        (
            MultiplyBoolean,
            {
                "<ColumnSchema (Logical Type = Boolean)>",
                "<ColumnSchema (Logical Type = BooleanNullable)>",
            },
        ),
        (Sum, {"<ColumnSchema (Semantic Tags = ['numeric'])>"}),
    )
    for primitive, expected in cases:
        assert _get_unique_input_types(primitive.input_types) == expected
def test_descriptions():
    """_get_descriptions extracts the expected summary for each primitive."""
    expected = {
        NumCharacters: "Calculates the number of characters in a given string, including whitespace and punctuation.",
        Day: "Determines the day of the month from a datetime.",
        Last: "Determines the last value in a list.",
        GreaterThan: "Determines if values in one list are greater than another list.",
    }
    for primitive, description in expected.items():
        assert _get_descriptions([primitive]) == [description]
def test_get_descriptions_doesnt_truncate_primitive_description():
    """Descriptions keep all sentences up to the first section break.

    The inner class docstrings below are fixture data for _get_descriptions
    and must not be reworded.
    """
    # single line
    descr = _get_descriptions([IsNull])
    assert descr[0] == "Determines if a value is null."
    # multiple line; one sentence
    descr = _get_descriptions([Diff])
    assert (
        descr[0]
        == "Computes the difference between the value in a list and the previous value in that list."
    )
    # multiple lines; multiple sentences
    class TestPrimitive(TransformPrimitive):
        """This is text that continues on after the line break
        and ends in a period.
        This is text on one line without a period
        Examples:
            >>> absolute = Absolute()
            >>> absolute([3.0, -5.0, -2.4]).tolist()
            [3.0, 5.0, 2.4]
        """
        name = "test_primitive"
    descr = _get_descriptions([TestPrimitive])
    assert (
        descr[0]
        == "This is text that continues on after the line break and ends in a period. This is text on one line without a period"
    )
    # docstring ends after description
    class TestPrimitive2(TransformPrimitive):
        """This is text that continues on after the line break
        and ends in a period.
        This is text on one line without a period
        """
        name = "test_primitive"
    descr = _get_descriptions([TestPrimitive2])
    assert (
        descr[0]
        == "This is text that continues on after the line break and ends in a period. This is text on one line without a period"
    )
def test_get_default_aggregation_primitives():
    """The default aggregation primitive set is exactly the documented ten."""
    expected = {
        Sum,
        Std,
        Max,
        Skew,
        Min,
        Mean,
        Count,
        PercentTrue,
        NumUnique,
        Mode,
    }
    assert set(get_default_aggregation_primitives()) == expected
def test_get_default_transform_primitives():
    """The default transform primitive set is exactly the documented eight."""
    expected = {
        Age,
        Day,
        Year,
        Month,
        Weekday,
        Haversine,
        NumWords,
        NumCharacters,
    }
    assert set(get_default_transform_primitives()) == expected
@pytest.fixture
def this_dir():
    # Absolute path of the directory containing this test module.
    return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def primitives_to_install_dir(this_dir):
    # Directory of well-formed sample primitive files used by install tests.
    return os.path.join(this_dir, "primitives_to_install")
@pytest.fixture
def bad_primitives_files_dir(this_dir):
    # Directory of malformed primitive files used by the error-path tests.
    return os.path.join(this_dir, "bad_primitive_files")
def test_list_primitive_files(primitives_to_install_dir):
    """The three known sample primitive files are all listed."""
    files = list_primitive_files(primitives_to_install_dir)
    expected = {
        os.path.join(primitives_to_install_dir, filename)
        for filename in ("custom_max.py", "custom_mean.py", "custom_sum.py")
    }
    assert expected.issubset(set(files))
def test_load_primitive_from_file(primitives_to_install_dir):
    """Loading a primitive file yields a PrimitiveBase subclass."""
    primitive_file = os.path.join(primitives_to_install_dir, "custom_max.py")
    _, primitive_obj = load_primitive_from_file(primitive_file)
    assert issubclass(primitive_obj, PrimitiveBase)
def test_errors_more_than_one_primitive_in_file(bad_primitives_files_dir):
    """A file defining multiple primitives is rejected with a clear error."""
    primitive_file = os.path.join(bad_primitives_files_dir, "multiple_primitives.py")
    with pytest.raises(RuntimeError) as excinfo:
        load_primitive_from_file(primitive_file)
    assert str(excinfo.value) == f"More than one primitive defined in file {primitive_file}"
def test_errors_no_primitive_in_file(bad_primitives_files_dir):
    """A file defining no primitive is rejected with a clear error."""
    primitive_file = os.path.join(bad_primitives_files_dir, "no_primitives.py")
    with pytest.raises(RuntimeError) as excinfo:
        load_primitive_from_file(primitive_file)
    assert str(excinfo.value) == f"No primitive defined in file {primitive_file}"
def test_check_input_types():
    """_check_input_types accumulates logical types, semantic tags, and unique
    schema strings across a mix of primitives."""
    primitives = [Sum, Weekday, PercentTrue, Day, Std, NumericLag]
    log_in_type_checks = set()
    sem_tag_type_checks = set()
    unique_input_types = set()
    expected_log_in_check = {
        "boolean_nullable",
        "boolean",
        "datetime",
    }
    expected_sem_tag_type_check = {"numeric", "time_index"}
    expected_unique_input_types = {
        "<ColumnSchema (Logical Type = BooleanNullable)>",
        "<ColumnSchema (Semantic Tags = ['numeric'])>",
        "<ColumnSchema (Logical Type = Boolean)>",
        "<ColumnSchema (Logical Type = Datetime)>",
        "<ColumnSchema (Semantic Tags = ['time_index'])>",
    }
    # The three accumulator sets are shared across iterations on purpose.
    for prim in primitives:
        input_types_flattened = prim.flatten_nested_input_types(prim.input_types)
        _check_input_types(
            input_types_flattened,
            log_in_type_checks,
            sem_tag_type_checks,
            unique_input_types,
        )
    assert log_in_type_checks == expected_log_in_check
    assert sem_tag_type_checks == expected_sem_tag_type_check
    assert unique_input_types == expected_unique_input_types
def test_get_summary_primitives():
    """_get_summary_primitives tallies general, semantic-tag, and
    logical-type metrics over a hand-picked set of primitives."""
    primitives = [
        Sum,
        Weekday,
        PercentTrue,
        Day,
        Std,
        NumericLag,
        AddNumericScalar,
        IsFreeEmailDomain,
        NMostCommon,
    ]
    primitives_summary = _get_summary_primitives(primitives)
    # Expected tallies for the primitive set above.
    expected_unique_input_types = 7
    expected_unique_output_types = 6
    expected_uses_multi_input = 2
    expected_uses_multi_output = 1
    expected_uses_external_data = 1
    expected_controllable = 3
    expected_datetime_inputs = 2
    expected_bool = 1
    expected_bool_nullable = 1
    expected_time_index_tag = 1
    assert (
        primitives_summary["general_metrics"]["unique_input_types"]
        == expected_unique_input_types
    )
    assert (
        primitives_summary["general_metrics"]["unique_output_types"]
        == expected_unique_output_types
    )
    assert (
        primitives_summary["general_metrics"]["uses_multi_input"]
        == expected_uses_multi_input
    )
    assert (
        primitives_summary["general_metrics"]["uses_multi_output"]
        == expected_uses_multi_output
    )
    assert (
        primitives_summary["general_metrics"]["uses_external_data"]
        == expected_uses_external_data
    )
    assert (
        primitives_summary["general_metrics"]["are_controllable"]
        == expected_controllable
    )
    assert (
        primitives_summary["semantic_tag_metrics"]["time_index"]
        == expected_time_index_tag
    )
    assert (
        primitives_summary["logical_type_input_metrics"]["datetime"]
        == expected_datetime_inputs
    )
    assert primitives_summary["logical_type_input_metrics"]["boolean"] == expected_bool
    assert (
        primitives_summary["logical_type_input_metrics"]["boolean_nullable"]
        == expected_bool_nullable
    )
def test_summarize_primitives():
    """The first three Count rows are total, aggregation, transform."""
    df = summarize_primitives()
    num_trans = len(get_transform_primitives())
    num_agg = len(get_aggregation_primitives())
    expected_counts = (num_trans + num_agg, num_agg, num_trans)
    for row, expected in enumerate(expected_counts):
        assert df["Count"].iloc[row] == expected
| 10,206 | 29.930303 | 128 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/test_all_primitive_docstrings.py | from featuretools.primitives import get_aggregation_primitives, get_transform_primitives
def docstring_is_uniform(primitive):
    """Return True if ``primitive``'s docstring opens with an approved verb.

    Primitive docstrings are expected to start with a present-tense,
    third-person verb (e.g. "Calculates ...") so generated feature
    descriptions read uniformly.  A primitive whose ``__doc__`` is ``None``
    (i.e. it has no docstring at all) is reported as non-conforming instead
    of raising ``AttributeError``.
    """
    valid_verbs = (
        "Calculates",
        "Determines",
        "Transforms",
        "Computes",
        "Counts",
        "Negates",
        "Adds",
        "Subtracts",
        "Multiplies",
        "Divides",
        "Performs",
        "Returns",
        "Shifts",
        "Extracts",
        "Applies",
    )
    docstring = primitive.__doc__
    # str.startswith accepts a tuple of prefixes, so one call covers all verbs.
    return docstring is not None and docstring.startswith(valid_verbs)
def test_transform_primitive_docstrings():
    """Every transform primitive docstring starts with an approved verb."""
    assert all(
        docstring_is_uniform(primitive)
        for primitive in get_transform_primitives().values()
    )
def test_aggregation_primitive_docstrings():
    """Every aggregation primitive docstring starts with an approved verb."""
    assert all(
        docstring_is_uniform(primitive)
        for primitive in get_aggregation_primitives().values()
    )
| 850 | 24.029412 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_upper_case_count.py | import numpy as np
import pandas as pd
from featuretools.primitives import UpperCaseCount
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestUpperCaseCount(PrimitiveTestBase):
    """Behavioral tests for the UpperCaseCount primitive."""

    primitive = UpperCaseCount

    def test_strings(self):
        series = pd.Series(
            ["This IS a STRING.", "Testing AaA", "Testing AAA-BBB", "testing aaa"],
        )
        expected = [9.0, 3.0, 7.0, 0.0]
        func = self.primitive().get_function()
        np.testing.assert_array_equal(func(series), expected)

    def test_nan(self):
        # NaN propagates; the empty string counts as zero.
        series = pd.Series([np.nan, "", "This IS a STRING."])
        expected = [np.nan, 0.0, 9.0]
        func = self.primitive().get_function()
        np.testing.assert_array_equal(func(series), expected)

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        transform.append(self.primitive())
        valid_dfs(es, aggregation, transform, self.primitive)
| 1,118 | 31.911765 | 83 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_number_of_mentions.py | import numpy as np
import pandas as pd
from featuretools.primitives import NumberOfMentions
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestNumberOfMentions(PrimitiveTestBase):
    """Unit tests for the NumberOfMentions natural-language primitive."""

    primitive = NumberOfMentions

    def test_regular_input(self):
        """Counts @-mentions; embedded or bare @ signs do not count."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "@hello @hi @hello",
                "@and@",
                "andorandorand",
            ],
        )
        np.testing.assert_array_equal(func(data), [3.0, 0.0, 0.0])

    def test_unicode_input(self):
        """Handles non-ASCII handles and email-like text."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "@Ángel @Æ @ĘÁÊÚ",
                "@@@@Āndandandandand@",
                "andorandorand @32309",
                "example@gmail.com",
                "@example-20329",
            ],
        )
        np.testing.assert_array_equal(func(data), [3.0, 0.0, 1.0, 0.0, 1.0])

    def test_multiline(self):
        """Mentions are still found across newline-separated text."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "@\n\t\n",
                "@mention\n @mention2\n@\n\n",
            ],
        )
        np.testing.assert_array_equal(func(data), [0.0, 2.0])

    def test_null(self):
        """Null-like inputs map to NaN."""
        func = self.primitive().get_function()
        data = pd.Series([np.nan, pd.NA, None, "@test"])
        np.testing.assert_array_equal(func(data), [np.nan, np.nan, np.nan, 1.0])

    def test_alphanumeric_and_special(self):
        """Digits after @ count; punctuation-only text does not."""
        func = self.primitive().get_function()
        data = pd.Series(["@1or0", "@12", "#??!>@?@#>"])
        np.testing.assert_array_equal(func(data), [1.0, 1.0, 0.0])

    def test_underscore(self):
        """Underscores are valid mention characters."""
        func = self.primitive().get_function()
        data = pd.Series(["@user1", "@__yes", "#??!>@?@#>"])
        np.testing.assert_array_equal(func(data), [1.0, 1.0, 0.0])

    def test_with_featuretools(self, es):
        """The primitive composes with DFS on the test entityset."""
        transform_prims, agg_prims = find_applicable_primitives(self.primitive)
        transform_prims.append(self.primitive())
        valid_dfs(es, agg_prims, transform_prims, self.primitive)
| 2,367 | 28.974684 | 75 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_num_unique_separators.py | import numpy as np
import pandas as pd
from featuretools.primitives import NumUniqueSeparators
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestNumUniqueSeparators(PrimitiveTestBase):
    """Unit tests for the NumUniqueSeparators natural-language primitive."""

    primitive = NumUniqueSeparators

    def test_punctuation(self):
        """Counts distinct separator characters per string."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "This: is a test file",
                "This, is second line?",
                "third/line $1,000;",
                "and--subsequen't lines...",
                "*and, more..",
            ],
        )
        expected = pd.Series([1, 3, 3, 2, 3])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_other_delimeters(self):
        """Symbols outside the separator set contribute nothing."""
        func = self.primitive().get_function()
        data = pd.Series(["@#$%^&*()<>/[]\\`~-_=+"])
        expected = pd.Series([0])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_multiline(self):
        """Separators are counted across newline-joined text."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "This is a test file",
                "This is second line\nthird line $1000;\nand subsequent lines",
                "and more!",
            ],
        )
        expected = pd.Series([1, 3, 2])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_nans(self):
        """NaN input maps to NA; an empty string has zero separators."""
        func = self.primitive().get_function()
        data = pd.Series([np.nan, "", "third line."])
        expected = pd.Series([pd.NA, 0, 2])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_with_featuretools(self, es):
        """The primitive composes with DFS on the test entityset."""
        transform_prims, agg_prims = find_applicable_primitives(self.primitive)
        transform_prims.append(self.primitive())
        valid_dfs(es, agg_prims, transform_prims, self.primitive)
| 2,012 | 33.706897 | 85 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_number_of_hashtags.py | import numpy as np
import pandas as pd
from featuretools.primitives import NumberOfHashtags
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestNumberOfHashtags(PrimitiveTestBase):
    """Unit tests for the NumberOfHashtags natural-language primitive."""

    primitive = NumberOfHashtags

    def test_regular_input(self):
        """Counts space-separated hashtags; run-together tags do not count."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "#hello #hi #hello",
                "#regular#expression#0or1#yes",
                "andorandorand #32309",
            ],
        )
        np.testing.assert_array_equal(func(data), [3.0, 0.0, 0.0])

    def test_unicode_input(self):
        """Handles non-ASCII hashtag text."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "#Ángel #Æ #ĘÁÊÚ",
                "#############Āndandandandand###",
                "andorandorand #32309",
            ],
        )
        np.testing.assert_array_equal(func(data), [3.0, 0.0, 0.0])

    def test_multiline(self):
        """Hashtags are still found across newline-separated text."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "#\n\t\n",
                "#hashtag\n#hashtag2\n#\n\n",
            ],
        )
        np.testing.assert_array_equal(func(data), [0.0, 2.0])

    def test_null(self):
        """Null-like inputs map to NaN."""
        func = self.primitive().get_function()
        data = pd.Series([np.nan, pd.NA, None, "#test"])
        np.testing.assert_array_equal(func(data), [np.nan, np.nan, np.nan, 1.0])

    def test_alphanumeric_and_special(self):
        """A hashtag needs at least one letter; digit-only tags do not count."""
        func = self.primitive().get_function()
        data = pd.Series(["#1or0", "#12", "#??!>@?@#>"])
        np.testing.assert_array_equal(func(data), [1.0, 0.0, 0.0])

    def test_underscore(self):
        """Underscores are valid hashtag characters."""
        func = self.primitive().get_function()
        data = pd.Series(["#no", "#__yes", "#??!>@?@#>"])
        np.testing.assert_array_equal(func(data), [1.0, 1.0, 0.0])

    def test_with_featuretools(self, es):
        """The primitive composes with DFS on the test entityset."""
        transform_prims, agg_prims = find_applicable_primitives(self.primitive)
        transform_prims.append(self.primitive())
        valid_dfs(es, agg_prims, transform_prims, self.primitive)
| 2,323 | 29.181818 | 75 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_title_word_count.py | import numpy as np
import pandas as pd
from featuretools.primitives import TitleWordCount
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestTitleWordCount(PrimitiveTestBase):
    """Unit tests for the TitleWordCount natural-language primitive."""

    primitive = TitleWordCount

    def test_strings(self):
        """Counts title-cased (capitalized) words per string."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "My favorite movie is Jaws.",
                "this is a string",
                "AAA",
                "I bought a Yo-Yo",
            ],
        )
        expected = [2.0, 0.0, 1.0, 2.0]
        np.testing.assert_array_equal(expected, func(data))

    def test_nan(self):
        """NaN propagates; an empty string yields zero."""
        func = self.primitive().get_function()
        data = pd.Series([np.nan, "", "My favorite movie is Jaws."])
        expected = [np.nan, 0.0, 2.0]
        np.testing.assert_array_equal(expected, func(data))

    def test_with_featuretools(self, es):
        """The primitive composes with DFS on the test entityset."""
        transform_prims, agg_prims = find_applicable_primitives(self.primitive)
        transform_prims.append(self.primitive())
        valid_dfs(es, agg_prims, transform_prims, self.primitive)
| 1,213 | 30.128205 | 75 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_num_characters.py | import numpy as np
import pandas as pd
from featuretools.primitives import NumCharacters
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestNumCharacters(PrimitiveTestBase):
    """Unit tests for the NumCharacters natural-language primitive."""

    primitive = NumCharacters

    def test_general(self):
        """Counts every character, including spaces and punctuation."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "test test test test",
                "test TEST test TEST,test test test",
                "and subsequent lines...",
            ],
        )
        expected = pd.Series([19, 34, 23])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_special_characters_and_whitespace(self):
        """Tabs and newlines are characters too."""
        func = self.primitive().get_function()
        data = pd.Series(["50% 50 50% \t\t\t\n\n", "$5,3040 a test* test"])
        expected = pd.Series([16, 20])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_unicode_input(self):
        """Accented characters count as single characters."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "Ángel Angel Ángel ángel",
            ],
        )
        expected = pd.Series([23])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_null(self):
        """Null-like inputs map to NA."""
        func = self.primitive().get_function()
        data = pd.Series([np.nan, pd.NA, None, "This is a test file."])
        expected = pd.Series([pd.NA, pd.NA, pd.NA, 20])
        pd.testing.assert_series_equal(
            func(data),
            expected,
            check_names=False,
            check_dtype=False,
        )

    def test_with_featuretools(self, es):
        """The primitive composes with DFS on the test entityset."""
        transform_prims, agg_prims = find_applicable_primitives(self.primitive)
        transform_prims.append(self.primitive())
        valid_dfs(es, agg_prims, transform_prims, self.primitive)
| 1,918 | 31.525424 | 75 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_number_of_common_words.py | import numpy as np
import pandas as pd
from featuretools.primitives import NumberOfCommonWords
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestNumberOfCommonWords(PrimitiveTestBase):
    """Unit tests for the NumberOfCommonWords natural-language primitive."""

    primitive = NumberOfCommonWords
    # Small stand-in for the primitive's word set, shared across tests.
    test_word_bank = {"and", "a", "is"}

    def test_delimiter_override(self):
        """A custom delimiter regex controls how words are split."""
        func = self.primitive(
            word_set=self.test_word_bank,
            delimiters_regex="[ ,;]",
        ).get_function()
        data = pd.Series(
            [
                "This is a test file.",
                "This,is,second,line, and?",
                "and;subsequent;lines...",
            ],
        )
        expected = pd.Series([2, 2, 1])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_multiline(self):
        """Common words are counted across newline-joined text."""
        func = self.primitive(self.test_word_bank).get_function()
        data = pd.Series(
            [
                "This is a test file.",
                "This is second line\nthird line $1000;\nand subsequent lines",
            ],
        )
        expected = pd.Series([2, 2])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_null(self):
        """Null-like inputs map to NA."""
        func = self.primitive(self.test_word_bank).get_function()
        data = pd.Series([np.nan, pd.NA, None, "This is a test file."])
        expected = pd.Series([pd.NA, pd.NA, pd.NA, 2])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_case_insensitive(self):
        """Matching against the word set ignores case."""
        func = self.primitive(self.test_word_bank).get_function()
        data = pd.Series(["Is", "a", "AND"])
        expected = pd.Series([1, 1, 1])
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_with_featuretools(self, es):
        """The primitive composes with DFS on the test entityset."""
        transform_prims, agg_prims = find_applicable_primitives(self.primitive)
        transform_prims.append(self.primitive())
        valid_dfs(es, agg_prims, transform_prims, self.primitive)
| 2,075 | 31.952381 | 79 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_number_of_words_in_quotes.py | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import NumberOfWordsInQuotes
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestNumberOfWordsInQuotes(PrimitiveTestBase):
    """Unit tests for the NumberOfWordsInQuotes natural-language primitive."""

    primitive = NumberOfWordsInQuotes

    def test_regular_double_quotes_input(self):
        """Counts words inside double-quoted spans only."""
        func = self.primitive("double").get_function()
        data = pd.Series(
            [
                'Yes " "',
                '"Hello this is a test"',
                '"Yes" " "',
                "",
                '"Python, java prolog"',
                '"Python, java prolog" three words here "binary search algorithm"',
                '"Diffie-Hellman key exchange"',
                '"user@email.com"',
                '"https://alteryx.com"',
                '"100,000"',
                '"This Borderlands game here"" is the perfect conclusion to the ""Borderlands 3"" line, which focuses on the fans ""favorite character and gives the players the opportunity to close for a long time some very important questions about\'s character and the memorable scenery with which the players interact.',
            ],
        )
        expected = pd.Series([0, 5, 1, 0, 3, 6, 3, 1, 1, 1, 6], dtype="Int64")
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_captures_regular_single_quotes(self):
        """Counts words inside single-quoted spans only."""
        func = self.primitive("single").get_function()
        data = pd.Series(
            [
                "'Hello this is a test'",
                "'Python, Java Prolog'",
                "'Python, Java Prolog' three words here 'three words here'",
                "'Diffie-Hellman key exchange'",
                "'user@email.com'",
                "'https://alteryx.com'",
                "'there's where's here's' word 'word'",
                "'100,000'",
            ],
        )
        expected = pd.Series([5, 3, 6, 3, 1, 1, 4, 1], dtype="Int64")
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_captures_both_single_and_double_quotes(self):
        """The default quote type counts both single and double quotes."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "'test test test test' three words here \"test test test!\"",
            ],
        )
        expected = pd.Series([7], dtype="Int64")
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_unicode_input(self):
        """Non-ASCII words inside quotes are counted."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                '"Ángel"',
                '"Ángel" word word',
            ],
        )
        expected = pd.Series([1, 1], dtype="Int64")
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_multiline(self):
        """Quoted spans may contain newlines."""
        func = self.primitive().get_function()
        data = pd.Series(
            [
                "'Yes\n, this is me'",
            ],
        )
        expected = pd.Series([4], dtype="Int64")
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_raises_error_invalid_args(self):
        """An unknown quote_type raises ValueError."""
        error_msg = (
            "NULL is not a valid quote_type. Specify 'both', 'single', or 'double'"
        )
        with pytest.raises(
            ValueError,
            match=error_msg,
        ):
            self.primitive(quote_type="NULL")

    def test_null(self):
        """Null-like inputs map to NA."""
        func = self.primitive().get_function()
        data = pd.Series([np.nan, pd.NA, None, '"test"'])
        expected = pd.Series([pd.NA, pd.NA, pd.NA, 1.0], dtype="Int64")
        pd.testing.assert_series_equal(func(data), expected, check_names=False)

    def test_with_featuretools(self, es):
        """The primitive composes with DFS on the test entityset."""
        transform_prims, agg_prims = find_applicable_primitives(self.primitive)
        transform_prims.append(self.primitive())
        valid_dfs(es, agg_prims, transform_prims, self.primitive)
| 3,956 | 36.685714 | 323 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.