repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
featuretools | featuretools-main/featuretools/primitives/utils.py | import importlib.util
import os
from inspect import getfullargspec, getsource, isclass
from typing import Dict, List
import pandas as pd
from woodwork import list_logical_types, list_semantic_tags, type_system
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import NaturalLanguage
import featuretools
from featuretools.primitives import NumberOfCommonWords
from featuretools.primitives.base import (
AggregationPrimitive,
PrimitiveBase,
TransformPrimitive,
)
from featuretools.utils.gen_utils import Library, find_descendents
def _get_primitives(primitive_kind):
    """Collect every primitive class of the given kind.

    Scans the ``featuretools.primitives`` namespace and returns a mapping of
    lowercased primitive name to primitive class, for each class that is a
    subclass of `primitive_kind` and has a non-empty ``name`` attribute.
    """
    found = {}
    for attr_name in dir(featuretools.primitives):
        candidate = getattr(featuretools.primitives, attr_name)
        if not isclass(candidate):
            continue
        if issubclass(candidate, primitive_kind) and candidate.name:
            found[candidate.name.lower()] = candidate
    return found
def get_aggregation_primitives():
    """Return every aggregation primitive, regardless of compatibility."""
    return _get_primitives(featuretools.primitives.AggregationPrimitive)
def get_transform_primitives():
    """Return every transform primitive, regardless of compatibility."""
    return _get_primitives(featuretools.primitives.TransformPrimitive)
def get_all_primitives():
    """Return a mapping of class name to class for every named primitive."""
    candidates = (
        getattr(featuretools.primitives, attr_name)
        for attr_name in dir(featuretools.primitives)
    )
    return {
        candidate.__name__: candidate
        for candidate in candidates
        if isclass(candidate)
        and issubclass(candidate, PrimitiveBase)
        and candidate.name
    }
def _get_natural_language_primitives():
    """Return all transform primitives that take a NaturalLanguage input,
    regardless of compatibility.
    """

    def _accepts_natural_language(primitive):
        # input_types may be a flat list of ColumnSchema, or a list of lists
        # (multiple alternative signatures); normalize each entry to a list.
        for entry in primitive.input_types:
            schemas = entry if isinstance(entry, list) else [entry]
            if any(
                isinstance(schema.logical_type, NaturalLanguage)
                for schema in schemas
            ):
                return True
        return False

    return {
        name: primitive
        for name, primitive in get_transform_primitives().items()
        if _accepts_natural_language(primitive)
    }
def list_primitives():
    """Returns a DataFrame that lists and describes each built-in primitive."""

    def _build_frame(getter, type_label):
        # Assemble one descriptive DataFrame for a single primitive kind.
        names, primitives, valid_inputs, return_type = _get_names_primitives(getter)
        frame = pd.DataFrame(
            {
                "name": names,
                "description": _get_descriptions(primitives),
                "dask_compatible": [
                    Library.DASK in primitive.compatibility
                    for primitive in primitives
                ],
                "spark_compatible": [
                    Library.SPARK in primitive.compatibility
                    for primitive in primitives
                ],
                "valid_inputs": valid_inputs,
                "return_type": return_type,
            },
        )
        frame["type"] = type_label
        return frame

    agg_df = _build_frame(get_aggregation_primitives, "aggregation")
    transform_df = _build_frame(get_transform_primitives, "transform")
    # Fixed column order: aggregation primitives first, then transforms.
    columns = [
        "name",
        "type",
        "dask_compatible",
        "spark_compatible",
        "description",
        "valid_inputs",
        "return_type",
    ]
    return pd.concat([agg_df, transform_df], ignore_index=True)[columns]
def summarize_primitives() -> pd.DataFrame:
    """Returns a metrics summary DataFrame of all primitives found in list_primitives."""
    trans_names, trans_primitives, _, _ = _get_names_primitives(
        get_transform_primitives,
    )
    agg_names, agg_primitives, _, _ = _get_names_primitives(
        get_aggregation_primitives,
    )
    n_transform = len(trans_names)
    n_aggregation = len(agg_names)
    metrics = _get_summary_primitives(trans_primitives + agg_primitives)
    # Assemble the summary in display order: totals, general metrics, then
    # per-logical-type and per-semantic-tag input counts.
    summary = {
        "total_primitives": n_transform + n_aggregation,
        "aggregation_primitives": n_aggregation,
        "transform_primitives": n_transform,
        **metrics["general_metrics"],
    }
    for ltype, count in metrics["logical_type_input_metrics"].items():
        summary[f"uses_{ltype}_input"] = count
    for tag, count in metrics["semantic_tag_metrics"].items():
        summary[f"uses_{tag}_tag_input"] = count
    return pd.DataFrame(
        [{"Metric": metric, "Count": count} for metric, count in summary.items()],
    )
def get_default_aggregation_primitives():
    """Return the list of default aggregation primitive classes."""
    return [
        featuretools.primitives.Sum,
        featuretools.primitives.Std,
        featuretools.primitives.Max,
        featuretools.primitives.Skew,
        featuretools.primitives.Min,
        featuretools.primitives.Mean,
        featuretools.primitives.Count,
        featuretools.primitives.PercentTrue,
        featuretools.primitives.NumUnique,
        featuretools.primitives.Mode,
    ]
def get_default_transform_primitives():
    """Return the list of default transform primitive classes."""
    # featuretools.primitives.TimeSince
    return [
        featuretools.primitives.Age,
        featuretools.primitives.Day,
        featuretools.primitives.Year,
        featuretools.primitives.Month,
        featuretools.primitives.Weekday,
        featuretools.primitives.Haversine,
        featuretools.primitives.NumWords,
        featuretools.primitives.NumCharacters,
    ]
def _get_descriptions(primitives):
descriptions = []
for prim in primitives:
description = ""
if prim.__doc__ is not None:
# Break on the empty line between the docstring description and the remainder of the docstring
description = prim.__doc__.split("\n\n")[0]
# remove any excess whitespace from line breaks
description = " ".join(description.split())
descriptions.append(description)
return descriptions
def _get_summary_primitives(primitives: List) -> Dict[str, int]:
    """Provides metrics for a list of primitives.

    Returns a dict with three sections:
        - "general_metrics": counts of unique input/output type strings and of
          primitives that take multiple inputs, produce multiple outputs, ship
          external data, or accept constructor arguments.
        - "logical_type_input_metrics": per logical type, how many primitives
          accept it as an input.
        - "semantic_tag_metrics": per semantic tag, how many primitives accept
          an input carrying that tag.
    """
    unique_input_types = set()
    unique_output_types = set()
    uses_multi_input = 0
    uses_multi_output = 0
    uses_external_data = 0
    are_controllable = 0
    # Seed every known logical type / semantic tag with a zero count so the
    # output always covers the full registry, not just types actually used.
    logical_type_metrics = {
        log_type: 0 for log_type in list(list_logical_types()["type_string"])
    }
    semantic_tag_metrics = {
        sem_tag: 0 for sem_tag in list(list_semantic_tags()["name"])
    }
    semantic_tag_metrics.update(
        {"foreign_key": 0},
    )  # not currently in list_semantic_tags()
    for prim in primitives:
        # Per-primitive sets so each primitive counts at most once per
        # logical type / semantic tag, no matter how many inputs use it.
        log_in_type_checks = set()
        sem_tag_type_checks = set()
        input_types = prim.flatten_nested_input_types(prim.input_types)
        _check_input_types(
            input_types,
            log_in_type_checks,
            sem_tag_type_checks,
            unique_input_types,
        )
        for ltype in list(log_in_type_checks):
            logical_type_metrics[ltype] += 1
        for sem_tag in list(sem_tag_type_checks):
            semantic_tag_metrics[sem_tag] += 1
        if len(prim.input_types) > 1:
            uses_multi_input += 1
        # checks if number_output_features is set as an instance variable or set as a constant
        if (
            "self.number_output_features =" in getsource(prim.__init__)
            or prim.number_output_features > 1
        ):
            uses_multi_output += 1
        unique_output_types.add(str(prim.return_type))
        # NOTE(review): presumably primitives backed by an external data file
        # expose a `filename` attribute — confirm against primitive base classes.
        if hasattr(prim, "filename"):
            uses_external_data += 1
        # An __init__ with parameters beyond `self` means the primitive is
        # configurable ("controllable") by the user.
        if len(getfullargspec(prim.__init__).args) > 1:
            are_controllable += 1
    return {
        "general_metrics": {
            "unique_input_types": len(unique_input_types),
            "unique_output_types": len(unique_output_types),
            "uses_multi_input": uses_multi_input,
            "uses_multi_output": uses_multi_output,
            "uses_external_data": uses_external_data,
            "are_controllable": are_controllable,
        },
        "logical_type_input_metrics": logical_type_metrics,
        "semantic_tag_metrics": semantic_tag_metrics,
    }
def _check_input_types(
    input_types: List[ColumnSchema],
    log_in_type_checks: set,
    sem_tag_type_checks: set,
    unique_input_types: set,
):
    """Record which logical types and semantic tags appear in a list of
    Woodwork input types, and track the unique input type strings.

    All three accumulator sets are mutated in place.
    """
    for schema in input_types:
        tags = schema.semantic_tags
        if tags:
            sem_tag_type_checks.update(tags)
        if schema.logical_type:
            log_in_type_checks.add(schema.logical_type.type_string)
        unique_input_types.add(str(schema))
def _get_names_primitives(primitive_func):
    """Collect name, class, valid inputs, and return type for each primitive.

    Args:
        primitive_func (callable): Zero-argument function returning a mapping
            of primitive name to primitive class (e.g.
            ``get_transform_primitives``).

    Returns:
        tuple(list, list, list, list): Parallel lists of primitive names,
        primitive classes, comma-joined valid input type strings, and return
        type strings (``None`` when a primitive declares no return type).
    """
    names = []
    primitives = []
    valid_inputs = []
    return_type = []
    for name, primitive in primitive_func().items():
        names.append(name)
        primitives.append(primitive)
        input_types = _get_unique_input_types(primitive.input_types)
        valid_inputs.append(", ".join(input_types))
        # Use a plain if/else: the original conditional *expression* was
        # evaluated only for its .append() side effects, which is both
        # unidiomatic and easy to misread.
        if primitive.return_type is not None:
            return_type.append(str(primitive.return_type))
        else:
            return_type.append(None)
    return names, primitives, valid_inputs, return_type
def _get_unique_input_types(input_types):
types = set()
for input_type in input_types:
if isinstance(input_type, list):
types |= _get_unique_input_types(input_type)
else:
types.add(str(input_type))
return types
def list_primitive_files(directory):
    """Return paths of files in `directory` that might contain primitives."""
    return [
        os.path.join(directory, filename)
        for filename in os.listdir(directory)
        if check_valid_primitive_path(filename)
    ]
def check_valid_primitive_path(path):
    """Determine whether `path` could point at a primitive definition file.

    A valid path is a non-directory ``.py`` file whose base name is neither
    hidden (leading ``.``) nor dunder/private (leading ``__``).

    Args:
        path (str): File path (or bare filename) to check.

    Returns:
        bool: True if the path may contain primitives, False otherwise.
    """
    if os.path.isdir(path):
        return False
    filename = os.path.basename(path)
    # startswith/endswith handle names shorter than the tested prefix/suffix,
    # unlike the previous slice comparisons, which raised IndexError for an
    # empty basename (e.g. a path ending in a separator).
    return not filename.startswith(("__", ".")) and filename.endswith(".py")
def load_primitive_from_file(filepath):
    """Load the single primitive class defined in a file.

    Imports `filepath` as a module and returns a ``(name, class)`` tuple for
    the one primitive it defines. Raises RuntimeError when the file defines
    zero primitives or more than one.
    """
    # The first argument to spec_from_file_location is the name given to the
    # module created from the file; use the filename without its .py suffix.
    module_name = os.path.basename(filepath)[:-3]
    spec = importlib.util.spec_from_file_location(module_name, filepath)
    loaded_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded_module)
    # Keep subclasses of PrimitiveBase, excluding the two base classes that
    # primitive files typically import.
    primitives = [
        (attr_name, attr)
        for attr_name, attr in vars(loaded_module).items()
        if isclass(attr)
        and issubclass(attr, PrimitiveBase)
        and attr not in (AggregationPrimitive, TransformPrimitive)
    ]
    if len(primitives) == 0:
        raise RuntimeError("No primitive defined in file %s" % filepath)
    if len(primitives) > 1:
        raise RuntimeError("More than one primitive defined in file %s" % filepath)
    return primitives[0]
def serialize_primitive(primitive: PrimitiveBase):
    """Build a dictionary with the data necessary to construct the given primitive."""
    arguments = dict(primitive.get_arguments())
    cls = type(primitive)
    if cls == NumberOfCommonWords and "word_set" in arguments:
        # Store the set as a list so the arguments stay serializable; the
        # deserializer converts it back to a set.
        arguments["word_set"] = list(arguments["word_set"])
    return {
        "type": cls.__name__,
        "module": cls.__module__,
        "arguments": arguments,
    }
class PrimitivesDeserializer(object):
    """
    This class wraps a cache and a generator which iterates over all primitive
    classes. When deserializing a primitive if it is not in the cache then we
    iterate until it is found, adding every seen class to the cache. When
    deserializing the next primitive the iteration resumes where it left off. This
    means that we never visit a class more than once.
    """

    def __init__(self):
        # Cache to avoid repeatedly searching for primitive class
        # (class_name, module_name) -> class
        self.class_cache = {}
        # Lazy generator over every PrimitiveBase subclass; consumed
        # incrementally by _find_class_in_descendants.
        self.primitive_classes = find_descendents(PrimitiveBase)

    def deserialize_primitive(self, primitive_dict):
        """
        Construct a primitive from the given dictionary (output from
        serialize_primitive).

        Raises:
            RuntimeError: If no primitive class matching the dictionary's
                "type"/"module" entries can be found.
        """
        class_name = primitive_dict["type"]
        module_name = primitive_dict["module"]
        # Keyed on (class name, top-level package) — only the first dotted
        # component of the module path participates in the lookup.
        class_cache_key = (class_name, module_name.split(".")[0])
        if class_cache_key in self.class_cache:
            cls = self.class_cache[class_cache_key]
        else:
            cls = self._find_class_in_descendants(class_cache_key)
        if not cls:
            raise RuntimeError(
                'Primitive "%s" in module "%s" not found' % (class_name, module_name),
            )
        arguments = primitive_dict["arguments"]
        if cls == NumberOfCommonWords and "word_set" in arguments:
            # We converted word_set from a set to a list to make it serializable,
            # we should convert it back now.
            arguments["word_set"] = set(arguments["word_set"])
        primitive_instance = cls(**arguments)
        return primitive_instance

    def _find_class_in_descendants(self, search_key):
        # Walk the shared generator, caching every class seen, and stop at the
        # first match. Returns None implicitly when the generator is exhausted.
        for cls in self.primitive_classes:
            cls_key = (cls.__name__, cls.__module__.split(".")[0])
            self.class_cache[cls_key] = cls
            if cls_key == search_key:
                return cls
def get_all_logical_type_names():
    """Return a mapping of class name to class for every registered Woodwork
    logical type.
    """
    return {
        logical_type.__name__: logical_type
        for logical_type in type_system.registered_types
    }
| 15,059 | 32.318584 | 133 | py |
featuretools | featuretools-main/featuretools/primitives/__init__.py | # flake8: noqa
import inspect
import logging
import traceback
import pkg_resources
from featuretools.primitives.standard import *
from featuretools.primitives.utils import (
get_aggregation_primitives,
get_default_aggregation_primitives,
get_default_transform_primitives,
get_transform_primitives,
list_primitives,
summarize_primitives,
)
def _load_primitives():
    """Load in a list of primitives registered by other libraries into Featuretools.

    Example entry_points definition for a library using this entry point either in:
        - setup.py:
            setup(
                entry_points={
                    'featuretools_primitives': [
                        'other_library = other_library',
                    ],
                },
            )
        - setup.cfg:
            [options.entry_points]
            featuretools_primitives =
                other_library = other_library
        - pyproject.toml:
            [project.entry-points."featuretools_primitives"]
            other_library = "other_library"

    where `other_library` is a top-level module containing all the primitives.
    """
    logger = logging.getLogger("featuretools")
    base_primitives = AggregationPrimitive, TransformPrimitive  # noqa: F405
    for entry_point in pkg_resources.iter_entry_points("featuretools_primitives"):
        try:
            loaded = entry_point.load()
        except Exception:
            # A broken plugin must not prevent featuretools from importing;
            # warn and move on to the next entry point.
            message = f'Featuretools failed to load "{entry_point.name}" primitives from "{entry_point.module_name}". '
            message += "For a full stack trace, set logging to debug."
            logger.warning(message)
            logger.debug(traceback.format_exc())
            continue
        for key in dir(loaded):
            primitive = getattr(loaded, key, None)
            # Only concrete primitive subclasses are registered; the two base
            # classes themselves are excluded.
            if (
                inspect.isclass(primitive)
                and issubclass(primitive, base_primitives)
                and primitive not in base_primitives
            ):
                name = primitive.__name__
                # Registration is done by injecting the class into this
                # module's namespace; existing names are never overwritten.
                scope = globals()
                if name in scope:
                    this_module, that_module = (
                        primitive.__module__,
                        scope[name].__module__,
                    )
                    message = f'While loading primitives via "{entry_point.name}" entry point, '
                    message += (
                        f'ignored primitive "{name}" from "{this_module}" because '
                    )
                    message += (
                        f'a primitive with that name already exists in "{that_module}"'
                    )
                    logger.warning(message)
                else:
                    scope[name] = primitive


# Run plugin discovery once, at import time.
_load_primitives()
| 2,800 | 30.47191 | 119 | py |
featuretools | featuretools-main/featuretools/primitives/standard/__init__.py | # flake8: noqa
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.aggregation import *
from featuretools.primitives.standard.transform import *
| 305 | 50 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/num_true.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, IntegerNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
class NumTrue(AggregationPrimitive):
    """Counts the number of `True` values.
    Description:
        Given a list of booleans, return the number
        of `True` values. Ignores 'NaN'.
    Examples:
        >>> num_true = NumTrue()
        >>> num_true([True, False, True, True, None])
        3
    """

    name = "num_true"
    input_types = [
        [ColumnSchema(logical_type=Boolean)],
        [ColumnSchema(logical_type=BooleanNullable)],
    ]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0
    stack_on = []
    stack_on_exclude = []
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "the number of times {} is true"

    def get_function(self, agg_type=Library.PANDAS):
        if agg_type != Library.DASK:
            return np.sum

        # Dask needs an explicit two-stage aggregation: per-partition sums,
        # then a sum of the partial results.
        def chunk(partition):
            partial = partition.agg(np.sum)
            if partial.dtype == "bool":
                # Cast boolean partial sums to integers before combining.
                partial = partial.astype("int64")
            return partial

        def agg(partials):
            return partials.agg(np.sum)

        return dd.Aggregation(self.name, chunk=chunk, agg=agg)
| 1,515 | 28.72549 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/any_primitive.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
class Any(AggregationPrimitive):
    """Determines if any value is 'True' in a list.
    Description:
        Given a list of booleans, return `True` if one or
        more of the values are `True`.
    Examples:
        >>> any = Any()
        >>> any([False, False, False, True])
        True
    """

    name = "any"
    input_types = [
        [ColumnSchema(logical_type=Boolean)],
        [ColumnSchema(logical_type=BooleanNullable)],
    ]
    return_type = ColumnSchema(logical_type=Boolean)
    stack_on_self = False
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "whether any of {} are true"

    def get_function(self, agg_type=Library.PANDAS):
        if agg_type != Library.DASK:
            return np.any

        # Both stages of the Dask aggregation apply the same reduction.
        def reduce_any(partition):
            return partition.agg(np.any)

        return dd.Aggregation(self.name, chunk=reduce_any, agg=reduce_any)
| 1,266 | 26.543478 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/is_unique.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base import AggregationPrimitive
class IsUnique(AggregationPrimitive):
    """Determines whether or not a series of discrete is all unique.
    Description:
        Given a series of discrete values, return True if each
        value in the series is unique. If any value is repeated,
        return False.
    Examples:
        >>> is_unique = IsUnique()
        >>> is_unique(['red', 'blue', 'green', 'yellow'])
        True
        If the series is not unique, return False
        >>> is_unique = IsUnique()
        >>> is_unique(['red', 'blue', 'green', 'blue'])
        False
    """

    name = "is_unique"
    input_types = [ColumnSchema(semantic_tags={"category"})]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    stack_on_self = False
    default_value = False

    def get_function(self):
        # Delegate to pandas' is_unique property on the input series.
        def is_unique(values):
            return values.is_unique

        return is_unique
| 1,035 | 26.263158 | 68 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Count(AggregationPrimitive):
    """Determines the total number of values, excluding `NaN`.
    Examples:
        >>> count = Count()
        >>> count([1, 2, 3, 4, 5, None])
        5
    """

    name = "count"
    input_types = [ColumnSchema(semantic_tags={"index"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the number"

    def get_function(self, agg_type=Library.PANDAS):
        # Dask and Spark take the string alias of the aggregation.
        if agg_type in (Library.DASK, Library.SPARK):
            return "count"
        return pd.Series.count

    def generate_name(
        self,
        base_feature_names,
        relationship_path_name,
        parent_dataframe_name,
        where_str,
        use_prev_str,
    ):
        # Count features are named after the relationship, not a base feature.
        return f"COUNT({relationship_path_name}{where_str}{use_prev_str})"
| 1,207 | 28.463415 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/skew.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Skew(AggregationPrimitive):
    """Computes the extent to which a distribution differs from a normal distribution.
    Description:
        For normally distributed data, the skewness should be about 0.
        A skewness value > 0 means that there is more weight in the
        left tail of the distribution.
    Examples:
        >>> skew = Skew()
        >>> skew([1, 10, 30, None])
        1.0437603722639681
    """

    name = "skew"
    # One numeric input column; produces a numeric value.
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    # This primitive is not stacked on other aggregations or on itself.
    stack_on = []
    stack_on_self = False
    description_template = "the skewness of {}"

    def get_function(self, agg_type=Library.PANDAS):
        # Delegate directly to pandas; per the doctest above, NaN values
        # in the input are skipped.
        return pd.Series.skew
| 951 | 29.709677 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/min_primitive.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Min(AggregationPrimitive):
    """Calculates the smallest value, ignoring `NaN` values.
    Examples:
        >>> min = Min()
        >>> min([1, 2, 3, 4, 5, None])
        1.0
    """

    name = "min"
    # One numeric input column; produces a numeric value.
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    stack_on_self = False
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the minimum of {}"

    def get_function(self, agg_type=Library.PANDAS):
        # Dask and Spark take the string alias of the aggregation.
        if agg_type in [Library.DASK, Library.SPARK]:
            return "min"
        return np.min
| 831 | 27.689655 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/median_count.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base import AggregationPrimitive
class MedianCount(AggregationPrimitive):
    """Calculates the number of occurrences of the median value in a list
    Args:
        skipna (bool): Determines if to use NA/null values. Defaults to
            True to skip NA/null. If skipna is False, and there are NaN
            values in the array, the median will be NaN, regardless of
            the other values.
    Examples:
        >>> median_count = MedianCount()
        >>> median_count([1, 2, 3, 1, 5, 3, 5])
        2
        You can optionally specify how to handle NaN values
        >>> median_count_skipna = MedianCount(skipna=False)
        >>> median_count_skipna([1, 2, 3, 1, 5, 3, None])
        nan
    """

    name = "median_count"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def __init__(self, skipna=True):
        self.skipna = skipna

    def get_function(self):
        def median_count(values):
            center = values.median(skipna=self.skipna)
            # A NaN median (e.g. skipna=False with missing data) propagates.
            if np.isnan(center):
                return np.nan
            return values.eq(center).sum()

        return median_count
| 1,396 | 29.369565 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/average_count_per_unique.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import AggregationPrimitive
class AverageCountPerUnique(AggregationPrimitive):
    """Determines the average count across all unique value.
    Args:
        skipna (bool): Determines if to use NA/null values.
            Defaults to True to skip NA/null.
    Examples:
        Determine the average count values for all unique items
        in the input
        >>> input = [1, 1, 2, 2, 3, 4, 5, 6, 7, 8]
        >>> avg_count_per_unique = AverageCountPerUnique()
        >>> avg_count_per_unique(input)
        1.25
        Determine the average count values for all unique items
        in the input with nan values ignored
        >>> input = [1, 1, 2, 2, 3, 4, 5, None, 6, 7, 8]
        >>> avg_count_per_unique = AverageCountPerUnique()
        >>> avg_count_per_unique(input)
        1.25
        Determine the average count values for all unique items
        in the input with nan values included
        >>> input = [1, 2, 2, 3, 4, 5, None, 6, 7, 8, 9]
        >>> avg_count_per_unique_skipna_false = AverageCountPerUnique(skipna=False)
        >>> avg_count_per_unique_skipna_false(input)
        1.1
    """

    name = "average_count_per_unique"
    input_types = [ColumnSchema(semantic_tags={"category"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self, skipna=True):
        self.skipna = skipna

    def get_function(self):
        def average_count_per_unique(values):
            # Count occurrences of each unique value, then average the counts.
            per_value_counts = values.value_counts(dropna=self.skipna)
            return per_value_counts.mean(skipna=self.skipna)

        return average_count_per_unique
| 1,740 | 32.480769 | 83 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/first_last_time_delta.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base import AggregationPrimitive
class FirstLastTimeDelta(AggregationPrimitive):
    """Determines the time between the first and last time value
    in seconds.
    Examples:
        >>> from datetime import datetime
        >>> first_last_time_delta = FirstLastTimeDelta()
        >>> first_last_time_delta([
        ...     datetime(2011, 4, 9, 10, 30, 0),
        ...     datetime(2011, 4, 9, 10, 30, 15),
        ...     datetime(2011, 4, 9, 10, 30, 35)])
        35.0
    """

    name = "first_last_time_delta"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_calc_time = False
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def first_last_time_delta(datetime_col):
            # Missing timestamps are ignored; all-missing input yields NaN.
            observed = datetime_col.dropna()
            if len(observed) == 0:
                return np.nan
            return (observed.iloc[-1] - observed.iloc[0]).total_seconds()

        return first_last_time_delta
| 1,250 | 31.921053 | 85 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/num_false_since_last_true.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, IntegerNullable
from featuretools.primitives.base import AggregationPrimitive
class NumFalseSinceLastTrue(AggregationPrimitive):
    """Calculates the number of 'False' values since the last `True` value.
    Description:
        From a series of Booleans, find the last record with a `True` value.
        Return the count of 'False' values between that record and the end of
        the series. Return nan if no values are `True`. Any nan values in the
        input are ignored. A 'True' value in the last row will result in a
        count of 0. Inputs are converted to booleans before calculating
        the result.
    Examples:
        >>> num_false_since_last_true = NumFalseSinceLastTrue()
        >>> num_false_since_last_true([True, False, True, False, False])
        2
    """

    name = "num_false_since_last_true"
    input_types = [ColumnSchema(logical_type=Boolean)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def num_false_since_last_true(x):
            if x.empty:
                return np.nan
            # Drop missing values and coerce the remainder to plain booleans.
            x = x.dropna().astype(bool)
            # Boolean-mask indexing keeps only the rows whose value is True.
            true_indices = x[x]
            if true_indices.empty:
                return np.nan
            last_true_index = true_indices.index[-1]
            # Label-based slice from the last True (inclusive) to the end;
            # inverting makes each False count 1 and the leading True count 0.
            x_slice = x.loc[last_true_index:]
            return np.invert(x_slice).sum()

        return num_false_since_last_true
| 1,585 | 36.761905 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/time_since_last_min.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base import AggregationPrimitive
class TimeSinceLastMin(AggregationPrimitive):
    """Calculates the time since the minimum value occurred.
    Description:
        Given a list of numbers, and a corresponding index of
        datetimes, find the time of the minimum value, and return
        the time elapsed since it occured. This calculation is done
        using an instance id's cutoff time.
        If multiple values equal the minimum, use the first occuring
        minimum.
    Examples:
        >>> from datetime import datetime
        >>> time_since_last_min = TimeSinceLastMin()
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> time_since_last_min(times, [1, 3, 2], time=cutoff_time)
        900.0
    """

    name = "time_since_last_min"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_calc_time = True
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def time_since_last_min(datetime_col, numeric_col, time=None):
            # Pair timestamps with values; rows missing either are dropped.
            paired = pd.DataFrame(
                {"datetime": datetime_col, "numeric": numeric_col},
            ).dropna()
            if paired.empty:
                return np.nan
            # idxmin gives the label of the first occurrence of the minimum.
            row_of_min = paired.loc[paired["numeric"].idxmin()]
            elapsed = time - row_of_min["datetime"]
            return elapsed.total_seconds()

        return time_since_last_min
| 1,969 | 32.965517 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/time_since_last.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils import convert_time_units
from featuretools.utils.gen_utils import Library
class TimeSinceLast(AggregationPrimitive):
    """Calculates the time elapsed since the last datetime (default in seconds).
    Description:
        Given a list of datetimes, calculate the
        time elapsed since the last datetime (default in
        seconds). Uses the instance's cutoff time.
    Args:
        unit (str): Defines the unit of time to count from.
            Defaults to seconds. Acceptable values:
            years, months, days, hours, minutes, seconds, milliseconds, nanoseconds
    Examples:
        >>> from datetime import datetime
        >>> time_since_last = TimeSinceLast()
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> time_since_last(times, time=cutoff_time)
        150.0
        >>> from datetime import datetime
        >>> time_since_last = TimeSinceLast(unit = "minutes")
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> time_since_last(times, time=cutoff_time)
        2.5
    """

    name = "time_since_last"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_calc_time = True
    description_template = "the time since the last {}"

    def __init__(self, unit="seconds"):
        # Normalize to lowercase so unit matching is case-insensitive.
        self.unit = unit.lower()

    def get_function(self, agg_type=Library.PANDAS):
        def time_since_last(values, time=None):
            # Elapsed time from the final datetime to the cutoff time,
            # converted from seconds to the configured unit.
            elapsed = (time - values.iloc[-1]).total_seconds()
            return convert_time_units(elapsed, self.unit)

        return time_since_last
| 2,198 | 36.913793 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/time_since_last_max.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base import AggregationPrimitive
class TimeSinceLastMax(AggregationPrimitive):
    """Calculates the time since the maximum value occurred.
    Description:
        Given a list of numbers, and a corresponding index of
        datetimes, find the time of the maximum value, and return
        the time elapsed since it occured. This calculation is done
        using an instance id's cutoff time.
        If multiple values equal the maximum, use the first occuring
        maximum.
    Examples:
        >>> from datetime import datetime
        >>> time_since_last_max = TimeSinceLastMax()
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> time_since_last_max(times, [1, 3, 2], time=cutoff_time)
        285.0
    """

    name = "time_since_last_max"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_calc_time = True
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def time_since_last_max(datetime_col, numeric_col, time=None):
            # Pair timestamps with values; rows missing either are dropped.
            paired = pd.DataFrame(
                {"datetime": datetime_col, "numeric": numeric_col},
            ).dropna()
            if paired.empty:
                return np.nan
            # idxmax gives the label of the first occurrence of the maximum.
            row_of_max = paired.loc[paired["numeric"].idxmax()]
            elapsed = time - row_of_max["datetime"]
            return elapsed.total_seconds()

        return time_since_last_max
| 1,969 | 32.965517 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/num_unique.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
class NumUnique(AggregationPrimitive):
    """Determines the number of distinct values, ignoring `NaN` values.
    Examples:
        >>> num_unique = NumUnique()
        >>> num_unique(['red', 'blue', 'green', 'yellow'])
        4
        `NaN` values will be ignored.
        >>> num_unique(['red', 'blue', 'green', 'yellow', None])
        4
    """
    name = "num_unique"
    input_types = [ColumnSchema(semantic_tags={"category"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the number of unique elements in {}"
    def get_function(self, agg_type=Library.PANDAS):
        # Return the aggregation implementation for the requested backend.
        if agg_type == Library.DASK:
            # Dask needs an explicit three-phase Aggregation:
            # chunk -> per-partition set of the unique non-null values
            def chunk(s):
                def inner_chunk(x):
                    # x[:] copies first so dropna never touches partition data
                    x = x[:].dropna()
                    return set(x.unique())
                return s.agg(inner_chunk)
            # agg -> union of all per-partition sets
            def agg(s):
                def inner_agg(x):
                    x = x[:].dropna()
                    return set().union(*x.values)
                return s.agg(inner_agg)
            # finalize -> cardinality of the combined set per group
            def finalize(s):
                return s.apply(lambda x: len(x))
            return dd.Aggregation(self.name, chunk=chunk, agg=agg, finalize=finalize)
        elif agg_type == Library.SPARK:
            # Spark accepts the string name of a built-in aggregation.
            return "nunique"
        # pandas: Series.nunique already skips NaN by default.
        return pd.Series.nunique
| 1,737 | 28.965517 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/mean.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Mean(AggregationPrimitive):
    """Computes the average for a list of values.

    Args:
        skipna (bool): Determines if to use NA/null values. Defaults to
            True to skip NA/null.

    Examples:
        >>> mean = Mean()
        >>> mean([1, 2, 3, 4, 5, None])
        3.0
        We can also control the way `NaN` values are handled.
        >>> mean = Mean(skipna=False)
        >>> mean([1, 2, 3, 4, 5, None])
        nan
    """

    name = "mean"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the average of {}"

    def __init__(self, skipna=True):
        self.skipna = skipna

    def get_function(self, agg_type=Library.PANDAS):
        # Dask and Spark take the aggregation by name.
        if agg_type in [Library.DASK, Library.SPARK]:
            return "mean"
        if not self.skipna:
            # Work on the raw ndarray so any NaN propagates into the result.
            def mean(series):
                return np.mean(series.values)

            return mean
        # np.mean dispatches to Series.mean, which skips NaN values.
        return np.mean
| 1,313 | 26.375 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count_outside_range.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
class CountOutsideRange(AggregationPrimitive):
    """Determines the number of values that fall outside a certain range.

    Args:
        lower (float): Lower boundary of range (exclusive). Default is 0.
        upper (float): Upper boundary of range (exclusive). Default is 1.
        skipna (bool): Determines if to use NA/null values. Defaults to
            True to skip NA/null.

    Examples:
        >>> count_outside_range = CountOutsideRange(lower=1.5, upper=3.6)
        >>> count_outside_range([1, 2, 3, 4, 5])
        3
        The way NaNs are treated can be controlled.
        >>> count_outside_range_skipna = CountOutsideRange(skipna=False)
        >>> count_outside_range_skipna([1, 2, 3, 4, 5, None])
        nan
    """

    name = "count_outside_range"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def __init__(self, lower=0, upper=1, skipna=True):
        self.lower = lower
        self.upper = upper
        self.skipna = skipna

    def get_function(self):
        def count_outside_range(vals):
            # NaN-intolerant mode: any missing value makes the count undefined.
            if not self.skipna and vals.isnull().values.any():
                return np.nan
            # Strictly below the lower bound or strictly above the upper one
            # (NaN compares False on both sides, so it is never counted).
            outside = (vals < self.lower) | (vals > self.upper)
            return outside.sum()

        return count_outside_range
| 1,598 | 32.3125 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/max_consecutive_true.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, Integer
from featuretools.primitives.base import AggregationPrimitive
class MaxConsecutiveTrue(AggregationPrimitive):
    """Determines the maximum number of consecutive True values in the input

    Examples:
        >>> max_consecutive_true = MaxConsecutiveTrue()
        >>> max_consecutive_true([True, False, True, True, True, False])
        3
    """

    name = "max_consecutive_true"
    input_types = [ColumnSchema(logical_type=Boolean)]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def max_consecutive_true(x):
            # A new "run" begins wherever the value differs from its
            # predecessor; the cumulative sum of those change-points labels
            # each element with the id of the run it belongs to.
            run_ids = (x != x.shift()).cumsum()
            # Counting within each run gives the running streak length
            # (cumcount is zero-based, hence the +1).
            streak_lengths = x.groupby(run_ids).cumcount() + 1
            # Multiplying by the boolean input zeroes out False streaks,
            # leaving only the lengths of True streaks.
            true_streaks = streak_lengths * x
            return true_streaks.max()

        return max_consecutive_true
| 1,759 | 41.926829 | 92 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/num_consecutive_greater_mean.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base import AggregationPrimitive
class NumConsecutiveGreaterMean(AggregationPrimitive):
    """Determines the length of the longest subsequence above the mean.

    Description:
        Given a list of numbers, find the longest subsequence of numbers
        larger than the mean of the entire sequence. Return the length
        of the longest subsequence.

    Args:
        skipna (bool): If this is False and any value in x is `NaN`, then
            the result will be `NaN`. If True, `NaN` values are skipped.
            Default is True.

    Examples:
        >>> num_consecutive_greater_mean = NumConsecutiveGreaterMean()
        >>> num_consecutive_greater_mean([1, 2, 3, 4, 5, 6])
        3.0
        We can also control the way `NaN` values are handled.
        >>> num_consecutive_greater_mean = NumConsecutiveGreaterMean(skipna=False)
        >>> num_consecutive_greater_mean([1, 2, 3, 4, 5, 6, None])
        nan
    """

    name = "num_consecutive_greater_mean"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def __init__(self, skipna=True):
        self.skipna = skipna

    def get_function(self):
        def num_consecutive_greater_mean(x):
            # An all-null input has no mean to compare against.
            if x.isnull().all():
                return np.nan
            if not self.skipna and x.isnull().values.any():
                return np.nan
            x_mean = x.mean()
            # The mean itself can be NaN (such as when x contains both
            # inf and -inf values); reuse the value computed above rather
            # than recomputing x.mean().
            if np.isnan(x_mean):
                return np.nan
            # Work on a clean, positionally-indexed copy so index labels
            # coincide with integer positions below.
            x = x.dropna().reset_index(drop=True)
            # Positions at or below the mean split the series into runs
            # of strictly-greater values.
            below_mean_indices = x[x <= x_mean].index.to_series()
            # If none of x is below the mean, return the length of x
            if below_mean_indices.empty:
                return len(x)
            # Pad with sentinel positions before the start and after the
            # end, in case the longest run touches either boundary of x.
            below_mean_indices[-1] = -1
            below_mean_indices[len(x)] = len(x)
            below_mean_indices = below_mean_indices.sort_index()
            # The largest gap between consecutive "below" positions, minus
            # one, is the longest run of values above the mean.
            below_mean_indices_shifted = below_mean_indices.shift(1)
            diffs = below_mean_indices - below_mean_indices_shifted
            max_gap = (diffs).max() - 1
            return max_gap

        return num_consecutive_greater_mean
| 2,828 | 34.810127 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/n_unique_days.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Integer
from featuretools.primitives.base import AggregationPrimitive
class NUniqueDays(AggregationPrimitive):
    """Determines the number of unique days.

    Description:
        Given a list of datetimes, return the number of unique days.
        The same day in two different years is treated as different. So
        Feb 21, 2017 is different than Feb 21, 2019, even though they are
        both the 21st of February.

    Examples:
        >>> from datetime import datetime
        >>> n_unique_days = NUniqueDays()
        >>> times = [datetime(2019, 2, 1),
        ...          datetime(2019, 2, 1),
        ...          datetime(2018, 2, 1),
        ...          datetime(2019, 1, 1)]
        >>> n_unique_days(times)
        3
    """

    name = "n_unique_days"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def n_unique_days(x):
            # Truncate each timestamp to midnight so times within the same
            # calendar date collapse to one value, then count distinct dates.
            days = x.dt.floor("D")
            return days.nunique()

        return n_unique_days
| 1,188 | 30.289474 | 79 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/percent_unique.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import AggregationPrimitive
class PercentUnique(AggregationPrimitive):
    """Determines the percent of unique values.

    Description:
        Given a list of values, determine what percent of the
        list is made up of unique values. Multiple `NaN` values
        are treated as one unique value.

    Args:
        skipna (bool): Determines whether to ignore `NaN` values.
            Defaults to True.

    Examples:
        >>> percent_unique = PercentUnique()
        >>> percent_unique([1, 1, 2, 2, 3, 4, 5, 6, 7, 8])
        0.8
        We can control whether or not `NaN` values are ignored.
        >>> percent_unique = PercentUnique()
        >>> percent_unique([1, 1, 2, None])
        0.5
        >>> percent_unique_skipna = PercentUnique(skipna=False)
        >>> percent_unique_skipna([1, 1, 2, None])
        0.75
    """

    name = "percent_unique"
    input_types = [ColumnSchema(semantic_tags={"category"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self, skipna=True):
        self.skipna = skipna

    def get_function(self):
        def percent_unique(x):
            # With dropna=True all NaNs are excluded from the numerator;
            # with dropna=False they collectively add one unique value.
            distinct = x.nunique(dropna=self.skipna)
            return distinct / float(x.shape[0])

        return percent_unique
| 1,392 | 28.638298 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count_inside_range.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
class CountInsideRange(AggregationPrimitive):
    """Determines the number of values that fall within a certain range.

    Args:
        lower (float): Lower boundary of range (inclusive). Default is 0.
        upper (float): Upper boundary of range (inclusive). Default is 1.
        skipna (bool): If this is False any value in x is NaN then
            the result will be NaN. If True, `nan` values are skipped.
            Default is True.

    Examples:
        >>> count_inside_range = CountInsideRange(lower=1.5, upper=3.6)
        >>> count_inside_range([1, 2, 3, 4, 5])
        2
        The way NaNs are treated can be controlled.
        >>> count_inside_range_skipna = CountInsideRange(skipna=False)
        >>> count_inside_range_skipna([1, 2, 3, 4, 5, None])
        nan
    """

    name = "count_inside_range"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def __init__(self, lower=0, upper=1, skipna=True):
        self.lower = lower
        self.upper = upper
        self.skipna = skipna

    def get_function(self):
        def count_inside_range(vals):
            # NaN-intolerant mode: any missing value makes the count undefined.
            if not self.skipna and vals.isnull().values.any():
                return np.nan
            # between() is inclusive on both ends by default, matching the
            # documented [lower, upper] range; NaN rows evaluate to False.
            in_range = vals.between(self.lower, self.upper)
            return in_range.sum()

        return count_inside_range
| 1,650 | 32.693878 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/all_primitive.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
class All(AggregationPrimitive):
    """Calculates if all values are 'True' in a list.
    Description:
        Given a list of booleans, return `True` if all
        of the values are `True`.
    Examples:
        >>> all = All()
        >>> all([False, False, False, True])
        False
    """
    name = "all"
    input_types = [
        [ColumnSchema(logical_type=Boolean)],
        [ColumnSchema(logical_type=BooleanNullable)],
    ]
    return_type = ColumnSchema(logical_type=Boolean)
    stack_on_self = False
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "whether all of {} are true"
    def get_function(self, agg_type=Library.PANDAS):
        # Select the aggregation implementation for the requested backend.
        if agg_type == Library.DASK:
            # Dask requires an explicit Aggregation: np.all is applied to
            # each partition (chunk) and then across the partition results
            # (agg); logical AND composes, so the same reducer works twice.
            def chunk(s):
                return s.agg(np.all)
            def agg(s):
                return s.agg(np.all)
            return dd.Aggregation(self.name, chunk=chunk, agg=agg)
        # pandas: np.all reduces the grouped values directly.
        return np.all
| 1,261 | 26.434783 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/n_unique_weeks.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Integer
from featuretools.primitives.base import AggregationPrimitive
class NUniqueWeeks(AggregationPrimitive):
    """Determines the number of unique weeks.

    Description:
        Given a list of datetimes, return the number of unique
        weeks (Monday-Sunday). NUniqueWeeks counts by absolute
        week, not week of year, so the first week of 2018 and
        the first week of 2019 count as two unique values.

    Examples:
        >>> from datetime import datetime
        >>> n_unique_weeks = NUniqueWeeks()
        >>> times = [datetime(2018, 2, 2),
        ...          datetime(2019, 1, 1),
        ...          datetime(2019, 2, 1),
        ...          datetime(2019, 2, 1),
        ...          datetime(2019, 2, 3),
        ...          datetime(2019, 2, 21)]
        >>> n_unique_weeks(times)
        4
    """

    name = "n_unique_weeks"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def n_unique_weeks(x):
            # Convert each timestamp to its weekly period so all dates in
            # the same absolute week collapse to one value.
            weeks = x.dt.to_period("W")
            return weeks.nunique()

        return n_unique_weeks
| 1,284 | 31.125 | 79 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/avg_time_between.py | from datetime import datetime
import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils import convert_time_units
from featuretools.utils.gen_utils import Library
class AvgTimeBetween(AggregationPrimitive):
    """Computes the average number of seconds between consecutive events.
    Description:
        Given a list of datetimes, return the average time (default in seconds)
        elapsed between consecutive events. If there are fewer
        than 2 non-null values, return `NaN`.
    Args:
        unit (str): Defines the unit of time.
            Defaults to seconds. Acceptable values:
            years, months, days, hours, minutes, seconds, milliseconds, nanoseconds
    Examples:
        >>> from datetime import datetime
        >>> avg_time_between = AvgTimeBetween()
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> avg_time_between(times)
        375.0
        >>> avg_time_between = AvgTimeBetween(unit="minutes")
        >>> avg_time_between(times)
        6.25
    """
    name = "avg_time_between"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    description_template = "the average time between each of {}"
    def __init__(self, unit="seconds"):
        # unit: output time unit; normalized to lowercase for convert_time_units.
        self.unit = unit.lower()
    def get_function(self, agg_type=Library.PANDAS):
        def pd_avg_time_between(x):
            """Assumes time scales are closer to order
            of seconds than to nanoseconds
            if times are much closer to nanoseconds
            we could get some floating point errors
            this can be fixed with another function
            that calculates the mean before converting
            to seconds
            """
            x = x.dropna()
            # Fewer than two timestamps -> no gaps to average.
            if x.shape[0] < 2:
                return np.nan
            if isinstance(x.iloc[0], (pd.Timestamp, datetime)):
                # Reinterpret the datetime64[ns] values as int64 nanoseconds.
                # NOTE(review): Series.view is deprecated in recent pandas
                # (astype("int64") is the suggested replacement) — confirm
                # against the pinned pandas version before changing.
                x = x.view("int64")
            # (max - min) / (n - 1) equals the mean of the consecutive gaps;
            # assumes a time index, i.e. values are in ascending order.
            # use len(x)-1 because we care about difference
            # between values, len(x)-1 = len(diff(x))
            avg = (x.max() - x.min()) / (len(x) - 1)
            # Nanoseconds -> seconds before unit conversion.
            avg = avg * 1e-9
            # long form:
            # diff_in_ns = x.diff().iloc[1:].astype('int64')
            # diff_in_seconds = diff_in_ns * 1e-9
            # avg = diff_in_seconds.mean()
            return convert_time_units(avg, self.unit)
        return pd_avg_time_between
| 2,754 | 35.25 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count_inside_nth_std.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Integer
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
class CountInsideNthSTD(AggregationPrimitive):
    """Determines the count of observations that lie inside
    the first N standard deviations (inclusive).

    Args:
        n (float): Number of standard deviations. Default is 1

    Examples:
        >>> count_inside_nth_std = CountInsideNthSTD(n=1.5)
        >>> count_inside_nth_std([1, 10, 15, 20, 100])
        4
    """

    name = "count_inside_nth_std"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def __init__(self, n=1):
        if n < 0:
            raise ValueError("n must be a positive number")
        self.n = n

    def get_function(self):
        def count_inside_nth_std(x):
            # Absolute distance of each point from the mean, compared
            # against n population standard deviations (np.std uses ddof=0).
            deviations = np.abs(x - np.mean(x))
            within = deviations <= self.n * np.std(x)
            return within.sum()

        return count_inside_nth_std
| 1,132 | 28.051282 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/min_count.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base import AggregationPrimitive
class MinCount(AggregationPrimitive):
    """Calculates the number of occurrences of the min value in a list

    Args:
        skipna (bool): Determines if to use NA/null values. Defaults to
            True to skip NA/null. If skipna is False, and there are NaN
            values in the array, the min will be NaN regardless of
            the other values, and NaN will be returned.

    Examples:
        >>> min_count = MinCount()
        >>> min_count([1, 2, 5, 1, 5, 3, 5])
        2
        You can optionally specify how to handle NaN values
        >>> min_count_skipna = MinCount(skipna=False)
        >>> min_count_skipna([1, 2, 5, 1, 5, 3, None])
        nan
    """

    name = "min_count"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})

    def __init__(self, skipna=True):
        self.skipna = skipna

    def get_function(self):
        def min_count(x):
            smallest = x.min(skipna=self.skipna)
            # With skipna=False a single NaN makes the minimum NaN, and the
            # count is then undefined (also covers an empty input).
            if np.isnan(smallest):
                return np.nan
            return (x == smallest).sum()

        return min_count
| 1,328 | 29.204545 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/is_monotonically_increasing.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base import AggregationPrimitive
class IsMonotonicallyIncreasing(AggregationPrimitive):
    """Determines if a series is monotonically increasing.

    Description:
        Given a list of numeric values, return True if the
        values are strictly increasing. If the series contains
        `NaN` values, they will be skipped.

    Examples:
        >>> is_monotonically_increasing = IsMonotonicallyIncreasing()
        >>> is_monotonically_increasing([1, 3, 5, 9])
        True
    """

    name = "is_monotonically_increasing"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    stack_on_self = False
    default_value = False

    def get_function(self):
        def is_monotonically_increasing(x):
            # NaNs would break any ordering check, so drop them first.
            cleaned = x.dropna()
            return cleaned.is_monotonic_increasing

        return is_monotonically_increasing
| 1,008 | 30.53125 | 69 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/mode.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Mode(AggregationPrimitive):
    """Determines the most commonly repeated value.

    Description:
        Given a list of values, return the value with the
        highest number of occurrences. If list is
        empty, return `NaN`.

    Examples:
        >>> mode = Mode()
        >>> mode(['red', 'blue', 'green', 'blue'])
        'blue'
    """

    name = "mode"
    input_types = [ColumnSchema(semantic_tags={"category"})]
    return_type = None
    description_template = "the most frequently occurring value of {}"

    def get_function(self, agg_type=Library.PANDAS):
        def pd_mode(s):
            # Series.mode() is sorted, so position 0 is the smallest of the
            # most frequent values; an empty result falls back to NaN.
            modes = s.mode()
            return modes.get(0, np.nan)

        return pd_mode
| 882 | 26.59375 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/first.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class First(AggregationPrimitive):
    """Determines the first value in a list.

    Examples:
        >>> first = First()
        >>> first([1, 2, 3, 4, 5, None])
        1.0
    """

    name = "first"
    input_types = [ColumnSchema()]
    return_type = None
    stack_on_self = False
    description_template = "the first instance of {}"

    def get_function(self, agg_type=Library.PANDAS):
        def pd_first(x):
            # Positional access: the first row regardless of index labels.
            return x.iloc[0]

        return pd_first
| 664 | 23.62963 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/kurtosis.py | from scipy.stats import kurtosis
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double, Integer
from featuretools.primitives.base import AggregationPrimitive
class Kurtosis(AggregationPrimitive):
    """Calculates the kurtosis for a list of numbers

    Args:
        fisher (bool): Optional. If True, Fisher's definition is used
            (normal ==> 0.0). If False, Pearson's definition is used
            (normal ==> 3.0). Default is True.
        bias (bool): Optional. If False, then the calculations are
            corrected for statistical bias. Default is True.
        nan_policy (str): Optional. Defines how to handle when
            input contains Nan. Possible values include
            `['propagate', 'raise', 'omit']`. 'propagate'
            returns Nan, 'raise' throws an error, 'omit'
            performs the calculations ignoring Nan values.
            Default is 'propagate'.

    Examples:
        >>> kurtosis = Kurtosis()
        >>> kurtosis([1, 2, 3, 4, 5])
        -1.3
        You can use Pearson's definition by setting the 'fisher' argument to False
        >>> kurtosis_fisher = Kurtosis(fisher=False)
        >>> kurtosis_fisher([1, 2, 3, 4, 5])
        1.7
        You can correct for statistical bias by setting the 'bias' argument to False
        >>> kurtosis_bias = Kurtosis(bias=False)
        >>> kurtosis_bias([1, 2, 3, 4, 5])
        -1.2000000000000004
        You can specify how to handle NaN values in the input with the 'nan_policy'
        argument
        >>> kurtosis_nan_policy = Kurtosis(nan_policy='omit')
        >>> kurtosis_nan_policy([1, 2, None, 3, 4, 5])
        -1.3
    """

    name = "kurtosis"
    input_types = [
        [ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})],
        [ColumnSchema(logical_type=Double, semantic_tags={"numeric"})],
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def __init__(self, fisher=True, bias=True, nan_policy="propagate"):
        # Reject unsupported NaN-handling modes up front.
        if nan_policy not in ["propagate", "raise", "omit"]:
            raise ValueError("Invalid nan_policy")
        self.fisher = fisher
        self.bias = bias
        self.nan_policy = nan_policy

    def get_function(self):
        def kurtosis_func(x):
            # Delegate to scipy.stats.kurtosis with the configured options.
            options = {
                "fisher": self.fisher,
                "bias": self.bias,
                "nan_policy": self.nan_policy,
            }
            return kurtosis(x, axis=0, **options)

        return kurtosis_func
| 2,570 | 32.828947 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/max_consecutive_false.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, Integer
from featuretools.primitives.base import AggregationPrimitive
class MaxConsecutiveFalse(AggregationPrimitive):
    """Determines the maximum number of consecutive False values in the input

    Examples:
        >>> max_consecutive_false = MaxConsecutiveFalse()
        >>> max_consecutive_false([True, False, False, True, True, False])
        2
    """

    name = "max_consecutive_false"
    input_types = [ColumnSchema(logical_type=Boolean)]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def max_consecutive_false(x):
            # Invert the input so the consecutive-True machinery below counts
            # runs of False. Work on a copy: the previous implementation
            # assigned the inverted values back into the caller's Series,
            # mutating shared data as a side effect.
            x = x.copy()
            mask = x.notnull()
            x[mask] = ~(x[mask].astype(bool))
            # find the locations where the value changes from the previous value
            not_equal = x != x.shift()
            # Use cumulative sum to determine where consecutive values occur. When the
            # sum changes, consecutive False values are present, when the cumulative
            # sum remains unchanged, consecutive True values are present.
            not_equal_sum = not_equal.cumsum()
            # group the input by the cumulative sum values and use cumulative count
            # to count the number of consecutive values. Add 1 to account for the cumulative
            # sum starting at zero where the first True occurs
            consecutive = x.groupby(not_equal_sum).cumcount() + 1
            # multiply by the inverted input to keep only the counts that correspond to
            # false values
            consecutive_false = consecutive * x
            # return the max of all the consecutive false values
            return consecutive_false.max()

        return max_consecutive_false
| 1,907 | 43.372093 | 92 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/num_zero_crossings.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Integer
from featuretools.primitives.base import AggregationPrimitive
class NumZeroCrossings(AggregationPrimitive):
    """Determines the number of times a list crosses 0.

    Description:
        Given a list of numbers, return the number of times the value
        crosses 0. It is the number of times the value goes from a
        positive number to a negative number, or a negative number to
        a positive number. NaN values are ignored.

    Examples:
        >>> num_zero_crossings = NumZeroCrossings()
        >>> num_zero_crossings([1, -1, 2, -2, 3, -3])
        5
    """

    name = "num_zero_crossings"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})

    def get_function(self):
        def num_zero_crossings(x):
            # Discard zeros and NaNs (x == x is False only for NaN) so that
            # only strictly signed values remain.
            signed = x[(x != 0) & (x == x)]
            # A crossing is any position where the sign differs from the
            # previous value's sign, i.e. a nonzero difference of signs.
            sign_changes = np.diff(np.sign(signed))
            return len(np.flatnonzero(sign_changes))

        return num_zero_crossings
| 1,166 | 33.323529 | 79 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/date_first_event.py | from pandas import NaT
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools.primitives.base import AggregationPrimitive
class DateFirstEvent(AggregationPrimitive):
    """Determines the first datetime from a list of datetimes.

    Examples:
        >>> from datetime import datetime
        >>> date_first_event = DateFirstEvent()
        >>> date_first_event([
        ...     datetime(2011, 4, 9, 10, 30, 10),
        ...     datetime(2011, 4, 9, 10, 30, 20),
        ...     datetime(2011, 4, 9, 10, 30, 30)])
        Timestamp('2011-04-09 10:30:10')
    """

    name = "date_first_event"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})]
    return_type = ColumnSchema(logical_type=Datetime)
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def date_first_event(x):
            # Only non-null timestamps can be the first event.
            valid = x.dropna()
            if valid.empty:
                return NaT
            return valid.iat[0]

        return date_first_event
| 1,043 | 28.828571 | 85 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count_below_mean.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
class CountBelowMean(AggregationPrimitive):
    """Determines the number of values that are below the mean.

    Args:
        skipna (bool): Determines if to use NA/null values. Defaults to
            True to skip NA/null.

    Examples:
        >>> count_below_mean = CountBelowMean()
        >>> count_below_mean([1, 2, 3, 4, 10])
        3
        The way NaNs are treated can be controlled.
        >>> count_below_mean_skipna = CountBelowMean(skipna=False)
        >>> count_below_mean_skipna([1, 2, 3, 4, 5, None])
        nan
    """

    name = "count_below_mean"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False

    def __init__(self, skipna=True):
        self.skipna = skipna

    def get_function(self):
        def count_below_mean(x):
            center = x.mean(skipna=self.skipna)
            # A NaN mean (all-null input, or skipna=False with any null)
            # makes the comparison undefined.
            if np.isnan(center):
                return np.nan
            below = x[x < center]
            return len(below)

        return count_below_mean
| 1,265 | 28.44186 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/time_since_first.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils import convert_time_units
from featuretools.utils.gen_utils import Library
class TimeSinceFirst(AggregationPrimitive):
    """Calculates the time elapsed since the first datetime (in seconds).

    Description:
        Given a list of datetimes, calculate the
        time elapsed since the first datetime (in
        seconds). Uses the instance's cutoff time.

    Args:
        unit (str): Defines the unit of time to count from.
            Defaults to seconds. Acceptable values:
            years, months, days, hours, minutes, seconds, milliseconds, nanoseconds

    Examples:
        >>> from datetime import datetime
        >>> time_since_first = TimeSinceFirst()
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> time_since_first(times, time=cutoff_time)
        900.0
        >>> from datetime import datetime
        >>> time_since_first = TimeSinceFirst(unit = "minutes")
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> time_since_first(times, time=cutoff_time)
        15.0
    """

    name = "time_since_first"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_calc_time = True
    description_template = "the time since the first {}"

    def __init__(self, unit="seconds"):
        # Normalize the unit for convert_time_units.
        self.unit = unit.lower()

    def get_function(self, agg_type=Library.PANDAS):
        def time_since_first(values, time=None):
            # Seconds between the earliest (first) timestamp and the cutoff
            # time, converted to the configured unit.
            earliest = values.iloc[0]
            return convert_time_units((time - earliest).total_seconds(), self.unit)

        return time_since_first
| 2,195 | 36.862069 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/n_most_common_frequency.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical
from featuretools.primitives.base import AggregationPrimitive
class NMostCommonFrequency(AggregationPrimitive):
    """Determines the frequency of the n most common items.
    Args:
        n (int): defines "n" in "n most common". Defaults to
            3.
        skipna (bool): Determines if to use NA/null values.
            Defaults to True to skip NA/null.
    Description:
        Given a list, find the n most common items, and return a series
        showing the frequency of each item. If the list has less than n unique
        values, the resulting series will be padded with nan.
    Examples:
        >>> n_most_common_frequency = NMostCommonFrequency()
        >>> n_most_common_frequency([1, 1, 1, 2, 2, 3, 4, 4]).to_list()
        [3, 2, 2]
        We can increase n to include more items.
        >>> n_most_common_frequency = NMostCommonFrequency(4)
        >>> n_most_common_frequency([1, 1, 1, 2, 2, 3, 4, 4]).to_list()
        [3, 2, 2, 1]
        `NaN`s are skipped by default.
        >>> n_most_common_frequency = NMostCommonFrequency(3)
        >>> n_most_common_frequency([1, 1, 1, 2, 2, 3, 4, 4, None, None, None]).to_list()
        [3, 2, 2]
        However, the way `NaN`s are treated can be controlled.
        >>> n_most_common_frequency = NMostCommonFrequency(3, skipna=False)
        >>> n_most_common_frequency([1, 1, 1, 2, 2, 3, 4, 4, None, None, None]).to_list()
        [3, 3, 2]
    """
    name = "n_most_common_frequency"
    input_types = [ColumnSchema(semantic_tags={"category"})]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})
    def __init__(self, n=3, skipna=True):
        self.n = n
        # one output column per requested rank
        self.number_output_features = n
        self.skipna = skipna
    def get_function(self):
        def n_most_common_frequency(data, n=self.n):
            # value_counts sorts by frequency, so the head is the top-n;
            # dropna mirrors the skipna setting
            counts = data.value_counts(dropna=self.skipna).iloc[0:n]
            missing = n - counts.shape[0]
            if missing > 0:
                # pad with NaN when there are fewer than n unique values
                padding = pd.Series([np.nan] * missing)
                counts = pd.concat([counts, padding])
            return counts
        return n_most_common_frequency
| 2,361 | 33.735294 | 89 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count_outside_nth_std.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Integer
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
class CountOutsideNthSTD(AggregationPrimitive):
    """Determines the number of observations that lie outside
    the first N standard deviations.
    Args:
        n (float): Number of standard deviations. Default is 1
    Examples:
        >>> count_outside_nth_std = CountOutsideNthSTD(n=1.5)
        >>> count_outside_nth_std([1, 10, 15, 20, 100])
        1
    """
    name = "count_outside_nth_std"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    def __init__(self, n=1):
        if n < 0:
            raise ValueError("n must be a positive number")
        self.n = n
    def get_function(self):
        def count_outside_nth_std(x):
            # distance of every point from the mean, compared against
            # n standard deviations
            deviation = np.abs(x - np.mean(x))
            outliers = deviation > np.std(x) * self.n
            return outliers.sum()
        return count_outside_nth_std
| 1,128 | 27.948718 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/percent_true.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, Double
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
class PercentTrue(AggregationPrimitive):
    """Determines the percent of `True` values.
    Description:
        Given a list of booleans, return the percent
        of values which are `True` as a decimal.
        `NaN` values are treated as `False`,
        adding to the denominator.
    Examples:
        >>> percent_true = PercentTrue()
        >>> percent_true([True, False, True, True, None])
        0.6
    """
    name = "percent_true"
    input_types = [
        [ColumnSchema(logical_type=BooleanNullable)],
        [ColumnSchema(logical_type=Boolean)],
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    stack_on = []
    stack_on_exclude = []
    default_value = 0
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "the percentage of true values in {}"
    def get_function(self, agg_type=Library.PANDAS):
        # Dask cannot apply an arbitrary Python aggregation directly, so a
        # chunk/agg/finalize triple is built instead (mean = sum / count
        # computed across partitions).
        if agg_type == Library.DASK:
            def chunk(s):
                # Per-partition step: compute (sum of True values, row count)
                # with NaN coerced to False so it still counts in the length.
                def format_chunk(x):
                    return x[:].fillna(False)
                chunk_sum = s.agg(lambda x: format_chunk(x).sum())
                chunk_len = s.agg(lambda x: len(format_chunk(x)))
                # all-bool partitions can produce bool-dtyped sums; widen so
                # the cross-partition sums below don't overflow/misbehave
                if chunk_sum.dtype == "bool":
                    chunk_sum = chunk_sum.astype("int64")
                if chunk_len.dtype == "bool":
                    chunk_len = chunk_len.astype("int64")
                return (chunk_sum, chunk_len)
            def agg(val, length):
                # combine the per-partition (sum, count) pairs
                return (val.sum(), length.sum())
            def finalize(total, length):
                # final ratio of True values to total rows
                return total / length
            return dd.Aggregation(self.name, chunk=chunk, agg=agg, finalize=finalize)
        # pandas path: NaN -> False, then mean of booleans is the True ratio
        def percent_true(s):
            return s.fillna(False).mean()
        return percent_true
| 2,090 | 31.671875 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/max_consecutive_zeros.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double, Integer
from featuretools.primitives.base import AggregationPrimitive
class MaxConsecutiveZeros(AggregationPrimitive):
    """Determines the maximum number of consecutive zero values in the input
    Args:
        skipna (bool): Ignore any `NaN` values in the input. Default is True.
    Examples:
        >>> max_consecutive_zeros = MaxConsecutiveZeros()
        >>> max_consecutive_zeros([1.0, -1.4, 0, 0.0, 0, -4.3])
        3
        `NaN` values can be ignored with the `skipna` parameter
        >>> max_consecutive_zeros_skipna = MaxConsecutiveZeros(skipna=False)
        >>> max_consecutive_zeros_skipna([1.0, -1.4, 0, None, 0.0, -4.3])
        1
    """
    name = "max_consecutive_zeros"
    input_types = [
        [ColumnSchema(logical_type=Integer)],
        [ColumnSchema(logical_type=Double)],
    ]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    def __init__(self, skipna=True):
        self.skipna = skipna
    def get_function(self):
        def max_consecutive_zeros(x):
            if self.skipna:
                # dropna returns a new series, so the caller's data is safe
                x = x.dropna()
            else:
                # BUG FIX: the in-place boolean assignment below would
                # otherwise mutate the caller's series; work on a copy
                x = x.copy()
            # convert the numeric values to booleans for processing
            x[x.notnull()] = x[x.notnull()].eq(0)
            # find the locations where the value changes from the previous value
            not_equal = x != x.shift()
            # Use cumulative sum to determine where consecutive values occur. When the
            # sum changes, consecutive non-zero values are present, when the cumulative
            # sum remains unchanged, consecutive zero values are present.
            not_equal_sum = not_equal.cumsum()
            # group the input by the cumulative sum values and use cumulative count
            # to count the number of consecutive values. Add 1 to account for the cumulative
            # sum starting at zero where the first zero occurs
            consecutive = x.groupby(not_equal_sum).cumcount() + 1
            # multiply by the boolean input to keep only the counts that correspond to
            # zero values
            consecutive_zero = consecutive * x
            # return the max of all the consecutive zero values
            return consecutive_zero.max()
        return max_consecutive_zeros
| 2,379 | 38.666667 | 92 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/max_min_delta.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import AggregationPrimitive
class MaxMinDelta(AggregationPrimitive):
    """Determines the difference between the max and min value.
    Args:
        skipna (bool): Determines if to use NA/null values.
            Defaults to True to skip NA/null.
    Examples:
        >>> max_min_delta = MaxMinDelta()
        >>> max_min_delta([7, 2, 5, 3, 10])
        8
        You can optionally specify how to handle NaN values
        >>> max_min_delta_skipna = MaxMinDelta(skipna=False)
        >>> max_min_delta_skipna([7, 2, None, 3, 10])
        nan
    """
    name = "max_min_delta"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    def __init__(self, skipna=True):
        self.skipna = skipna
    def get_function(self):
        def max_min_delta(x):
            # range of the values; NaN propagates when skipna is False
            return x.max(skipna=self.skipna) - x.min(skipna=self.skipna)
        return max_min_delta
| 1,126 | 26.487805 | 63 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/std.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Std(AggregationPrimitive):
    """Computes the dispersion relative to the mean value, ignoring `NaN`.
    Examples:
        >>> std = Std()
        >>> round(std([1, 2, 3, 4, 5, None]), 3)
        1.414
    """
    name = "std"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    stack_on_self = False
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the standard deviation of {}"
    def get_function(self, agg_type=Library.PANDAS):
        # Dask and Spark take the aggregation by name; pandas gets the
        # numpy implementation directly
        if agg_type == Library.DASK or agg_type == Library.SPARK:
            return "std"
        return np.std
| 868 | 28.965517 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count_greater_than.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Integer
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
class CountGreaterThan(AggregationPrimitive):
    """Determines the number of values greater than a controllable threshold.
    Args:
        threshold (float): The threshold to use when counting the number
            of values greater than. Defaults to 10.
    Examples:
        >>> count_greater_than = CountGreaterThan(threshold=3)
        >>> count_greater_than([1, 2, 3, 4, 5])
        2
    """
    name = "count_greater_than"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    def __init__(self, threshold=10):
        self.threshold = threshold
    def get_function(self):
        def count_greater_than(x):
            # count() on the filtered series also excludes any NaN values
            above = x[x > self.threshold]
            return above.count()
        return count_greater_than
| 1,030 | 29.323529 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/max_consecutive_positives.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double, Integer
from featuretools.primitives.base import AggregationPrimitive
class MaxConsecutivePositives(AggregationPrimitive):
    """Determines the maximum number of consecutive positive values in the input
    Args:
        skipna (bool): Ignore any `NaN` values in the input. Default is True.
    Examples:
        >>> max_consecutive_positives = MaxConsecutivePositives()
        >>> max_consecutive_positives([1.0, -1.4, 2.4, 5.4, 2.9, -4.3])
        3
        `NaN` values can be ignored with the `skipna` parameter
        >>> max_consecutive_positives_skipna = MaxConsecutivePositives(skipna=False)
        >>> max_consecutive_positives_skipna([1.0, -1.4, 2.4, None, 2.9, 4.3])
        2
    """
    name = "max_consecutive_positives"
    input_types = [
        [ColumnSchema(logical_type=Integer)],
        [ColumnSchema(logical_type=Double)],
    ]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    def __init__(self, skipna=True):
        self.skipna = skipna
    def get_function(self):
        def max_consecutive_positives(x):
            if self.skipna:
                # dropna returns a new series, so the caller's data is safe
                x = x.dropna()
            else:
                # BUG FIX: the in-place boolean assignment below would
                # otherwise mutate the caller's series; work on a copy
                x = x.copy()
            # convert the numeric values to booleans for processing
            x[x.notnull()] = x[x.notnull()].gt(0)
            # find the locations where the value changes from the previous value
            not_equal = x != x.shift()
            # Use cumulative sum to determine where consecutive values occur. When the
            # sum changes, consecutive non-positive values are present, when the cumulative
            # sum remains unchanged, consecutive positive values are present.
            not_equal_sum = not_equal.cumsum()
            # group the input by the cumulative sum values and use cumulative count
            # to count the number of consecutive values. Add 1 to account for the cumulative
            # sum starting at zero where the first positive occurs
            consecutive = x.groupby(not_equal_sum).cumcount() + 1
            # multiply by the boolean input to keep only the counts that correspond to
            # positive values
            consecutive_pos = consecutive * x
            # return the max of all the consecutive positive values
            return consecutive_pos.max()
        return max_consecutive_positives
| 2,447 | 39.8 | 92 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count_above_mean.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
class CountAboveMean(AggregationPrimitive):
    """Calculates the number of values that are above the mean.
    Args:
        skipna (bool): Determines if to use NA/null values. Defaults to
            True to skip NA/null.
    Examples:
        >>> count_above_mean = CountAboveMean()
        >>> count_above_mean([1, 2, 3, 4, 5])
        2
        The way NaNs are treated can be controlled.
        >>> count_above_mean_skipna = CountAboveMean(skipna=False)
        >>> count_above_mean_skipna([1, 2, 3, 4, 5, None])
        nan
    """
    name = "count_above_mean"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    def __init__(self, skipna=True):
        self.skipna = skipna
    def get_function(self):
        def count_above_mean(x):
            avg = x.mean(skipna=self.skipna)
            # a NaN mean (all-null input, or skipna=False with nulls)
            # propagates as NaN
            if np.isnan(avg):
                return np.nan
            above = x[x > avg]
            return len(above)
        return count_above_mean
| 1,264 | 28.418605 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/max_consecutive_negatives.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double, Integer
from featuretools.primitives.base import AggregationPrimitive
class MaxConsecutiveNegatives(AggregationPrimitive):
    """Determines the maximum number of consecutive negative values in the input
    Args:
        skipna (bool): Ignore any `NaN` values in the input. Default is True.
    Examples:
        >>> max_consecutive_negatives = MaxConsecutiveNegatives()
        >>> max_consecutive_negatives([1.0, -1.4, -2.4, -5.4, 2.9, -4.3])
        3
        `NaN` values can be ignored with the `skipna` parameter
        >>> max_consecutive_negatives_skipna = MaxConsecutiveNegatives(skipna=False)
        >>> max_consecutive_negatives_skipna([1.0, 1.4, -2.4, None, -2.9, -4.3])
        2
    """
    name = "max_consecutive_negatives"
    input_types = [
        [ColumnSchema(logical_type=Integer)],
        [ColumnSchema(logical_type=Double)],
    ]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    def __init__(self, skipna=True):
        self.skipna = skipna
    def get_function(self):
        def max_consecutive_negatives(x):
            if self.skipna:
                # dropna returns a new series, so the caller's data is safe
                x = x.dropna()
            else:
                # BUG FIX: the in-place boolean assignment below would
                # otherwise mutate the caller's series; work on a copy
                x = x.copy()
            # convert the numeric values to booleans for processing
            x[x.notnull()] = x[x.notnull()].lt(0)
            # find the locations where the value changes from the previous value
            not_equal = x != x.shift()
            # Use cumulative sum to determine where consecutive values occur. When the
            # sum changes, consecutive non-negative values are present, when the cumulative
            # sum remains unchanged, consecutive negative values are present.
            not_equal_sum = not_equal.cumsum()
            # group the input by the cumulative sum values and use cumulative count
            # to count the number of consecutive values. Add 1 to account for the cumulative
            # sum starting at zero where the first negative occurs
            consecutive = x.groupby(not_equal_sum).cumcount() + 1
            # multiply by the boolean input to keep only the counts that correspond to
            # negative values
            consecutive_neg = consecutive * x
            # return the max of all the consecutive negative values
            return consecutive_neg.max()
        return max_consecutive_negatives
| 2,451 | 39.866667 | 92 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/is_monotonically_decreasing.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base import AggregationPrimitive
class IsMonotonicallyDecreasing(AggregationPrimitive):
    """Determines if a series is monotonically decreasing.
    Description:
        Given a list of numeric values, return True if the
        values are strictly decreasing. If the series contains
        `NaN` values, they will be skipped.
    Examples:
        >>> is_monotonically_decreasing = IsMonotonicallyDecreasing()
        >>> is_monotonically_decreasing([9, 5, 3, 1])
        True
    """
    name = "is_monotonically_decreasing"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    stack_on_self = False
    default_value = False
    def get_function(self):
        def is_monotonically_decreasing(x):
            # NaN values are excluded before the monotonicity check
            non_null = x.dropna()
            return non_null.is_monotonic_decreasing
        return is_monotonically_decreasing
| 1,008 | 30.53125 | 69 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/time_since_last_true.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, Datetime, Double
from featuretools.primitives.base import AggregationPrimitive
class TimeSinceLastTrue(AggregationPrimitive):
    """Calculates the time since the last `True` value.
    Description:
        Using a series of Datetimes and a series of Booleans, find the last
        record with a `True` value. Return the seconds elapsed between that record
        and the instance's cutoff time. Return nan if no values are `True`.
    Examples:
        >>> from datetime import datetime
        >>> time_since_last_true = TimeSinceLastTrue()
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ... datetime(2010, 1, 1, 11, 55, 15),
        ... datetime(2010, 1, 1, 11, 57, 30)]
        >>> booleans = [True, True, False]
        >>> time_since_last_true(times, booleans, time=cutoff_time)
        285.0
    """
    name = "time_since_last_true"
    input_types = [
        [
            ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
            ColumnSchema(logical_type=Boolean),
        ],
        [
            ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
            ColumnSchema(logical_type=BooleanNullable),
        ],
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_calc_time = True
    stack_on_self = False
    default_value = 0
    def get_function(self):
        def time_since_last_true(datetime_col, bool_col, time=None):
            # pair the two columns so rows with a null in either are dropped
            frame = pd.DataFrame(
                {
                    "datetime": datetime_col,
                    "bool": bool_col,
                },
            ).dropna()
            if frame.empty:
                return np.nan
            true_rows = frame[frame["bool"]]
            if true_rows.empty:
                # no True value anywhere in the input
                return np.nan
            last_true = true_rows.index[-1]
            delta = time - datetime_col.loc[last_true]
            return delta.total_seconds()
        return time_since_last_true
| 2,207 | 34.047619 | 82 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/num_consecutive_less_mean.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base import AggregationPrimitive
class NumConsecutiveLessMean(AggregationPrimitive):
    """Determines the length of the longest subsequence below the mean.
    Description:
        Given a list of numbers, find the longest subsequence of numbers
        smaller than the mean of the entire sequence. Return the length
        of the longest subsequence.
    Args:
        skipna (bool): If this is False and any value in x is `NaN`, then
            the result will be `NaN`. If True, `NaN` values are skipped.
            Default is True.
    Examples:
        >>> num_consecutive_less_mean = NumConsecutiveLessMean()
        >>> num_consecutive_less_mean([1, 2, 3, 4, 5, 6])
        3.0
        We can also control the way `NaN` values are handled.
        >>> num_consecutive_less_mean = NumConsecutiveLessMean(skipna=False)
        >>> num_consecutive_less_mean([1, 2, 3, 4, 5, 6, None])
        nan
    """
    name = "num_consecutive_less_mean"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    def __init__(self, skipna=True):
        self.skipna = skipna
    def get_function(self):
        def num_consecutive_less_mean(x):
            # NaN handling: all-null input is always NaN; any null is NaN
            # when skipna is disabled
            if x.isnull().all():
                return np.nan
            if not self.skipna and x.isnull().values.any():
                return np.nan
            mean_val = x.mean()
            # the mean itself can be NaN (e.g. when x holds both inf and -inf)
            if np.isnan(mean_val):
                return np.nan
            x = x.dropna().reset_index(drop=True)
            # positions of points at or above the mean
            at_or_above = x[x >= mean_val].index.to_series()
            if at_or_above.empty:
                # everything is below the mean
                return len(x)
            # add sentinel positions before the start and past the end so a
            # longest run at either boundary is still measured
            at_or_above[-1] = -1
            at_or_above[len(x)] = len(x)
            at_or_above = at_or_above.sort_index()
            # gap between neighboring at-or-above positions, minus one,
            # is the length of the below-mean run between them
            gaps = at_or_above - at_or_above.shift(1)
            return gaps.max() - 1
        return num_consecutive_less_mean
| 2,799 | 34.443038 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/num_true_since_last_false.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, IntegerNullable
from featuretools.primitives.base import AggregationPrimitive
class NumTrueSinceLastFalse(AggregationPrimitive):
    """Calculates the number of 'True' values since the last `False` value.
    Description:
        From a series of Booleans, find the last record with a `False` value.
        Return the count of 'True' values between that record and the end of
        the series. Return nan if no values are `False`. Any nan values in the
        input are ignored. A 'False' value in the last row will result in a
        count of 0.
    Examples:
        >>> num_true_since_last_false = NumTrueSinceLastFalse()
        >>> num_true_since_last_false([False, True, False, True, True])
        2
    """
    name = "num_true_since_last_false"
    input_types = [ColumnSchema(logical_type=Boolean)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0
    def get_function(self):
        def num_true_since_last_false(x):
            booleans = x.dropna().astype(bool)
            falses = booleans[~booleans]
            if falses.empty:
                # no False value anywhere in the input
                return np.nan
            # sum from the last False onward; the False itself adds 0
            return booleans.loc[falses.index[-1] :].sum()
        return num_true_since_last_false
| 1,453 | 36.282051 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/last.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Last(AggregationPrimitive):
    """Determines the last value in a list.
    Examples:
        >>> last = Last()
        >>> last([1, 2, 3, 4, 5, None])
        nan
    """
    name = "last"
    input_types = [ColumnSchema()]
    return_type = None
    stack_on_self = False
    description_template = "the last instance of {}"
    def get_function(self, agg_type=Library.PANDAS):
        def pd_last(series):
            # positional index -1 is the final element
            return series.iloc[-1]
        return pd_last
| 656 | 23.333333 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/max_count.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import AggregationPrimitive
class MaxCount(AggregationPrimitive):
    """Calculates the number of occurrences of the max value in a list
    Args:
        skipna (bool): Determines if to use NA/null values. Defaults to
            True to skip NA/null. If skipna is False, and there are NaN
            values in the array, the max will be NaN regardless of
            the other values, and NaN will be returned.
    Examples:
        >>> max_count = MaxCount()
        >>> max_count([1, 2, 5, 1, 5, 3, 5])
        3
        You can optionally specify how to handle NaN values
        >>> max_count_skipna = MaxCount(skipna=False)
        >>> max_count_skipna([1, 2, 5, 1, 5, 3, None])
        nan
    """
    name = "max_count"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    def __init__(self, skipna=True):
        self.skipna = skipna
    def get_function(self):
        def max_count(x):
            maximum = x.max(skipna=self.skipna)
            # NaN max (skipna=False with nulls present) propagates as NaN
            if np.isnan(maximum):
                return np.nan
            return (x == maximum).sum()
        return max_count
| 1,247 | 28.023256 | 71 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/__init__.py | from featuretools.primitives.standard.aggregation.all_primitive import All
from featuretools.primitives.standard.aggregation.any_primitive import Any
from featuretools.primitives.standard.aggregation.avg_time_between import AvgTimeBetween
from featuretools.primitives.standard.aggregation.average_count_per_unique import (
AverageCountPerUnique,
)
from featuretools.primitives.standard.aggregation.count import Count
from featuretools.primitives.standard.aggregation.count_above_mean import CountAboveMean
from featuretools.primitives.standard.aggregation.count_below_mean import CountBelowMean
from featuretools.primitives.standard.aggregation.count_greater_than import (
CountGreaterThan,
)
from featuretools.primitives.standard.aggregation.count_inside_nth_std import (
CountInsideNthSTD,
)
from featuretools.primitives.standard.aggregation.count_inside_range import (
CountInsideRange,
)
from featuretools.primitives.standard.aggregation.count_less_than import CountLessThan
from featuretools.primitives.standard.aggregation.count_outside_nth_std import (
CountOutsideNthSTD,
)
from featuretools.primitives.standard.aggregation.count_outside_range import (
CountOutsideRange,
)
from featuretools.primitives.standard.aggregation.date_first_event import DateFirstEvent
from featuretools.primitives.standard.aggregation.entropy import Entropy
from featuretools.primitives.standard.aggregation.first import First
from featuretools.primitives.standard.aggregation.first_last_time_delta import (
FirstLastTimeDelta,
)
from featuretools.primitives.standard.aggregation.kurtosis import Kurtosis
from featuretools.primitives.standard.aggregation.is_unique import IsUnique
from featuretools.primitives.standard.aggregation.last import Last
from featuretools.primitives.standard.aggregation.max_primitive import Max
from featuretools.primitives.standard.aggregation.max_consecutive_false import (
MaxConsecutiveFalse,
)
from featuretools.primitives.standard.aggregation.max_consecutive_negatives import (
MaxConsecutiveNegatives,
)
from featuretools.primitives.standard.aggregation.max_consecutive_positives import (
MaxConsecutivePositives,
)
from featuretools.primitives.standard.aggregation.max_consecutive_true import (
MaxConsecutiveTrue,
)
from featuretools.primitives.standard.aggregation.max_consecutive_zeros import (
MaxConsecutiveZeros,
)
from featuretools.primitives.standard.aggregation.mean import Mean
from featuretools.primitives.standard.aggregation.median import Median
from featuretools.primitives.standard.aggregation.max_count import MaxCount
from featuretools.primitives.standard.aggregation.median_count import MedianCount
from featuretools.primitives.standard.aggregation.max_min_delta import MaxMinDelta
from featuretools.primitives.standard.aggregation.min_count import MinCount
from featuretools.primitives.standard.aggregation.min_primitive import Min
from featuretools.primitives.standard.aggregation.mode import Mode
from featuretools.primitives.standard.aggregation.n_unique_days import NUniqueDays
from featuretools.primitives.standard.aggregation.n_unique_days_of_calendar_year import (
NUniqueDaysOfCalendarYear,
)
from featuretools.primitives.standard.aggregation.n_unique_days_of_month import (
NUniqueDaysOfMonth,
)
from featuretools.primitives.standard.aggregation.has_no_duplicates import (
HasNoDuplicates,
)
from featuretools.primitives.standard.aggregation.is_monotonically_decreasing import (
IsMonotonicallyDecreasing,
)
from featuretools.primitives.standard.aggregation.is_monotonically_increasing import (
IsMonotonicallyIncreasing,
)
from featuretools.primitives.standard.aggregation.n_unique_months import NUniqueMonths
from featuretools.primitives.standard.aggregation.n_unique_weeks import NUniqueWeeks
from featuretools.primitives.standard.aggregation.n_most_common import NMostCommon
from featuretools.primitives.standard.aggregation.n_most_common_frequency import (
NMostCommonFrequency,
)
from featuretools.primitives.standard.aggregation.num_true import NumTrue
from featuretools.primitives.standard.aggregation.num_peaks import NumPeaks
from featuretools.primitives.standard.aggregation.num_zero_crossings import (
NumZeroCrossings,
)
from featuretools.primitives.standard.aggregation.num_true_since_last_false import (
NumTrueSinceLastFalse,
)
from featuretools.primitives.standard.aggregation.num_false_since_last_true import (
NumFalseSinceLastTrue,
)
from featuretools.primitives.standard.aggregation.num_consecutive_greater_mean import (
NumConsecutiveGreaterMean,
)
from featuretools.primitives.standard.aggregation.num_consecutive_less_mean import (
NumConsecutiveLessMean,
)
from featuretools.primitives.standard.aggregation.num_unique import NumUnique
from featuretools.primitives.standard.aggregation.percent_unique import PercentUnique
from featuretools.primitives.standard.aggregation.percent_true import PercentTrue
from featuretools.primitives.standard.aggregation.skew import Skew
from featuretools.primitives.standard.aggregation.std import Std
from featuretools.primitives.standard.aggregation.sum_primitive import Sum
from featuretools.primitives.standard.aggregation.time_since_first import TimeSinceFirst
from featuretools.primitives.standard.aggregation.time_since_last import TimeSinceLast
from featuretools.primitives.standard.aggregation.time_since_last_true import (
TimeSinceLastTrue,
)
from featuretools.primitives.standard.aggregation.time_since_last_min import (
TimeSinceLastMin,
)
from featuretools.primitives.standard.aggregation.time_since_last_max import (
TimeSinceLastMax,
)
from featuretools.primitives.standard.aggregation.time_since_last_false import (
TimeSinceLastFalse,
)
from featuretools.primitives.standard.aggregation.trend import Trend
from featuretools.primitives.standard.aggregation.variance import Variance
| 5,915 | 48.3 | 89 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/variance.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import AggregationPrimitive
class Variance(AggregationPrimitive):
    """Calculates the variance of a list of numbers.
    Description:
        Given a list of numbers, return the variance,
        using numpy's built-in variance function. Nan
        values in a series will be ignored. Return nan
        when the series is empty or entirely null.
    Examples:
        >>> variance = Variance()
        >>> variance([0, 3, 4, 3])
        2.25
        Null values in a series will be ignored.
        >>> variance = Variance()
        >>> variance([0, 3, 4, 3, None])
        2.25
    """
    name = "variance"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = np.nan
    def get_function(self):
        # numpy's population variance is applied directly to the series
        return np.var
| 1,007 | 26.243243 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/median.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Median(AggregationPrimitive):
    """Determines the middlemost number in a list of values.
    Examples:
        >>> median = Median()
        >>> median([5, 3, 2, 1, 4])
        3.0
        `NaN` values are ignored.
        >>> median([5, 3, 2, 1, 4, None])
        3.0
    """
    name = "median"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    description_template = "the median of {}"
    def get_function(self, agg_type=Library.PANDAS):
        # pandas' median skips NaN values by default
        return pd.Series.median
| 768 | 25.517241 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/n_unique_months.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Integer
from featuretools.primitives.base import AggregationPrimitive
class NUniqueMonths(AggregationPrimitive):
    """Determines the number of unique months.

    Description:
        Given a list of datetimes, return the number of unique months.
        NUniqueMonths counts absolute month, not month of year, so the
        same month in two different years is treated as different. (i.e.
        Feb 2017 is different than Feb 2019.)

    Examples:
        >>> from datetime import datetime
        >>> n_unique_months = NUniqueMonths()
        >>> times = [datetime(2019, 1, 1),
        ...          datetime(2019, 1, 2),
        ...          datetime(2019, 1, 3),
        ...          datetime(2019, 2, 1),
        ...          datetime(2018, 2, 1)]
        >>> n_unique_months(times)
        3
    """

    name = "n_unique_months"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def n_unique_months(x):
            # Monthly periods keep Feb 2018 and Feb 2019 distinct;
            # nunique skips missing values on its own.
            monthly = x.dt.to_period("M")
            return monthly.nunique()

        return n_unique_months
| 1,262 | 31.384615 | 79 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/max_primitive.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Max(AggregationPrimitive):
    """Calculates the highest value, ignoring `NaN` values.

    Examples:
        >>> max = Max()
        >>> max([1, 2, 3, 4, 5, None])
        5.0
    """

    name = "max"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    stack_on_self = False
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the maximum of {}"

    def get_function(self, agg_type=Library.PANDAS):
        # Dask and Spark take the aggregation by name; pandas gets the callable.
        if agg_type in [Library.DASK, Library.SPARK]:
            return "max"
        return np.max
| 830 | 27.655172 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/num_peaks.py | import pandas as pd
from scipy.signal import find_peaks
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Integer
from featuretools.primitives.base import AggregationPrimitive
class NumPeaks(AggregationPrimitive):
    """Determines the number of peaks in a list of numbers.

    Description:
        Given a list of numbers, count the number of local
        maxima. Uses the find_peaks function from scipy.signal.

    Examples:
        >>> num_peaks = NumPeaks()
        >>> num_peaks([-5, 0, 10, 0, 10, -5, -4, -5, 10, 0])
        4
    """

    name = "num_peaks"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def num_peaks(x):
            # find_peaks cannot handle pandas' nullable integer dtype
            if x.dtype == "Int64":
                x = x.astype("float64")
            peak_positions = find_peaks(x)[0]
            valid_positions = peak_positions[~pd.isna(peak_positions)]
            return len(valid_positions)

        return num_peaks
| 1,032 | 27.694444 | 79 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/trend.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils import calculate_trend
from featuretools.utils.gen_utils import Library
class Trend(AggregationPrimitive):
    """Calculates the trend of a column over time.

    Description:
        Given a list of values and a corresponding list of
        datetimes, calculate the slope of the linear trend
        of values.

    Examples:
        >>> from datetime import datetime
        >>> trend = Trend()
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30),
        ...          datetime(2010, 1, 1, 11, 12),
        ...          datetime(2010, 1, 1, 11, 12, 15)]
        >>> round(trend([1, 2, 3, 4, 5], times), 3)
        -0.053
    """

    name = "trend"
    input_types = [
        ColumnSchema(semantic_tags={"numeric"}),
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    description_template = "the linear trend of {} over time"

    def get_function(self, agg_type=Library.PANDAS):
        def pd_trend(y, x):
            # Index the values by their timestamps, then fit the linear slope.
            indexed = pd.Series(data=y.values, index=x.values)
            return calculate_trend(indexed)

        return pd_trend
| 1,445 | 32.627907 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/entropy.py | from scipy import stats
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class Entropy(AggregationPrimitive):
    """Calculates the entropy for a categorical column

    Description:
        Given a list of observations from a categorical
        column return the entropy of the distribution.
        NaN values can be treated as a category or
        dropped.

    Args:
        dropna (bool): Whether to consider NaN values as a separate category
            Defaults to False.
        base (float): The logarithmic base to use
            Defaults to e (natural logarithm)

    Examples:
        >>> pd_entropy = Entropy()
        >>> pd_entropy([1, 2, 3, 4])
        1.3862943611198906
    """

    name = "entropy"
    input_types = [ColumnSchema(semantic_tags={"category"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    stack_on_self = False
    description_template = "the entropy of {}"

    def __init__(self, dropna=False, base=None):
        self.dropna = dropna
        self.base = base

    def get_function(self, agg_type=Library.PANDAS):
        def pd_entropy(s):
            # Normalized counts give the observed probability distribution.
            distribution = s.value_counts(normalize=True, dropna=self.dropna)
            # scipy's entropy cannot consume pandas' nullable float dtype.
            if distribution.dtype == "Float64":
                distribution = distribution.astype("float64")
            return stats.entropy(distribution.to_numpy(), base=self.base)

        return pd_entropy
| 1,527 | 31.510638 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/n_unique_days_of_calendar_year.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Integer
from featuretools.primitives.base import AggregationPrimitive
class NUniqueDaysOfCalendarYear(AggregationPrimitive):
    """Determines the number of unique calendar days.

    Description:
        Given a list of datetimes, return the number of unique calendar
        days. The same date in two different years is counted as one. So
        Feb 21, 2017 is not unique from Feb 21, 2019.

    Examples:
        >>> from datetime import datetime
        >>> n_unique_days_of_calendar_year = NUniqueDaysOfCalendarYear()
        >>> times = [datetime(2019, 2, 1),
        ...          datetime(2019, 2, 1),
        ...          datetime(2018, 2, 1),
        ...          datetime(2019, 1, 1)]
        >>> n_unique_days_of_calendar_year(times)
        2
    """

    name = "n_unique_days_of_calendar_year"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def n_unique_days_of_calendar_year(x):
            # Format as "MM-DD" so the same calendar day from different
            # years collapses to one value before counting distinct entries.
            month_day = x.dropna().dt.strftime("%m-%d")
            return month_day.nunique()

        return n_unique_days_of_calendar_year
| 1,275 | 33.486486 | 79 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/n_most_common.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library
class NMostCommon(AggregationPrimitive):
    """Determines the `n` most common elements.

    Description:
        Given a list of values, return the `n` values
        which appear the most frequently. If there are
        fewer than `n` unique values, the output will be
        filled with `NaN`.

    Args:
        n (int): defines "n" in "n most common." Defaults
            to 3.

    Examples:
        >>> n_most_common = NMostCommon(n=2)
        >>> x = ['orange', 'apple', 'orange', 'apple', 'orange', 'grapefruit']
        >>> n_most_common(x).tolist()
        ['orange', 'apple']
    """

    name = "n_most_common"
    input_types = [ColumnSchema(semantic_tags={"category"})]
    return_type = None

    def __init__(self, n=3):
        self.n = n
        self.number_output_features = n
        self.description_template = [
            "the {} most common values of {{}}".format(n),
            "the most common value of {}",
            *["the {nth_slice} most common value of {}"] * (n - 1),
        ]

    def get_function(self, agg_type=Library.PANDAS):
        def n_most_common(x):
            # value_counts keeps zero-count entries for categorical dtype,
            # so drop them before selecting the top n.
            counts = x.value_counts()
            counts = counts[counts > 0]
            top = np.array(counts.index[: self.n])
            shortfall = self.n - len(top)
            if shortfall > 0:
                # Pad with NaN when fewer than n unique values exist.
                top = np.append(top, np.full(shortfall, np.nan))
            return top

        return n_most_common
| 1,761 | 31.62963 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/count_less_than.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Integer
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
class CountLessThan(AggregationPrimitive):
    """Determines the number of values less than a controllable threshold.

    Args:
        threshold (float): The threshold to use when counting the number
            of values less than. Defaults to 10.

    Examples:
        >>> count_less_than = CountLessThan(threshold=3.5)
        >>> count_less_than([1, 2, 3, 4, 5])
        3
    """

    name = "count_less_than"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def __init__(self, threshold=10):
        self.threshold = threshold

    def get_function(self):
        def count_less_than(x):
            # Series.count skips NaN, so only real values below the
            # threshold contribute to the result.
            below = x[x < self.threshold]
            return below.count()

        return count_less_than
| 1,005 | 28.588235 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/time_since_last_false.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, Datetime, Double
from featuretools.primitives.base import AggregationPrimitive
class TimeSinceLastFalse(AggregationPrimitive):
    """Calculates the time since the last `False` value.

    Description:
        Using a series of Datetimes and a series of Booleans, find the last
        record with a `False` value. Return the seconds elapsed between that record
        and the instance's cutoff time. Return nan if no values are `False`.

    Examples:
        >>> from datetime import datetime
        >>> time_since_last_false = TimeSinceLastFalse()
        >>> cutoff_time = datetime(2010, 1, 1, 12, 0, 0)
        >>> times = [datetime(2010, 1, 1, 11, 45, 0),
        ...          datetime(2010, 1, 1, 11, 55, 15),
        ...          datetime(2010, 1, 1, 11, 57, 30)]
        >>> booleans = [True, False, True]
        >>> time_since_last_false(times, booleans, time=cutoff_time)
        285.0
    """

    name = "time_since_last_false"
    input_types = [
        [
            ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
            ColumnSchema(logical_type=Boolean),
        ],
        [
            ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
            ColumnSchema(logical_type=BooleanNullable),
        ],
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_calc_time = True
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def time_since_last_false(datetime_col, bool_col, time=None):
            # Pair the two columns and drop rows missing either value.
            paired = pd.DataFrame(
                {
                    "datetime": datetime_col,
                    "bool": bool_col,
                },
            ).dropna()
            if paired.empty:
                return np.nan
            false_rows = paired[~paired["bool"]]
            if false_rows.empty:
                # No False observations at all.
                return np.nan
            last_false_label = false_rows.index[-1]
            elapsed = time - datetime_col.loc[last_false_label]
            return elapsed.total_seconds()

        return time_since_last_false
| 2,223 | 34.301587 | 83 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/n_unique_days_of_month.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Integer
from featuretools.primitives.base import AggregationPrimitive
class NUniqueDaysOfMonth(AggregationPrimitive):
    """Determines the number of unique days of month.

    Description:
        Given a list of datetimes, return the number of unique days
        of month. The maximum value is 31. 2018-01-01 and 2018-02-01
        will be counted as 1 unique day. 2019-01-01 and 2018-01-01
        will also be counted as 1.

    Examples:
        >>> from datetime import datetime
        >>> n_unique_days_of_month = NUniqueDaysOfMonth()
        >>> times = [datetime(2019, 1, 1),
        ...          datetime(2019, 2, 1),
        ...          datetime(2018, 2, 1),
        ...          datetime(2019, 1, 2),
        ...          datetime(2019, 1, 3)]
        >>> n_unique_days_of_month(times)
        3
    """

    name = "n_unique_days_of_month"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=Integer, semantic_tags={"numeric"})
    stack_on_self = False
    default_value = 0

    def get_function(self):
        def n_unique_days_of_month(x):
            # Only the day-of-month component matters, so dates from any
            # month or year that share a day number collapse together.
            day_numbers = x.dropna().dt.day
            return day_numbers.nunique()

        return n_unique_days_of_month
| 1,290 | 32.102564 | 79 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/sum_primitive.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.primitives.standard.aggregation.count import Count
from featuretools.utils.gen_utils import Library
class Sum(AggregationPrimitive):
    """Calculates the total addition, ignoring `NaN`.

    Examples:
        >>> sum = Sum()
        >>> sum([1, 2, 3, 4, 5, None])
        15.0
    """

    name = "sum"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    stack_on_self = False
    stack_on_exclude = [Count]
    default_value = 0
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the sum of {}"

    def get_function(self, agg_type=Library.PANDAS):
        # Dask and Spark take the aggregation by name; pandas gets the callable.
        if agg_type in [Library.DASK, Library.SPARK]:
            return "sum"
        return np.sum
| 943 | 28.5 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/aggregation/has_no_duplicates.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base import AggregationPrimitive
class HasNoDuplicates(AggregationPrimitive):
    """Determines if there are duplicates in the input.

    Args:
        skipna (bool): Determines if to use NA/null values.
            Defaults to True to skip NA/null.

    Examples:
        >>> has_no_duplicates = HasNoDuplicates()
        >>> has_no_duplicates([1, 1, 2])
        False
        >>> has_no_duplicates([1, 2, 3])
        True

        `NaN`s are skipped by default.

        >>> has_no_duplicates([1, 2, 3, None, None])
        True

        However, the way `NaN`s are treated can be controlled.

        >>> has_no_duplicates_skipna = HasNoDuplicates(skipna=False)
        >>> has_no_duplicates_skipna([1, 2, 3, None, None])
        False
        >>> has_no_duplicates_skipna([1, 2, 3, None])
        True
    """

    name = "has_no_duplicates"
    input_types = [
        [ColumnSchema(semantic_tags={"category"})],
        [ColumnSchema(semantic_tags={"numeric"})],
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    stack_on_self = False
    default_value = True

    def __init__(self, skipna=True):
        self.skipna = skipna

    def get_function(self):
        def has_no_duplicates(data):
            if self.skipna:
                # Drop nulls so repeated NaN does not count as a duplicate.
                data = data.dropna()
            any_repeated = data.duplicated().any()
            return not any_repeated

        return has_no_duplicates
| 1,498 | 26.759259 | 68 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/absolute_diff.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
class AbsoluteDiff(TransformPrimitive):
    """Calculates the absolute difference from the previous element
    in a list of numbers.

    Description:
        The absolute difference from the previous element is computed for
        all elements in the input. The first item in the output will always
        be nan, since there is no previous element for the first element.
        Elements in the input containing nan will be filled using either a
        forward-fill or backward-fill method, specified by the method argument.

    Args:
        method (str): Method to use for filling nan values in reindexed
            Series. Possible values are ['pad', 'ffill', 'backfill', 'bfill'].
            Default is 'ffill'.
            `pad / ffill`: propagate last valid observation forward
            to fill gap
            `backfill / bfill`: propagate next valid observation backward
            to fill gap
        limit (int): The max number of consecutive NaN values in a gap that
            can be filled. Default is None.

    Raises:
        ValueError: If `method` is not one of the four accepted values.

    Examples:
        >>> absolute_diff = AbsoluteDiff()
        >>> absolute_diff([2, 5, 15, 3]).tolist()
        [nan, 3.0, 10.0, 12.0]

        Forward filling of input elements using the 'ffill' argument

        >>> absolute_diff_ffill = AbsoluteDiff(method="ffill")
        >>> absolute_diff_ffill([None, 5, 10, 20, None, 10, None]).tolist()
        [nan, nan, 5.0, 10.0, 0.0, 10.0, 0.0]

        Backward filling of input element using the 'bfill' argument

        >>> absolute_diff_bfill = AbsoluteDiff(method="bfill")
        >>> absolute_diff_bfill([None, 5, 10, 20, None, 10, None]).tolist()
        [nan, 0.0, 5.0, 10.0, 10.0, 0.0, nan]

        The number of nan values that are filled can be limited

        >>> absolute_diff_limitfill = AbsoluteDiff(limit=2)
        >>> absolute_diff_limitfill([2, None, None, None, 3, 1]).tolist()
        [nan, 0.0, 0.0, nan, nan, 2.0]
    """

    name = "absolute_diff"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})

    def __init__(self, method="ffill", limit=None):
        if method not in ["backfill", "bfill", "pad", "ffill"]:
            raise ValueError("Invalid method")
        self.method = method
        self.limit = limit

    def get_function(self):
        def absolute_diff(data):
            # Series.fillna(method=...) is deprecated (pandas 2.1) and
            # removed in pandas 3.0; ffill()/bfill() behave identically.
            if self.method in ("pad", "ffill"):
                filled = data.ffill(limit=self.limit)
            else:  # "backfill" / "bfill"
                filled = data.bfill(limit=self.limit)
            return filled.diff().abs()

        return absolute_diff
| 2,599 | 35.619718 | 81 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/not_primitive.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Not(TransformPrimitive):
    """Negates a boolean value.

    Examples:
        >>> not_func = Not()
        >>> not_func([True, True, False]).tolist()
        [False, False, True]
    """

    name = "not"
    input_types = [
        [ColumnSchema(logical_type=Boolean)],
        [ColumnSchema(logical_type=BooleanNullable)],
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the negation of {}"

    def generate_name(self, base_feature_names):
        return "NOT({})".format(base_feature_names[0])

    def get_function(self):
        # numpy's elementwise negation works on arrays and series alike.
        return np.logical_not
| 917 | 27.6875 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/savgol_filter.py | from math import floor
import numpy as np
from scipy.signal import savgol_coeffs, savgol_filter
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
class SavgolFilter(TransformPrimitive):
    """Applies a Savitzky-Golay filter to a list of values.

    Description:
        Given a list of values, return a smoothed list which increases
        the signal to noise ratio without greatly distoring the
        signal. Uses the `Savitzky–Golay filter` method.
        If the input list has less than 20 values, it will be returned
        as is.

        See the following page for more info:
        https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.savgol_filter.html

    Args:
        window_length (int): The length of the filter window (i.e. the number
            of coefficients). `window_length` must be a positive odd integer.
        polyorder (int): The order of the polynomial used to fit the samples.
            `polyorder` must be less than `window_length`.
        deriv (int): Optional. The order of the derivative to compute. This
            must be a nonnegative integer. The default is 0, which means to
            filter the data without differentiating.
        delta (float): Optional. The spacing of the samples to which the filter
            will be applied. This is only used if deriv > 0. Default is 1.0.
        mode (str): Optional. Must be 'mirror', 'constant', 'nearest', 'wrap'
            or 'interp'. This determines the type of extension to use for the
            padded signal to which the filter is applied. When `mode` is
            'constant', the padding value is given by `cval`. See the Notes
            for more details on 'mirror', 'constant', 'wrap', and 'nearest'.
            When the 'interp' mode is selected (the default), no extension
            is used. Instead, a degree `polyorder` polynomial is fit to the
            last `window_length` values of the edges, and this polynomial is
            used to evaluate the last `window_length // 2` output values.
        cval (scalar): Optional. Value to fill past the edges of the input
            if `mode` is 'constant'. Default is 0.0.

    Raises:
        ValueError: If `mode` is invalid, if only one of `window_length` /
            `polyorder` is given, or if the filter parameters are
            incompatible (propagated from ``savgol_coeffs``).

    Examples:
        >>> savgol_filter = SavgolFilter()
        >>> data = [0, 1, 1, 2, 3, 4, 5, 7, 8, 7, 9, 9, 12, 11, 12, 14, 15, 17, 17, 17, 20]
        >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
        [0.0429, 0.8286, 1.2571]

        We can control `window_length` and `polyorder` of the filter.

        >>> savgol_filter = SavgolFilter(window_length=13, polyorder=3)
        >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
        [-0.0962, 0.6484, 1.4451]

        We can also control the `deriv` and `delta` parameters.

        >>> savgol_filter = SavgolFilter(deriv=1, delta=1.5)
        >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
        [0.754, 0.3492, 0.2778]

        Finally, we can use `mode` to control how edge values are handled.

        >>> savgol_filter = SavgolFilter(mode='constant', cval=5)
        >>> [round(x, 4) for x in savgol_filter(data).tolist()[:3]]
        [1.5429, 0.2286, 1.2571]
    """

    name = "savgol_filter"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})

    def __init__(
        self,
        window_length=None,
        polyorder=None,
        deriv=0,
        delta=1.0,
        mode="interp",
        cval=0.0,
    ):
        # Validate eagerly so bad parameters fail at construction time.
        # (The original wrapped this in a no-op `try/except Exception: raise`,
        # which has been removed; savgol_coeffs raises on its own if
        # window_length/polyorder/deriv/delta are incompatible.)
        if window_length is not None and polyorder is not None:
            if mode not in ["mirror", "constant", "nearest", "interp", "wrap"]:
                raise ValueError(
                    "mode must be 'mirror', 'constant', "
                    "'nearest', 'wrap' or 'interp'.",
                )
            savgol_coeffs(window_length, polyorder, deriv=deriv, delta=delta)
        elif (window_length is None and polyorder is not None) or (
            window_length is not None and polyorder is None
        ):
            error_text = (
                "Both window_length and polyorder must be defined if you define one."
            )
            raise ValueError(error_text)
        self.window_length = window_length
        self.polyorder = polyorder
        self.deriv = deriv
        self.delta = delta
        self.mode = mode
        self.cval = cval

    def get_function(self):
        def smooth(x):
            # Short inputs are returned unchanged.
            if x.shape[0] < 20:
                return x
            if np.isnan(np.min(x)):
                # Interpolate NaN values (works for edge and interior gaps).
                # Fix: operate on a copy — the original wrote the
                # interpolated values back into the caller's data.
                x = x.copy()
                mask = np.isnan(x)
                x[mask] = np.interp(
                    np.flatnonzero(mask),
                    np.flatnonzero(~mask),
                    x[~mask],
                )
            window_length = self.window_length
            polyorder = self.polyorder
            if window_length is None and polyorder is None:
                # Default: odd window roughly 1/5 of the series length,
                # with a cubic polynomial fit.
                window_length = floor(len(x) / 10) * 2 + 1
                polyorder = 3
            return savgol_filter(
                x,
                window_length=window_length,
                polyorder=polyorder,
                deriv=self.deriv,
                delta=self.delta,
                mode=self.mode,
                cval=self.cval,
            )

        return smooth
| 5,532 | 37.423611 | 99 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/file_extension.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Filepath
from featuretools.primitives.base import TransformPrimitive
class FileExtension(TransformPrimitive):
    """Determines the extension of a filepath.

    Description:
        Given a list of filepaths, return the extension
        suffix of each one, lowercased. If the filepath is missing
        or has no alphabetic extension, return `NaN`.

    Examples:
        >>> file_extension = FileExtension()
        >>> file_extension(['doc.txt', '~/documents/data.json', 'file']).tolist()
        ['.txt', '.json', nan]
    """

    name = "file_extension"
    input_types = [ColumnSchema(logical_type=Filepath)]
    return_type = ColumnSchema(semantic_tags={"category"})

    def get_function(self):
        def file_extension(x):
            # Fix: the original pattern was r"(\.[a-z|A-Z]+$)" — inside a
            # character class "|" is a literal, so paths ending in a pipe
            # character (e.g. "file.|") wrongly matched as extensions.
            p = r"(\.[a-zA-Z]+$)"
            return x.str.extract(p, expand=False).str.lower()

        return file_extension
| 919 | 28.677419 | 81 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/full_name_to_first_name.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, PersonFullName
from featuretools.primitives.base import TransformPrimitive
class FullNameToFirstName(TransformPrimitive):
    """Determines the first name from a person's name.

    Description:
        Given a list of names, determines the first name. If
        only a single name is provided, assume this is a first name.
        If only a title and a single name is provided return `nan`.
        This assumes all titles will be followed by a period. Please note,
        in the current implementation, last names containing spaces may
        result in improper first name matches.

    Examples:
        >>> full_name_to_first_name = FullNameToFirstName()
        >>> names = ['Woolf Spector', 'Oliva y Ocana, Dona. Fermina',
        ...          'Ware, Mr. Frederick', 'Peter, Michael J', 'Mr. Brown']
        >>> full_name_to_first_name(names).to_list()
        ['Woolf', 'Oliva', 'Frederick', 'Michael', nan]
    """

    name = "full_name_to_first_name"
    input_types = [ColumnSchema(logical_type=PersonFullName)]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})

    def get_function(self):
        def full_name_to_first_name(x):
            title_with_last_pattern = r"(^[A-Z][a-z]+\. [A-Z][a-z]+$)"
            titles_pattern = r"([A-Z][a-z]+)\. "
            working = pd.DataFrame({"names": x})
            # Blank out entries that consist only of a title and one name
            working["names"] = working["names"].str.replace(
                title_with_last_pattern,
                "",
                regex=True,
            )
            # Strip recognized titles such as "Mr." or "Dona."
            working["names"] = working["names"].str.replace(
                titles_pattern,
                "",
                regex=True,
            )
            # Pull the first-name candidate out of what remains
            pattern = r"([A-Z][a-z]+ |, [A-Z][a-z]+$|^[A-Z][a-z]+$)"
            working["first_name"] = working["names"].str.extract(pattern)
            # Drop stray commas and surrounding whitespace
            working["first_name"] = (
                working["first_name"].str.replace(",", "").str.strip()
            )
            return working["first_name"]

        return full_name_to_first_name
| 2,179 | 40.132075 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/is_null.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsNull(TransformPrimitive):
    """Determines if a value is null.

    Examples:
        >>> is_null = IsNull()
        >>> is_null([1, None, 3]).tolist()
        [False, True, False]
    """

    name = "is_null"
    input_types = [ColumnSchema()]
    return_type = ColumnSchema(logical_type=Boolean)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} is null"

    def get_function(self):
        def isnull(array):
            # pandas' isnull covers None, NaN and NaT uniformly
            return array.isnull()

        return isnull
| 738 | 25.392857 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/percent_change.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
class PercentChange(TransformPrimitive):
    """Determines the percent difference between values in a list.

    Description:
        Given a list of numbers, return the percent difference
        between each subsequent number. Percentages are shown in
        decimal form (not multiplied by 100). Uses pandas' pct_change
        function.

    Args:
        periods (int): Periods to shift for calculating percent change.
            Default is 1.
        fill_method (str): Method for filling gaps in reindexed
            Series. Valid options are `backfill`, `bfill`, `pad`, `ffill`.
            `pad / ffill`: fill gap with last valid observation.
            `backfill / bfill`: fill gap with next valid observation.
            Default is `pad`.
        limit (int): The max number of consecutive NaN values in a gap that
            can be filled. Default is None.
        freq (DateOffset, timedelta, or offset alias string):
            If `freq` is specified, instead of calcualting change between subsequent
            points, PercentChange will calculate change between points with a
            certain interval between their date indices. `freq` defines the
            desired interval. When freq is used, the resulting index will also be
            filled to include any missing dates from the specified interval.
            If the index is not date/datetime and freq is used, it will raise a
            NotImplementedError.
            If freq is None, no changes will be applied. Default is None.

    Raises:
        ValueError: If `fill_method` is not one of the four accepted values.

    Examples:
        >>> percent_change = PercentChange()
        >>> percent_change([2, 5, 15, 3, 3, 9, 4.5]).to_list()
        [nan, 1.5, 2.0, -0.8, 0.0, 2.0, -0.5]

        We can control the number of periods to return the percent
        difference between points further from one another.

        >>> percent_change_2 = PercentChange(periods=2)
        >>> percent_change_2([2, 5, 15, 3, 3, 9, 4.5]).to_list()
        [nan, nan, 6.5, -0.4, -0.8, 2.0, 0.5]

        We can control the method used to handle gaps in data.

        >>> percent_change = PercentChange()
        >>> percent_change([2, 4, 8, None, 16, None, 32, None]).to_list()
        [nan, 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]
        >>> percent_change_backfill = PercentChange(fill_method='backfill')
        >>> percent_change_backfill([2, 4, 8, None, 16, None, 32, None]).to_list()
        [nan, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, nan]

        We can also control the maximum number of NaN values to fill in a gap.

        >>> percent_change = PercentChange()
        >>> percent_change([2, None, None, None, 4]).to_list()
        [nan, 0.0, 0.0, 0.0, 1.0]
        >>> percent_change_limited = PercentChange(limit=2)
        >>> percent_change_limited([2, None, None, None, 4]).to_list()
        [nan, 0.0, 0.0, nan, nan]

        Finally, we can specify a date frequency on which to calculate percent
        change.

        >>> import pandas as pd
        >>> dates = pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-05'])
        >>> x_indexed = pd.Series([1, 2, 3, 4], index=dates)
        >>> percent_change = PercentChange()
        >>> percent_change(x_indexed).to_list()
        [nan, 1.0, 0.5, 0.33333333333333326]
        >>> date_offset = pd.tseries.offsets.DateOffset(days=1)
        >>> percent_change_freq = PercentChange(freq=date_offset)
        >>> percent_change_freq(x_indexed).to_list()
        [nan, 1.0, 0.5, nan]
    """

    name = "percent_change"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})

    def __init__(self, periods=1, fill_method="pad", limit=None, freq=None):
        if fill_method not in ["backfill", "bfill", "pad", "ffill"]:
            raise ValueError("Invalid fill_method")
        self.periods = periods
        self.fill_method = fill_method
        self.limit = limit
        self.freq = freq

    def get_function(self):
        def percent_change(data):
            # Pass arguments by keyword: pandas deprecated positional
            # arguments beyond `periods` for pct_change (and `fill_method`/
            # `limit` are themselves deprecated in pandas 2.2+).
            return data.pct_change(
                periods=self.periods,
                fill_method=self.fill_method,
                limit=self.limit,
                freq=self.freq,
            )

        return percent_change
| 4,377 | 39.537037 | 94 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/full_name_to_title.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, PersonFullName
from featuretools.primitives.base import TransformPrimitive
class FullNameToTitle(TransformPrimitive):
    """Determines the title from a person's name.

    Description:
        Given a list of names, determines the title, or
        prefix of each name (e.g. "Mr", "Mrs", etc). If
        no title is found, returns `NaN`.

    Examples:
        >>> full_name_to_title = FullNameToTitle()
        >>> names = ['Spector, Mr. Woolf', 'Oliva y Ocana, Dona. Fermina',
        ...          'Ware, Mr. Frederick', 'Peter, Michael J', 'Mr. Brown']
        >>> full_name_to_title(names).to_list()
        ['Mr', 'Dona', 'Mr', nan, 'Mr']
    """

    name = "full_name_to_title"
    input_types = [ColumnSchema(logical_type=PersonFullName)]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})

    def get_function(self):
        def full_name_to_title(x):
            # A title is a capitalized word immediately followed by ". "
            pattern = r"([A-Z][a-z]+)\. "
            extracted = x.str.extract(pattern, expand=True)
            return extracted[0]

        return full_name_to_title
| 1,132 | 33.333333 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/full_name_to_last_name.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, PersonFullName
from featuretools.primitives.base import TransformPrimitive
class FullNameToLastName(TransformPrimitive):
    """Determines the last name from a person's name.

    Description:
        Given a list of names, determines the last name. If
        only a single name is provided, assume this is a first name, and
        return `nan`. This assumes all titles will be followed by a period.

    Examples:
        >>> full_name_to_last_name = FullNameToLastName()
        >>> names = ['Woolf Spector', 'Oliva y Ocana, Dona. Fermina',
        ...          'Ware, Mr. Frederick', 'Peter, Michael J', 'Mr. Brown']
        >>> full_name_to_last_name(names).to_list()
        ['Spector', 'Oliva y Ocana', 'Ware', 'Peter', 'Brown']
    """

    name = "full_name_to_last_name"
    input_types = [ColumnSchema(logical_type=PersonFullName)]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})

    def get_function(self):
        def full_name_to_last_name(x):
            # Fix: the docstring previously claimed this primitive returns
            # the *first* name (copy-paste from FullNameToFirstName);
            # the implementation extracts the last name.
            titles_pattern = r"([A-Z][a-z]+)\. "
            df = pd.DataFrame({"names": x})
            # Extract the last-name candidate: a leading "Last," segment,
            # a "Title. Name" pair, or the final (possibly hyphenated) word.
            pattern = r"(^.+?,|^[A-Z][a-z]+\. [A-Z][a-z]+$| [A-Z][a-z]+$| [A-Z][a-z]+[/-][A-Z][a-z]+$)"
            df["last_name"] = df["names"].str.extract(pattern)
            # Remove any title that was captured alongside the name
            df["last_name"] = df["last_name"].str.replace(
                titles_pattern,
                "",
                regex=True,
            )
            # Clean up white space and leftover commas
            df["last_name"] = df["last_name"].str.replace(",", "").str.strip()
            return df["last_name"]

        return full_name_to_last_name
| 1,803 | 37.382979 | 103 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/nth_week_of_month.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base import TransformPrimitive
class NthWeekOfMonth(TransformPrimitive):
    """Determines the nth week of the month from a given date.

    Description:
        Converts a datetime to a float representing the week
        of the month in which the date falls. The first day of
        the month starts week 1, and the week number is incremented
        each Sunday.

    Examples:
        >>> from datetime import datetime
        >>> nth_week_of_month = NthWeekOfMonth()
        >>> times = [datetime(2019, 3, 1),
        ...          datetime(2019, 3, 3),
        ...          datetime(2019, 3, 31),
        ...          datetime(2019, 3, 30)]
        >>> nth_week_of_month(times).tolist()
        [1.0, 2.0, 6.0, 5.0]
    """

    name = "nth_week_of_month"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})

    def get_function(self):
        def nth_week_of_month(x):
            df = pd.DataFrame({"date": x})
            # First calendar day of each date's month.
            df["first_day"] = df.date - pd.to_timedelta(df["date"].dt.day - 1, unit="d")
            df["dom"] = df.date.dt.day
            # pandas weekday convention: Monday=0 ... Sunday=6.
            df["first_day_weekday"] = df.first_day.dt.weekday
            # Shift the day-of-month so week boundaries land on Sundays.
            df["adjusted_dom"] = df.dom + df.first_day_weekday + 1
            # If the month starts on a Sunday no shift is needed.
            df.loc[df["first_day_weekday"].astype(float) == 6.0, "adjusted_dom"] = df[
                "dom"
            ]
            df["week_of_month"] = np.ceil(df.adjusted_dom / 7.0)
            return df.week_of_month.values

        return nth_week_of_month
| 1,697 | 35.12766 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/__init__.py | # flake8: noqa
from featuretools.primitives.standard.transform.absolute_diff import AbsoluteDiff
from featuretools.primitives.standard.transform.binary import *
from featuretools.primitives.standard.transform.cumulative import *
from featuretools.primitives.standard.transform.datetime import *
from featuretools.primitives.standard.transform.email import *
from featuretools.primitives.standard.transform.exponential import *
from featuretools.primitives.standard.transform.file_extension import FileExtension
from featuretools.primitives.standard.transform.full_name_to_first_name import (
FullNameToFirstName,
)
from featuretools.primitives.standard.transform.full_name_to_last_name import (
FullNameToLastName,
)
from featuretools.primitives.standard.transform.full_name_to_title import (
FullNameToTitle,
)
from featuretools.primitives.standard.transform.nth_week_of_month import NthWeekOfMonth
from featuretools.primitives.standard.transform.is_in import IsIn
from featuretools.primitives.standard.transform.is_null import IsNull
from featuretools.primitives.standard.transform.latlong import *
from featuretools.primitives.standard.transform.natural_language import *
from featuretools.primitives.standard.transform.not_primitive import Not
from featuretools.primitives.standard.transform.numeric import *
from featuretools.primitives.standard.transform.percent_change import PercentChange
from featuretools.primitives.standard.transform.postal import *
from featuretools.primitives.standard.transform.savgol_filter import SavgolFilter
from featuretools.primitives.standard.transform.time_series import *
from featuretools.primitives.standard.transform.url import *
| 1,685 | 55.2 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/is_in.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsIn(TransformPrimitive):
    """Determines whether a value is present in a provided list.

    Examples:
        >>> items = ['string', 10.3, False]
        >>> is_in = IsIn(list_of_outputs=items)
        >>> is_in(['string', 10.5, False]).tolist()
        [True, False, True]
    """

    name = "isin"
    input_types = [ColumnSchema()]
    return_type = ColumnSchema(logical_type=Boolean)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, list_of_outputs=None):
        self.list_of_outputs = list_of_outputs
        # Render the membership list for the feature description.
        if list_of_outputs:
            shown = ", ".join(str(item) for item in list_of_outputs)
        else:
            shown = "[]"
        self.description_template = "whether {{}} is in {}".format(shown)

    def get_function(self):
        def pd_is_in(array):
            # A None membership list behaves like an empty one.
            return array.isin(self.list_of_outputs or [])

        return pd_is_in

    def generate_name(self, base_feature_names):
        return "{}.isin({})".format(base_feature_names[0], str(self.list_of_outputs))
| 1,322 | 31.268293 | 82 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/latlong/longitude.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import LatLong
from featuretools.primitives.base import TransformPrimitive
class Longitude(TransformPrimitive):
    """Returns the second tuple value in a list of LatLong tuples.
    For use with the LatLong logical type.

    Examples:
        >>> longitude = Longitude()
        >>> longitude([(42.4, -71.1),
        ...            (40.0, -122.4),
        ...            (41.2, -96.75)]).tolist()
        [-71.1, -122.4, -96.75]
    """

    name = "longitude"
    input_types = [ColumnSchema(logical_type=LatLong)]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    description_template = "the longitude of {}"

    def get_function(self):
        def longitude(latlong):
            # Convert the series of (lat, lon) tuples into a 2-D array
            # and slice out the longitude column.
            pairs = np.array(latlong.tolist())
            return pairs[:, 1]

        return longitude
| 895 | 27.903226 | 66 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/latlong/is_in_geobox.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, LatLong
from featuretools.primitives.base import TransformPrimitive
class IsInGeoBox(TransformPrimitive):
    """Determines if coordinates are inside a box defined by two
    corner coordinate points.

    Description:
        Coordinate values should be specified as (latitude, longitude)
        tuples. This primitive is unable to handle coordinates and boxes
        at the poles, and near +/- 180 degrees latitude.

    Args:
        point1 (tuple(float, float)): The coordinates
            of the first corner of the box. Defaults to (0, 0).
        point2 (tuple(float, float)): The coordinates
            of the diagonal corner of the box. Defaults to (0, 0).

    Example:
        >>> is_in_geobox = IsInGeoBox((40.7128, -74.0060), (42.2436, -71.1677))
        >>> is_in_geobox([(41.034, -72.254), (39.125, -87.345)]).tolist()
        [True, False]
    """

    name = "is_in_geobox"
    input_types = [ColumnSchema(logical_type=LatLong)]
    return_type = ColumnSchema(logical_type=BooleanNullable)

    def __init__(self, point1=(0, 0), point2=(0, 0)):
        self.point1 = point1
        self.point2 = point2
        # Sort each axis so it does not matter which corner is which.
        self.lats = np.sort(np.array([point1[0], point2[0]]))
        self.lons = np.sort(np.array([point1[1], point2[1]]))

    def get_function(self):
        def geobox(latlongs):
            # Row 0 of the transposed array holds latitudes, row 1 longitudes.
            coords = np.transpose(np.array(latlongs.tolist()))
            in_lat_range = (self.lats[0] <= coords[0]) & (coords[0] <= self.lats[1])
            in_lon_range = (self.lons[0] <= coords[1]) & (coords[1] <= self.lons[1])
            return in_lat_range & in_lon_range

        return geobox
| 1,722 | 35.659574 | 85 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/latlong/utils.py | import numpy as np
def _haversine_calculate(lat_1s, lon_1s, lat_2s, lon_2s, unit):
# https://stackoverflow.com/a/29546836/2512385
lon1, lat1, lon2, lat2 = map(np.radians, [lon_1s, lat_1s, lon_2s, lat_2s])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2
radius_earth = 3958.7613
if unit == "kilometers":
radius_earth = 6371.0088
distances = radius_earth * 2 * np.arcsin(np.sqrt(a))
return distances
| 518 | 33.6 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/latlong/haversine.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import LatLong
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.latlong.utils import (
_haversine_calculate,
)
class Haversine(TransformPrimitive):
    """Calculates the approximate haversine distance between two LatLong columns.

    Args:
        unit (str): Determines the unit value to output. Could
            be `miles` or `kilometers`. Default is `miles`.

    Examples:
        >>> haversine = Haversine()
        >>> distances = haversine([(42.4, -71.1), (40.0, -122.4)],
        ...                       [(40.0, -122.4), (41.2, -96.75)])
        >>> np.round(distances, 3).tolist()
        [2631.231, 1343.289]

        Output units can be specified

        >>> haversine_km = Haversine(unit='kilometers')
        >>> distances_km = haversine_km([(42.4, -71.1), (40.0, -122.4)],
        ...                             [(40.0, -122.4), (41.2, -96.75)])
        >>> np.round(distances_km, 3).tolist()
        [4234.555, 2161.814]
    """

    name = "haversine"
    input_types = [
        ColumnSchema(logical_type=LatLong),
        ColumnSchema(logical_type=LatLong),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    commutative = True

    def __init__(self, unit="miles"):
        valid_units = ["miles", "kilometers"]
        if unit not in valid_units:
            # Reject anything other than the two supported units up front.
            raise ValueError(
                "Invalid unit %s provided. Must be one of %s" % (unit, valid_units),
            )
        self.unit = unit
        self.description_template = (
            "the haversine distance in {} between {{}} and {{}}".format(self.unit)
        )

    def get_function(self):
        def haversine(latlong_1, latlong_2):
            # Unpack the tuple series into coordinate arrays:
            # column 0 = latitude, column 1 = longitude.
            points_a = np.array(latlong_1.tolist())
            points_b = np.array(latlong_2.tolist())
            return _haversine_calculate(
                points_a[:, 0],
                points_a[:, 1],
                points_b[:, 0],
                points_b[:, 1],
                self.unit,
            )

        return haversine

    def generate_name(self, base_feature_names):
        parts = [", ".join(base_feature_names)]
        # Only mention the unit when it differs from the default.
        if self.unit != "miles":
            parts.append("unit={}".format(self.unit))
        return "{}({})".format(self.name.upper(), ", ".join(parts))
| 2,530 | 32.302632 | 86 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/latlong/geomidpoint.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import LatLong
from featuretools.primitives.base import TransformPrimitive
class GeoMidpoint(TransformPrimitive):
    """Determines the geographic center of two coordinates.

    Examples:
        >>> geomidpoint = GeoMidpoint()
        >>> geomidpoint([(42.4, -71.1)], [(40.0, -122.4)])
        [(41.2, -96.75)]
    """

    name = "geomidpoint"
    input_types = [
        ColumnSchema(logical_type=LatLong),
        ColumnSchema(logical_type=LatLong),
    ]
    return_type = ColumnSchema(logical_type=LatLong)
    commutative = True

    def get_function(self):
        def geomidpoint_func(latlong_1, latlong_2):
            first = np.array(latlong_1.tolist())
            second = np.array(latlong_2.tolist())
            # Average the two latitudes and the two longitudes pairwise.
            mid_lat = np.array([first[:, 0], second[:, 0]]).transpose().mean(axis=1)
            mid_lon = np.array([first[:, 1], second[:, 1]]).transpose().mean(axis=1)
            return list(zip(mid_lat, mid_lon))

        return geomidpoint_func
| 1,211 | 30.076923 | 76 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/latlong/latitude.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import LatLong
from featuretools.primitives.base import TransformPrimitive
class Latitude(TransformPrimitive):
    """Returns the first tuple value in a list of LatLong tuples.
    For use with the LatLong logical type.

    Examples:
        >>> latitude = Latitude()
        >>> latitude([(42.4, -71.1),
        ...           (40.0, -122.4),
        ...           (41.2, -96.75)]).tolist()
        [42.4, 40.0, 41.2]
    """

    name = "latitude"
    input_types = [ColumnSchema(logical_type=LatLong)]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    description_template = "the latitude of {}"

    def get_function(self):
        def latitude(latlong):
            # Convert the series of (lat, lon) tuples into a 2-D array
            # and slice out the latitude column.
            pairs = np.array(latlong.tolist())
            return pairs[:, 0]

        return latitude
| 881 | 27.451613 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/latlong/__init__.py | from featuretools.primitives.standard.transform.latlong.cityblock_distance import (
CityblockDistance,
)
from featuretools.primitives.standard.transform.latlong.geomidpoint import GeoMidpoint
from featuretools.primitives.standard.transform.latlong.haversine import Haversine
from featuretools.primitives.standard.transform.latlong.is_in_geobox import IsInGeoBox
from featuretools.primitives.standard.transform.latlong.latitude import Latitude
from featuretools.primitives.standard.transform.latlong.longitude import Longitude
| 530 | 58 | 86 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/latlong/cityblock_distance.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double, LatLong
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.latlong.utils import (
_haversine_calculate,
)
class CityblockDistance(TransformPrimitive):
    """Calculates the distance between points in a city road grid.

    Description:
        This distance is calculated using the haversine formula, which
        takes into account the curvature of the Earth.
        If either input data contains `NaN`s, the calculated
        distance will be `NaN`.
        This calculation is also known as the Manhattan distance.

    Args:
        unit (str): Determines the unit value to output. Could
            be miles or kilometers. Default is miles.

    Examples:
        >>> cityblock_distance = CityblockDistance()
        >>> DC = (38, -77)
        >>> Boston = (43, -71)
        >>> NYC = (40, -74)
        >>> distances_mi = cityblock_distance([DC, DC], [NYC, Boston])
        >>> np.round(distances_mi, 3).tolist()
        [301.519, 672.089]

        We can also change the units in which the distance is calculated.

        >>> cityblock_distance_kilometers = CityblockDistance(unit='kilometers')
        >>> distances_km = cityblock_distance_kilometers([DC, DC], [NYC, Boston])
        >>> np.round(distances_km, 3).tolist()
        [485.248, 1081.622]
    """

    name = "cityblock_distance"
    input_types = [
        ColumnSchema(logical_type=LatLong),
        ColumnSchema(logical_type=LatLong),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    commutative = True

    def __init__(self, unit="miles"):
        if unit not in ["miles", "kilometers"]:
            raise ValueError("Invalid unit given")
        self.unit = unit

    def get_function(self):
        def cityblock(latlong_1, latlong_2):
            latlong_1 = np.array(latlong_1.tolist())
            latlong_2 = np.array(latlong_2.tolist())
            lat_1s = latlong_1[:, 0]
            lat_2s = latlong_2[:, 0]
            lon_1s = latlong_1[:, 1]
            lon_2s = latlong_2[:, 1]
            # Manhattan distance = east-west leg plus north-south leg,
            # each measured along a great circle.
            lon_dis = _haversine_calculate(lat_1s, lon_1s, lat_1s, lon_2s, self.unit)
            lat_dist = _haversine_calculate(lat_1s, lon_1s, lat_2s, lon_1s, self.unit)
            return pd.Series(lon_dis + lat_dist)

        return cityblock
| 2,452 | 34.550725 | 86 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/number_of_hashtags.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.standard.transform.natural_language.count_string import (
CountString,
)
class NumberOfHashtags(CountString):
    """Determines the number of hashtags in a string.

    Description:
        Given a list of strings, determine the number of hashtags
        in each string.

        A hashtag is defined as a string that meets the following criteria:
            - Starts with a '#' character, followed by a sequence of alphanumeric characters containing at least one alphabetic character
            - Present at the start of a string or after whitespace
            - Terminated by the end of the string, a whitespace, or a punctuation character other than '#'
            - e.g. The string '#yes-no' contains a valid hashtag ('#yes')
            - e.g. The string '#yes#' does not contain a valid hashtag

        This implementation handles Unicode characters.
        This implementation does not impose any character limit on hashtags.

        If a string is missing, return `NaN`.

    Examples:
        >>> x = ['#regular #expression', 'this is a string', '###__regular#1and_0#expression']
        >>> number_of_hashtags = NumberOfHashtags()
        >>> number_of_hashtags(x).tolist()
        [2.0, 0.0, 0.0]
    """

    name = "number_of_hashtags"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self):
        # '#' at start-of-string or after whitespace, then word characters
        # including at least one letter, not followed by '#' or another
        # word character.
        hashtag_regex = r"((^#)|\s#)(\w*([^\W\d])+\w*)(?![#\w])"
        super().__init__(string=hashtag_regex, is_regex=True, ignore_case=False)
| 1,750 | 38.795455 | 137 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/total_word_length.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.natural_language.constants import (
PUNCTUATION_AND_WHITESPACE,
)
class TotalWordLength(TransformPrimitive):
    """Determines the total word length.

    Description:
        Given list of strings, determine the total
        word length in each string. A word is defined as
        a series of any characters not separated by a delimiter.
        If a string is empty or `NaN`, return `NaN`.

    Args:
        do_not_count (str): Regex pattern of characters that do not count
            toward word length (i.e. separator characters). Defaults to
            punctuation and whitespace characters.

    Examples:
        >>> x = ['This is a test file', 'This is second line', 'third line $1,000', None]
        >>> total_word_length = TotalWordLength()
        >>> total_word_length(x).tolist()
        [15.0, 16.0, 13.0, nan]
    """

    name = "total_word_length"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self, do_not_count=PUNCTUATION_AND_WHITESPACE):
        self.do_not_count = do_not_count

    def get_function(self):
        def total_word_length(x):
            # Total character count minus separator characters leaves only
            # the characters that belong to words.
            return x.str.len() - x.str.count(self.do_not_count)

        return total_word_length
| 1,486 | 32.795455 | 91 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/number_of_unique_words.py | from string import punctuation
from typing import Iterable
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.natural_language.constants import (
DELIMITERS,
)
class NumberOfUniqueWords(TransformPrimitive):
    """Determines the number of unique words in a string.

    Description:
        Determines the number of unique words in a given string. Includes options for
        case-insensitive behavior.

    Args:
        case_insensitive (bool, optional): Specify case_insensitivity when searching for unique words.
            For example, setting this to True would mean "WORD word" would be treated as having
            one unique word. Defaults to False.

    Examples:
        >>> x = ['Word word Word', 'This is a SENTENCE.', 'green red green']
        >>> number_of_unique_words = NumberOfUniqueWords()
        >>> number_of_unique_words(x).tolist()
        [2, 4, 2]

        >>> x = ['word WoRD WORD worD', 'dog dog dog', 'catt CAT caT']
        >>> number_of_unique_words = NumberOfUniqueWords(case_insensitive=True)
        >>> number_of_unique_words(x).tolist()
        [1, 1, 2]
    """

    name = "number_of_unique_words"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self, case_insensitive=False):
        self.case_insensitive = case_insensitive

    def get_function(self):
        def count_distinct(tokens):
            # Rows that did not split into a list (e.g. missing values)
            # propagate as NA.
            if not isinstance(tokens, Iterable):
                return pd.NA
            distinct = set()
            for token in tokens:
                stripped = token.strip(punctuation)
                if stripped:
                    distinct.add(stripped)
            return len(distinct)

        def num_unique_words(array):
            if self.case_insensitive:
                array = array.str.lower()
            return array.str.split(DELIMITERS).apply(count_distinct)

        return num_unique_words
| 2,223 | 33.215385 | 102 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/upper_case_count.py | # -*- coding: utf-8 -*-
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.standard.transform.natural_language.count_string import (
CountString,
)
class UpperCaseCount(CountString):
    """Calculates the number of upper case letters in text.

    Description:
        Given a list of strings, determine the number of characters in each string
        that are capitalized. Counts every letter individually, not just every
        word that contains capitalized letters.

        If a string is missing, return `NaN`

    Examples:
        >>> x = ['This IS a string.', 'This is a string', 'aaa']
        >>> upper_case_count = UpperCaseCount()
        >>> upper_case_count(x).tolist()
        [3.0, 1.0, 0.0]
    """

    name = "upper_case_count"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self):
        # Count each individual capital letter A-Z.
        super().__init__(string=r"([A-Z])", is_regex=True, ignore_case=False)
| 1,152 | 31.942857 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/upper_case_word_count.py | import re
from string import punctuation
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.natural_language.constants import (
DELIMITERS,
)
class UpperCaseWordCount(TransformPrimitive):
    """Determines the number of words in a string that are entirely capitalized.

    Description:
        Given list of strings, determine the number of words in each string
        that are entirely capitalized.

        If a string is missing, return `NaN`.

    Examples:
        >>> x = ['This IS a string.', 'This is a string', 'AAA']
        >>> upper_case_word_count = UpperCaseWordCount()
        >>> upper_case_word_count(x).tolist()
        [1, 0, 1]
    """

    name = "upper_case_word_count"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def get_function(self):
        def _count_fully_capitalized(text):
            # Missing strings propagate as NA.
            if pd.isna(text):
                return pd.NA
            total = 0
            for word in re.split(DELIMITERS, text):
                # Ignore tokens that are only punctuation; a word counts
                # when uppercasing it leaves it unchanged.
                if word.strip(punctuation) and word.upper() == word:
                    total += 1
            return total

        def upper_case_word_count(x):
            return x.apply(_count_fully_capitalized)

        return upper_case_word_count
| 1,539 | 30.428571 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/constants.py | from string import punctuation
# Regex character class matching word delimiters (space, newline, tab).
DELIMITERS = "[ \n\t]"
# Regex character class matching any ASCII punctuation or whitespace character.
PUNCTUATION_AND_WHITESPACE = f"[{punctuation}\n\t ]"
common_words_1000 = frozenset(
[
"the",
"of",
"to",
"and",
"a",
"in",
"is",
"it",
"you",
"that",
"he",
"was",
"for",
"on",
"are",
"with",
"as",
"i",
"his",
"they",
"be",
"at",
"one",
"have",
"this",
"from",
"or",
"had",
"by",
"not",
"word",
"but",
"what",
"some",
"we",
"can",
"out",
"other",
"were",
"all",
"there",
"when",
"up",
"use",
"your",
"how",
"said",
"an",
"each",
"she",
"which",
"do",
"their",
"time",
"if",
"will",
"way",
"about",
"many",
"then",
"them",
"write",
"would",
"like",
"so",
"these",
"her",
"long",
"make",
"thing",
"see",
"him",
"two",
"has",
"look",
"more",
"day",
"could",
"go",
"come",
"did",
"number",
"sound",
"no",
"most",
"people",
"my",
"over",
"know",
"water",
"than",
"call",
"first",
"who",
"may",
"down",
"side",
"been",
"now",
"find",
"any",
"new",
"work",
"part",
"take",
"get",
"place",
"made",
"live",
"where",
"after",
"back",
"little",
"only",
"round",
"man",
"year",
"came",
"show",
"every",
"good",
"me",
"give",
"our",
"under",
"name",
"very",
"through",
"just",
"form",
"sentence",
"great",
"think",
"say",
"help",
"low",
"line",
"differ",
"turn",
"cause",
"much",
"mean",
"before",
"move",
"right",
"boy",
"old",
"too",
"same",
"tell",
"does",
"set",
"three",
"want",
"air",
"well",
"also",
"play",
"small",
"end",
"put",
"home",
"read",
"hand",
"port",
"large",
"spell",
"add",
"even",
"land",
"here",
"must",
"big",
"high",
"such",
"follow",
"act",
"why",
"ask",
"men",
"change",
"went",
"light",
"kind",
"off",
"need",
"house",
"picture",
"try",
"us",
"again",
"animal",
"point",
"mother",
"world",
"near",
"build",
"self",
"earth",
"father",
"head",
"stand",
"own",
"page",
"should",
"country",
"found",
"answer",
"school",
"grow",
"study",
"still",
"learn",
"plant",
"cover",
"food",
"sun",
"four",
"between",
"state",
"keep",
"eye",
"never",
"last",
"let",
"thought",
"city",
"tree",
"cross",
"farm",
"hard",
"start",
"might",
"story",
"saw",
"far",
"sea",
"draw",
"left",
"late",
"run",
"don't",
"while",
"press",
"close",
"night",
"real",
"life",
"few",
"north",
"open",
"seem",
"together",
"next",
"white",
"children",
"begin",
"got",
"walk",
"example",
"ease",
"paper",
"group",
"always",
"music",
"those",
"both",
"mark",
"often",
"letter",
"until",
"mile",
"river",
"car",
"feet",
"care",
"second",
"book",
"carry",
"took",
"science",
"eat",
"room",
"friend",
"began",
"idea",
"fish",
"mountain",
"stop",
"once",
"base",
"hear",
"horse",
"cut",
"sure",
"watch",
"color",
"face",
"wood",
"main",
"enough",
"plain",
"girl",
"usual",
"young",
"ready",
"above",
"ever",
"red",
"list",
"though",
"feel",
"talk",
"bird",
"soon",
"body",
"dog",
"family",
"direct",
"pose",
"leave",
"song",
"measure",
"door",
"product",
"black",
"short",
"numeral",
"class",
"wind",
"question",
"happen",
"complete",
"ship",
"area",
"half",
"rock",
"order",
"fire",
"south",
"problem",
"piece",
"told",
"knew",
"pass",
"since",
"top",
"whole",
"king",
"space",
"heard",
"best",
"hour",
"better",
"true",
"during",
"hundred",
"five",
"remember",
"step",
"early",
"hold",
"west",
"ground",
"interest",
"reach",
"fast",
"verb",
"sing",
"listen",
"six",
"table",
"travel",
"less",
"morning",
"ten",
"simple",
"several",
"vowel",
"toward",
"war",
"lay",
"against",
"pattern",
"slow",
"center",
"love",
"person",
"money",
"serve",
"appear",
"road",
"map",
"rain",
"rule",
"govern",
"pull",
"cold",
"notice",
"voice",
"unit",
"power",
"town",
"fine",
"certain",
"fly",
"fall",
"lead",
"cry",
"dark",
"machine",
"note",
"wait",
"plan",
"figure",
"star",
"box",
"noun",
"field",
"rest",
"correct",
"able",
"pound",
"done",
"beauty",
"drive",
"stood",
"contain",
"front",
"teach",
"week",
"final",
"gave",
"green",
"oh",
"quick",
"develop",
"ocean",
"warm",
"free",
"minute",
"strong",
"special",
"mind",
"behind",
"clear",
"tail",
"produce",
"fact",
"street",
"inch",
"multiply",
"nothing",
"course",
"stay",
"wheel",
"full",
"force",
"blue",
"object",
"decide",
"surface",
"deep",
"moon",
"island",
"foot",
"system",
"busy",
"test",
"record",
"boat",
"common",
"gold",
"possible",
"plane",
"stead",
"dry",
"wonder",
"laugh",
"thousand",
"ago",
"ran",
"check",
"game",
"shape",
"equate",
"hot",
"miss",
"brought",
"heat",
"snow",
"tire",
"bring",
"yes",
"distant",
"fill",
"east",
"paint",
"language",
"among",
"grand",
"ball",
"yet",
"wave",
"drop",
"heart",
"am",
"present",
"heavy",
"dance",
"engine",
"position",
"arm",
"wide",
"sail",
"material",
"size",
"vary",
"settle",
"speak",
"weight",
"general",
"ice",
"matter",
"circle",
"pair",
"include",
"divide",
"syllable",
"felt",
"perhaps",
"pick",
"sudden",
"count",
"square",
"reason",
"length",
"represent",
"art",
"subject",
"region",
"energy",
"hunt",
"probable",
"bed",
"brother",
"egg",
"ride",
"cell",
"believe",
"fraction",
"forest",
"sit",
"race",
"window",
"store",
"summer",
"train",
"sleep",
"prove",
"lone",
"leg",
"exercise",
"wall",
"catch",
"mount",
"wish",
"sky",
"board",
"joy",
"winter",
"sat",
"written",
"wild",
"instrument",
"kept",
"glass",
"grass",
"cow",
"job",
"edge",
"sign",
"visit",
"past",
"soft",
"fun",
"bright",
"gas",
"weather",
"month",
"million",
"bear",
"finish",
"happy",
"hope",
"flower",
"clothe",
"strange",
"gone",
"jump",
"baby",
"eight",
"village",
"meet",
"root",
"buy",
"raise",
"solve",
"metal",
"whether",
"push",
"seven",
"paragraph",
"third",
"shall",
"held",
"hair",
"describe",
"cook",
"floor",
"either",
"result",
"burn",
"hill",
"safe",
"cat",
"century",
"consider",
"type",
"law",
"bit",
"coast",
"copy",
"phrase",
"silent",
"tall",
"sand",
"soil",
"roll",
"temperature",
"finger",
"industry",
"value",
"fight",
"lie",
"beat",
"excite",
"natural",
"view",
"sense",
"ear",
"else",
"quite",
"broke",
"case",
"middle",
"kill",
"son",
"lake",
"moment",
"scale",
"loud",
"spring",
"observe",
"child",
"straight",
"consonant",
"nation",
"dictionary",
"milk",
"speed",
"method",
"organ",
"pay",
"age",
"section",
"dress",
"cloud",
"surprise",
"quiet",
"stone",
"tiny",
"climb",
"cool",
"design",
"poor",
"lot",
"experiment",
"bottom",
"key",
"iron",
"single",
"stick",
"flat",
"twenty",
"skin",
"smile",
"crease",
"hole",
"trade",
"melody",
"trip",
"office",
"receive",
"row",
"mouth",
"exact",
"symbol",
"die",
"least",
"trouble",
"shout",
"except",
"wrote",
"seed",
"tone",
"join",
"suggest",
"clean",
"break",
"lady",
"yard",
"rise",
"bad",
"blow",
"oil",
"blood",
"touch",
"grew",
"cent",
"mix",
"team",
"wire",
"cost",
"lost",
"brown",
"wear",
"garden",
"equal",
"sent",
"choose",
"fell",
"fit",
"flow",
"fair",
"bank",
"collect",
"save",
"control",
"decimal",
"gentle",
"woman",
"captain",
"practice",
"separate",
"difficult",
"doctor",
"please",
"protect",
"noon",
"whose",
"locate",
"ring",
"character",
"insect",
"caught",
"period",
"indicate",
"radio",
"spoke",
"atom",
"human",
"history",
"effect",
"electric",
"expect",
"crop",
"modern",
"element",
"hit",
"student",
"corner",
"party",
"supply",
"bone",
"rail",
"imagine",
"provide",
"agree",
"thus",
"capital",
"won't",
"chair",
"danger",
"fruit",
"rich",
"thick",
"soldier",
"process",
"operate",
"guess",
"necessary",
"sharp",
"wing",
"create",
"neighbor",
"wash",
"bat",
"rather",
"crowd",
"corn",
"compare",
"poem",
"string",
"bell",
"depend",
"meat",
"rub",
"tube",
"famous",
"dollar",
"stream",
"fear",
"sight",
"thin",
"triangle",
"planet",
"hurry",
"chief",
"colony",
"clock",
"mine",
"tie",
"enter",
"major",
"fresh",
"search",
"send",
"yellow",
"gun",
"allow",
"print",
"dead",
"spot",
"desert",
"suit",
"current",
"lift",
"rose",
"continue",
"block",
"chart",
"hat",
"sell",
"success",
"company",
"subtract",
"event",
"particular",
"deal",
"swim",
"term",
"opposite",
"wife",
"shoe",
"shoulder",
"spread",
"arrange",
"camp",
"invent",
"cotton",
"born",
"determine",
"quart",
"nine",
"truck",
"noise",
"level",
"chance",
"gather",
"shop",
"stretch",
"throw",
"shine",
"property",
"column",
"molecule",
"select",
"wrong",
"gray",
"repeat",
"require",
"broad",
"prepare",
"salt",
"nose",
"plural",
"anger",
"claim",
"continent",
"oxygen",
"sugar",
"death",
"pretty",
"skill",
"women",
"season",
"solution",
"magnet",
"silver",
"thank",
"branch",
"match",
"suffix",
"especially",
"fig",
"afraid",
"huge",
"sister",
"steel",
"discuss",
"forward",
"similar",
"guide",
"experience",
"score",
"apple",
"bought",
"led",
"pitch",
"coat",
"mass",
"card",
"band",
"rope",
"slip",
"win",
"dream",
"evening",
"condition",
"feed",
"tool",
"total",
"basic",
"smell",
"valley",
"nor",
"double",
"seat",
"arrive",
"master",
"track",
"parent",
"shore",
"division",
"sheet",
"substance",
"favor",
"connect",
"post",
"spend",
"chord",
"fat",
"glad",
"original",
"share",
"station",
"dad",
"bread",
"charge",
"proper",
"bar",
"offer",
"segment",
"slave",
"duck",
"instant",
"market",
"degree",
"populate",
"chick",
"dear",
"enemy",
"reply",
"drink",
"occur",
"support",
"speech",
"nature",
"range",
"steam",
"motion",
"path",
"liquid",
"log",
"meant",
"quotient",
"teeth",
"shell",
"neck",
],
) # https://gist.github.com/deekayen/4148741
| 17,039 | 15.871287 | 52 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/whitespace_count.py | from featuretools.primitives.standard.transform.natural_language.count_string import (
CountString,
)
class WhitespaceCount(CountString):
    """Calculates number of whitespaces in a string.

    Description:
        Given a list of strings, determine the number of space
        characters (" ") in each string. If a string is missing,
        return `NaN`.

    Examples:
        >>> x = ['', 'hi im ethan', 'multiple    spaces']
        >>> whitespace_count = WhitespaceCount()
        >>> whitespace_count(x).tolist()
        [0.0, 2.0, 4.0]
    """

    name = "whitespace_count"
    default_value = 0

    def __init__(self):
        # Count occurrences of the literal space character; all other
        # search options keep CountString's defaults (case-insensitive
        # is irrelevant for " ", and it is not treated as a regex).
        super().__init__(string=" ")
| 644 | 24.8 | 86 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/number_of_common_words.py | from string import punctuation
from typing import Iterable
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.natural_language.constants import (
DELIMITERS,
common_words_1000,
)
class NumberOfCommonWords(TransformPrimitive):
    """Determines the number of common words in a string.

    Description:
        Given a string, count how many of its words appear in a supplied
        word set. The word set defaults to
        nlp_primitives.constants.common_words_1000. Matching is case
        insensitive and leading/trailing punctuation is stripped from each
        word before lookup, so the word set should consist of only lower
        case strings. If a string is missing, return `NaN`.

    Args:
        word_set (set, optional): The set of words to look for in the string. These
            words should all be lower case strings.
        delimiters_regex (str, optional): The regular expression used to determine
            what separates words. Defaults to whitespace characters.

    Examples:
        >>> x = ['Hey! This is some natural language', 'bacon, cheesburger, AND, fries', 'I! Am. A; duck?']
        >>> number_of_common_words = NumberOfCommonWords(word_set={'and', 'some', 'am', 'a', 'the', 'is', 'i'})
        >>> number_of_common_words(x).tolist()
        [2, 1, 3]

        >>> x = ['Hey! This is. some. natural language']
        >>> number_of_common_words = NumberOfCommonWords(word_set={'hey', 'is', 'some'}, delimiters_regex="[ .]")
        >>> number_of_common_words(x).tolist()
        [3]
    """

    name = "number_of_common_words"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(
        self,
        word_set=common_words_1000,
        delimiters_regex=DELIMITERS,
    ):
        self.delimiters_regex = delimiters_regex
        self.word_set = word_set

    def get_function(self):
        def count_common_tokens(tokens):
            # Missing text survives str.split as a non-iterable value
            # (e.g. NaN), so report a nullable-integer NA for it.
            if not isinstance(tokens, Iterable):
                return pd.NA
            # word_set is assumed to be all lowercase; normalize each
            # token the same way before the membership test.
            return sum(
                1
                for token in tokens
                if token.lower().strip(punctuation) in self.word_set
            )

        def num_common_words(x):
            return x.str.split(self.delimiters_regex).apply(count_common_tokens)

        return num_common_words
| 2,663 | 35.493151 | 113 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/count_string.py | import re
import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
class CountString(TransformPrimitive):
    """Determines how many times a given string shows up in a text field.

    Args:
        string (str): The string to determine the count of. Defaults to
            the word "the".
        ignore_case (bool): Determines if case of the string should be
            considered or not. Defaults to true.
        ignore_non_alphanumeric (bool): Determines if non-alphanumeric
            characters should be used in the search. Defaults to False.
        is_regex (bool): Defines if the string argument is a regex or not.
            Defaults to False.
        match_whole_words_only (bool): Determines if whole words should be
            matched or not. For example searching for word `the` against
            `then, the, there` should only return `the` if this argument
            was True. Defaults to False.

    Examples:
        >>> count_string = CountString(string="the")
        >>> count_string(["The problem was difficult.",
        ...               "He was there.",
        ...               "The girl went to the store."]).tolist()
        [1.0, 1.0, 2.0]

        >>> # Match case of string
        >>> count_string_ignore_case = CountString(string="the", ignore_case=False)
        >>> count_string_ignore_case(["The problem was difficult.",
        ...                           "He was there.",
        ...                           "The girl went to the store."]).tolist()
        [0.0, 1.0, 1.0]

        >>> # Ignore non-alphanumeric characters in the search
        >>> count_string_ignore_non_alphanumeric = CountString(string="the",
        ...                                                    ignore_non_alphanumeric=True)
        >>> count_string_ignore_non_alphanumeric(["Th*/e problem was difficult.",
        ...                                       "He was there.",
        ...                                       "The girl went to the store."]).tolist()
        [1.0, 1.0, 2.0]

        >>> # Specify the string as a regex
        >>> count_string_is_regex = CountString(string="t.e", is_regex=True)
        >>> count_string_is_regex(["The problem was difficult.",
        ...                        "He was there.",
        ...                        "The girl went to the store."]).tolist()
        [1.0, 1.0, 2.0]

        >>> # Match whole words only
        >>> count_string_match_whole_words_only = CountString(string="the",
        ...                                                   match_whole_words_only=True)
        >>> count_string_match_whole_words_only(["The problem was difficult.",
        ...                                      "He was there.",
        ...                                      "The girl went to the store."]).tolist()
        [1.0, 0.0, 2.0]
    """

    name = "count_string"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})

    def __init__(
        self,
        string="the",
        ignore_case=True,
        ignore_non_alphanumeric=False,
        is_regex=False,
        match_whole_words_only=False,
    ):
        self.string = string
        self.ignore_case = ignore_case
        self.ignore_non_alphanumeric = ignore_non_alphanumeric
        self.match_whole_words_only = match_whole_words_only
        self.is_regex = is_regex

        # Build the search pattern once, up front. We don't want to strip
        # non-alphanumeric characters from a regex pattern:
        # ie h.ll. should match "hello" so we can't strip the dots to make hll
        if not is_regex:
            self.pattern = re.escape(self.process_text(string))
        else:
            self.pattern = string

        if ignore_case:
            # The searched text is lowercased in process_text, so the
            # pattern must be lowercased to match it.
            self.pattern = self.pattern.lower()

        # \b\b.*\b\b is the same as \b.*\b so we don't have to check if
        # the pattern is given to us as regex and if it already has leading
        # and trailing \b's
        if match_whole_words_only:
            self.pattern = "\\b" + self.pattern + "\\b"

    def process_text(self, text):
        """Normalize a text value per the configured options before searching."""
        if self.ignore_non_alphanumeric:
            text = re.sub("[^0-9a-zA-Z ]+", "", text)
        if self.ignore_case:
            text = text.lower()
        return text

    def get_function(self):
        def count_string(words):
            # Non-string values (e.g. NaN for missing text) count as NaN.
            # isinstance (rather than an exact type comparison) also
            # accepts str subclasses.
            if not isinstance(words, str):
                return np.nan
            words = self.process_text(words)
            return len(re.findall(self.pattern, words))

        return np.vectorize(count_string, otypes=[float])
| 4,705 | 42.574074 | 92 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/title_word_count.py | # -*- coding: utf-8 -*-
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.standard.transform.natural_language.count_string import (
CountString,
)
class TitleWordCount(CountString):
    """Determines the number of title words in a string.

    Description:
        Given list of strings, count the title words in each one. A
        title word is any word that begins with a capital letter, so
        words at the start of a sentence are counted too.

        If a string is missing, return `NaN`.

    Examples:
        >>> x = ['My favorite movie is Jaws.', 'this is a string', 'AAA']
        >>> title_word_count = TitleWordCount()
        >>> title_word_count(x).tolist()
        [2.0, 0.0, 1.0]
    """

    name = "title_word_count"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self):
        # A title word: one capital letter followed by any run of
        # non-whitespace characters. Case must be respected for the
        # [A-Z] class to mean anything, hence ignore_case=False.
        title_word_pattern = r"([A-Z][^\s]*)"
        super().__init__(
            string=title_word_pattern,
            is_regex=True,
            ignore_case=False,
        )
| 1,180 | 31.805556 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/median_word_length.py | from numpy import median
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.natural_language.constants import (
DELIMITERS,
)
class MedianWordLength(TransformPrimitive):
    """Determines the median word length.

    Description:
        Given list of strings, determine the median
        word length in each string. A word is defined as
        a series of any characters not separated by a delimiter.
        If a string is empty or `NaN`, return `NaN`.

    Args:
        delimiters_regex (str): Delimiters as a regex string for splitting text into words.
            Defaults to whitespace characters.

    Examples:
        >>> x = ['This is a test file', 'This is second line', 'third line $1,000', None]
        >>> median_word_length = MedianWordLength()
        >>> median_word_length(x).tolist()
        [4.0, 4.0, 5.0, nan]
    """

    name = "median_word_length"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self, delimiters_regex=DELIMITERS):
        self.delimiters_regex = delimiters_regex

    def get_function(self):
        def get_median(words):
            # Missing text comes through str.split as NaN (not a list);
            # map it explicitly to NaN in the output.
            if not isinstance(words, list):
                return float("nan")
            lengths = [len(word) for word in words if word]
            # An empty or all-delimiter string produces no words; return
            # NaN directly instead of warning on the median of an empty
            # sequence.
            if not lengths:
                return float("nan")
            return median(lengths)

        def median_word_length(x):
            words = x.str.split(self.delimiters_regex)
            return words.apply(get_median)

        return median_word_length
| 1,664 | 32.3 | 91 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.