repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/num_unique_separators.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
NATURAL_LANGUAGE_SEPARATORS = [" ", ".", ",", "!", "?", ";", "\n"]


class NumUniqueSeparators(TransformPrimitive):
    r"""Calculates the number of unique separators.

    Description:
        Given a string and a list of separators, determine
        the number of unique separators in each string. If a string
        is null (as determined by pd.isnull), return pd.NA.

    Args:
        separators (list, optional): A list of separator characters to count.
            If None, ``[" ", ".", ",", "!", "?", ";", "\n"]`` is used.

    Examples:
        >>> x = ["First. Line.", "This. is the second, line!", "notinlist@#$%^%&"]
        >>> num_unique_separators = NumUniqueSeparators([".", ",", "!"])
        >>> num_unique_separators(x).tolist()
        [1, 3, 0]
    """

    name = "num_unique_separators"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})

    def __init__(self, separators=None):
        # Use a None sentinel instead of defaulting directly to the module-level
        # list so instances never alias (and can never mutate) the shared default.
        if separators is None:
            separators = NATURAL_LANGUAGE_SEPARATORS
        # Copy so later mutation of the caller's list cannot change this primitive.
        self.separators = list(separators)

    def get_function(self):
        # Build the separator set once, not once per row inside apply().
        separator_set = set(self.separators)

        def count_unique_separator(s):
            if pd.isnull(s):
                return pd.NA
            # set.intersection accepts any iterable, including a string of chars.
            return len(separator_set.intersection(s))

        def get_separator_count(column):
            return column.apply(count_unique_separator)

        return get_separator_count
| 1,657 | 34.276596 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/num_words.py | import re
from string import punctuation
from typing import Optional
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.natural_language.constants import (
DELIMITERS,
)
from featuretools.utils.gen_utils import Library
class NumWords(TransformPrimitive):
    """Determines the number of words in a string. Words are sequences of characters
    delimited by whitespace.

    Examples:
        >>> num_words = NumWords()
        >>> num_words(['This is a string',
        ...            'Two words',
        ...            'no-spaces',
        ...            'Also works with sentences. Second sentence!']).tolist()
        [4, 2, 1, 6]
    """

    name = "num_words"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the number of words in {}"

    def get_function(self):
        def _count_words(text: Optional[str]):
            """Count the words in one element; pd.NA for null input."""
            if pd.isna(text):
                return pd.NA
            tokens = re.split(DELIMITERS, text)
            # Tokens consisting solely of punctuation do not count as words.
            return len([tok for tok in tokens if tok.strip(punctuation)])

        def word_counter(array):
            return array.apply(_count_words)

        return word_counter
| 1,655 | 32.795918 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/num_characters.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class NumCharacters(TransformPrimitive):
    """Calculates the number of characters in a given string, including whitespace and punctuation.

    Description:
        Returns the number of characters in a string, i.e. the string's length.

    Examples:
        >>> num_characters = NumCharacters()
        >>> num_characters(['This is a string',
        ...                 'second item',
        ...                 'final1']).tolist()
        [16, 11, 6]
    """

    name = "num_characters"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the number of characters in {}"

    def get_function(self):
        def _char_count(text):
            # Null entries propagate as pd.NA rather than raising on len(None).
            return pd.NA if pd.isna(text) else len(text)

        def character_counter(array):
            return array.apply(_char_count)

        return character_counter
| 1,413 | 34.35 | 99 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/__init__.py | from featuretools.primitives.standard.transform.natural_language.count_string import (
CountString,
)
from featuretools.primitives.standard.transform.natural_language.mean_characters_per_word import (
MeanCharactersPerWord,
)
from featuretools.primitives.standard.transform.natural_language.median_word_length import (
MedianWordLength,
)
from featuretools.primitives.standard.transform.natural_language.num_characters import (
NumCharacters,
)
from featuretools.primitives.standard.transform.natural_language.num_unique_separators import (
NumUniqueSeparators,
)
from featuretools.primitives.standard.transform.natural_language.num_words import (
NumWords,
)
from featuretools.primitives.standard.transform.natural_language.number_of_common_words import (
NumberOfCommonWords,
)
from featuretools.primitives.standard.transform.natural_language.number_of_hashtags import (
NumberOfHashtags,
)
from featuretools.primitives.standard.transform.natural_language.number_of_mentions import (
NumberOfMentions,
)
from featuretools.primitives.standard.transform.natural_language.number_of_unique_words import (
NumberOfUniqueWords,
)
from featuretools.primitives.standard.transform.natural_language.number_of_words_in_quotes import (
NumberOfWordsInQuotes,
)
from featuretools.primitives.standard.transform.natural_language.punctuation_count import (
PunctuationCount,
)
from featuretools.primitives.standard.transform.natural_language.title_word_count import (
TitleWordCount,
)
from featuretools.primitives.standard.transform.natural_language.total_word_length import (
TotalWordLength,
)
from featuretools.primitives.standard.transform.natural_language.upper_case_count import (
UpperCaseCount,
)
from featuretools.primitives.standard.transform.natural_language.upper_case_word_count import (
UpperCaseWordCount,
)
from featuretools.primitives.standard.transform.natural_language.whitespace_count import (
WhitespaceCount,
)
| 1,988 | 37.25 | 99 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/number_of_mentions.py | import re
import string
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.standard.transform.natural_language.count_string import (
CountString,
)
class NumberOfMentions(CountString):
    """Determines the number of mentions in a string.

    Description:
        Given a list of strings, determine the number of mentions
        in each string.

        A mention is defined as a string that meets the following criteria:

        - Starts with a '@' character, followed by a sequence of alphanumeric characters
        - Present at the start of a string or after whitespace
        - Terminated by the end of the string, a whitespace, or a punctuation character other than '@'

          - e.g. The string '@yes-no' contains a valid mention ('@yes')
          - e.g. The string '@yes@' does not contain a valid mention

        This implementation handles Unicode characters.

        This implementation does not impose any character limit on mentions.

        If a string is missing, return `NaN`.

    Examples:
        >>> x = ['@user1 @user2', 'this is a string', '@@@__user1@1and_0@expression']
        >>> number_of_mentions = NumberOfMentions()
        >>> number_of_mentions(x).tolist()
        [2.0, 0.0, 0.0]
    """

    name = "number_of_mentions"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self):
        # Sort before joining so the generated pattern string is deterministic
        # across processes (bare set iteration order varies with string hash
        # randomization). Character order is irrelevant inside a regex class.
        specials_minus_at = "".join(sorted(set(string.punctuation) - {"@"}))
        specials_minus_at = re.escape(specials_minus_at)
        # '@' at string start or after whitespace, followed by word characters,
        # terminated by whitespace, end of string, or punctuation other than '@'.
        pattern = rf"((^@)|(\s+@))(\w+)(?=\s|$|[{specials_minus_at}])"
        super().__init__(string=pattern, is_regex=True, ignore_case=False)
| 1,866 | 37.102041 | 106 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/punctuation_count.py | # -*- coding: utf-8 -*-
import re
import string
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.standard.transform.natural_language.count_string import (
CountString,
)
class PunctuationCount(CountString):
    """Determines number of punctuation characters in a string.

    Description:
        Given list of strings, determine the number of punctuation
        characters in each string. Looks for any of the following:

        !"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~

        If a string is missing, return `NaN`.

    Examples:
        >>> x = ['This is a test file.', 'This is second line', 'third line: $1,000']
        >>> punctuation_count = PunctuationCount()
        >>> punctuation_count(x).tolist()
        [1.0, 0.0, 3.0]
    """

    name = "punctuation_count"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self):
        # Alternation of every individually-escaped punctuation character,
        # handed to CountString as a regex, e.g. "(\!|\"|\#|...)".
        escaped_chars = [re.escape(ch) for ch in string.punctuation]
        pattern = "({})".format("|".join(escaped_chars))
        super().__init__(string=pattern, is_regex=True, ignore_case=False)
| 1,230 | 29.775 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/number_of_words_in_quotes.py | import re
from string import punctuation
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
from featuretools.primitives.standard.transform.natural_language.constants import (
DELIMITERS,
)
class NumberOfWordsInQuotes(TransformPrimitive):
    """Determines the number of words in quotes in a string.

    Description:
        Given a list of strings, determine the number of words in quotes
        in each string.

        This implementation handles Unicode characters.

        If a string is missing, return `NaN`.

    Args:
        quote_type (str, optional): Specifies what type of quotation marks to match.
            Argument "single" matches on only single quotes (' ').
            Argument "double" matches words between double quotes (" ").
            Argument "both" matches words between either type of quotes.
            Defaults to "both".

    Examples:
        >>> x = ['"python" java prolog "Diffie-Hellman" "4.99"', "Reach me at 'user@email.com'", "'Here's an interesting example!'"]
        >>> number_of_words_in_quotes = NumberOfWordsInQuotes()
        >>> number_of_words_in_quotes(x).tolist()
        [3, 1, 4]
    """

    name = "number_of_words_in_quotes"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0

    def __init__(self, quote_type="both"):
        if quote_type not in ("both", "single", "double"):
            raise ValueError(
                f"{quote_type} is not a valid quote_type. Specify 'both', 'single', or 'double'",
            )
        self.quote_type = quote_type
        # Non-greedy spans delimited by quotes preceded by a non-word position
        # and not followed by a word character (so apostrophes inside words,
        # e.g. "Here's", are not treated as closing quotes).
        in_double = r'((^|\W)"(.)*?"(?!\w))'
        in_single = r"((^|\W)'(.)*?'(?!\w))"
        if quote_type == "double":
            self.regex = in_double
        elif quote_type == "single":
            self.regex = in_single
        else:
            self.regex = f"({in_single}|{in_double})"

    def get_function(self):
        def count_words_in_quotes(text):
            if pd.isnull(text):
                return pd.NA
            total = 0
            # DOTALL lets a quoted span cross newlines.
            for match in re.findall(self.regex, text, re.DOTALL):
                quoted_span = match[0]
                for word in re.split(DELIMITERS, quoted_span):
                    # A token consisting solely of punctuation/spaces is not a word.
                    if word.strip(punctuation + " "):
                        total += 1
            return total

        def num_words_in_quotes(array):
            return array.apply(count_words_in_quotes).astype("Int64")

        return num_words_in_quotes
| 2,781 | 35.12987 | 133 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/natural_language/mean_characters_per_word.py | # -*- coding: utf-8 -*-
import re
import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double, NaturalLanguage
from featuretools.primitives.base import TransformPrimitive
PUNCTUATION = re.escape("!,.:;?")
END_OF_SENTENCE_PUNCT_RE = re.compile(
rf"[{PUNCTUATION}]+$|[{PUNCTUATION}]+ |[{PUNCTUATION}]+\n",
)
def _mean_characters_per_word(value):
if pd.isna(value):
return np.nan
# replace end-of-sentence punctuation with space
value = END_OF_SENTENCE_PUNCT_RE.sub(" ", value)
words = value.split()
character_count = [len(x) for x in words]
return np.mean(character_count) if len(character_count) else 0
class MeanCharactersPerWord(TransformPrimitive):
    """Determines the mean number of characters per word.

    Description:
        Given list of strings, determine the mean number of
        characters per word in each string. A word is defined as
        a series of any characters not separated by white space.
        End-of-sentence punctuation is removed before counting.
        If a string is `NaN`, return `NaN`; if it contains no
        words, return 0.

    Examples:
        >>> x = ['This is a test file', 'This is second line', 'third line $1,000']
        >>> mean_characters_per_word = MeanCharactersPerWord()
        >>> mean_characters_per_word(x).tolist()
        [3.0, 4.0, 5.0]
    """

    name = "mean_characters_per_word"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    default_value = 0

    def get_function(self):
        def mean_characters_per_word(series):
            # Element-wise delegation to the module-level helper.
            return series.apply(_mean_characters_per_word)

        return mean_characters_per_word
| 1,754 | 29.789474 | 83 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/rolling_outlier_count.py | import numpy as np
import pandas as pd
from woodwork import init_series
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
apply_rolling_agg_to_series,
)
class RollingOutlierCount(TransformPrimitive):
    """Determines how many values are outliers over a given window.

    Description:
        Given a list of numbers and a corresponding list of
        datetimes, return a rolling count of outliers within the numeric values,
        starting at the row `gap` rows away from the current row and looking backward
        over the specified window (by `window_length` and `gap`). Values are deemed
        outliers using the IQR method, computed over the whole series.

        Input datetimes should be monotonic.

    Args:
        window_length (int, string, optional): Specifies the amount of data included in each window.
            If an integer is provided, it will correspond to a number of rows. For data with a uniform sampling
            frequency, for example of one day, the window_length will correspond to a period of time, in this case,
            7 days for a window_length of 7.
            If a string is provided, it must be one of Pandas' offset alias strings ('1D', '1H', etc),
            and it will indicate a length of time that each window should span.
            The list of available offset aliases can be found at
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
            Defaults to 3.
        gap (int, string, optional): Specifies a gap backwards from each instance before the
            window of usable data begins. If an integer is provided, it will correspond to a number of rows.
            If a string is provided, it must be one of Pandas' offset alias strings ('1D', '1H', etc),
            and it will indicate a length of time between a target instance and the beginning of its window.
            Defaults to 1, which excludes the target instance from the window.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Can only be as large as window_length when window_length is an integer.
            When window_length is an offset alias string, this limitation does not exist, but care should be taken
            to not choose a min_periods that will always be larger than the number of observations in a window.
            Defaults to 0, which imposes no minimum.

    Note:
        Only offset aliases with fixed frequencies can be used when defining gap and window_length.
        This means that aliases such as `M` or `W` cannot be used, as they can indicate different
        numbers of days. ('M', because different months are different numbers of days;
        'W' because week will indicate a certain day of the week, like W-Wed, so that will
        indicate a different number of days depending on the anchoring date.)

    Note:
        When using an offset alias to define `gap`, an offset alias must also be used to define `window_length`.
        This limitation does not exist when using an offset alias to define `window_length`. In fact,
        if the data has a uniform sampling frequency, it is preferable to use a numeric `gap` as it is more
        efficient.

    Examples:
        >>> import pandas as pd
        >>> rolling_outlier_count = RollingOutlierCount(window_length=4)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=6)
        >>> rolling_outlier_count(times, [0, 0, 0, 0, 10, 0]).tolist()
        [nan, 0.0, 0.0, 0.0, 0.0, 1.0]

        We can also control the gap before the rolling calculation.

        >>> import pandas as pd
        >>> rolling_outlier_count = RollingOutlierCount(window_length=4, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=6)
        >>> rolling_outlier_count(times, [0, 0, 0, 0, 10, 0]).tolist()
        [0.0, 0.0, 0.0, 0.0, 1.0, 1.0]

        We can also control the minimum number of periods required for the rolling calculation.

        >>> import pandas as pd
        >>> rolling_outlier_count = RollingOutlierCount(window_length=4, min_periods=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=6)
        >>> rolling_outlier_count(times, [0, 0, 0, 0, 10, 0]).tolist()
        [nan, nan, nan, 0.0, 0.0, 1.0]

        We can also set the window_length and gap using offset alias strings.

        >>> import pandas as pd
        >>> rolling_outlier_count = RollingOutlierCount(window_length='4min', gap='1min')
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=6)
        >>> rolling_outlier_count(times, [0, 0, 0, 0, 10, 0]).tolist()
        [nan, 0.0, 0.0, 0.0, 0.0, 1.0]
    """

    name = "rolling_outlier_count"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, window_length=3, gap=1, min_periods=0):
        self.window_length = window_length
        self.gap = gap
        self.min_periods = min_periods

    def get_outliers_count(self, numeric_series):
        """Count IQR-based outliers in ``numeric_series``; np.nan if it has no non-null values."""
        # We know the column is numeric, so use the Double logical type in case Woodwork's
        # type inference could not infer a numeric type
        if not len(numeric_series.dropna()):
            return np.nan
        if numeric_series.ww.schema is None:
            numeric_series = init_series(numeric_series, logical_type="Double")
        # Woodwork's box_plot_dict supplies the values outside the IQR bounds.
        box_plot_info = numeric_series.ww.box_plot_dict()
        return len(box_plot_info["high_values"]) + len(box_plot_info["low_values"])

    def get_function(self):
        def rolling_outlier_count(datetime, numeric):
            # Index the numeric values by time so the rolling window is time-aware.
            x = pd.Series(numeric.values, index=datetime.values)
            return apply_rolling_agg_to_series(
                series=x,
                agg_func=self.get_outliers_count,
                window_length=self.window_length,
                gap=self.gap,
                min_periods=self.min_periods,
                ignore_window_nans=False,
            )

        return rolling_outlier_count
| 6,434 | 51.317073 | 115 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/lag.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools.primitives.base import TransformPrimitive
class Lag(TransformPrimitive):
    """Shifts an array of values by a specified number of periods.

    Args:
        periods (int): The number of periods by which to shift the input.
            Default is 1. Periods correspond to rows.

    Examples:
        >>> lag = Lag()
        >>> lag([1, 2, 3, 4, 5], pd.Series(pd.date_range(start="2020-01-01", periods=5, freq='D'))).tolist()
        [nan, 1.0, 2.0, 3.0, 4.0]

        You can specify the number of periods to shift the values

        >>> lag_periods = Lag(periods=3)
        >>> lag_periods([True, False, False, True, True], pd.Series(pd.date_range(start="2020-01-01", periods=5, freq='D'))).tolist()
        [nan, nan, nan, True, False]
    """

    # Note: with pandas 1.5.0, using Lag with a string input will result in `None` values
    # being introduced instead of `nan` values that were present in previous versions.
    # All missing values will be replaced by `np.nan` (for Double) or `pd.NA` (all other types)
    # once Woodwork is initialized on the feature matrix.
    name = "lag"
    # Each accepted signature pairs one value column with a time index column.
    input_types = [
        [value_schema, ColumnSchema(semantic_tags={"time_index"})]
        for value_schema in (
            ColumnSchema(semantic_tags={"category"}),
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(logical_type=Boolean),
            ColumnSchema(logical_type=BooleanNullable),
        )
    ]
    return_type = None
    uses_full_dataframe = True

    def __init__(self, periods=1):
        self.periods = periods

    def get_function(self):
        def lag(input_col, time_index):
            # Re-index the values by time so the shift happens in time order.
            indexed = pd.Series(input_col.values, index=time_index.values)
            shifted = indexed.shift(periods=self.periods, fill_value=None)
            return shifted.values

        return lag
| 2,144 | 33.596774 | 133 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/rolling_trend.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
apply_rolling_agg_to_series,
)
from featuretools.utils import calculate_trend
class RollingTrend(TransformPrimitive):
    """Calculates the trend of a given window of entries of a column over time.

    Description:
        Given a list of numbers and a corresponding list of
        datetimes, return a rolling slope of the linear trend
        of values, starting at the row `gap` rows away from the current row and looking backward
        over the specified time window (by `window_length` and `gap`).

        Input datetimes should be monotonic.

    Args:
        window_length (int, string, optional): Specifies the amount of data included in each window.
            If an integer is provided, it will correspond to a number of rows. For data with a uniform sampling frequency,
            for example of one day, the window_length will correspond to a period of time, in this case,
            7 days for a window_length of 7.
            If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
            and it will indicate a length of time that each window should span.
            The list of available offset aliases can be found at
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
            Defaults to 3.
        gap (int, string, optional): Specifies a gap backwards from each instance before the
            window of usable data begins. If an integer is provided, it will correspond to a number of rows.
            If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
            and it will indicate a length of time between a target instance and the beginning of its window.
            Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Can only be as large as window_length when window_length is an integer.
            When window_length is an offset alias string, this limitation does not exist, but care should be taken
            to not choose a min_periods that will always be larger than the number of observations in a window.
            Defaults to 0, which imposes no minimum.

    Examples:
        >>> import pandas as pd
        >>> rolling_trend = RollingTrend()
        >>> times = pd.date_range(start="2019-01-01", freq="1D", periods=10)
        >>> rolling_trend(times, [1, 2, 4, 8, 16, 24, 48, 96, 192, 384]).tolist()
        [nan, nan, nan, 1.4999999999999998, 2.9999999999999996, 5.999999999999999, 7.999999999999999, 16.0, 36.0, 72.0]

        We can also control the gap before the rolling calculation.

        >>> rolling_trend = RollingTrend(gap=0)
        >>> rolling_trend(times, [1, 2, 4, 8, 16, 24, 48, 96, 192, 384]).tolist()
        [nan, nan, 1.4999999999999998, 2.9999999999999996, 5.999999999999999, 7.999999999999999, 16.0, 36.0, 72.0, 144.0]

        We can also control the minimum number of periods required for the rolling calculation.

        >>> rolling_trend = RollingTrend(window_length=4, min_periods=4, gap=0)
        >>> rolling_trend(times, [1, 2, 4, 8, 16, 24, 48, 96, 192, 384]).tolist()
        [nan, nan, nan, 2.299999999999999, 4.599999999999998, 6.799999999999996, 12.799999999999992, 26.399999999999984, 55.19999999999997, 110.39999999999993]

        We can also set the window_length and gap using offset alias strings.

        >>> rolling_trend = RollingTrend(window_length="4D", gap="1D")
        >>> rolling_trend(times, [1, 2, 4, 8, 16, 24, 48, 96, 192, 384]).tolist()
        [nan, nan, nan, 1.4999999999999998, 2.299999999999999, 4.599999999999998, 6.799999999999996, 12.799999999999992, 26.399999999999984, 55.19999999999997]
    """

    name = "rolling_trend"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, window_length=3, gap=1, min_periods=0):
        self.window_length = window_length
        self.gap = gap
        self.min_periods = min_periods

    def get_function(self):
        def rolling_trend(datetime, numeric):
            # Index the numeric values by time so the rolling window is time-aware.
            x = pd.Series(numeric.values, index=datetime.values)
            # calculate_trend fits a line to each window and returns its slope.
            return apply_rolling_agg_to_series(
                x,
                calculate_trend,
                self.window_length,
                self.gap,
                self.min_periods,
            )

        return rolling_trend
| 4,850 | 50.063158 | 159 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/utils.py | from typing import Callable, Optional, Union
import numpy as np
import pandas as pd
from pandas import Series
from pandas.core.window.rolling import Rolling
from pandas.tseries.frequencies import to_offset
def roll_series_with_gap(
    series: Series,
    window_length: Union[int, str],
    gap: Union[int, str],
    min_periods: int,
) -> Rolling:
    """Create a Rolling object over ``series`` whose windows sit ``gap`` behind each row.

    Args:
        series (Series): The series over which rolling windows will be created. The series
            must have numeric values and a DatetimeIndex.
        window_length (int, string): Specifies the amount of data included in each window.
            An integer corresponds to a number of rows; a string must be one of pandas'
            offset alias strings ('1D', '1H', etc) and spans that length of time. See
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
        gap (int, string, optional): Specifies a gap backwards from each instance before the
            window of usable data begins. An integer corresponds to a number of rows; an offset
            alias string indicates a length of time between a target instance and the beginning
            of its window. Defaults to 0, which includes the target instance in the window.
        min_periods (int, optional): Minimum number of observations required for performing
            calculations over the window. Defaults to 1.

    Returns:
        pandas.core.window.rolling.Rolling: The Rolling object for the series passed in.
    """
    _check_window_length(window_length)
    _check_gap(window_length, gap)

    if isinstance(gap, str):
        # Offset-alias gap: widen the window by the gap span now; the gap rows are
        # trimmed off later so the primitive function sees only the true window.
        effective_window = to_offset(window_length) + to_offset(gap)
    else:
        effective_window = window_length
        if gap > 0:
            # A numeric gap shifts every row by the same count, so it can be
            # applied up front with a single shift.
            series = series.shift(gap)

    return series.rolling(effective_window, min_periods)
def _get_rolled_series_without_gap(window: Series, gap_offset: str) -> Series:
"""Applies the gap offset_string to the rolled window, returning a window
that is the correct length of time away from the original instance.
Args:
window (Series): A rolling window that includes both the window length and gap spans of time.
gap_offset (string): The pandas offset alias that determines how much time at the end of the window
should be removed.
Returns:
Series: The window with gap rows removed
"""
if not len(window):
return window
window_start_date = window.index[0]
window_end_date = window.index[-1]
gap_bound = window_end_date - to_offset(gap_offset)
# If the gap is larger than the series, no rows are left in the window
if gap_bound < window_start_date:
return Series(dtype="float64")
# Only return the rows that are within the offset's bounds
return window[window.index <= gap_bound]
def apply_roll_with_offset_gap(
    window: Series,
    gap_offset: str,
    reducer_fn: Callable[[Series], float],
    min_periods: int,
) -> float:
    """Trim the gap span off of ``window``, then reduce what remains to one value.

    Args:
        window (Series): A rolling window that includes both the window length and gap
            spans of time.
        gap_offset (string): The pandas offset alias that determines how much time at the
            end of the window should be removed.
        reducer_fn (callable[Series -> float]): The function applied to the trimmed window
            to produce the feature value.
        min_periods (int): Minimum number of observations required for performing
            calculations over the window. None is treated as 1.

    Returns:
        float: The aggregate value to be used as a feature value; np.nan when the trimmed
        window is empty or too small.
    """
    usable = _get_rolled_series_without_gap(window, gap_offset)

    required = 1 if min_periods is None else min_periods
    if not len(usable) or len(usable) < required:
        return np.nan

    return reducer_fn(usable)
def _check_window_length(window_length: Union[int, str]) -> None:
# Window length must either be a valid offset alias
if isinstance(window_length, str):
try:
to_offset(window_length)
except ValueError:
raise ValueError(
f"Cannot roll series. The specified window length, {window_length}, is not a valid offset alias.",
)
# Or an integer greater than zero
elif isinstance(window_length, int):
if window_length <= 0:
raise ValueError("Window length must be greater than zero.")
else:
raise TypeError("Window length must be either an offset string or an integer.")
def _check_gap(window_length: Union[int, str], gap: Union[int, str]) -> None:
# Gap must either be a valid offset string that also has an offset string window length
if isinstance(gap, str):
if not isinstance(window_length, str):
raise TypeError(
f"Cannot roll series with offset gap, {gap}, and numeric window length, {window_length}. "
"If an offset alias is used for gap, the window length must also be defined as an offset alias. "
"Please either change gap to be numeric or change window length to be an offset alias.",
)
try:
to_offset(gap)
except ValueError:
raise ValueError(
f"Cannot roll series. The specified gap, {gap}, is not a valid offset alias.",
)
# Or an integer greater than or equal to zero
elif isinstance(gap, int):
if gap < 0:
raise ValueError("Gap must be greater than or equal to zero.")
else:
raise TypeError("Gap must be either an offset string or an integer.")
def apply_rolling_agg_to_series(
    series: Series,
    agg_func: Callable[[Series], float],
    window_length: Union[int, str],
    gap: Union[int, str] = 0,
    min_periods: int = 1,
    ignore_window_nans: bool = False,
) -> np.ndarray:
    """Apply an aggregation function over rolling windows of a series.

    Args:
        series (Series): The series over which rolling windows will be created.
            Must have numeric values and a DatetimeIndex.
        agg_func (callable[Series -> float]): The aggregation function to apply
            to each rolled window.
        window_length (int, string): The amount of data included in each window.
            An integer corresponds to a number of rows; an offset alias string
            ('1D', '1H', etc) corresponds to a span of time. The available
            offset aliases are listed at
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
        gap (int, string, optional): A gap backwards from each instance before
            its window of usable data begins - a number of rows if an integer,
            or a span of time if an offset alias string. Defaults to 0, which
            includes the target instance in its own window.
        min_periods (int, optional): Minimum number of observations required for
            performing calculations over the window. Can only be as large as
            window_length when window_length is an integer. Defaults to 1.
        ignore_window_nans (bool, optional): Whether NaNs in the rolling window
            should be included in the rolling calculation. When True, every
            value computed from a window that is partial (because of the NaNs
            inserted to realize the gap) is replaced with NaN. Defaults to False.

    Returns:
        numpy.ndarray: The array of rolling calculated values.

    Note:
        A numeric gap is realized by inserting NaNs at the start of the series.
        Some aggregations (e.g. ``count``) treat those NaNs as countable
        periods, which can make partial windows look complete; primitives that
        rely on such aggregations should pass ``ignore_window_nans=True``.

    Note:
        Only offset aliases with fixed frequencies can be used for gap and
        window_length ('M' or 'W', which can stand for varying numbers of days,
        are not allowed). An offset alias gap also requires an offset alias
        window_length; when the data is uniformly sampled, a numeric gap is
        preferable as it is more efficient.
    """
    rolled = roll_series_with_gap(series, window_length, gap, min_periods)
    if isinstance(gap, str):
        # Offset gaps cannot be realized by shifting rows, so each window is
        # trimmed individually before the aggregation is applied.
        return rolled.apply(
            apply_roll_with_offset_gap,
            args=(gap, agg_func, min_periods),
        ).values
    aggregated = rolled.apply(agg_func)
    if ignore_window_nans:
        # A min_periods of 0 or None behaves the same as 1, so the first
        # `gap` values (plus any extra required periods) are partial windows.
        num_nans = gap if not min_periods else min_periods - 1 + gap
        aggregated.iloc[range(num_nans)] = np.nan
    return aggregated.values
def _apply_gap_for_expanding_primitives(
x: Union[Series, pd.Index],
gap: Union[int, str],
) -> Optional[Series]:
if not isinstance(gap, int):
raise TypeError(
"String offsets are not supported for the gap parameter in Expanding primitives",
)
if isinstance(x, pd.Index):
return x.to_series().shift(gap)
return x.shift(gap)
| 12,762 | 49.248031 | 136 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/rolling_std.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
apply_rolling_agg_to_series,
)
class RollingSTD(TransformPrimitive):
    """Calculates the standard deviation of entries over a given window.
    Description:
        For each row, computes the sample standard deviation of the numeric
        values that fall inside a backward-looking window over the
        corresponding datetimes. The window ends `gap` rows (or the `gap`
        offset) before the current row and covers `window_length` rows (or
        the `window_length` offset) of data. Input datetimes should be
        monotonic.
    Args:
        window_length (int, string, optional): The amount of data included in each window.
            An integer corresponds to a number of rows; for data sampled at a uniform
            frequency of, say, one day, a window_length of 7 therefore covers 7 days.
            A string must be one of pandas' offset alias strings ('1D', '1H', etc)
            and gives the length of time each window spans. The available offset
            aliases are listed at
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
            Defaults to 3.
        gap (int, string, optional): A gap backwards from each instance before its
            window of usable data begins. An integer corresponds to a number of rows.
            A string must be one of pandas' offset alias strings ('1D', '1H', etc)
            and gives the length of time between a target instance and the beginning
            of its window. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for
            performing calculations over the window. Can be no larger than
            window_length when window_length is an integer. When window_length is an
            offset alias string this limit does not apply, but take care not to pick
            a min_periods that will always exceed the number of observations in a
            window. Defaults to 1.
    Note:
        Only offset aliases with fixed frequencies may be used for gap and
        window_length. Aliases such as `M` or `W` are rejected because they can
        stand for varying numbers of days ('M' since months differ in length;
        'W' since it anchors to a weekday, like W-Wed, so the span depends on
        the anchoring date).
    Note:
        An offset alias `gap` requires that `window_length` also be an offset
        alias. The reverse is not true: an offset alias `window_length` may be
        combined with a numeric `gap`, which is preferable for uniformly
        sampled data since it is more efficient.
    Examples:
        >>> import pandas as pd
        >>> rolling_std = RollingSTD(window_length=4)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_std(times, [4, 3, 2, 1, 0]).tolist()
        [nan, nan, 0.7071067811865476, 1.0, 1.2909944487358056]
        We can also control the gap before the rolling calculation.
        >>> import pandas as pd
        >>> rolling_std = RollingSTD(window_length=4, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_std(times, [4, 3, 2, 1, 0]).tolist()
        [nan, 0.7071067811865476, 1.0, 1.2909944487358056, 1.2909944487358056]
        We can also control the minimum number of periods required for the rolling calculation.
        >>> import pandas as pd
        >>> rolling_std = RollingSTD(window_length=4, min_periods=4, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_std(times, [4, 3, 2, 1, 0]).tolist()
        [nan, nan, nan, 1.2909944487358056, 1.2909944487358056]
        We can also set the window_length and gap using offset alias strings.
        >>> import pandas as pd
        >>> rolling_std = RollingSTD(window_length='4min', gap='1min')
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_std(times, [4, 3, 2, 1, 0]).tolist()
        [nan, nan, 0.7071067811865476, 1.0, 1.2909944487358056]
    """
    name = "rolling_std"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True
    def __init__(self, window_length=3, gap=1, min_periods=1):
        self.window_length = window_length
        self.gap = gap
        self.min_periods = min_periods
    def get_function(self):
        def rolling_std(datetime, numeric):
            # Re-index the numeric values by their datetimes so windows can
            # be built over either rows or time spans.
            indexed = pd.Series(numeric.values, index=datetime.values)
            return apply_rolling_agg_to_series(
                indexed,
                lambda window: window.std(),
                self.window_length,
                self.gap,
                self.min_periods,
            )
        return rolling_std
| 5,581 | 49.288288 | 122 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/rolling_min.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
apply_rolling_agg_to_series,
)
class RollingMin(TransformPrimitive):
    """Determines the minimum of entries over a given window.
    Description:
        For each row, computes the minimum of the numeric values that fall
        inside a backward-looking window over the corresponding datetimes.
        The window ends `gap` rows (or the `gap` offset) before the current
        row and covers `window_length` rows (or the `window_length` offset)
        of data. Input datetimes should be monotonic.
    Args:
        window_length (int, string, optional): The amount of data included in each window.
            An integer corresponds to a number of rows; for data sampled at a uniform
            frequency of, say, one day, a window_length of 7 therefore covers 7 days.
            A string must be one of pandas' offset alias strings ('1D', '1H', etc)
            and gives the length of time each window spans. The available offset
            aliases are listed at
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
            Defaults to 3.
        gap (int, string, optional): A gap backwards from each instance before its
            window of usable data begins. An integer corresponds to a number of rows.
            A string must be one of pandas' offset alias strings ('1D', '1H', etc)
            and gives the length of time between a target instance and the beginning
            of its window. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for
            performing calculations over the window. Can be no larger than
            window_length when window_length is an integer. When window_length is an
            offset alias string this limit does not apply, but take care not to pick
            a min_periods that will always exceed the number of observations in a
            window. Defaults to 1.
    Note:
        Only offset aliases with fixed frequencies may be used for gap and
        window_length. Aliases such as `M` or `W` are rejected because they can
        stand for varying numbers of days ('M' since months differ in length;
        'W' since it anchors to a weekday, like W-Wed, so the span depends on
        the anchoring date).
    Note:
        An offset alias `gap` requires that `window_length` also be an offset
        alias. The reverse is not true: an offset alias `window_length` may be
        combined with a numeric `gap`, which is preferable for uniformly
        sampled data since it is more efficient.
    Examples:
        >>> import pandas as pd
        >>> rolling_min = RollingMin(window_length=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_min(times, [4, 3, 2, 1, 0]).tolist()
        [nan, 4.0, 3.0, 2.0, 1.0]
        We can also control the gap before the rolling calculation.
        >>> import pandas as pd
        >>> rolling_min = RollingMin(window_length=3, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_min(times, [4, 3, 2, 1, 0]).tolist()
        [4.0, 3.0, 2.0, 1.0, 0.0]
        We can also control the minimum number of periods required for the rolling calculation.
        >>> import pandas as pd
        >>> rolling_min = RollingMin(window_length=3, min_periods=3, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_min(times, [4, 3, 2, 1, 0]).tolist()
        [nan, nan, 2.0, 1.0, 0.0]
        We can also set the window_length and gap using offset alias strings.
        >>> import pandas as pd
        >>> rolling_min = RollingMin(window_length='3min', gap='1min')
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_min(times, [4, 3, 2, 1, 0]).tolist()
        [nan, 4.0, 3.0, 2.0, 1.0]
    """
    name = "rolling_min"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True
    def __init__(self, window_length=3, gap=1, min_periods=1):
        self.window_length = window_length
        self.gap = gap
        self.min_periods = min_periods
    def get_function(self):
        def rolling_min(datetime, numeric):
            # Re-index the numeric values by their datetimes so windows can
            # be built over either rows or time spans.
            indexed = pd.Series(numeric.values, index=datetime.values)
            return apply_rolling_agg_to_series(
                indexed,
                lambda window: window.min(),
                self.window_length,
                self.gap,
                self.min_periods,
            )
        return rolling_min
| 5,420 | 47.401786 | 122 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/rolling_count.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
apply_rolling_agg_to_series,
)
class RollingCount(TransformPrimitive):
    """Determines a rolling count of events over a given window.
    Description:
        Given a list of datetimes, return a rolling count starting
        at the row `gap` rows away from the current row and looking backward over the specified
        time window (by `window_length` and `gap`).
        Input datetimes should be monotonic.
    Args:
        window_length (int, string, optional): Specifies the amount of data included in each window.
            If an integer is provided, it will correspond to a number of rows. For data with a uniform sampling frequency,
            for example of one day, the window_length will correspond to a period of time, in this case,
            7 days for a window_length of 7.
            If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
            and it will indicate a length of time that each window should span.
            The list of available offset aliases can be found at
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
            Defaults to 3.
        gap (int, string, optional): Specifies a gap backwards from each instance before the
            window of usable data begins. If an integer is provided, it will correspond to a number of rows.
            If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
            and it will indicate a length of time between a target instance and the beginning of its window.
            Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Can only be as large as window_length when window_length is an integer.
            When window_length is an offset alias string, this limitation does not exist, but care should be taken
            to not choose a min_periods that will always be larger than the number of observations in a window.
            Defaults to 0, which is treated the same as a min_periods of 1.
    Note:
        Only offset aliases with fixed frequencies can be used when defining gap and window_length.
        This means that aliases such as `M` or `W` cannot be used, as they can indicate different
        numbers of days. ('M', because different months have different numbers of days;
        'W' because week will indicate a certain day of the week, like W-Wed, so that will
        indicate a different number of days depending on the anchoring date.)
    Note:
        When using an offset alias to define `gap`, an offset alias must also be used to define `window_length`.
        This limitation does not exist when using an offset alias to define `window_length`. In fact,
        if the data has a uniform sampling frequency, it is preferable to use a numeric `gap` as it is more
        efficient.
    Examples:
        >>> import pandas as pd
        >>> rolling_count = RollingCount(window_length=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_count(times).tolist()
        [nan, 1.0, 2.0, 3.0, 3.0]
        We can also control the gap before the rolling calculation.
        >>> import pandas as pd
        >>> rolling_count = RollingCount(window_length=3, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_count(times).tolist()
        [1.0, 2.0, 3.0, 3.0, 3.0]
        We can also control the minimum number of periods required for the rolling calculation.
        >>> import pandas as pd
        >>> rolling_count = RollingCount(window_length=3, min_periods=3, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_count(times).tolist()
        [nan, nan, 3.0, 3.0, 3.0]
        We can also set the window_length and gap using offset alias strings.
        >>> import pandas as pd
        >>> rolling_count = RollingCount(window_length='3min', gap='1min')
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_count(times).tolist()
        [nan, 1.0, 2.0, 3.0, 3.0]
    """
    name = "rolling_count"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True
    def __init__(self, window_length=3, gap=1, min_periods=0):
        # NOTE(review): min_periods defaults to 0 here, unlike the other
        # rolling primitives in this package (which default to 1); downstream,
        # a min_periods of 0 or None is treated the same as 1.
        self.window_length = window_length
        self.gap = gap
        self.min_periods = min_periods
    def get_function(self):
        def rolling_count(datetime):
            # A constant series indexed by time: counting entries in each
            # window yields the rolling event count.
            x = pd.Series(1, index=datetime)
            return apply_rolling_agg_to_series(
                x,
                lambda series: series.count(),
                self.window_length,
                self.gap,
                self.min_periods,
                # count() treats the NaNs inserted to realize the gap as
                # countable periods, so partial windows must be masked out.
                ignore_window_nans=True,
            )
        return rolling_count
| 5,262 | 46.845455 | 122 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/rolling_max.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
apply_rolling_agg_to_series,
)
class RollingMax(TransformPrimitive):
    """Determines the maximum of entries over a given window.
    Description:
        For each row, computes the maximum of the numeric values that fall
        inside a backward-looking window over the corresponding datetimes.
        The window ends `gap` rows (or the `gap` offset) before the current
        row and covers `window_length` rows (or the `window_length` offset)
        of data. Input datetimes should be monotonic.
    Args:
        window_length (int, string, optional): The amount of data included in each window.
            An integer corresponds to a number of rows; for data sampled at a uniform
            frequency of, say, one day, a window_length of 7 therefore covers 7 days.
            A string must be one of pandas' offset alias strings ('1D', '1H', etc)
            and gives the length of time each window spans. The available offset
            aliases are listed at
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
            Defaults to 3.
        gap (int, string, optional): A gap backwards from each instance before its
            window of usable data begins. An integer corresponds to a number of rows.
            A string must be one of pandas' offset alias strings ('1D', '1H', etc)
            and gives the length of time between a target instance and the beginning
            of its window. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for
            performing calculations over the window. Can be no larger than
            window_length when window_length is an integer. When window_length is an
            offset alias string this limit does not apply, but take care not to pick
            a min_periods that will always exceed the number of observations in a
            window. Defaults to 1.
    Note:
        Only offset aliases with fixed frequencies may be used for gap and
        window_length. Aliases such as `M` or `W` are rejected because they can
        stand for varying numbers of days ('M' since months differ in length;
        'W' since it anchors to a weekday, like W-Wed, so the span depends on
        the anchoring date).
    Note:
        An offset alias `gap` requires that `window_length` also be an offset
        alias. The reverse is not true: an offset alias `window_length` may be
        combined with a numeric `gap`, which is preferable for uniformly
        sampled data since it is more efficient.
    Examples:
        >>> import pandas as pd
        >>> rolling_max = RollingMax(window_length=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_max(times, [4, 3, 2, 1, 0]).tolist()
        [nan, 4.0, 4.0, 4.0, 3.0]
        We can also control the gap before the rolling calculation.
        >>> import pandas as pd
        >>> rolling_max = RollingMax(window_length=3, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_max(times, [4, 3, 2, 1, 0]).tolist()
        [4.0, 4.0, 4.0, 3.0, 2.0]
        We can also control the minimum number of periods required for the rolling calculation.
        >>> import pandas as pd
        >>> rolling_max = RollingMax(window_length=3, min_periods=3, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_max(times, [4, 3, 2, 1, 0]).tolist()
        [nan, nan, 4.0, 3.0, 2.0]
        We can also set the window_length and gap using offset alias strings.
        >>> import pandas as pd
        >>> rolling_max = RollingMax(window_length='3min', gap='1min')
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_max(times, [4, 3, 2, 1, 0]).tolist()
        [nan, 4.0, 4.0, 4.0, 3.0]
    """
    name = "rolling_max"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True
    def __init__(self, window_length=3, gap=1, min_periods=1):
        self.window_length = window_length
        self.gap = gap
        self.min_periods = min_periods
    def get_function(self):
        def rolling_max(datetime, numeric):
            # Re-index the numeric values by their datetimes so windows can
            # be built over either rows or time spans.
            indexed = pd.Series(numeric.values, index=datetime.values)
            return apply_rolling_agg_to_series(
                indexed,
                lambda window: window.max(),
                self.window_length,
                self.gap,
                self.min_periods,
            )
        return rolling_max
| 5,421 | 46.982301 | 122 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/__init__.py | from featuretools.primitives.standard.transform.time_series.lag import Lag
from featuretools.primitives.standard.transform.time_series.numeric_lag import (
NumericLag,
)
from featuretools.primitives.standard.transform.time_series.rolling_count import (
RollingCount,
)
from featuretools.primitives.standard.transform.time_series.rolling_max import (
RollingMax,
)
from featuretools.primitives.standard.transform.time_series.rolling_mean import (
RollingMean,
)
from featuretools.primitives.standard.transform.time_series.rolling_min import (
RollingMin,
)
from featuretools.primitives.standard.transform.time_series.rolling_outlier_count import (
RollingOutlierCount,
)
from featuretools.primitives.standard.transform.time_series.rolling_std import (
RollingSTD,
)
from featuretools.primitives.standard.transform.time_series.rolling_trend import (
RollingTrend,
)
from featuretools.primitives.standard.transform.time_series.expanding import (
ExpandingCount,
ExpandingMax,
ExpandingMean,
ExpandingMin,
ExpandingSTD,
ExpandingTrend,
)
| 1,090 | 31.088235 | 90 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/numeric_lag.py | import warnings
import pandas as pd
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
class NumericLag(TransformPrimitive):
    """Shifts an array of values by a specified number of periods.
    Deprecated:
        Use the ``Lag`` primitive instead; instantiating this class emits a
        ``FutureWarning``.
    Args:
        periods (int): The number of periods (rows) by which to shift the
            input. Default is 1.
        fill_value (int, float, optional): The value used to fill the gaps
            left behind by the shift. Default is None.
    Examples:
        >>> lag = NumericLag()
        >>> lag(pd.Series(pd.date_range(start="2020-01-01", periods=5, freq='D')), [1, 2, 3, 4, 5]).tolist()
        [nan, 1.0, 2.0, 3.0, 4.0]
        You can specify the number of periods to shift the values
        >>> lag_periods = NumericLag(periods=3)
        >>> lag_periods(pd.Series(pd.date_range(start="2020-01-01", periods=5, freq='D')), [1, 2, 3, 4, 5]).tolist()
        [nan, nan, nan, 1.0, 2.0]
        You can specify the fill value to use
        >>> lag_fill_value = NumericLag(fill_value=100)
        >>> lag_fill_value(pd.Series(pd.date_range(start="2020-01-01", periods=4, freq='D')), [1, 2, 3, 4]).tolist()
        [100, 1, 2, 3]
    """
    name = "numeric_lag"
    input_types = [
        ColumnSchema(semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    uses_full_dataframe = True
    def __init__(self, periods=1, fill_value=None):
        self.periods = periods
        self.fill_value = fill_value
        # This primitive is superseded by Lag; warn at construction time.
        warnings.warn(
            "NumericLag is deprecated and will be removed in a future version. Please use the 'Lag' primitive instead.",
            FutureWarning,
        )
    def get_function(self):
        def lag(time_index, numeric):
            # Index the values by time, shift, and hand back the raw array.
            indexed = pd.Series(numeric.values, index=time_index.values)
            shifted = indexed.shift(periods=self.periods, fill_value=self.fill_value)
            return shifted.values
        return lag
| 2,023 | 33.305085 | 120 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/rolling_mean.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
apply_rolling_agg_to_series,
)
class RollingMean(TransformPrimitive):
    """Calculates the mean of entries over a given window.
    Description:
        Given a list of numbers and a corresponding list of
        datetimes, return a rolling mean of the numeric values,
        starting at the row `gap` rows away from the current row and looking backward
        over the specified time window (by `window_length` and `gap`).
        Input datetimes should be monotonic.
    Args:
        window_length (int, string, optional): Specifies the amount of data included in each window.
            If an integer is provided, it will correspond to a number of rows. For data with a uniform sampling frequency,
            for example of one day, the window_length will correspond to a period of time, in this case,
            7 days for a window_length of 7.
            If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
            and it will indicate a length of time that each window should span.
            The list of available offset aliases can be found at
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases.
            Defaults to 3.
        gap (int, string, optional): Specifies a gap backwards from each instance before the
            window of usable data begins. If an integer is provided, it will correspond to a number of rows.
            If a string is provided, it must be one of pandas' offset alias strings ('1D', '1H', etc),
            and it will indicate a length of time between a target instance and the beginning of its window.
            Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Can only be as large as window_length when window_length is an integer.
            When window_length is an offset alias string, this limitation does not exist, but care should be taken
            to not choose a min_periods that will always be larger than the number of observations in a window.
            Defaults to 0, which is treated the same as a min_periods of 1.
    Note:
        Only offset aliases with fixed frequencies can be used when defining gap and window_length.
        This means that aliases such as `M` or `W` cannot be used, as they can indicate different
        numbers of days. ('M', because different months have different numbers of days;
        'W' because week will indicate a certain day of the week, like W-Wed, so that will
        indicate a different number of days depending on the anchoring date.)
    Note:
        When using an offset alias to define `gap`, an offset alias must also be used to define `window_length`.
        This limitation does not exist when using an offset alias to define `window_length`. In fact,
        if the data has a uniform sampling frequency, it is preferable to use a numeric `gap` as it is more
        efficient.
    Examples:
        >>> import pandas as pd
        >>> rolling_mean = RollingMean(window_length=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_mean(times, [4, 3, 2, 1, 0]).tolist()
        [nan, 4.0, 3.5, 3.0, 2.0]
        We can also control the gap before the rolling calculation.
        >>> import pandas as pd
        >>> rolling_mean = RollingMean(window_length=3, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_mean(times, [4, 3, 2, 1, 0]).tolist()
        [4.0, 3.5, 3.0, 2.0, 1.0]
        We can also control the minimum number of periods required for the rolling calculation.
        >>> import pandas as pd
        >>> rolling_mean = RollingMean(window_length=3, min_periods=3, gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> rolling_mean(times, [4, 3, 2, 1, 0]).tolist()
        [nan, nan, 3.0, 2.0, 1.0]
    """
    name = "rolling_mean"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True
    def __init__(self, window_length=3, gap=1, min_periods=0):
        # NOTE(review): min_periods defaults to 0 here, unlike most other
        # rolling primitives in this package (which default to 1); downstream,
        # a min_periods of 0 or None is treated the same as 1.
        self.window_length = window_length
        self.gap = gap
        self.min_periods = min_periods
    def get_function(self):
        def rolling_mean(datetime, numeric):
            # Re-index the numeric values by their datetimes so windows can
            # be built over either rows or time spans.
            x = pd.Series(numeric.values, index=datetime.values)
            return apply_rolling_agg_to_series(
                x,
                np.mean,
                self.window_length,
                self.gap,
                self.min_periods,
            )
        return rolling_mean
| 5,080 | 46.933962 | 122 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/expanding/expanding_trend.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
_apply_gap_for_expanding_primitives,
)
from featuretools.utils import calculate_trend
class ExpandingTrend(TransformPrimitive):
    """Computes the expanding trend for events over a given window.

    Description:
        Given a list of datetimes, returns the expanding trend starting
        at the row `gap` rows away from the current row. An expanding
        primitive calculates the value of a primitive for a given time
        with all the data available up to the corresponding point in time.
        Input datetimes should be monotonic.

    Args:
        gap (int, optional): Specifies a gap backwards from each instance before the
            usable data begins. Corresponds to number of rows. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Defaults to 1.

    Examples:
        >>> import pandas as pd
        >>> expanding_trend = ExpandingTrend()
        >>> times = pd.date_range(start='2019-01-01', freq='1D', periods=5)
        >>> ans = expanding_trend(times, [5, 4, 3, 2, 1]).tolist()
        >>> [round(x, 2) for x in ans]
        [nan, nan, nan, -1.0, -1.0]

        We can also control the gap before the expanding calculation.

        >>> import pandas as pd
        >>> expanding_trend = ExpandingTrend(gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1D', periods=5)
        >>> ans = expanding_trend(times, [5, 4, 3, 2, 1]).tolist()
        >>> [round(x, 2) for x in ans]
        [nan, nan, -1.0, -1.0, -1.0]

        We can also control the minimum number of periods required for the rolling calculation.

        >>> import pandas as pd
        >>> expanding_trend = ExpandingTrend(min_periods=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> ans = expanding_trend(times, [50, 4, 13, 22, 10]).tolist()
        >>> [round(x, 2) for x in ans]
        [nan, nan, nan, -18.5, -7.5]
    """

    name = "expanding_trend"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, gap=1, min_periods=1):
        self.gap = gap
        self.min_periods = min_periods

    def get_function(self):
        def expanding_trend(datetime, numeric):
            # Index the numeric values by their timestamps so the gap shift
            # operates on a time-ordered series.
            series = pd.Series(numeric.values, index=datetime)
            shifted = _apply_gap_for_expanding_primitives(series, self.gap)
            # Aggregate the project trend helper over the expanding window.
            window = shifted.expanding(min_periods=self.min_periods)
            return window.aggregate(calculate_trend).values

        return expanding_trend
| 3,061 | 37.275 | 104 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/expanding/expanding_mean.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
_apply_gap_for_expanding_primitives,
)
class ExpandingMean(TransformPrimitive):
    """Computes the expanding mean of events over a given window.

    Description:
        Given a list of datetimes, returns an expanding mean starting
        at the row `gap` rows away from the current row. An expanding
        primitive calculates the value of a primitive for a given time
        with all the data available up to the corresponding point in time.
        Input datetimes should be monotonic.

    Args:
        gap (int, optional): Specifies a gap backwards from each instance before the
            usable data begins. Corresponds to number of rows. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Defaults to 1.

    Examples:
        >>> import pandas as pd
        >>> expanding_mean = ExpandingMean()
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_mean(times, [5, 4, 3, 2, 1]).tolist()
        [nan, 5.0, 4.5, 4.0, 3.5]

        We can also control the gap before the expanding calculation.

        >>> import pandas as pd
        >>> expanding_mean = ExpandingMean(gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_mean(times, [5, 4, 3, 2, 1]).tolist()
        [5.0, 4.5, 4.0, 3.5, 3.0]

        We can also control the minimum number of periods required for the rolling calculation.

        >>> import pandas as pd
        >>> expanding_mean = ExpandingMean(min_periods=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_mean(times, [5, 4, 3, 2, 1]).tolist()
        [nan, nan, nan, 4.0, 3.5]
    """

    name = "expanding_mean"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, gap=1, min_periods=1):
        self.gap = gap
        self.min_periods = min_periods

    def get_function(self):
        def expanding_mean(datetime, numeric):
            # Build a time-indexed series, shift back by `gap` rows, then take
            # the running mean over all available prior observations.
            series = pd.Series(numeric.values, index=datetime)
            shifted = _apply_gap_for_expanding_primitives(series, self.gap)
            window = shifted.expanding(min_periods=self.min_periods)
            return window.mean().values

        return expanding_mean
| 2,768 | 37.458333 | 104 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/expanding/expanding_max.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
_apply_gap_for_expanding_primitives,
)
class ExpandingMax(TransformPrimitive):
    """Computes the expanding maximum of events over a given window.

    Description:
        Given a list of datetimes, returns an expanding maximum starting
        at the row `gap` rows away from the current row. An expanding
        primitive calculates the value of a primitive for a given time
        with all the data available up to the corresponding point in time.
        Input datetimes should be monotonic.

    Args:
        gap (int, optional): Specifies a gap backwards from each instance before the
            usable data begins. Corresponds to number of rows. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Defaults to 1.

    Examples:
        >>> import pandas as pd
        >>> expanding_max = ExpandingMax()
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_max(times, [2, 4, 6, 7, 2]).tolist()
        [nan, 2.0, 4.0, 6.0, 7.0]

        We can also control the gap before the expanding calculation.

        >>> import pandas as pd
        >>> expanding_max = ExpandingMax(gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_max(times, [2, 4, 6, 7, 2]).tolist()
        [2.0, 4.0, 6.0, 7.0, 7.0]

        We can also control the minimum number of periods required for the rolling calculation.

        >>> import pandas as pd
        >>> expanding_max = ExpandingMax(min_periods=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_max(times, [2, 4, 6, 7, 2]).tolist()
        [nan, nan, nan, 6.0, 7.0]
    """

    name = "expanding_max"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, gap=1, min_periods=1):
        self.gap = gap
        self.min_periods = min_periods

    def get_function(self):
        def expanding_max(datetime, numeric):
            # Build a time-indexed series, shift back by `gap` rows, then take
            # the running maximum over all available prior observations.
            series = pd.Series(numeric.values, index=datetime)
            shifted = _apply_gap_for_expanding_primitives(series, self.gap)
            window = shifted.expanding(min_periods=self.min_periods)
            return window.max().values

        return expanding_max
| 2,731 | 36.944444 | 104 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/expanding/expanding_min.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
_apply_gap_for_expanding_primitives,
)
class ExpandingMin(TransformPrimitive):
    """Computes the expanding minimum of events over a given window.

    Description:
        Given a list of datetimes, returns an expanding minimum starting
        at the row `gap` rows away from the current row. An expanding
        primitive calculates the value of a primitive for a given time
        with all the data available up to the corresponding point in time.
        Input datetimes should be monotonic.

    Args:
        gap (int, optional): Specifies a gap backwards from each instance before the
            usable data begins. Corresponds to number of rows. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Defaults to 1.

    Examples:
        >>> import pandas as pd
        >>> expanding_min = ExpandingMin()
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_min(times, [5, 4, 3, 2, 1]).tolist()
        [nan, 5.0, 4.0, 3.0, 2.0]

        We can also control the gap before the expanding calculation.

        >>> import pandas as pd
        >>> expanding_min = ExpandingMin(gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_min(times, [5, 4, 3, 2, 1]).tolist()
        [5.0, 4.0, 3.0, 2.0, 1.0]

        We can also control the minimum number of periods required for the rolling calculation.

        >>> import pandas as pd
        >>> expanding_min = ExpandingMin(min_periods=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_min(times, [5, 4, 3, 2, 1]).tolist()
        [nan, nan, nan, 3.0, 2.0]
    """

    name = "expanding_min"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, gap=1, min_periods=1):
        self.gap = gap
        self.min_periods = min_periods

    def get_function(self):
        def expanding_min(datetime, numeric):
            # Build a time-indexed series, shift back by `gap` rows, then take
            # the running minimum over all available prior observations.
            series = pd.Series(numeric.values, index=datetime)
            shifted = _apply_gap_for_expanding_primitives(series, self.gap)
            window = shifted.expanding(min_periods=self.min_periods)
            return window.min().values

        return expanding_min
| 2,730 | 37.464789 | 104 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/expanding/expanding_count.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, IntegerNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
_apply_gap_for_expanding_primitives,
)
class ExpandingCount(TransformPrimitive):
    """Computes the expanding count of events over a given window.

    Description:
        Given a list of datetimes, returns an expanding count starting
        at the row `gap` rows away from the current row. An expanding
        primitive calculates the value of a primitive for a given time
        with all the data available up to the corresponding point in time.
        Input datetimes should be monotonic.

    Args:
        gap (int, optional): Specifies a gap backwards from each instance before the
            usable data begins. Corresponds to number of rows. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Defaults to 1.

    Examples:
        >>> import pandas as pd
        >>> expanding_count = ExpandingCount()
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_count(times).tolist()
        [nan, 1.0, 2.0, 3.0, 4.0]

        We can also control the gap before the expanding calculation.

        >>> import pandas as pd
        >>> expanding_count = ExpandingCount(gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_count(times).tolist()
        [1.0, 2.0, 3.0, 4.0, 5.0]

        We can also control the minimum number of periods required for the rolling calculation.

        >>> import pandas as pd
        >>> expanding_count = ExpandingCount(min_periods=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> expanding_count(times).tolist()
        [nan, nan, nan, 3.0, 4.0]
    """

    name = "expanding_count"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, gap=1, min_periods=1):
        self.gap = gap
        self.min_periods = min_periods

    def get_function(self):
        def expanding_count(datetime_series):
            # Shift the usable data back by `gap` rows before counting.
            datetime_series = _apply_gap_for_expanding_primitives(
                datetime_series,
                self.gap,
            )
            count_series = datetime_series.expanding(
                min_periods=self.min_periods,
            ).count()
            # The first `gap + min_periods - 1` positions cannot have a full
            # window, so mask them with NaN. Use positional indexing (iloc):
            # the series is indexed by datetimes, and integer-label fallback
            # via `series[range(n)] = ...` is deprecated in pandas.
            num_nans = self.gap + self.min_periods - 1
            count_series.iloc[:num_nans] = np.nan
            return count_series

        return expanding_count
| 2,885 | 36.973684 | 104 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/expanding/__init__.py | from featuretools.primitives.standard.transform.time_series.expanding.expanding_count import (
ExpandingCount,
)
from featuretools.primitives.standard.transform.time_series.expanding.expanding_max import (
ExpandingMax,
)
from featuretools.primitives.standard.transform.time_series.expanding.expanding_mean import (
ExpandingMean,
)
from featuretools.primitives.standard.transform.time_series.expanding.expanding_min import (
ExpandingMin,
)
from featuretools.primitives.standard.transform.time_series.expanding.expanding_std import (
ExpandingSTD,
)
from featuretools.primitives.standard.transform.time_series.expanding.expanding_trend import (
ExpandingTrend,
)
| 688 | 35.263158 | 94 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/time_series/expanding/expanding_std.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.primitives.standard.transform.time_series.utils import (
_apply_gap_for_expanding_primitives,
)
class ExpandingSTD(TransformPrimitive):
    """Computes the expanding standard deviation for events over a given window.

    Description:
        Given a list of datetimes, returns the expanding standard deviation
        starting at the row `gap` rows away from the current row. An expanding
        primitive calculates the value of a primitive for a given time
        with all the data available up to the corresponding point in time.
        Input datetimes should be monotonic.

    Args:
        gap (int, optional): Specifies a gap backwards from each instance before the
            usable data begins. Corresponds to number of rows. Defaults to 1.
        min_periods (int, optional): Minimum number of observations required for performing calculations
            over the window. Defaults to 1.

    Examples:
        >>> import pandas as pd
        >>> expanding_std = ExpandingSTD()
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> ans = expanding_std(times, [5, 4, 3, 2, 1]).tolist()
        >>> [round(x, 2) for x in ans]
        [nan, nan, 0.71, 1.0, 1.29]

        We can also control the gap before the expanding calculation.

        >>> import pandas as pd
        >>> expanding_std = ExpandingSTD(gap=0)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> ans = expanding_std(times, [5, 4, 3, 2, 1]).tolist()
        >>> [round(x, 2) for x in ans]
        [nan, 0.71, 1.0, 1.29, 1.58]

        We can also control the minimum number of periods required for the rolling calculation.

        >>> import pandas as pd
        >>> expanding_std = ExpandingSTD(min_periods=3)
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> ans = expanding_std(times, [5, 4, 3, 2, 1]).tolist()
        >>> [round(x, 2) for x in ans]
        [nan, nan, nan, 1.0, 1.29]
    """

    name = "expanding_std"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, gap=1, min_periods=1):
        self.gap = gap
        self.min_periods = min_periods

    def get_function(self):
        def expanding_std(datetime, numeric):
            # Build a time-indexed series, shift back by `gap` rows, then take
            # the running (sample) standard deviation.
            series = pd.Series(numeric.values, index=datetime)
            shifted = _apply_gap_for_expanding_primitives(series, self.gap)
            window = shifted.expanding(min_periods=self.min_periods)
            return window.std().values

        return expanding_std
| 2,925 | 38.013333 | 104 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/email/is_free_email_domain.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, EmailAddress
from featuretools.primitives.base import TransformPrimitive
class IsFreeEmailDomain(TransformPrimitive):
    """Determines if an email address is from a free email domain.

    Description:
        EmailAddress input should be a string. Will return Nan
        if an invalid email address is provided, or if the input is
        not a string. The list of free email domains used in this primitive
        was obtained from https://github.com/willwhite/freemail/blob/master/data/free.txt.

    Examples:
        >>> is_free_email_domain = IsFreeEmailDomain()
        >>> is_free_email_domain(['name@gmail.com', 'name@featuretools.com']).tolist()
        [True, False]
    """

    name = "is_free_email_domain"
    input_types = [ColumnSchema(logical_type=EmailAddress)]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    # Bundled data file containing one free email domain per line.
    filename = "free_email_provider_domains.txt"

    def get_function(self):
        # Load the reference list of free domains once, outside the returned
        # closure, so the file is not re-read on every call.
        file_path = self.get_filepath(self.filename)
        free_domains = pd.read_csv(file_path, header=None, names=["domain"])
        free_domains["domain"] = free_domains.domain.str.strip()

        def is_free_email_domain(emails):
            # if the input is empty return an empty Series
            if len(emails) == 0:
                return pd.Series([], dtype="category")

            emails_df = pd.DataFrame({"email": emails})

            # if all emails are NaN expand won't propagate NaNs and will fail on indexing
            if emails_df["email"].isnull().all():
                emails_df["domain"] = np.nan
            else:
                # .str.strip() and .str.split() return NaN for NaN values and propagate NaNs into new columns
                emails_df["domain"] = (
                    emails_df["email"].str.strip().str.split("@", expand=True)[1]
                )

            # Membership test against the reference list; NaN domains yield
            # False here and are corrected to NaN below.
            emails_df["is_free"] = emails_df["domain"].isin(free_domains["domain"])

            # if there are any NaN domain values, change the series type to allow for
            # both bools and NaN values and set is_free to NaN for the NaN domains
            if emails_df["domain"].isnull().values.any():
                emails_df["is_free"] = emails_df["is_free"].astype("object")
                emails_df.loc[emails_df["domain"].isnull(), "is_free"] = np.nan

            return emails_df.is_free.values

        return is_free_email_domain
| 2,512 | 40.196721 | 109 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/email/email_address_to_domain.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, EmailAddress
from featuretools.primitives.base import TransformPrimitive
class EmailAddressToDomain(TransformPrimitive):
    """Determines the domain of an email

    Description:
        EmailAddress input should be a string. Will return Nan
        if an invalid email address is provided, or if the input is
        not a string.

    Examples:
        >>> email_address_to_domain = EmailAddressToDomain()
        >>> email_address_to_domain(['name@gmail.com', 'name@featuretools.com']).tolist()
        ['gmail.com', 'featuretools.com']
    """

    name = "email_address_to_domain"
    input_types = [ColumnSchema(logical_type=EmailAddress)]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})

    def get_function(self):
        def email_address_to_domain(emails):
            # if the input is empty return an empty Series
            if len(emails) == 0:
                return pd.Series([], dtype="category")

            emails_df = pd.DataFrame({"email": emails})

            # if all emails are NaN expand won't propagate NaNs and will fail on indexing
            if emails_df["email"].isnull().all():
                emails_df["domain"] = np.nan
                # Keep object dtype so downstream consumers see a consistent type.
                emails_df["domain"] = emails_df["domain"].astype(object)
            else:
                # .str.strip() and .str.split() return NaN for NaN values and propagate NaNs into new columns
                # Column 1 of the split is everything after the first "@".
                emails_df["domain"] = (
                    emails_df["email"].str.strip().str.split("@", expand=True)[1]
                )

            return emails_df.domain.values

        return email_address_to_domain
| 1,758 | 36.425532 | 109 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/email/__init__.py | from featuretools.primitives.standard.transform.email.email_address_to_domain import (
EmailAddressToDomain,
)
from featuretools.primitives.standard.transform.email.is_free_email_domain import (
IsFreeEmailDomain,
)
| 224 | 31.142857 | 86 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/cumulative/cum_sum.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
class CumSum(TransformPrimitive):
    """Calculates the cumulative sum.

    Description:
        Given a list of values, return the cumulative sum
        (or running total). There is no set window, so the
        sum at each point is calculated over all prior values.
        `NaN` values will return `NaN`, but in the window of a
        cumulative caluclation, they're ignored.

    Examples:
        >>> cum_sum = CumSum()
        >>> cum_sum([1, 2, 3, 4, None, 5]).tolist()
        [1.0, 3.0, 6.0, 10.0, nan, 15.0]
    """

    name = "cum_sum"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    uses_full_dataframe = True
    description_template = "the cumulative sum of {}"

    def get_function(self):
        # Delegate to pandas' cumsum, which skips NaNs in the running total
        # while leaving NaN at the NaN positions themselves.
        def running_total(values):
            return values.cumsum()

        return running_total
| 982 | 28.787879 | 62 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/cumulative/cumulative_time_since_last_true.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, Datetime, Double
from featuretools.primitives.base import TransformPrimitive
class CumulativeTimeSinceLastTrue(TransformPrimitive):
    """Determines the time (in seconds) since the last boolean was `True`
    given a datetime index column and boolean column

    Examples:
        >>> from datetime import datetime
        >>> cumulative_time_since_last_true = CumulativeTimeSinceLastTrue()
        >>> booleans = [False, True, False, True]
        >>> datetimes = [
        ...     datetime(2011, 4, 9, 10, 30, 0),
        ...     datetime(2011, 4, 9, 10, 30, 10),
        ...     datetime(2011, 4, 9, 10, 30, 15),
        ...     datetime(2011, 4, 9, 10, 30, 30)
        ... ]
        >>> cumulative_time_since_last_true(datetimes, booleans).tolist()
        [nan, 0.0, 5.0, 0.0]
    """

    name = "cumulative_time_since_last_true"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(logical_type=Boolean),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})

    def get_function(self):
        def time_since_previous_true(datetime_col, bool_col):
            # With no usable booleans there is no "last True" anywhere.
            if bool_col.dropna().empty:
                return pd.Series([np.nan] * len(bool_col))
            df = pd.DataFrame(
                {
                    "datetime": datetime_col,
                    "last_true_datetime": datetime_col,
                    "bool": bool_col,
                },
            )
            # Blank out timestamps at False rows, then forward-fill so every
            # row carries the timestamp of the most recent True row.
            not_false_indices = df["bool"]
            df.loc[~not_false_indices, "last_true_datetime"] = np.nan
            # .ffill() replaces the deprecated fillna(method="ffill") form.
            df["last_true_datetime"] = df["last_true_datetime"].ffill()
            total_seconds = (
                df["datetime"] - df["last_true_datetime"]
            ).dt.total_seconds()
            return pd.Series(total_seconds)

        return time_since_previous_true
| 2,013 | 36.296296 | 86 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/cumulative/cumulative_time_since_last_false.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, Datetime, Double
from featuretools.primitives.base import TransformPrimitive
class CumulativeTimeSinceLastFalse(TransformPrimitive):
    """Determines the time since last `False` value.

    Description:
        Given a list of booleans and a list of corresponding
        datetimes, determine the time at each point since the
        last `False` value. Returns time difference in seconds.
        `NaN` values are ignored.

    Examples:
        >>> from datetime import datetime
        >>> cumulative_time_since_last_false = CumulativeTimeSinceLastFalse()
        >>> booleans = [False, True, False, True]
        >>> datetimes = [
        ...     datetime(2011, 4, 9, 10, 30, 0),
        ...     datetime(2011, 4, 9, 10, 30, 10),
        ...     datetime(2011, 4, 9, 10, 30, 15),
        ...     datetime(2011, 4, 9, 10, 30, 29)
        ... ]
        >>> cumulative_time_since_last_false(datetimes, booleans).tolist()
        [0.0, 10.0, 0.0, 14.0]
    """

    name = "cumulative_time_since_last_false"
    input_types = [
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
        ColumnSchema(logical_type=Boolean),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})

    def get_function(self):
        def time_since_previous_false(datetime_col, bool_col):
            # With no usable booleans there is no "last False" anywhere.
            if bool_col.dropna().empty:
                return pd.Series([np.nan] * len(bool_col))
            df = pd.DataFrame(
                {
                    "datetime": datetime_col,
                    "last_false_datetime": datetime_col,
                    "bool": bool_col,
                },
            )
            # Blank out timestamps at True rows, then forward-fill so every
            # row carries the timestamp of the most recent False row.
            not_false_indices = df["bool"]
            df.loc[not_false_indices, "last_false_datetime"] = np.nan
            # .ffill() replaces the deprecated fillna(method="ffill") form.
            df["last_false_datetime"] = df["last_false_datetime"].ffill()
            total_seconds = (
                df["datetime"] - df["last_false_datetime"]
            ).dt.total_seconds()
            return pd.Series(total_seconds)

        return time_since_previous_false
| 2,191 | 36.152542 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/cumulative/cum_min.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
class CumMin(TransformPrimitive):
    """Calculates the cumulative minimum.

    Description:
        Given a list of values, return the cumulative min
        (or running min). There is no set window, so the min
        at each point is calculated over all prior values.
        `NaN` values will return `NaN`, but in the window of a
        cumulative caluclation, they're ignored.

    Examples:
        >>> cum_min = CumMin()
        >>> cum_min([1, 2, -3, 4, None, 5]).tolist()
        [1.0, 1.0, -3.0, -3.0, nan, -3.0]
    """

    name = "cum_min"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    uses_full_dataframe = True
    description_template = "the cumulative minimum of {}"

    def get_function(self):
        # Delegate to pandas' cummin, which skips NaNs in the running minimum
        # while leaving NaN at the NaN positions themselves.
        def running_minimum(values):
            return values.cummin()

        return running_minimum
| 990 | 29.030303 | 62 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/cumulative/cum_count.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import IntegerNullable
from featuretools.primitives.base import TransformPrimitive
class CumCount(TransformPrimitive):
    """Calculates the cumulative count.

    Description:
        Given a list of values, return the cumulative count
        (or running count). There is no set window, so the
        count at each point is calculated over all prior
        values. `NaN` values are counted.

    Examples:
        >>> cum_count = CumCount()
        >>> cum_count([1, 2, 3, 4, None, 5]).tolist()
        [1, 2, 3, 4, 5, 6]
    """

    name = "cum_count"
    input_types = [
        [ColumnSchema(semantic_tags={"foreign_key"})],
        [ColumnSchema(semantic_tags={"category"})],
    ]
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    uses_full_dataframe = True
    description_template = "the cumulative count of {}"

    def get_function(self):
        # The running count is simply the 1-based position of each row;
        # NaNs count too, so no masking is needed.
        def running_count(values):
            return np.arange(len(values)) + 1

        return running_count
| 1,099 | 28.72973 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/cumulative/__init__.py | from featuretools.primitives.standard.transform.cumulative.cum_count import CumCount
from featuretools.primitives.standard.transform.cumulative.cum_max import CumMax
from featuretools.primitives.standard.transform.cumulative.cum_mean import CumMean
from featuretools.primitives.standard.transform.cumulative.cum_min import CumMin
from featuretools.primitives.standard.transform.cumulative.cum_sum import CumSum
from featuretools.primitives.standard.transform.cumulative.cumulative_time_since_last_false import (
CumulativeTimeSinceLastFalse,
)
from featuretools.primitives.standard.transform.cumulative.cumulative_time_since_last_true import (
CumulativeTimeSinceLastTrue,
)
| 683 | 56 | 100 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/cumulative/cum_mean.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
class CumMean(TransformPrimitive):
    """Calculates the cumulative mean.

    Description:
        Given a list of values, return the cumulative mean
        (or running mean). There is no set window, so the
        mean at each point is calculated over all prior values.
        `NaN` values will return `NaN`, but in the window of a
        cumulative caluclation, they're treated as 0.

    Examples:
        >>> cum_mean = CumMean()
        >>> cum_mean([1, 2, 3, 4, None, 5]).tolist()
        [1.0, 1.5, 2.0, 2.5, nan, 2.5]
    """

    name = "cum_mean"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    uses_full_dataframe = True
    description_template = "the cumulative mean of {}"

    def get_function(self):
        # Running mean = running sum divided by 1-based position. NaNs
        # contribute nothing to the sum but still advance the position,
        # which is why they behave like zeros in the window.
        def running_mean(values):
            positions = np.arange(len(values)) + 1
            return values.cumsum() / positions

        return running_mean
| 1,046 | 29.794118 | 66 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/cumulative/cum_max.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
class CumMax(TransformPrimitive):
    """Calculates the cumulative maximum.

    Description:
        Given a list of values, return the cumulative max
        (or running max). There is no set window, so the max
        at each point is calculated over all prior values.
        `NaN` values will return `NaN`, but in the window of a
        cumulative caluclation, they're ignored.

    Examples:
        >>> cum_max = CumMax()
        >>> cum_max([1, 2, 3, 4, None, 5]).tolist()
        [1.0, 2.0, 3.0, 4.0, nan, 5.0]
    """

    name = "cum_max"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    uses_full_dataframe = True
    description_template = "the cumulative maximum of {}"

    def get_function(self):
        # Delegate to pandas' cummax, which skips NaNs in the running maximum
        # while leaving NaN at the NaN positions themselves.
        def running_maximum(values):
            return values.cummax()

        return running_maximum
| 986 | 28.909091 | 62 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/multiply_numeric.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class MultiplyNumeric(TransformPrimitive):
    """Performs element-wise multiplication of two lists.

    Description:
        Given a list of values X and a list of values
        Y, determine the product of each value in X
        with its corresponding value in Y.

    Examples:
        >>> multiply_numeric = MultiplyNumeric()
        >>> multiply_numeric([2, 1, 2], [1, 2, 2]).tolist()
        [2, 2, 4]
    """

    name = "multiply_numeric"
    input_types = [
        ColumnSchema(semantic_tags={"numeric"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    commutative = True
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the product of {} and {}"

    def get_function(self):
        # numpy's element-wise multiply handles broadcasting of the inputs.
        def multiply(val1, val2):
            return np.multiply(val1, val2)

        return multiply

    def generate_name(self, base_feature_names):
        left, right = base_feature_names[0], base_feature_names[1]
        return "%s * %s" % (left, right)
| 1,158 | 30.324324 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/less_than.py | import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime, Ordinal
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class LessThan(TransformPrimitive):
    """Determines if values in one list are less than another list.

    Description:
        Given a list of values X and a list of values Y, determine
        whether each value in X is less than each corresponding value
        in Y. Equal pairs will return `False`.

    Examples:
        >>> less_than = LessThan()
        >>> less_than([2, 1, 2], [1, 2, 2]).tolist()
        [False, True, False]
    """

    name = "less_than"
    input_types = [
        [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(semantic_tags={"numeric"}),
        ],
        [ColumnSchema(logical_type=Datetime), ColumnSchema(logical_type=Datetime)],
        [ColumnSchema(logical_type=Ordinal), ColumnSchema(logical_type=Ordinal)],
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} is less than {}"

    def get_function(self):
        def less_than(val1, val2):
            # Categorical (Ordinal) inputs are only comparable when both
            # sides share the exact same ordered category set; otherwise the
            # comparison is undefined and all non-null results become NaN.
            val1_is_categorical = pdtypes.is_categorical_dtype(val1)
            val2_is_categorical = pdtypes.is_categorical_dtype(val2)
            if val1_is_categorical and val2_is_categorical:
                if not all(val1.cat.categories == val2.cat.categories):
                    # where(pd.isnull, nan): keep original nulls, NaN the rest.
                    return val1.where(pd.isnull, np.nan)
            elif val1_is_categorical or val2_is_categorical:
                # This can happen because CFM does not set proper dtypes for intermediate
                # features, so some agg features that should be Ordinal don't yet have correct type.
                return val1.where(pd.isnull, np.nan)
            return val1 < val2

        return less_than

    def generate_name(self, base_feature_names):
        return "%s < %s" % (base_feature_names[0], base_feature_names[1])
| 2,178 | 38.618182 | 100 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/divide_by_feature.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class DivideByFeature(TransformPrimitive):
    """Divides a scalar by each value in the list.

    Description:
        For every element of the input column, compute
        ``value / element`` and return the resulting column of
        quotients.

    Examples:
        >>> divide_by_feature = DivideByFeature(value=2)
        >>> divide_by_feature([4, 1, 2]).tolist()
        [0.5, 2.0, 1.0]
    """

    name = "divide_by_feature"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=1):
        # Fixed numerator shared by every division (default 1 -> reciprocal).
        self.value = value
        self.description_template = f"the result of {self.value} divided by {{}}"

    def get_function(self):
        numerator = self.value

        def scalar_over_column(vals):
            # Broadcast the scalar numerator across the whole column.
            return numerator / vals

        return scalar_over_column

    def generate_name(self, base_feature_names):
        return f"{self.value} / {base_feature_names[0]}"
| 1,231 | 29.8 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/less_than_scalar.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class LessThanScalar(TransformPrimitive):
    """Determines if values are less than a given scalar.

    Description:
        Compare every element of the input column against a fixed
        scalar and report whether the element is strictly smaller.
        Elements equal to the scalar yield `False`.

    Examples:
        >>> less_than_scalar = LessThanScalar(value=2)
        >>> less_than_scalar([3, 1, 2]).tolist()
        [False, True, False]
    """

    name = "less_than_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=0):
        # Threshold used on the right-hand side of the comparison.
        self.value = value
        self.description_template = f"whether {{}} is less than {self.value}"

    def get_function(self):
        threshold = self.value

        def compare(vals):
            return vals < threshold

        return compare

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} < {self.value}"
| 1,311 | 32.641026 | 85 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/add_numeric.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class AddNumeric(TransformPrimitive):
    """Performs element-wise addition of two lists.

    Description:
        For each position, add the value from the first input to the
        value at the same position in the second input.

    Examples:
        >>> add_numeric = AddNumeric()
        >>> add_numeric([2, 1, 2], [1, 2, 2]).tolist()
        [3, 3, 4]
    """

    name = "add_numeric"
    input_types = [
        ColumnSchema(semantic_tags={"numeric"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    # x + y == y + x, so DFS only needs to generate one ordering of the inputs.
    commutative = True
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the sum of {} and {}"

    def get_function(self):
        # numpy's ufunc handles element-wise addition directly.
        return np.add

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} + {base_feature_names[1]}"
| 1,114 | 29.135135 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/divide_numeric_scalar.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class DivideNumericScalar(TransformPrimitive):
    """Divides each element in the list by a scalar.

    Description:
        For every element of the input column, compute
        ``element / value`` using the configured scalar divisor.

    Examples:
        >>> divide_numeric_scalar = DivideNumericScalar(value=2)
        >>> divide_numeric_scalar([3, 1, 2]).tolist()
        [1.5, 0.5, 1.0]
    """

    name = "divide_numeric_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=1):
        # Fixed divisor applied to every element.
        self.value = value
        self.description_template = f"the result of {{}} divided by {self.value}"

    def get_function(self):
        divisor = self.value

        def column_over_scalar(vals):
            return vals / divisor

        return column_over_scalar

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} / {self.value}"
| 1,216 | 30.205128 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/less_than_equal_to.py | import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime, Ordinal
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class LessThanEqualTo(TransformPrimitive):
    """Determines if values in one list are less than or equal to another list.

    Description:
        Given a list of values X and a list of values Y, determine
        whether each value in X is less than or equal to each
        corresponding value in Y. Equal pairs will return `True`.

    Examples:
        >>> less_than_equal_to = LessThanEqualTo()
        >>> less_than_equal_to([2, 1, 2], [1, 2, 2]).tolist()
        [False, True, True]
    """
    name = "less_than_equal_to"
    # Both inputs must be the same comparable family: numeric, datetime, or ordinal.
    input_types = [
        [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(semantic_tags={"numeric"}),
        ],
        [ColumnSchema(logical_type=Datetime), ColumnSchema(logical_type=Datetime)],
        [ColumnSchema(logical_type=Ordinal), ColumnSchema(logical_type=Ordinal)],
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} is less than or equal to {}"

    def get_function(self):
        def less_than_equal(val1, val2):
            # Use an isinstance check on the dtype (matching NotEqual in this
            # package) instead of the deprecated
            # pandas.api.types.is_categorical_dtype helper.
            val1_is_categorical = isinstance(
                getattr(val1, "dtype", None),
                pd.CategoricalDtype,
            )
            val2_is_categorical = isinstance(
                getattr(val2, "dtype", None),
                pd.CategoricalDtype,
            )
            if val1_is_categorical and val2_is_categorical:
                # Ordered comparison is only meaningful when both columns share
                # the same category set; otherwise return all NaN.
                # ``where(pd.isnull, np.nan)`` keeps nulls and replaces every
                # non-null value with NaN.
                if not all(val1.cat.categories == val2.cat.categories):
                    return val1.where(pd.isnull, np.nan)
            elif val1_is_categorical or val2_is_categorical:
                # This can happen because CFM does not set proper dtypes for intermediate
                # features, so some agg features that should be Ordinal don't yet have correct type.
                return val1.where(pd.isnull, np.nan)
            return val1 <= val2

        return less_than_equal

    def generate_name(self, base_feature_names):
        return "%s <= %s" % (base_feature_names[0], base_feature_names[1])
| 2,267 | 40.236364 | 100 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/not_equal.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class NotEqual(TransformPrimitive):
    """Determines if values in one list are not equal to another list.
    Description:
        Given a list of values X and a list of values Y, determine
        whether each value in X is not equal to each corresponding
        value in Y.
    Examples:
        >>> not_equal = NotEqual()
        >>> not_equal([2, 1, 2], [1, 2, 2]).tolist()
        [True, True, False]
    """
    name = "not_equal"
    # Any pair of column types may be compared for inequality.
    input_types = [ColumnSchema(), ColumnSchema()]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    # x != y is symmetric, so DFS only needs one ordering of the inputs.
    commutative = True
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "whether {} does not equal {}"
    def get_function(self):
        def not_equal(x_vals, y_vals):
            # Comparing two categorical columns with differing category sets
            # fails in pandas, so first extend each side's categories to the
            # union of both before calling ``ne``.
            if isinstance(x_vals.dtype, pd.CategoricalDtype) and isinstance(
                y_vals.dtype,
                pd.CategoricalDtype,
            ):
                categories = set(x_vals.cat.categories).union(
                    set(y_vals.cat.categories),
                )
                x_vals = x_vals.cat.add_categories(
                    categories.difference(set(x_vals.cat.categories)),
                )
                y_vals = y_vals.cat.add_categories(
                    categories.difference(set(y_vals.cat.categories)),
                )
            # Element-wise inequality; null handling follows pandas semantics.
            return x_vals.ne(y_vals)
        return not_equal
    def generate_name(self, base_feature_names):
        # Feature name rendered as an infix expression, e.g. "a != b".
        return "%s != %s" % (base_feature_names[0], base_feature_names[1])
| 1,768 | 33.686275 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/multiply_boolean.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class MultiplyBoolean(TransformPrimitive):
    """Performs element-wise multiplication of two lists of boolean values.

    Description:
        For each position, combine the boolean from the first input
        with the boolean at the same position in the second input,
        yielding their product (logical AND).

    Examples:
        >>> multiply_boolean = MultiplyBoolean()
        >>> multiply_boolean([True, True, False], [True, False, True]).tolist()
        [True, False, False]
    """

    name = "multiply_boolean"
    # Accept every pairing of Boolean and BooleanNullable inputs.
    input_types = [
        [ColumnSchema(logical_type=left), ColumnSchema(logical_type=right)]
        for left, right in [
            (BooleanNullable, BooleanNullable),
            (Boolean, Boolean),
            (Boolean, BooleanNullable),
            (BooleanNullable, Boolean),
        ]
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    # Boolean multiplication is symmetric, so one input ordering suffices.
    commutative = True
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "the product of {} and {}"

    def get_function(self):
        # Bitwise AND on booleans is exactly boolean multiplication.
        return np.bitwise_and

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} * {base_feature_names[1]}"
| 1,641 | 32.510204 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/equal_scalar.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class EqualScalar(TransformPrimitive):
    """Determines if values in a list are equal to a given scalar.

    Description:
        Compare every element of the input column against a fixed
        scalar and report whether the element equals it.

    Examples:
        >>> equal_scalar = EqualScalar(value=2)
        >>> equal_scalar([3, 1, 2]).tolist()
        [False, False, True]
    """

    name = "equal_scalar"
    input_types = [ColumnSchema()]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=None):
        # Reference value each element is compared against.
        self.value = value
        self.description_template = f"whether {{}} equals {self.value}"

    def get_function(self):
        target = self.value

        def compare(vals):
            return vals == target

        return compare

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} = {self.value}"
| 1,207 | 30.789474 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/less_than_equal_to_scalar.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class LessThanEqualToScalar(TransformPrimitive):
    """Determines if values are less than or equal to a given scalar.

    Description:
        Compare every element of the input column against a fixed
        scalar and report whether the element is smaller than or
        equal to it. Elements equal to the scalar yield `True`.

    Examples:
        >>> less_than_equal_to_scalar = LessThanEqualToScalar(value=2)
        >>> less_than_equal_to_scalar([3, 1, 2]).tolist()
        [False, True, True]
    """

    name = "less_than_equal_to_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=0):
        # Threshold used on the right-hand side of the comparison.
        self.value = value
        self.description_template = (
            f"whether {{}} is less than or equal to {self.value}"
        )

    def get_function(self):
        threshold = self.value

        def compare(vals):
            return vals <= threshold

        return compare

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} <= {self.value}"
| 1,429 | 33.878049 | 86 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/add_numeric_scalar.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class AddNumericScalar(TransformPrimitive):
    """Adds a scalar to each value in the list.

    Description:
        For every element of the input column, compute
        ``element + value`` using the configured scalar.

    Examples:
        >>> add_numeric_scalar = AddNumericScalar(value=2)
        >>> add_numeric_scalar([3, 1, 2]).tolist()
        [5, 3, 4]
    """

    name = "add_numeric_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=0):
        # Constant offset added to every element.
        self.value = value
        self.description_template = f"the sum of {{}} and {self.value}"

    def get_function(self):
        offset = self.value

        def shift(vals):
            return vals + offset

        return shift

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} + {self.value}"
| 1,154 | 30.216216 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/divide_numeric.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class DivideNumeric(TransformPrimitive):
    """Performs element-wise division of two lists.

    Description:
        For each position, divide the value from the first input by
        the value at the same position in the second input.

    Args:
        commutative (bool): determines if Deep Feature Synthesis should
            generate both x / y and y / x, or just one. If True, there is
            no guarantee which of the two will be generated. Defaults to False.

    Examples:
        >>> divide_numeric = DivideNumeric()
        >>> divide_numeric([2.0, 1.0, 2.0], [1.0, 2.0, 2.0]).tolist()
        [2.0, 0.5, 1.0]
    """

    name = "divide_numeric"
    input_types = [
        ColumnSchema(semantic_tags={"numeric"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the result of {} divided by {}"

    def __init__(self, commutative=False):
        # Division is order-sensitive; callers may opt in to treating it as
        # commutative so DFS generates only one ordering.
        self.commutative = commutative

    def get_function(self):
        def quotient(numerator, denominator):
            return numerator / denominator

        return quotient

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} / {base_feature_names[1]}"
| 1,526 | 32.195652 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/greater_than_equal_to_scalar.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class GreaterThanEqualToScalar(TransformPrimitive):
    """Determines if values are greater than or equal to a given scalar.

    Description:
        Compare every element of the input column against a fixed
        scalar and report whether the element is larger than or
        equal to it. Elements equal to the scalar yield `True`.

    Examples:
        >>> greater_than_equal_to_scalar = GreaterThanEqualToScalar(value=2)
        >>> greater_than_equal_to_scalar([3, 1, 2]).tolist()
        [True, False, True]
    """

    name = "greater_than_equal_to_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=0):
        # Threshold used on the right-hand side of the comparison.
        self.value = value
        self.description_template = (
            f"whether {{}} is greater than or equal to {self.value}"
        )

    def get_function(self):
        threshold = self.value

        def compare(vals):
            return vals >= threshold

        return compare

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} >= {self.value}"
| 1,460 | 34.634146 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/greater_than_scalar.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class GreaterThanScalar(TransformPrimitive):
    """Determines if values are greater than a given scalar.

    Description:
        Compare every element of the input column against a fixed
        scalar and report whether the element is strictly larger.
        Elements equal to the scalar yield `False`.

    Examples:
        >>> greater_than_scalar = GreaterThanScalar(value=2)
        >>> greater_than_scalar([3, 1, 2]).tolist()
        [True, False, False]
    """

    name = "greater_than_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=0):
        # Threshold used on the right-hand side of the comparison.
        self.value = value
        self.description_template = f"whether {{}} is greater than {self.value}"

    def get_function(self):
        threshold = self.value

        def compare(vals):
            return vals > threshold

        return compare

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} > {self.value}"
| 1,341 | 33.410256 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/not_equal_scalar.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class NotEqualScalar(TransformPrimitive):
    """Determines if values in a list are not equal to a given scalar.

    Description:
        Compare every element of the input column against a fixed
        scalar and report whether the element differs from it.

    Examples:
        >>> not_equal_scalar = NotEqualScalar(value=2)
        >>> not_equal_scalar([3, 1, 2]).tolist()
        [True, True, False]
    """

    name = "not_equal_scalar"
    input_types = [ColumnSchema()]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=None):
        # Reference value each element is compared against.
        self.value = value
        self.description_template = f"whether {{}} does not equal {self.value}"

    def get_function(self):
        target = self.value

        def compare(vals):
            return vals != target

        return compare

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} != {self.value}"
| 1,249 | 31.894737 | 87 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/modulo_numeric_scalar.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class ModuloNumericScalar(TransformPrimitive):
    """Computes the modulo of each element in the list by a given scalar.

    Description:
        For every element of the input column, compute
        ``element % value`` — the remainder after dividing by the
        configured scalar.

    Examples:
        >>> modulo_numeric_scalar = ModuloNumericScalar(value=2)
        >>> modulo_numeric_scalar([3, 1, 2]).tolist()
        [1, 1, 0]
    """

    name = "modulo_numeric_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=1):
        # Fixed divisor used for the remainder computation.
        self.value = value
        self.description_template = (
            f"the remainder after dividing {{}} by {self.value}"
        )

    def get_function(self):
        divisor = self.value

        def remainder(vals):
            return vals % divisor

        return remainder

    def generate_name(self, base_feature_names):
        return f"{base_feature_names[0]} % {self.value}"
| 1,283 | 31.1 | 85 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/or_primitive.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Or(TransformPrimitive):
    """Performs element-wise logical OR of two lists.

    Description:
        For each position, report `True` when the value from either
        input (or both) is `True`.

    Examples:
        >>> _or = Or()
        >>> _or([False, True, False], [True, True, False]).tolist()
        [True, True, False]
    """

    name = "or"
    # Accept every pairing of Boolean and BooleanNullable inputs.
    input_types = [
        [ColumnSchema(logical_type=left), ColumnSchema(logical_type=right)]
        for left, right in [
            (BooleanNullable, BooleanNullable),
            (Boolean, Boolean),
            (Boolean, BooleanNullable),
            (BooleanNullable, Boolean),
        ]
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    # OR is symmetric, so one input ordering suffices.
    commutative = True
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} is true or {} is true"

    def get_function(self):
        return np.logical_or

    def generate_name(self, base_feature_names):
        return f"OR({base_feature_names[0]}, {base_feature_names[1]})"
| 1,586 | 31.387755 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/greater_than.py | import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime, Ordinal
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class GreaterThan(TransformPrimitive):
    """Determines if values in one list are greater than another list.

    Description:
        Given a list of values X and a list of values Y, determine
        whether each value in X is greater than each corresponding
        value in Y. Equal pairs will return `False`.

    Examples:
        >>> greater_than = GreaterThan()
        >>> greater_than([2, 1, 2], [1, 2, 2]).tolist()
        [True, False, False]
    """
    name = "greater_than"
    # Both inputs must be the same comparable family: numeric, datetime, or ordinal.
    input_types = [
        [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(semantic_tags={"numeric"}),
        ],
        [ColumnSchema(logical_type=Datetime), ColumnSchema(logical_type=Datetime)],
        [ColumnSchema(logical_type=Ordinal), ColumnSchema(logical_type=Ordinal)],
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    # NOTE(review): unlike LessThan, Library.SPARK is not listed here —
    # confirm whether the omission is intentional.
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "whether {} is greater than {}"

    def get_function(self):
        def greater_than(val1, val2):
            # Use an isinstance check on the dtype (matching NotEqual in this
            # package) instead of the deprecated
            # pandas.api.types.is_categorical_dtype helper.
            val1_is_categorical = isinstance(
                getattr(val1, "dtype", None),
                pd.CategoricalDtype,
            )
            val2_is_categorical = isinstance(
                getattr(val2, "dtype", None),
                pd.CategoricalDtype,
            )
            if val1_is_categorical and val2_is_categorical:
                # Ordered comparison is only meaningful when both columns share
                # the same category set; otherwise return all NaN.
                # ``where(pd.isnull, np.nan)`` keeps nulls and replaces every
                # non-null value with NaN.
                if not all(val1.cat.categories == val2.cat.categories):
                    return val1.where(pd.isnull, np.nan)
            elif val1_is_categorical or val2_is_categorical:
                # This can happen because CFM does not set proper dtypes for intermediate
                # features, so some agg features that should be Ordinal don't yet have correct type.
                return val1.where(pd.isnull, np.nan)
            return val1 > val2

        return greater_than

    def generate_name(self, base_feature_names):
        return "%s > %s" % (base_feature_names[0], base_feature_names[1])
| 2,193 | 38.890909 | 100 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/greater_than_equal_to.py | import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime, Ordinal
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class GreaterThanEqualTo(TransformPrimitive):
    """Determines if values in one list are greater than or equal to another list.

    Description:
        Given a list of values X and a list of values Y, determine
        whether each value in X is greater than or equal to each
        corresponding value in Y. Equal pairs will return `True`.

    Examples:
        >>> greater_than_equal_to = GreaterThanEqualTo()
        >>> greater_than_equal_to([2, 1, 2], [1, 2, 2]).tolist()
        [True, False, True]
    """
    name = "greater_than_equal_to"
    # Both inputs must be the same comparable family: numeric, datetime, or ordinal.
    input_types = [
        [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(semantic_tags={"numeric"}),
        ],
        [ColumnSchema(logical_type=Datetime), ColumnSchema(logical_type=Datetime)],
        [ColumnSchema(logical_type=Ordinal), ColumnSchema(logical_type=Ordinal)],
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} is greater than or equal to {}"

    def get_function(self):
        def greater_than_equal(val1, val2):
            # Use an isinstance check on the dtype (matching NotEqual in this
            # package) instead of the deprecated
            # pandas.api.types.is_categorical_dtype helper.
            val1_is_categorical = isinstance(
                getattr(val1, "dtype", None),
                pd.CategoricalDtype,
            )
            val2_is_categorical = isinstance(
                getattr(val2, "dtype", None),
                pd.CategoricalDtype,
            )
            if val1_is_categorical and val2_is_categorical:
                # Ordered comparison is only meaningful when both columns share
                # the same category set; otherwise return all NaN.
                # ``where(pd.isnull, np.nan)`` keeps nulls and replaces every
                # non-null value with NaN.
                if not all(val1.cat.categories == val2.cat.categories):
                    return val1.where(pd.isnull, np.nan)
            elif val1_is_categorical or val2_is_categorical:
                # This can happen because CFM does not set proper dtypes for intermediate
                # features, so some agg features that should be Ordinal don't yet have correct type.
                return val1.where(pd.isnull, np.nan)
            return val1 >= val2

        return greater_than_equal

    def generate_name(self, base_feature_names):
        return "%s >= %s" % (base_feature_names[0], base_feature_names[1])
| 2,297 | 40.781818 | 100 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/and_primitive.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class And(TransformPrimitive):
    """Performs element-wise logical AND of two lists.

    Description:
        For each position, report `True` only when the values from
        both inputs are `True`.

    Examples:
        >>> _and = And()
        >>> _and([False, True, False], [True, True, False]).tolist()
        [False, True, False]
    """

    name = "and"
    # Accept every pairing of Boolean and BooleanNullable inputs.
    input_types = [
        [ColumnSchema(logical_type=left), ColumnSchema(logical_type=right)]
        for left, right in [
            (BooleanNullable, BooleanNullable),
            (Boolean, Boolean),
            (Boolean, BooleanNullable),
            (BooleanNullable, Boolean),
        ]
    ]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    # AND is symmetric, so one input ordering suffices.
    commutative = True
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} and {} are true"

    def get_function(self):
        return np.logical_and

    def generate_name(self, base_feature_names):
        return f"AND({base_feature_names[0]}, {base_feature_names[1]})"
| 1,595 | 31.571429 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/modulo_by_feature.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class ModuloByFeature(TransformPrimitive):
    """Computes the modulo of a scalar by each element in a list.

    Description:
        For every element of the input column, compute
        ``value % element`` — the remainder of the configured scalar
        after division by the element.

    Examples:
        >>> modulo_by_feature = ModuloByFeature(value=2)
        >>> modulo_by_feature([4, 1, 2]).tolist()
        [2, 0, 0]
    """

    name = "modulo_by_feature"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=1):
        # Fixed dividend shared by every remainder computation.
        self.value = value
        self.description_template = (
            f"the remainder after dividing {self.value} by {{}}"
        )

    def get_function(self):
        dividend = self.value

        def remainder(vals):
            return dividend % vals

        return remainder

    def generate_name(self, base_feature_names):
        return f"{self.value} % {base_feature_names[0]}"
| 1,263 | 30.6 | 85 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/__init__.py | from featuretools.primitives.standard.transform.binary.add_numeric import AddNumeric
from featuretools.primitives.standard.transform.binary.add_numeric_scalar import (
AddNumericScalar,
)
from featuretools.primitives.standard.transform.binary.and_primitive import And
from featuretools.primitives.standard.transform.binary.divide_by_feature import (
DivideByFeature,
)
from featuretools.primitives.standard.transform.binary.divide_numeric import (
DivideNumeric,
)
from featuretools.primitives.standard.transform.binary.divide_numeric_scalar import (
DivideNumericScalar,
)
from featuretools.primitives.standard.transform.binary.equal import Equal
from featuretools.primitives.standard.transform.binary.equal_scalar import EqualScalar
from featuretools.primitives.standard.transform.binary.greater_than import GreaterThan
from featuretools.primitives.standard.transform.binary.greater_than_equal_to import (
GreaterThanEqualTo,
)
from featuretools.primitives.standard.transform.binary.greater_than_equal_to_scalar import (
GreaterThanEqualToScalar,
)
from featuretools.primitives.standard.transform.binary.greater_than_scalar import (
GreaterThanScalar,
)
from featuretools.primitives.standard.transform.binary.less_than import LessThan
from featuretools.primitives.standard.transform.binary.less_than_equal_to import (
LessThanEqualTo,
)
from featuretools.primitives.standard.transform.binary.less_than_equal_to_scalar import (
LessThanEqualToScalar,
)
from featuretools.primitives.standard.transform.binary.less_than_scalar import (
LessThanScalar,
)
from featuretools.primitives.standard.transform.binary.modulo_by_feature import (
ModuloByFeature,
)
from featuretools.primitives.standard.transform.binary.modulo_numeric import (
ModuloNumeric,
)
from featuretools.primitives.standard.transform.binary.modulo_numeric_scalar import (
ModuloNumericScalar,
)
from featuretools.primitives.standard.transform.binary.multiply_boolean import (
MultiplyBoolean,
)
from featuretools.primitives.standard.transform.binary.multiply_numeric import (
MultiplyNumeric,
)
from featuretools.primitives.standard.transform.binary.multiply_numeric_boolean import (
MultiplyNumericBoolean,
)
from featuretools.primitives.standard.transform.binary.multiply_numeric_scalar import (
MultiplyNumericScalar,
)
from featuretools.primitives.standard.transform.binary.not_equal import NotEqual
from featuretools.primitives.standard.transform.binary.not_equal_scalar import (
NotEqualScalar,
)
from featuretools.primitives.standard.transform.binary.or_primitive import Or
from featuretools.primitives.standard.transform.binary.scalar_subtract_numeric_feature import (
ScalarSubtractNumericFeature,
)
from featuretools.primitives.standard.transform.binary.subtract_numeric import (
SubtractNumeric,
)
from featuretools.primitives.standard.transform.binary.subtract_numeric_scalar import (
SubtractNumericScalar,
)
| 2,970 | 40.263889 | 95 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/multiply_numeric_boolean.py | import pandas.api.types as pdtypes
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class MultiplyNumericBoolean(TransformPrimitive):
    """Performs element-wise multiplication of a numeric list with a boolean list.

    Description:
        Given a list of numeric values X and a list of
        boolean values Y, return the values in X where
        the corresponding value in Y is True.

    Examples:
        >>> import pandas as pd
        >>> multiply_numeric_boolean = MultiplyNumericBoolean()
        >>> multiply_numeric_boolean([2, 1, 2], [True, True, False]).tolist()
        [2, 1, 0]
        >>> multiply_numeric_boolean([2, None, None], [True, True, False]).astype("float64").tolist()
        [2.0, nan, nan]
        >>> multiply_numeric_boolean([2, 1, 2], pd.Series([True, True, pd.NA], dtype="boolean")).tolist()
        [2, 1, <NA>]
    """

    name = "multiply_numeric_boolean"
    input_types = [
        [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(logical_type=Boolean),
        ],
        [
            ColumnSchema(semantic_tags={"numeric"}),
            ColumnSchema(logical_type=BooleanNullable),
        ],
        [
            ColumnSchema(logical_type=Boolean),
            ColumnSchema(semantic_tags={"numeric"}),
        ],
        [
            ColumnSchema(logical_type=BooleanNullable),
            ColumnSchema(semantic_tags={"numeric"}),
        ],
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK]
    commutative = True
    description_template = "the product of {} and {}"

    def get_function(self):
        def multiply_numeric_boolean(ser1, ser2):
            # Either argument may be the boolean column; detect it by dtype.
            if pdtypes.is_bool_dtype(ser1):
                mask, numeric = ser1, ser2
            else:
                mask, numeric = ser2, ser1
            # Cast to nullable Int64 so pd.NA propagates through the product.
            return numeric * mask.astype("Int64")

        return multiply_numeric_boolean

    def generate_name(self, base_feature_names):
        return "%s * %s" % (base_feature_names[0], base_feature_names[1])
| 2,306 | 33.432836 | 105 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/equal.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Equal(TransformPrimitive):
    """Determines if values in one list are equal to another list.

    Description:
        Given a list of values X and a list of values Y, determine
        whether each value in X is equal to each corresponding value
        in Y.

    Examples:
        >>> equal = Equal()
        >>> equal([2, 1, 2], [1, 2, 2]).tolist()
        [False, False, True]
    """

    name = "equal"
    input_types = [ColumnSchema(), ColumnSchema()]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    commutative = True
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} equals {}"

    def get_function(self):
        def equal(x_vals, y_vals):
            both_categorical = isinstance(
                x_vals.dtype,
                pd.CategoricalDtype,
            ) and isinstance(y_vals.dtype, pd.CategoricalDtype)
            if both_categorical:
                # Element-wise comparison of two categoricals requires both
                # sides to carry the same category set, so align them first.
                combined = set(x_vals.cat.categories) | set(y_vals.cat.categories)
                x_vals = x_vals.cat.add_categories(
                    combined - set(x_vals.cat.categories),
                )
                y_vals = y_vals.cat.add_categories(
                    combined - set(y_vals.cat.categories),
                )
            return x_vals.eq(y_vals)

        return equal

    def generate_name(self, base_feature_names):
        return "%s = %s" % (base_feature_names[0], base_feature_names[1])
| 1,741 | 33.156863 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/multiply_numeric_scalar.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class MultiplyNumericScalar(TransformPrimitive):
    """Multiplies each element in the list by a scalar.

    Description:
        Given a list of numeric values and a scalar, multiply
        each value in the list by the scalar.

    Examples:
        >>> multiply_numeric_scalar = MultiplyNumericScalar(value=2)
        >>> multiply_numeric_scalar([3, 1, 2]).tolist()
        [6, 2, 4]
    """

    name = "multiply_numeric_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=1):
        self.value = value
        self.description_template = "the product of {{}} and {}".format(self.value)

    def get_function(self):
        def multiply_scalar(column):
            # Scalar broadcast: multiplies every element by the stored value.
            return column * self.value

        return multiply_scalar

    def generate_name(self, base_feature_names):
        return "%s * %s" % (base_feature_names[0], str(self.value))
| 1,200 | 31.459459 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/scalar_subtract_numeric_feature.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class ScalarSubtractNumericFeature(TransformPrimitive):
    """Subtracts each value in the list from a given scalar.

    Description:
        Given a list of numeric values and a scalar, subtract
        each value from the scalar and return the list of
        differences.

    Examples:
        >>> scalar_subtract_numeric_feature = ScalarSubtractNumericFeature(value=2)
        >>> scalar_subtract_numeric_feature([3, 1, 2]).tolist()
        [-1, 1, 0]
    """

    name = "scalar_subtract_numeric_feature"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=0):
        self.value = value
        # Fixed wording: was "the result {} minus {{}}", which dropped "of"
        # and was inconsistent with SubtractNumericScalar's template.
        self.description_template = "the result of {} minus {{}}".format(self.value)

    def get_function(self):
        def scalar_subtract_numeric_feature(vals):
            # Reversed subtraction: scalar on the left, column on the right.
            return self.value - vals

        return scalar_subtract_numeric_feature

    def generate_name(self, base_feature_names):
        return "%s - %s" % (str(self.value), base_feature_names[0])
| 1,311 | 33.526316 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/modulo_numeric.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class ModuloNumeric(TransformPrimitive):
    """Performs element-wise modulo of two lists.

    Description:
        Given a list of values X and a list of values Y,
        determine the modulo, or remainder of each value in
        X after it's divided by its corresponding value in Y.

    Examples:
        >>> modulo_numeric = ModuloNumeric()
        >>> modulo_numeric([2, 1, 5], [1, 2, 2]).tolist()
        [0, 1, 1]
    """

    name = "modulo_numeric"
    input_types = [
        ColumnSchema(semantic_tags={"numeric"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the remainder after dividing {} by {}"

    def get_function(self):
        # np.mod is a vectorized ufunc; applied element-wise to both columns.
        return np.mod

    def generate_name(self, base_feature_names):
        # %% escapes the literal percent sign in the format string.
        return "{} % {}".format(base_feature_names[0], base_feature_names[1])
| 1,156 | 31.138889 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/subtract_numeric_scalar.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class SubtractNumericScalar(TransformPrimitive):
    """Subtracts a scalar from each element in the list.

    Description:
        Given a list of numeric values and a scalar, subtract
        the given scalar from each value in the list.

    Examples:
        >>> subtract_numeric_scalar = SubtractNumericScalar(value=2)
        >>> subtract_numeric_scalar([3, 1, 2]).tolist()
        [1, -1, 0]
    """

    name = "subtract_numeric_scalar"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

    def __init__(self, value=0):
        self.value = value
        self.description_template = "the result of {{}} minus {}".format(self.value)

    def get_function(self):
        def subtract_scalar(column):
            # Scalar broadcast: subtracts the stored value from every element.
            return column - self.value

        return subtract_scalar

    def generate_name(self, base_feature_names):
        return "%s - %s" % (base_feature_names[0], str(self.value))
| 1,211 | 31.756757 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/binary/subtract_numeric.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base.transform_primitive_base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class SubtractNumeric(TransformPrimitive):
    """Performs element-wise subtraction of two lists.

    Description:
        Given a list of values X and a list of values
        Y, determine the difference of each value
        in X from its corresponding value in Y.

    Args:
        commutative (bool): determines if Deep Feature Synthesis should
            generate both x - y and y - x, or just one. If True, there is no
            guarantee which of the two will be generated. Defaults to True.

    Notes:
        commutative is True by default since False would result in 2 perfectly
        correlated series.

    Examples:
        >>> subtract_numeric = SubtractNumeric()
        >>> subtract_numeric([2, 1, 2], [1, 2, 2]).tolist()
        [1, -1, 0]
    """

    name = "subtract_numeric"
    input_types = [
        ColumnSchema(semantic_tags={"numeric"}),
        ColumnSchema(semantic_tags={"numeric"}),
    ]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "the result of {} minus {}"
    commutative = True

    def __init__(self, commutative=True):
        # Instance-level flag overrides the class default when set to False.
        self.commutative = commutative

    def get_function(self):
        # np.subtract is a vectorized ufunc applied element-wise.
        return np.subtract

    def generate_name(self, base_feature_names):
        return "{} - {}".format(base_feature_names[0], base_feature_names[1])
| 1,581 | 31.285714 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/tangent.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Tangent(TransformPrimitive):
    """Computes the tangent of a number.

    Examples:
        >>> tan = Tangent()
        >>> tan([-np.pi, 0.0, np.pi/2.0]).tolist()
        [1.2246467991473532e-16, 0.0, 1.633123935319537e+16]
    """

    name = "tangent"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the tangent of {}"

    def get_function(self):
        def tangent(vals):
            # Element-wise tangent; inputs are interpreted as radians.
            return np.tan(vals)

        return tangent
| 785 | 29.230769 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/absolute.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Absolute(TransformPrimitive):
    """Computes the absolute value of a number.

    Examples:
        >>> absolute = Absolute()
        >>> absolute([3.0, -5.0, -2.4]).tolist()
        [3.0, 5.0, 2.4]
    """

    name = "absolute"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the absolute value of {}"

    def get_function(self):
        def absolute(vals):
            # Element-wise absolute value over the whole column.
            return np.absolute(vals)

        return absolute
| 710 | 27.44 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/diff.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
class Diff(TransformPrimitive):
    """Computes the difference between the value in a list and the
    previous value in that list.

    Args:
        periods (int): The number of periods by which to shift the index row.
            Default is 0. Periods correspond to rows.

    Description:
        Given a list of values, compute the difference from the previous
        item in the list. The result for the first element of the list will
        always be `NaN`.

    Examples:
        >>> diff = Diff()
        >>> values = [1, 10, 3, 4, 15]
        >>> diff(values).tolist()
        [nan, 9.0, -7.0, 1.0, 11.0]

        You can specify the number of periods to shift the values

        >>> values = [1, 2, 4, 7, 11, 16]
        >>> diff_periods = Diff(periods = 1)
        >>> diff_periods(values).tolist()
        [nan, nan, 1.0, 2.0, 3.0, 4.0]
    """

    name = "diff"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    uses_full_dataframe = True
    description_template = "the difference from the previous value of {}"

    def __init__(self, periods=0):
        self.periods = periods

    def get_function(self):
        def diff(column):
            # Shift first (a no-op when periods == 0), then take the
            # row-over-row difference of the shifted column.
            shifted = column.shift(self.periods)
            return shifted.diff()

        return diff
| 1,416 | 29.148936 | 77 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/negate.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Negate(TransformPrimitive):
"""Negates a numeric value.
Examples:
>>> negate = Negate()
>>> negate([1.0, 23.2, -7.0]).tolist()
[-1.0, -23.2, 7.0]
"""
name = "negate"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
description_template = "the negation of {}"
def get_function(self):
def negate(vals):
return vals * -1
return negate
def generate_name(self, base_feature_names):
return "-(%s)" % (base_feature_names[0])
| 812 | 26.1 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/sine.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Sine(TransformPrimitive):
    """Computes the sine of a number.

    Examples:
        >>> sin = Sine()
        >>> sin([-np.pi/2.0, 0.0, np.pi/2.0]).tolist()
        [-1.0, 0.0, 1.0]
    """

    name = "sine"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the sine of {}"

    def get_function(self):
        def sine(vals):
            # Element-wise sine; inputs are interpreted as radians.
            return np.sin(vals)

        return sine
| 738 | 27.423077 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/percentile.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import TransformPrimitive
class Percentile(TransformPrimitive):
    """Determines the percentile rank for each value in a list.

    Examples:
        >>> percentile = Percentile()
        >>> percentile([10, 15, 1, 20]).tolist()
        [0.5, 0.75, 0.25, 1.0]

        Nan values are ignored when determining rank

        >>> percentile([10, 15, 1, None, 20]).tolist()
        [0.5, 0.75, 0.25, nan, 1.0]
    """

    name = "percentile"
    uses_full_dataframe = True
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    description_template = "the percentile rank of {}"

    def get_function(self):
        def percentile(column):
            # pct=True rescales ranks to (0, 1]; NaNs rank as NaN.
            return column.rank(pct=True)

        return percentile
| 808 | 27.892857 | 63 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/cosine.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Cosine(TransformPrimitive):
    """Computes the cosine of a number.

    Examples:
        >>> cos = Cosine()
        >>> cos([0.0, np.pi/2.0, np.pi]).tolist()
        [1.0, 6.123233995736766e-17, -1.0]
    """

    name = "cosine"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the cosine of {}"

    def get_function(self):
        def cosine(vals):
            # Element-wise cosine; inputs are interpreted as radians.
            return np.cos(vals)

        return cosine
| 761 | 28.307692 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/__init__.py | from featuretools.primitives.standard.transform.numeric.absolute import Absolute
from featuretools.primitives.standard.transform.numeric.cosine import Cosine
from featuretools.primitives.standard.transform.numeric.diff import Diff
from featuretools.primitives.standard.transform.numeric.natural_logarithm import (
NaturalLogarithm,
)
from featuretools.primitives.standard.transform.numeric.negate import Negate
from featuretools.primitives.standard.transform.numeric.percentile import Percentile
from featuretools.primitives.standard.transform.numeric.rate_of_change import (
RateOfChange,
)
from featuretools.primitives.standard.transform.numeric.same_as_previous import (
SameAsPrevious,
)
from featuretools.primitives.standard.transform.numeric.sine import Sine
from featuretools.primitives.standard.transform.numeric.square_root import SquareRoot
from featuretools.primitives.standard.transform.numeric.tangent import Tangent
| 942 | 51.388889 | 85 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/same_as_previous.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable
from featuretools.primitives.base import TransformPrimitive
class SameAsPrevious(TransformPrimitive):
    """Determines if a value is equal to the previous value in a list.

    Description:
        Compares a value in a list to the previous value and returns True if
        the value is equal to the previous value or False otherwise. The
        first item in the output will always be False, since there is no previous
        element for the first element comparison.

        Any nan values in the input will be filled using either a forward-fill
        or backward-fill method, specified by the fill_method argument. The number
        of consecutive nan values that get filled can be limited with the limit
        argument. Any nan values left after filling will result in False being
        returned for any comparison involving the nan value.

    Args:
        fill_method (str): Method for filling gaps in series. Valid
            options are `backfill`, `bfill`, `pad`, `ffill`.
            `pad / ffill`: fill gap with last valid observation.
            `backfill / bfill`: fill gap with next valid observation.
            Default is `pad`.
        limit (int): The max number of consecutive NaN values in a gap that
            can be filled. Default is None.

    Examples:
        >>> same_as_previous = SameAsPrevious()
        >>> same_as_previous([1, 2, 2, 4]).tolist()
        [False, False, True, False]

        The fill method for nan values can be specified

        >>> same_as_previous_fillna = SameAsPrevious(fill_method="bfill")
        >>> same_as_previous_fillna([1, None, 2, 4]).tolist()
        [False, False, True, False]

        The number of nan values that are filled can be limited

        >>> same_as_previous_limitfill = SameAsPrevious(limit=2)
        >>> same_as_previous_limitfill([1, None, None, None, 2, 3]).tolist()
        [False, True, True, False, False, False]
    """

    name = "same_as_previous"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(BooleanNullable)

    def __init__(self, fill_method="pad", limit=None):
        if fill_method not in ["backfill", "bfill", "pad", "ffill"]:
            raise ValueError("Invalid fill_method")
        self.fill_method = fill_method
        self.limit = limit

    def get_function(self):
        def same_as_previous(x):
            # Use ffill/bfill directly: Series.fillna(method=...) is
            # deprecated in pandas 2.x; behavior is identical.
            if self.fill_method in ("pad", "ffill"):
                x = x.ffill(limit=self.limit)
            else:
                x = x.bfill(limit=self.limit)
            x = x.eq(x.shift())
            # first value will always be false, since there is no previous value
            x.iloc[0] = False
            return x

        return same_as_previous
| 2,723 | 38.478261 | 82 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/rate_of_change.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Double
from featuretools.primitives.base import TransformPrimitive
class RateOfChange(TransformPrimitive):
    """Computes the rate of change of a value per second.

    Examples:
        >>> import pandas as pd
        >>> rate_of_change = RateOfChange()
        >>> times = pd.date_range(start='2019-01-01', freq='1min', periods=5)
        >>> results = rate_of_change([0, 30, 180, -90, 0], times).tolist()
        >>> results = [round(x, 2) for x in results]
        >>> results
        [nan, 0.5, 2.5, -4.5, 1.5]
    """

    name = "rate_of_change"
    input_types = [
        ColumnSchema(semantic_tags={"numeric"}),
        ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"}),
    ]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True
    description_template = "the rate of change of {} per second"

    def get_function(self):
        def rate_of_change(values, time):
            # Row-over-row change in value divided by the elapsed seconds
            # between consecutive timestamps; first row is NaN.
            elapsed_seconds = time.diff().dt.total_seconds()
            return values.diff() / elapsed_seconds

        return rate_of_change
| 1,217 | 32.833333 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/square_root.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class SquareRoot(TransformPrimitive):
    """Computes the square root of a number.

    Examples:
        >>> sqrt = SquareRoot()
        >>> sqrt([9.0, 16.0, 4.0]).tolist()
        [3.0, 4.0, 2.0]
    """

    name = "square_root"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the square root of {}"

    def get_function(self):
        def square_root(vals):
            # Element-wise square root over the whole column.
            return np.sqrt(vals)

        return square_root
| 761 | 28.307692 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/numeric/natural_logarithm.py | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class NaturalLogarithm(TransformPrimitive):
    """Computes the natural logarithm of a number.

    Examples:
        >>> log = NaturalLogarithm()
        >>> results = log([1.0, np.e]).tolist()
        >>> results = [round(x, 2) for x in results]
        >>> results
        [0.0, 1.0]
    """

    name = "natural_logarithm"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the natural logarithm of {}"

    def get_function(self):
        def natural_logarithm(vals):
            # Element-wise base-e logarithm over the whole column.
            return np.log(vals)

        return natural_logarithm
| 861 | 29.785714 | 78 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/exponential/exponential_weighted_average.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
class ExponentialWeightedAverage(TransformPrimitive):
    """Computes the exponentially weighted moving average for a series of numbers

    Description:
        Returns the exponentially weighted moving average for a series of
        numbers. Exactly one of center of mass (com), span, half-life, and
        alpha must be provided. Missing values can be ignored when calculating
        weights by setting 'ignore_na' to True.

    Args:
        com (float): Specify decay in terms of center of mass for com >= 0.
            Default is None.
        span (float): Specify decay in terms of span for span >= 1.
            Default is None.
        halflife (float): Specify decay in terms of half-life for halflife > 0.
            Default is None.
        alpha (float): Specify smoothing factor alpha directly. Alpha should be
            greater than 0 and less than or equal to 1. Default is None.
        ignore_na (bool): Ignore missing values when calculating weights.
            Default is False.

    Examples:
        >>> exponential_weighted_average = ExponentialWeightedAverage(com=0.5)
        >>> exponential_weighted_average([1, 2, 3, 4]).tolist()
        [1.0, 1.75, 2.615384615384615, 3.55]

        Missing values can be ignored

        >>> ewma_ignorena = ExponentialWeightedAverage(com=0.5, ignore_na=True)
        >>> ewma_ignorena([1, 2, 3, None, 4]).tolist()
        [1.0, 1.75, 2.615384615384615, 2.615384615384615, 3.55]
    """

    name = "exponential_weighted_average"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, com=None, span=None, halflife=None, alpha=None, ignore_na=False):
        # When no decay parameter is supplied, default to com=0.5 so the
        # underlying pandas ewm call always has exactly one decay setting.
        if com is None and span is None and halflife is None and alpha is None:
            com = 0.5
        self.com = com
        self.span = span
        self.halflife = halflife
        self.alpha = alpha
        self.ignore_na = ignore_na

    def get_function(self):
        def exponential_weighted_average(numeric):
            window = numeric.ewm(
                com=self.com,
                span=self.span,
                halflife=self.halflife,
                alpha=self.alpha,
                ignore_na=self.ignore_na,
            )
            return window.mean()

        return exponential_weighted_average
| 2,492 | 35.661765 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/exponential/exponential_weighted_variance.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
class ExponentialWeightedVariance(TransformPrimitive):
    """Computes the exponentially weighted moving variance for a series of numbers

    Description:
        Returns the exponentially weighted moving variance for a series of
        numbers. Exactly one of center of mass (com), span, half-life, and
        alpha must be provided. Missing values can be ignored when calculating
        weights by setting 'ignore_na' to True.

    Args:
        com (float): Specify decay in terms of center of mass for com >= 0.
            Default is None.
        span (float): Specify decay in terms of span for span >= 1.
            Default is None.
        halflife (float): Specify decay in terms of half-life for halflife > 0.
            Default is None.
        alpha (float): Specify smoothing factor alpha directly. Alpha should be
            greater than 0 and less than or equal to 1. Default is None.
        ignore_na (bool): Ignore missing values when calculating weights.
            Default is False.

    Examples:
        >>> exponential_weighted_variance = ExponentialWeightedVariance(com=0.5)
        >>> exponential_weighted_variance([1, 2, 3, 4]).tolist()
        [nan, 0.49999999999999983, 0.8461538461538459, 1.1230769230769233]

        Missing values can be ignored

        >>> ewmv_ignorena = ExponentialWeightedVariance(com=0.5, ignore_na=True)
        >>> ewmv_ignorena([1, 2, 3, None, 4]).tolist()
        [nan, 0.49999999999999983, 0.8461538461538459, 0.8461538461538459, 1.1230769230769233]
    """

    name = "exponential_weighted_variance"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, com=None, span=None, halflife=None, alpha=None, ignore_na=False):
        # When no decay parameter is supplied, default to com=0.5 so the
        # underlying pandas ewm call always has exactly one decay setting.
        if all(x is None for x in [com, span, halflife, alpha]):
            com = 0.5
        self.com = com
        self.span = span
        self.halflife = halflife
        self.alpha = alpha
        self.ignore_na = ignore_na

    def get_function(self):
        # Renamed inner function: it was previously (and misleadingly)
        # called exponential_weighted_average in this variance primitive.
        def exponential_weighted_variance(x):
            return x.ewm(
                com=self.com,
                span=self.span,
                halflife=self.halflife,
                alpha=self.alpha,
                ignore_na=self.ignore_na,
            ).var()

        return exponential_weighted_variance
| 2,561 | 36.130435 | 94 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/exponential/__init__.py | from featuretools.primitives.standard.transform.exponential.exponential_weighted_average import (
ExponentialWeightedAverage,
)
from featuretools.primitives.standard.transform.exponential.exponential_weighted_std import (
ExponentialWeightedSTD,
)
from featuretools.primitives.standard.transform.exponential.exponential_weighted_variance import (
ExponentialWeightedVariance,
)
| 390 | 38.1 | 98 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/exponential/exponential_weighted_std.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Double
from featuretools.primitives.base import TransformPrimitive
class ExponentialWeightedSTD(TransformPrimitive):
    """Computes the exponentially weighted moving standard deviation for
    a series of numbers

    Description:
        Returns the exponentially weighted moving standard deviation for a
        series of numbers. Exactly one of center of mass (com), span,
        half-life, and alpha must be provided. Missing values can be ignored
        when calculating weights by setting 'ignore_na' to True.

    Args:
        com (float): Specify decay in terms of center of mass for com >= 0.
            Default is None.
        span (float): Specify decay in terms of span for span >= 1.
            Default is None.
        halflife (float): Specify decay in terms of half-life for halflife > 0.
            Default is None.
        alpha (float): Specify smoothing factor alpha directly. Alpha should be
            greater than 0 and less than or equal to 1. Default is None.
        ignore_na (bool): Ignore missing values when calculating weights.
            Default is False.

    Examples:
        >>> exponential_weighted_std = ExponentialWeightedSTD(com=0.5)
        >>> exponential_weighted_std([1, 2, 3, 7]).tolist()
        [nan, 0.7071067811865475, 0.9198662110077998, 2.9852200022005855]

        Missing values can be ignored

        >>> ewmstd_ignorena = ExponentialWeightedSTD(com=0.5, ignore_na=True)
        >>> ewmstd_ignorena([1, 2, 3, None, 7]).tolist()
        [nan, 0.7071067811865475, 0.9198662110077998, 0.9198662110077998, 2.9852200022005855]
    """

    name = "exponential_weighted_std"
    input_types = [ColumnSchema(semantic_tags={"numeric"})]
    return_type = ColumnSchema(logical_type=Double, semantic_tags={"numeric"})
    uses_full_dataframe = True

    def __init__(self, com=None, span=None, halflife=None, alpha=None, ignore_na=False):
        # When no decay parameter is supplied, default to com=0.5 so the
        # underlying pandas ewm call always has exactly one decay setting.
        if com is None and span is None and halflife is None and alpha is None:
            com = 0.5
        self.com = com
        self.span = span
        self.halflife = halflife
        self.alpha = alpha
        self.ignore_na = ignore_na

    def get_function(self):
        def exponential_weighted_std(numeric):
            window = numeric.ewm(
                com=self.com,
                span=self.span,
                halflife=self.halflife,
                alpha=self.alpha,
                ignore_na=self.ignore_na,
            )
            return window.std()

        return exponential_weighted_std
| 2,549 | 35.428571 | 93 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/url/url_to_tld.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import URL, Categorical
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.common_tld_utils import COMMON_TLDS
class URLToTLD(TransformPrimitive):
    """Determines the top level domain of a url.

    Description:
        Extract the top level domain of a url, using regex,
        and a list of common top level domains. Returns nan if
        the url is invalid or null.
        Common top level domains were pulled from this list:
        https://www.hayksaakian.com/most-popular-tlds/

    Examples:
        >>> url_to_tld = URLToTLD()
        >>> urls = ['https://www.google.com', 'http://www.google.co.in',
        ...         'www.facebook.com']
        >>> url_to_tld(urls).to_list()
        ['com', 'in', 'com']
    """

    name = "url_to_tld"
    input_types = [ColumnSchema(logical_type=URL)]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})

    def get_function(self):
        # Alternation over the known TLD list, e.g. r"(?:\.(com|net|...))".
        self.tlds_pattern = r"(?:\.({}))".format("|".join(COMMON_TLDS))

        def url_to_domain(urls):
            # Strip protocol, credentials, and a leading "www." to isolate
            # the network domain portion of each url.
            domain_regex = r"^(?:https?:\/\/)?(?:[^@\/\n]+@)?(?:www\.)?([^:\/?\n]+)"
            return urls.str.extract(domain_regex, expand=False)

        def url_to_tld(urls):
            domains = url_to_domain(urls)
            # extractall yields every TLD-looking segment per domain; the
            # last match per row (level 0) is the actual top level domain.
            candidates = domains.str.extractall(self.tlds_pattern)
            last_matches = candidates.groupby(level=0).last()[0]
            # Reindex so rows without any match come back as nan.
            return last_matches.reindex(urls.index)

        return url_to_tld
| 1,507 | 33.272727 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/url/url_to_domain.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import URL, Categorical
from featuretools.primitives.base import TransformPrimitive
class URLToDomain(TransformPrimitive):
    """Determines the domain of a url.

    Description:
        Extracts the label identifying the network domain of a URL.
        Works for urls with or without a protocol prefix as well as
        international country domains.

    Examples:
        >>> url_to_domain = URLToDomain()
        >>> urls = ['https://play.google.com',
        ...         'http://www.google.co.in',
        ...         'www.facebook.com']
        >>> url_to_domain(urls).tolist()
        ['play.google.com', 'google.co.in', 'facebook.com']
    """

    name = "url_to_domain"
    input_types = [ColumnSchema(logical_type=URL)]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})

    def get_function(self):
        domain_pattern = r"^(?:https?:\/\/)?(?:[^@\/\n]+@)?(?:www\.)?([^:\/?\n]+)"

        def url_to_domain(urls):
            # Capture everything after an optional protocol, credentials,
            # and "www." prefix, up to the first ":", "/", or "?".
            return urls.str.extract(domain_pattern, expand=False)

        return url_to_domain
| 1,098 | 32.30303 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/url/__init__.py | from featuretools.primitives.standard.transform.url.url_to_domain import URLToDomain
from featuretools.primitives.standard.transform.url.url_to_protocol import URLToProtocol
from featuretools.primitives.standard.transform.url.url_to_tld import URLToTLD
| 253 | 62.5 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/url/url_to_protocol.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import URL, Categorical
from featuretools.primitives.base import TransformPrimitive
class URLToProtocol(TransformPrimitive):
    """Determines the protocol (http or https) of a url.

    Description:
        Pulls the leading protocol out of each url with a regular
        expression; the result is either "https" or "http". Urls that
        do not start with a protocol produce nan.

    Examples:
        >>> url_to_protocol = URLToProtocol()
        >>> urls = ['https://play.google.com',
        ...         'http://www.google.co.in',
        ...         'www.facebook.com']
        >>> url_to_protocol(urls).to_list()
        ['https', 'http', nan]
    """

    name = "url_to_protocol"
    input_types = [ColumnSchema(logical_type=URL)]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})

    def get_function(self):
        protocol_pattern = r"^(https|http)(?:\:)"

        def url_to_protocol(urls):
            # extract() yields the captured group per row and NaN where
            # the pattern does not match (no protocol present).
            return urls.str.extract(protocol_pattern, expand=False)

        return url_to_protocol
| 1,059 | 30.176471 | 84 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/postal/two_digit_postal_code.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, PostalCode
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class TwoDigitPostalCode(TransformPrimitive):
    """Returns the two digit prefix of a given postal code.

    Description:
        Given a list of postal codes, returns the two digit prefix for
        each postal code. Missing codes come back as pd.NA.

    Examples:
        >>> two_digit_postal_code = TwoDigitPostalCode()
        >>> two_digit_postal_code(['92432', '34514']).tolist()
        ['92', '34']
    """

    name = "two_digit_postal_code"
    input_types = [ColumnSchema(logical_type=PostalCode)]
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})
    description_template = "The two digit postal code prefix of {}"

    def get_function(self):
        def two_digit_postal_code(postal_codes):
            # Stringify each non-null code and keep its first two
            # characters; nulls propagate as pd.NA.
            return postal_codes.apply(
                lambda pc: str(pc)[:2] if pd.notna(pc) else pd.NA,
            )

        return two_digit_postal_code
| 1,212 | 33.657143 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/postal/one_digit_postal_code.py | import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, PostalCode
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class OneDigitPostalCode(TransformPrimitive):
    """Returns the one digit prefix of a given postal code.

    Description:
        Given a list of postal codes, returns the one digit prefix for
        each postal code. Missing codes come back as pd.NA.

    Examples:
        >>> one_digit_postal_code = OneDigitPostalCode()
        >>> one_digit_postal_code(['92432', '34514']).tolist()
        ['9', '3']
    """

    name = "one_digit_postal_code"
    input_types = [ColumnSchema(logical_type=PostalCode)]
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    return_type = ColumnSchema(logical_type=Categorical, semantic_tags={"category"})
    description_template = "The one digit postal code prefix of {}"

    def get_function(self):
        def one_digit_postal_code(postal_codes):
            # Stringify each non-null code and keep its first character;
            # nulls propagate as pd.NA.
            return postal_codes.apply(
                lambda pc: str(pc)[0] if pd.notna(pc) else pd.NA,
            )

        return one_digit_postal_code
| 1,209 | 33.571429 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/postal/__init__.py | from featuretools.primitives.standard.transform.postal.one_digit_postal_code import (
OneDigitPostalCode,
)
from featuretools.primitives.standard.transform.postal.two_digit_postal_code import (
TwoDigitPostalCode,
)
| 224 | 31.142857 | 85 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/second.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Second(TransformPrimitive):
    """Extracts the seconds component (0-59) of a datetime.

    Examples:
        >>> from datetime import datetime
        >>> dates = [datetime(2019, 3, 1),
        ...          datetime(2019, 3, 3, 11, 10, 50),
        ...          datetime(2019, 3, 31, 19, 45, 15)]
        >>> second = Second()
        >>> second(dates).tolist()
        [0, 50, 15]
    """

    name = "second"
    input_types = [ColumnSchema(logical_type=Datetime)]
    # Seconds form an ordered category covering the 60 values 0-59.
    return_type = ColumnSchema(
        logical_type=Ordinal(order=list(range(60))),
        semantic_tags={"category"},
    )
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the seconds value of {}"

    def get_function(self):
        def second(datetimes):
            # The .dt accessor exposes the seconds component directly.
            return datetimes.dt.second

        return second
| 1,037 | 28.657143 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_first_week_of_month.py | import numpy as np
import pandas as pd
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
class IsFirstWeekOfMonth(TransformPrimitive):
    """Determines if a date falls in the first week of the month.

    Description:
        Converts a datetime to a boolean indicating if the date
        falls in the first week of the month. The first week of
        the month starts on day 1, and the week number is incremented
        each Sunday.

    Examples:
        >>> from datetime import datetime
        >>> is_first_week_of_month = IsFirstWeekOfMonth()
        >>> times = [datetime(2019, 3, 1),
        ...          datetime(2019, 3, 3),
        ...          datetime(2019, 3, 31),
        ...          datetime(2019, 3, 30)]
        >>> is_first_week_of_month(times).tolist()
        [True, False, False, False]
    """

    name = "is_first_week_of_month"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=BooleanNullable)

    def get_function(self):
        def is_first_week_of_month(x):
            # Work in a DataFrame so all intermediate columns align by row.
            df = pd.DataFrame({"date": x})
            # First calendar day of each date's month.
            df["first_day"] = df.date - pd.to_timedelta(df["date"].dt.day - 1, unit="d")
            # Day of month (1-based).
            df["dom"] = df.date.dt.day
            # Weekday of the month's first day (Monday=0 ... Sunday=6).
            df["first_day_weekday"] = df.first_day.dt.weekday
            # Shift day-of-month so weeks break on Sunday: days before the
            # first Sunday stay inside "week 1" after the /7 ceiling below.
            df["adjusted_dom"] = df.dom + df.first_day_weekday + 1
            # When the month itself starts on a Sunday, no shift is needed.
            df.loc[df["first_day_weekday"].astype(float) == 6.0, "adjusted_dom"] = df[
                "dom"
            ]
            # Week number 1 <=> the date is in the first week of the month.
            df["is_first_week"] = np.ceil(df.adjusted_dom / 7.0) == 1.0
            # Null input dates must yield NaN, which requires widening the
            # boolean column to object dtype first.
            if df["date"].isnull().values.any():
                df["is_first_week"] = df["is_first_week"].astype("object")
                df.loc[df["date"].isnull(), "is_first_week"] = np.nan
            return df.is_first_week.values

        return is_first_week_of_month
| 1,937 | 37.76 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_year_end.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsYearEnd(TransformPrimitive):
    """Determines if a date falls on the end of a year.

    Missing values come back as False (see the example below).

    Examples:
        >>> import numpy as np
        >>> from datetime import datetime
        >>> dates = [datetime(2019, 12, 31),
        ...          datetime(2019, 1, 1),
        ...          datetime(2019, 11, 30),
        ...          np.nan]
        >>> is_year_end = IsYearEnd()
        >>> is_year_end(dates).tolist()
        [True, False, False, False]
    """

    name = "is_year_end"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} occurred on the end of a year"

    def get_function(self):
        def is_year_end(datetimes):
            # Delegate to the datetime accessor's year-end flag.
            return datetimes.dt.is_year_end

        return is_year_end
| 1,092 | 31.147059 | 69 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/year.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Year(TransformPrimitive):
    """Extracts the year component of a datetime.

    Examples:
        >>> from datetime import datetime
        >>> dates = [datetime(2019, 3, 1),
        ...          datetime(2048, 6, 17, 11, 10, 50),
        ...          datetime(1950, 11, 30, 19, 45, 15)]
        >>> year = Year()
        >>> year(dates).tolist()
        [2019, 2048, 1950]
    """

    name = "year"
    input_types = [ColumnSchema(logical_type=Datetime)]
    # Years are treated as an ordered category over 1..2999.
    return_type = ColumnSchema(
        logical_type=Ordinal(order=list(range(1, 3000))),
        semantic_tags={"category"},
    )
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the year of {}"

    def get_function(self):
        def year(datetimes):
            # The .dt accessor exposes the year component directly.
            return datetimes.dt.year

        return year
| 1,023 | 28.257143 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/minute.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Minute(TransformPrimitive):
    """Extracts the minutes component (0-59) of a datetime.

    Examples:
        >>> from datetime import datetime
        >>> dates = [datetime(2019, 3, 1),
        ...          datetime(2019, 3, 3, 11, 10, 50),
        ...          datetime(2019, 3, 31, 19, 45, 15)]
        >>> minute = Minute()
        >>> minute(dates).tolist()
        [0, 10, 45]
    """

    name = "minute"
    input_types = [ColumnSchema(logical_type=Datetime)]
    # Minutes form an ordered category covering the 60 values 0-59.
    return_type = ColumnSchema(
        logical_type=Ordinal(order=list(range(60))),
        semantic_tags={"category"},
    )
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the minutes value of {}"

    def get_function(self):
        def minute(datetimes):
            # The .dt accessor exposes the minutes component directly.
            return datetimes.dt.minute

        return minute
| 1,037 | 28.657143 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/month.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Month(TransformPrimitive):
    """Extracts the month component (1-12) of a datetime.

    Examples:
        >>> from datetime import datetime
        >>> dates = [datetime(2019, 3, 1),
        ...          datetime(2019, 6, 17, 11, 10, 50),
        ...          datetime(2019, 11, 30, 19, 45, 15)]
        >>> month = Month()
        >>> month(dates).tolist()
        [3, 6, 11]
    """

    name = "month"
    input_types = [ColumnSchema(logical_type=Datetime)]
    # Months form an ordered category over 1..12.
    return_type = ColumnSchema(
        logical_type=Ordinal(order=list(range(1, 13))),
        semantic_tags={"category"},
    )
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the month of {}"

    def get_function(self):
        def month(datetimes):
            # The .dt accessor exposes the month component directly.
            return datetimes.dt.month

        return month
| 1,023 | 28.257143 | 65 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/quarter.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Datetime, Ordinal
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Quarter(TransformPrimitive):
    """Determines the quarter a datetime column falls into (1, 2, 3, 4)

    Examples:
        >>> from datetime import datetime
        >>> dates = [datetime(2019,12,1),
        ...          datetime(2019,1,3),
        ...          datetime(2020,2,1)]
        >>> q = Quarter()
        >>> q(dates).tolist()
        [4, 1, 1]
    """

    name = "quarter"
    input_types = [ColumnSchema(logical_type=Datetime)]
    # Quarters form an ordered category over 1..4.
    return_type = ColumnSchema(
        logical_type=Ordinal(order=list(range(1, 5))),
        semantic_tags={"category"},
    )
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "the quarter that describes {}"

    def get_function(self):
        def quarter(datetimes):
            # The .dt accessor exposes the quarter directly.
            return datetimes.dt.quarter

        return quarter
| 1,030 | 28.457143 | 71 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_lunch_time.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsLunchTime(TransformPrimitive):
    """Determines if a datetime falls during configurable lunch hour, on a 24-hour clock.

    Args:
        lunch_hour (int): Hour when lunch is taken. Must adhere to 24-hour clock. Defaults to 12.

    Examples:
        >>> import numpy as np
        >>> from datetime import datetime
        >>> dates = [datetime(2022, 6, 21, 12, 3, 3),
        ...          datetime(2019, 1, 3, 4, 4, 4),
        ...          datetime(2022, 1, 1, 11, 1, 2),
        ...          np.nan]
        >>> is_lunch_time = IsLunchTime()
        >>> is_lunch_time(dates).tolist()
        [True, False, False, False]

        >>> is_lunch_time = IsLunchTime(11)
        >>> is_lunch_time(dates).tolist()
        [False, False, True, False]
    """

    name = "is_lunch_time"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} falls during lunch time"

    def __init__(self, lunch_hour=12):
        # Hour of the day (24-hour clock) that counts as lunch time.
        self.lunch_hour = lunch_hour

    def get_function(self):
        def is_lunch_time(datetimes):
            # Missing datetimes compare unequal to any hour, so they
            # come back as False (see the doctest above).
            return datetimes.dt.hour == self.lunch_hour

        return is_lunch_time
| 1,480 | 33.44186 | 97 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/age.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import AgeFractional, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class Age(TransformPrimitive):
    """Calculates the age in years as a floating point number given a
    date of birth.

    Description:
        Age in years is computed by calculating the number of days between
        the date of birth and the reference time and dividing the result
        by 365.

    Examples:
        Determine the age of three people as of Jan 1, 2019

        >>> import pandas as pd
        >>> reference_date = pd.to_datetime("01-01-2019")
        >>> age = Age()
        >>> input_ages = [pd.to_datetime("01-01-2000"),
        ...               pd.to_datetime("05-30-1983"),
        ...               pd.to_datetime("10-17-1997")]
        >>> age(input_ages, time=reference_date).tolist()
        [19.013698630136986, 35.61643835616438, 21.221917808219178]
    """

    name = "age"
    input_types = [ColumnSchema(logical_type=Datetime, semantic_tags={"date_of_birth"})]
    return_type = ColumnSchema(logical_type=AgeFractional, semantic_tags={"numeric"})
    # The reference time is injected as the ``time`` keyword at
    # calculation time.
    uses_calc_time = True
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "the age from {}"

    def get_function(self):
        def age(birth_dates, time=None):
            # Whole days between the reference time and birth date,
            # expressed in (non-leap) 365-day years.
            return (time - birth_dates).dt.days / 365

        return age
| 1,454 | 34.487805 | 88 | py |
featuretools | featuretools-main/featuretools/primitives/standard/transform/datetime/is_weekend.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import BooleanNullable, Datetime
from featuretools.primitives.base import TransformPrimitive
from featuretools.utils.gen_utils import Library
class IsWeekend(TransformPrimitive):
    """Determines if a date falls on a weekend.

    Examples:
        >>> from datetime import datetime
        >>> dates = [datetime(2019, 3, 1),
        ...          datetime(2019, 6, 17, 11, 10, 50),
        ...          datetime(2019, 11, 30, 19, 45, 15)]
        >>> is_weekend = IsWeekend()
        >>> is_weekend(dates).tolist()
        [False, False, True]
    """

    name = "is_weekend"
    input_types = [ColumnSchema(logical_type=Datetime)]
    return_type = ColumnSchema(logical_type=BooleanNullable)
    compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]
    description_template = "whether {} occurred on a weekend"

    def get_function(self):
        def is_weekend(datetimes):
            # pandas weekday: Monday=0 ... Sunday=6, so >4 means Sat/Sun.
            return datetimes.dt.weekday > 4

        return is_weekend
| 1,027 | 31.125 | 65 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.