repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_mean_characters_per_word.py | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import MeanCharactersPerWord
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestMeanCharactersPerWord(PrimitiveTestBase):
primitive = MeanCharactersPerWord
def test_sentences(self):
x = pd.Series(
[
"This is a test file",
"This is second line",
"third line $1,000",
"and subsequent lines",
"and more",
],
)
primitive_func = self.primitive().get_function()
answers = pd.Series([3.0, 4.0, 5.0, 6.0, 3.5])
pd.testing.assert_series_equal(primitive_func(x), answers, check_names=False)
def test_punctuation(self):
x = pd.Series(
[
"This: is a test file",
"This, is second line?",
"third/line $1,000;",
"and--subsequen't lines...",
"*and, more..",
],
)
primitive_func = self.primitive().get_function()
answers = pd.Series([3.0, 4.0, 8.0, 10.5, 4.0])
pd.testing.assert_series_equal(primitive_func(x), answers, check_names=False)
def test_multiline(self):
x = pd.Series(
[
"This is a test file",
"This is second line\nthird line $1000;\nand subsequent lines",
"and more",
],
)
primitive_func = self.primitive().get_function()
answers = pd.Series([3.0, 4.8, 3.5])
pd.testing.assert_series_equal(primitive_func(x), answers, check_names=False)
@pytest.mark.parametrize(
"na_value",
[None, np.nan, pd.NA],
)
def test_nans(self, na_value):
x = pd.Series([na_value, "", "third line"])
primitive_func = self.primitive().get_function()
answers = pd.Series([np.nan, 0, 4.5])
pd.testing.assert_series_equal(primitive_func(x), answers, check_names=False)
@pytest.mark.parametrize(
"na_value",
[None, np.nan, pd.NA],
)
def test_all_nans(self, na_value):
x = pd.Series([na_value, na_value, na_value])
primitive_func = self.primitive().get_function()
answers = pd.Series([np.nan, np.nan, np.nan])
pd.testing.assert_series_equal(primitive_func(x), answers, check_names=False)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive()
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
| 2,737 | 32.802469 | 85 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_num_words.py | import numpy as np
import pandas as pd
from featuretools.primitives import NumWords
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestNumWords(PrimitiveTestBase):
primitive = NumWords
def test_general(self):
x = pd.Series(
[
"test test test test",
"test TEST test TEST,test test test",
"and subsequent lines...",
],
)
expected = pd.Series([4, 6, 3])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_special_characters_and_whitespace(self):
x = pd.Series(["50% 50 50% \t\t\t\n\n", "$5,3040 a test* test"])
expected = pd.Series([3, 4])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_unicode_input(self):
x = pd.Series(
[
"Ángel Angel Ángel ángel",
],
)
expected = pd.Series([4])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_contractions(self):
x = pd.Series(
[
"can't won't don't can't aren't won't don't they'd there's",
],
)
expected = pd.Series([9])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_multiple_spaces(self):
x = pd.Series(
[
" word word word word .",
"This is \nthird line \nthird line",
],
)
expected = pd.Series([4, 6])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_null(self):
x = pd.Series([np.nan, pd.NA, None, "This is a test file."])
actual = self.primitive().get_function()(x)
expected = pd.Series([pd.NA, pd.NA, pd.NA, 5])
pd.testing.assert_series_equal(
actual,
expected,
check_names=False,
check_dtype=False,
)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive()
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
| 2,630 | 31.8875 | 76 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_total_word_length.py | import numpy as np
import pandas as pd
from featuretools.primitives import TotalWordLength
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestTotalWordLength(PrimitiveTestBase):
primitive = TotalWordLength
def test_delimiter_override(self):
x = pd.Series(
["This is a test file.", "This,is,second,line?", "and;subsequent;lines..."],
)
expected = pd.Series([16, 17, 21])
actual = self.primitive("[ ,;]").get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_multiline(self):
x = pd.Series(
[
"This is a test file.",
"This is second line\nthird line $1000;\nand subsequent lines",
],
)
expected = pd.Series([15, 47])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_null(self):
x = pd.Series([np.nan, pd.NA, None, "This is a test file."])
expected = pd.Series([np.nan, np.nan, np.nan, 15])
actual = self.primitive().get_function()(x).astype(float)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive()
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
| 1,590 | 32.145833 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_median_word_length.py | import numpy as np
import pandas as pd
from featuretools.primitives import MedianWordLength
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestMedianWordLength(PrimitiveTestBase):
primitive = MedianWordLength
def test_delimiter_override(self):
x = pd.Series(
["This is a test file.", "This,is,second,line?", "and;subsequent;lines..."],
)
expected = pd.Series([4.0, 4.5, 8.0])
actual = self.primitive("[ ,;]").get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_multiline(self):
x = pd.Series(
[
"This is a test file.",
"This is second line\nthird line $1000;\nand subsequent lines",
],
)
expected = pd.Series([4.0, 4.5])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_null(self):
x = pd.Series([np.nan, pd.NA, None, "This is a test file."])
actual = self.primitive().get_function()(x)
expected = pd.Series([np.nan, np.nan, np.nan, 4.0])
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive()
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
| 1,585 | 32.041667 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_whitespace_count.py | import numpy as np
import pandas as pd
from featuretools.primitives import WhitespaceCount
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestWhitespaceCount(PrimitiveTestBase):
primitive = WhitespaceCount
def compare(self, primitive_initiated, test_cases, answers):
primitive_func = primitive_initiated.get_function()
primitive_answers = primitive_func(test_cases)
return np.testing.assert_array_equal(answers, primitive_answers)
def test_strings(self):
x = pd.Series(
["", "hi im ethan!", "consecutive. spaces.", " spaces-on-ends "],
)
answers = [0, 2, 4, 2]
self.compare(self.primitive(), x, answers)
def test_nan(self):
x = pd.Series([np.nan, None, pd.NA, "", "This IS a STRING."])
answers = [np.nan, np.nan, np.nan, 0, 3]
self.compare(self.primitive(), x, answers)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive()
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
| 1,245 | 32.675676 | 80 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_punctuation_count.py | import numpy as np
import pandas as pd
from featuretools.primitives import PunctuationCount
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestPunctuationCount(PrimitiveTestBase):
primitive = PunctuationCount
def test_punctuation(self):
x = pd.Series(
[
"This is a test file.",
"This, is second line?",
"third/line $1,000;",
"and--subsequen't lines...",
"*and, more..",
],
)
primitive_func = self.primitive().get_function()
answers = [1.0, 2.0, 4.0, 6.0, 4.0]
np.testing.assert_array_equal(primitive_func(x), answers)
def test_multiline(self):
x = pd.Series(
[
"This is a test file.",
"This is second line\nthird line $1000;\nand subsequent lines",
],
)
primitive_func = self.primitive().get_function()
answers = [1.0, 2.0]
np.testing.assert_array_equal(primitive_func(x), answers)
def test_nan(self):
x = pd.Series([np.nan, "", "This is a test file."])
primitive_func = self.primitive().get_function()
answers = [np.nan, 0.0, 1.0]
np.testing.assert_array_equal(primitive_func(x), answers)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive()
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
| 1,642 | 31.215686 | 79 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_number_of_unique_words.py | import numpy as np
import pandas as pd
from featuretools.primitives import NumberOfUniqueWords
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestNumberOfUniqueWords(PrimitiveTestBase):
primitive = NumberOfUniqueWords
def test_general(self):
x = pd.Series(
[
"test test test test",
"test TEST test TEST",
"and subsequent lines...",
],
)
expected = pd.Series([1, 2, 3])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_special_characters_and_whitespace(self):
x = pd.Series(["50% 50 50% \t\t\t\n\n", "a test* test"])
expected = pd.Series([1, 2])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_unicode_input(self):
x = pd.Series(
[
"Ángel Angel Ángel ángel",
],
)
expected = pd.Series([3])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_contractions(self):
x = pd.Series(
[
"can't won't don't can't aren't won't don't they'd there's",
],
)
expected = pd.Series([6])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_multiline(self):
x = pd.Series(
[
"word word word word.",
"This is \nthird line \nthird line",
],
)
expected = pd.Series([1, 4])
actual = self.primitive().get_function()(x)
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_null(self):
x = pd.Series([np.nan, pd.NA, None, "This is a test file."])
actual = self.primitive().get_function()(x)
expected = pd.Series([pd.NA, pd.NA, pd.NA, 5])
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_case_insensitive(self):
x = pd.Series(["WORD word WORd WORd WOrD word"])
actual = self.primitive(case_insensitive=True).get_function()(x)
expected = pd.Series([1])
pd.testing.assert_series_equal(actual, expected, check_names=False)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive()
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
| 2,799 | 30.818182 | 76 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_natural_language_primitives_terminate.py | import pandas as pd
import pytest
from featuretools.primitives.utils import _get_natural_language_primitives
TIMEOUT_THRESHOLD = 20
class TestNaturalLanguagePrimitivesTerminate:
# need to sort primitives to avoid pytest collection error
primitives = sorted(_get_natural_language_primitives().items())
@pytest.mark.timeout(TIMEOUT_THRESHOLD)
@pytest.mark.parametrize("primitive", [prim for _, prim in primitives])
def test_natlang_primitive_does_not_timeout(
self,
strings_that_have_triggered_errors_before,
primitive,
):
for text in strings_that_have_triggered_errors_before:
primitive().get_function()(pd.Series(text))
| 693 | 30.545455 | 75 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_count_string.py | import numpy as np
import pandas as pd
from featuretools.primitives import CountString
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestCountString(PrimitiveTestBase):
primitive = CountString
def compare(self, primitive_initiated, test_cases, answers):
primitive_func = primitive_initiated.get_function()
primitive_answers = primitive_func(test_cases)
return np.testing.assert_array_equal(answers, primitive_answers)
test_cases = pd.Series(
[
# Ignore case
"Hello other words hello hEllo HELLO",
# ignore non alphanumeric
"he\\{ll\t\n\t.--?o othe/r words hello hello h.el./lo",
# match whole word
"hellohellohello other hello word go hello here 9hello hello9",
# all combined
# hello/ counts as hello being it's own word
# since * and / are non word characters
# but 9 is a "word character" so 9hello9
# does not count as hello being its own word
"helloHellohello 9Hello 9hello9 *hello/ test'hel..lo' 'hE.l.lO' \
hello",
],
)
def test_non_regex_with_no_other_parameters(self):
primitive = self.primitive(
"hello",
ignore_case=False,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=False,
)
answers = [1, 2, 7, 5]
self.compare(primitive, self.test_cases, answers)
def test_non_regex_ignore_case(self):
primitive1 = self.primitive(
"hello",
ignore_case=True,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=False,
)
primitive2 = self.primitive(
"HeLLo",
ignore_case=True,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=False,
)
answers = [4, 2, 7, 7]
self.compare(primitive1, self.test_cases, answers)
self.compare(primitive2, self.test_cases, answers)
def test_non_regex_ignore_non_alphanumeric(self):
primitive = self.primitive(
"hello",
ignore_case=False,
ignore_non_alphanumeric=True,
is_regex=False,
match_whole_words_only=False,
)
answers = [1, 4, 7, 6]
self.compare(primitive, self.test_cases, answers)
def test_non_regex_match_whole_words_only(self):
primitive = self.primitive(
"hello",
ignore_case=False,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=True,
)
answers = [1, 2, 2, 2]
self.compare(primitive, self.test_cases, answers)
def test_non_regex_with_all_others_parameters(self):
primitive = self.primitive(
"hello",
ignore_case=True,
ignore_non_alphanumeric=True,
is_regex=False,
match_whole_words_only=True,
)
answers = [4, 4, 2, 3]
self.compare(primitive, self.test_cases, answers)
def test_regex_with_no_other_parameters(self):
primitive = self.primitive(
"h.l.o",
ignore_case=False,
ignore_non_alphanumeric=False,
is_regex=True,
match_whole_words_only=False,
)
answers = [2, 2, 7, 5]
self.compare(primitive, self.test_cases, answers)
def test_regex_with_ignore_case(self):
primitive = self.primitive(
"h.l.o",
ignore_case=True,
ignore_non_alphanumeric=False,
is_regex=True,
match_whole_words_only=False,
)
answers = [4, 2, 7, 7]
self.compare(primitive, self.test_cases, answers)
def test_regex_with_ignore_non_alphanumeric(self):
primitive = self.primitive(
"h.l.o",
ignore_case=False,
ignore_non_alphanumeric=True,
is_regex=True,
match_whole_words_only=False,
)
answers = [2, 4, 7, 6]
self.compare(primitive, self.test_cases, answers)
def test_regex_with_match_whole_words_only(self):
primitive = self.primitive(
"h.l.o",
ignore_case=False,
ignore_non_alphanumeric=False,
is_regex=True,
match_whole_words_only=True,
)
answers = [2, 2, 2, 2]
self.compare(primitive, self.test_cases, answers)
def test_regex_with_all_other_parameters(self):
primitive = self.primitive(
"h.l.o",
ignore_case=True,
ignore_non_alphanumeric=True,
is_regex=True,
match_whole_words_only=True,
)
answers = [4, 4, 2, 3]
self.compare(primitive, self.test_cases, answers)
def test_overlapping_regex(self):
primitive = self.primitive(
"(?=(a.*a))",
ignore_case=True,
ignore_non_alphanumeric=True,
is_regex=True,
match_whole_words_only=False,
)
test_cases = pd.Series(["aaaaaaaaaa", "atesta aa aa a"])
answers = [9, 6]
self.compare(primitive, test_cases, answers)
def test_the(self):
primitive = self.primitive(
"the",
ignore_case=True,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=False,
)
test_cases = pd.Series(["The fox jumped over the cat", "The there then"])
answers = [2, 3]
self.compare(primitive, test_cases, answers)
def test_nan(self):
primitive = self.primitive(
"the",
ignore_case=True,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=False,
)
test_cases = pd.Series(
[np.nan, None, pd.NA, "The fox jumped over the cat", "The there then"],
)
answers = [np.nan, np.nan, np.nan, 2, 3]
self.compare(primitive, test_cases, answers)
def test_with_featuretools(self, es):
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive(
"the",
ignore_case=True,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=False,
)
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
def test_with_featuretools_nan(self, es):
log_df = es["log"]
comments = log_df["comments"]
comments[1] = pd.NA
comments[2] = np.nan
comments[3] = None
log_df["comments"] = comments
es.replace_dataframe(dataframe_name="log", df=log_df)
transform, aggregation = find_applicable_primitives(self.primitive)
primitive_instance = self.primitive(
"the",
ignore_case=True,
ignore_non_alphanumeric=False,
is_regex=False,
match_whole_words_only=False,
)
transform.append(primitive_instance)
valid_dfs(es, aggregation, transform, self.primitive)
| 7,416 | 30.561702 | 83 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/natural_language_primitives_tests/test_upper_case_word_count.py | import numpy as np
import pandas as pd
from featuretools.primitives import UpperCaseWordCount
class TestUpperCaseWordCount:
primitive = UpperCaseWordCount
def test_strings(self):
x = pd.Series(
[
"This IS a STRING.",
"Testing AAA",
"Testing AAA BBB",
"Testing TEsTIng AA3 AA_33 HELLO",
"AAA $@()#$@@#$",
],
dtype="string",
)
primitive_func = self.primitive().get_function()
answers = pd.Series([2, 1, 2, 3, 1], dtype="Int64")
pd.testing.assert_series_equal(
primitive_func(x).astype("Int64"),
answers,
check_names=False,
)
def test_nan(self):
x = pd.Series(
[
np.nan,
"",
"This IS a STRING.",
],
dtype="string",
)
primitive_func = self.primitive().get_function()
answers = pd.Series([pd.NA, 0, 2], dtype="Int64")
pd.testing.assert_series_equal(
primitive_func(x).astype("Int64"),
answers,
check_names=False,
)
| 1,195 | 25.577778 | 59 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/primitives_to_install/custom_max.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import AggregationPrimitive
class CustomMax(AggregationPrimitive):
name = "custom_max"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
| 294 | 28.5 | 61 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/primitives_to_install/custom_sum.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import AggregationPrimitive
class CustomSum(AggregationPrimitive):
name = "custom_sum"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
| 294 | 28.5 | 61 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/primitives_to_install/custom_mean.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives.base import AggregationPrimitive
class CustomMean(AggregationPrimitive):
name = "custom_mean"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
| 296 | 28.7 | 61 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/primitives_to_install/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/primitive_tests/bad_primitive_files/multiple_primitives.py | from woodwork.column_schema import ColumnSchema
from featuretools.primitives import AggregationPrimitive
class CustomMax(AggregationPrimitive):
name = "custom_max"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
class CustomSum(AggregationPrimitive):
name = "custom_sum"
input_types = [ColumnSchema(semantic_tags={"numeric"})]
return_type = ColumnSchema(semantic_tags={"numeric"})
| 472 | 28.5625 | 59 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/bad_primitive_files/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/primitive_tests/bad_primitive_files/no_primitives.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/primitive_tests/aggregation_primitive_tests/test_num_consecutive.py | import numpy as np
import pandas as pd
from featuretools.primitives import NumConsecutiveGreaterMean, NumConsecutiveLessMean
class TestNumConsecutiveGreaterMean:
primitive = NumConsecutiveGreaterMean
def test_continuous_range(self):
x = pd.Series(range(10))
longest_sequence = [5, 6, 7, 8, 9]
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
def test_subsequence_in_middle(self):
x = pd.Series(
[
0.6,
0.18,
1.11,
-0.19,
0.25,
-1.41,
0.54,
0.29,
-1.59,
1.67,
1.19,
0.44,
2.39,
-1.38,
0.15,
-1.16,
1.54,
-0.34,
-1.41,
0.58,
],
)
longest_sequence = [1.67, 1.19, 0.44, 2.39]
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
def test_subsequence_at_start(self):
x = pd.Series(
[
1.67,
1.19,
0.44,
2.39,
-0.19,
0.6,
0.18,
1.11,
0.25,
-1.41,
0.54,
0.29,
-1.59,
-1.38,
0.15,
-1.16,
1.54,
-0.34,
-1.41,
0.58,
],
)
longest_sequence = [1.67, 1.19, 0.44, 2.39]
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
def test_subsequence_at_end(self):
x = pd.Series(
[
0.6,
0.18,
1.11,
-0.19,
0.25,
-1.41,
0.54,
0.29,
-1.59,
-1.38,
0.15,
-1.16,
1.54,
-0.34,
0.58,
-1.41,
1.67,
1.19,
0.44,
2.39,
],
)
longest_sequence = [1.67, 1.19, 0.44, 2.39]
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
def test_nan(self):
x = pd.Series(range(10))
x = pd.concat([x, pd.Series([np.nan] * 20)])
longest_sequence = [5, 6, 7, 8, 9]
# test ignoring NaN values
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
# test skipna=False
primitive_instance = self.primitive(skipna=False)
primitive_func = primitive_instance.get_function()
assert np.isnan(primitive_func(x))
def test_inf(self):
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
x = pd.Series(range(10))
x = pd.concat([x, pd.Series([np.inf])])
assert primitive_func(x) == 0
x = pd.Series(range(10))
x = pd.concat([x, pd.Series([np.NINF])])
assert primitive_func(x) == 10
x = pd.Series(range(10))
x = pd.concat([x, pd.Series([np.NINF, np.inf, np.inf])])
assert np.isnan(primitive_func(x))
class TestNumConsecutiveLessMean:
primitive = NumConsecutiveLessMean
def test_continuous_range(self):
x = pd.Series(range(10))
longest_sequence = [0, 1, 2, 3, 4]
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
def test_subsequence_in_middle(self):
x = pd.Series(
[
0.6,
0.18,
1.11,
-0.19,
0.25,
-1.41,
0.54,
0.29,
-1.59,
1.67,
1.19,
0.44,
2.39,
-1.38,
0.15,
-1.16,
1.54,
-0.34,
-1.41,
0.58,
],
)
longest_sequence = [-1.38, 0.15, -1.16]
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
def test_subsequence_at_start(self):
x = pd.Series(
[
-1.38,
0.15,
-1.16,
0.6,
0.18,
1.11,
-0.19,
0.25,
-1.41,
0.54,
0.29,
-1.59,
1.67,
1.19,
0.44,
2.39,
1.54,
-0.34,
-1.41,
0.58,
],
)
longest_sequence = [-1.38, 0.15, -1.16]
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
def test_subsequence_at_end(self):
x = pd.Series(
[
0.6,
0.18,
1.11,
-0.19,
0.25,
-1.41,
0.54,
0.29,
-1.59,
1.67,
1.19,
0.44,
2.39,
1.54,
-0.34,
-1.41,
0.58,
-1.38,
0.15,
-1.16,
],
)
longest_sequence = [-1.38, 0.15, -1.16]
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
def test_nan(self):
x = pd.Series(range(10))
x = pd.concat([x, pd.Series([np.nan] * 20)])
longest_sequence = [0, 1, 2, 3, 4]
# test ignoring NaN values
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
assert primitive_func(x) == len(longest_sequence)
# test skipna=False
primitive_instance = self.primitive(skipna=False)
primitive_func = primitive_instance.get_function()
assert np.isnan(primitive_func(x))
def test_inf(self):
primitive_instance = self.primitive()
primitive_func = primitive_instance.get_function()
x = pd.Series(range(10))
x = pd.concat([x, pd.Series([np.inf])])
assert primitive_func(x) == 10
x = pd.Series(range(10))
x = pd.concat([x, pd.Series([np.NINF])])
assert primitive_func(x) == 0
x = pd.Series(range(10))
x = pd.concat([x, pd.Series([np.NINF, np.inf, np.inf])])
assert np.isnan(primitive_func(x))
| 7,524 | 26.973978 | 85 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/aggregation_primitive_tests/test_count_aggregation_primitives.py | import numpy as np
import pandas as pd
from pytest import raises
from featuretools.primitives import (
CountAboveMean,
CountGreaterThan,
CountInsideNthSTD,
CountInsideRange,
CountLessThan,
CountOutsideNthSTD,
CountOutsideRange,
)
from featuretools.tests.primitive_tests.utils import PrimitiveTestBase
class TestCountAboveMean(PrimitiveTestBase):
primitive = CountAboveMean
def test_regular(self):
data = pd.Series([1, 2, 3, 4, 5])
expected = 2
primitive_func = self.primitive().get_function()
actual = primitive_func(data)
assert expected == actual
data = pd.Series([1, 2, 3.1, 4, 5])
expected = 3
primitive_func = self.primitive().get_function()
actual = primitive_func(data)
assert expected == actual
def test_nan_without_ignore_nan(self):
data = pd.Series([np.nan, 1, 2, 3, 4, 5, np.nan, np.nan])
expected = np.nan
primitive_func = self.primitive(skipna=False).get_function()
actual = primitive_func(data)
assert np.isnan(actual) == np.isnan(expected)
data = pd.Series([np.nan])
primitive_func = self.primitive(skipna=False).get_function()
actual = primitive_func(data)
assert np.isnan(actual) == np.isnan(expected)
def test_nan_with_ignore_nan(self):
data = pd.Series([np.nan, 1, 2, 3, 4, 5, np.nan, np.nan])
expected = 2
primitive_func = self.primitive(skipna=True).get_function()
actual = primitive_func(data)
assert expected == actual
data = pd.Series([np.nan, 1, 2, 3.1, 4, 5, np.nan, np.nan])
expected = 3
primitive_func = self.primitive(skipna=True).get_function()
actual = primitive_func(data)
assert expected == actual
data = pd.Series([np.nan])
expected = np.nan
primitive_func = self.primitive(skipna=True).get_function()
actual = primitive_func(data)
assert np.isnan(actual) == np.isnan(expected)
def test_inf(self):
data = pd.Series([np.NINF, 1, 2, 3, 4, 5])
expected = 5
primitive_func = self.primitive().get_function()
actual = primitive_func(data)
assert expected == actual
data = pd.Series([1, 2, 3, 4, 5, np.inf])
expected = 0
primitive_func = self.primitive().get_function()
actual = primitive_func(data)
assert expected == actual
data = pd.Series([np.NINF, 1, 2, 3, 4, 5, np.inf])
expected = np.nan
primitive_func = self.primitive().get_function()
actual = primitive_func(data)
assert np.isnan(actual) == np.isnan(expected)
primitive_func = self.primitive(skipna=False).get_function()
actual = primitive_func(data)
assert np.isnan(actual) == np.isnan(expected)
class TestCountGreaterThan(PrimitiveTestBase):
primitive = CountGreaterThan
def compare_results(self, data, thresholds, results):
for threshold, result in zip(thresholds, results):
primitive = self.primitive(threshold=threshold)
function = primitive.get_function()
assert function(data) == result
assert isinstance(function(data), np.int64)
def test_regular(self):
data = pd.Series([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
thresholds = pd.Series([-5, -2, 0, 2, 5])
results = pd.Series([10, 7, 5, 3, 0])
self.compare_results(data, thresholds, results)
def test_edges(self):
data = pd.Series([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
thresholds = pd.Series([np.inf, np.NINF, None, np.nan])
results = pd.Series([0, len(data), 0, 0])
self.compare_results(data, thresholds, results)
def test_nans(self):
data = pd.Series([-5, -4, -3, np.inf, np.NINF, np.nan, 1, 2, 3, 4, 5])
thresholds = pd.Series([np.inf, np.NINF, None, 0, np.nan])
results = pd.Series([0, 9, 0, 6, 0])
self.compare_results(data, thresholds, results)
class TestCountInsideNthSTD:
    """Tests for CountInsideNthSTD, which counts the values lying within
    ``n`` standard deviations of the mean (NaN values are ignored).
    """

    primitive = CountInsideNthSTD

    def test_normal_distribution(self):
        # Twenty roughly normally distributed samples.
        x = pd.Series(
            [
                -76.0,
                41.0,
                -43.0,
                -152.0,
                -89.0,
                28.0,
                49.0,
                298.0,
                -132.0,
                146.0,
                -107.0,
                -26.0,
                26.0,
                -81.0,
                116.0,
                -217.0,
                -102.0,
                144.0,
                120.0,
                -130.0,
            ],
        )
        # Values lying more than one standard deviation from the mean.
        first_outliers = [-152.0, 298.0, 146.0, 116.0, -217.0, 144.0, 120.0]
        primitive_instance = self.primitive(1)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == len(x) - len(first_outliers)
        # Only one value lies more than two standard deviations away.
        second_outliers = [298.0]
        primitive_instance = self.primitive(2)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == len(x) - len(second_outliers)

    def test_poisson_distribution(self):
        # Twenty small counts, roughly Poisson distributed.
        x = pd.Series(
            [
                1,
                1,
                3,
                3,
                0,
                0,
                1,
                3,
                3,
                1,
                2,
                3,
                2,
                0,
                1,
                3,
                2,
                1,
                0,
                2,
            ],
        )
        # Values lying more than one standard deviation from the mean.
        first_outliers = [3, 3, 0, 0, 3, 3, 3, 0, 3, 0]
        primitive_instance = self.primitive(1)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == len(x) - len(first_outliers)
        # Every value falls within two standard deviations.
        second_outliers = []
        primitive_instance = self.primitive(2)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == len(x) - len(second_outliers)

    def test_nan(self):
        # test if function ignores nan values
        x = pd.Series(
            [
                -76.0,
                41.0,
                -43.0,
                -152.0,
                -89.0,
                28.0,
                49.0,
                298.0,
                -132.0,
                146.0,
                -107.0,
                -26.0,
                26.0,
                -81.0,
                116.0,
                -217.0,
                -102.0,
                144.0,
                120.0,
                -130.0,
            ],
        )
        # Append twenty NaNs; they are excluded from the count entirely,
        # hence the trailing "- 20" in the assertion below.
        x = pd.concat([x, pd.Series([np.nan] * 20)])
        first_outliers = [-152.0, 298.0, 146.0, 116.0, -217.0, 144.0, 120.0]
        primitive_instance = self.primitive(1)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == len(x) - len(first_outliers) - 20
        # test a series with all nan values
        x = pd.Series([np.nan] * 20)
        primitive_instance = self.primitive(1)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 0

    def test_negative_n(self):
        # A negative number of standard deviations is rejected at
        # construction time.  ``raises`` is presumably pytest.raises —
        # the import is not visible in this chunk; confirm at file top.
        with raises(ValueError):
            self.primitive(-1)
class TestCountInsideRange(PrimitiveTestBase):
    """Tests for CountInsideRange, which counts values within [lower, upper]."""

    primitive = CountInsideRange

    def test_integer_range(self):
        # all integers from -100 to 100
        x = pd.Series(np.arange(-100, 101, 1))
        primitive_instance = self.primitive(-100, 100)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 201
        primitive_instance = self.primitive(-50, 50)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 101
        # A degenerate range counts only exact matches.
        primitive_instance = self.primitive(1, 1)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 1

    def test_float_range(self):
        x = pd.Series(np.linspace(-3, 3, 10))
        primitive_instance = self.primitive(-3, 3)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 10
        primitive_instance = self.primitive(-0.34, 1.68)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 4
        primitive_instance = self.primitive(-3, -3)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 1

    def test_nan(self):
        x = pd.Series(np.linspace(-3, 3, 10))
        x = pd.concat([x, pd.Series([np.nan] * 20)])
        # NaNs are skipped by default.
        primitive_instance = self.primitive(-0.34, 1.68)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 4
        # With skipna=False the presence of NaNs makes the result NaN.
        primitive_instance = self.primitive(-3, 3, False)
        primitive_func = primitive_instance.get_function()
        assert np.isnan(primitive_func(x))

    def test_inf(self):
        x = pd.Series(np.linspace(-3, 3, 10))
        num_NINF = 20
        # -np.inf instead of np.NINF: the NINF alias was removed in NumPy 2.0.
        x = pd.concat([x, pd.Series([-np.inf] * num_NINF)])
        num_inf = 10
        x = pd.concat([x, pd.Series([np.inf] * num_inf)])
        primitive_instance = self.primitive(-3, 3)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 10
        # An infinite bound includes the matching infinities in the count.
        primitive_instance = self.primitive(-np.inf, 3)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 10 + num_NINF
        primitive_instance = self.primitive(-3, np.inf)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 10 + num_inf
class TestCountLessThan(PrimitiveTestBase):
    """Tests for CountLessThan, which counts values strictly below a threshold."""

    primitive = CountLessThan

    def compare_answers(self, data, thresholds, answers):
        # Helper: check one primitive instance per threshold/answer pair.
        for threshold, answer in zip(thresholds, answers):
            primitive = self.primitive(threshold=threshold)
            function = primitive.get_function()
            assert function(data) == answer
            assert isinstance(function(data), np.int64)

    def test_regular(self):
        data = pd.Series([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
        thresholds = pd.Series([-5, -2, 0, 2, 5])
        answers = pd.Series([0, 3, 5, 7, 10])
        self.compare_answers(data, thresholds, answers)

    def test_edges(self):
        data = pd.Series([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5])
        # -np.inf instead of np.NINF: the NINF alias was removed in NumPy 2.0.
        thresholds = pd.Series([np.inf, -np.inf, None, np.nan])
        # Everything is below +inf; nothing is below -inf or a missing
        # threshold.
        answers = pd.Series([len(data), 0, 0, 0])
        self.compare_answers(data, thresholds, answers)

    def test_nans(self):
        data = pd.Series([-5, -4, -3, np.inf, -np.inf, np.nan, 1, 2, 3, 4, 5])
        thresholds = pd.Series([np.inf, -np.inf, None, 0, np.nan])
        # NaN data points never satisfy the comparison.
        answers = pd.Series([9, 0, 0, 4, 0])
        self.compare_answers(data, thresholds, answers)
class TestCountOutsideNthSTD(PrimitiveTestBase):
    """Tests for CountOutsideNthSTD, which counts the values lying more
    than ``n`` standard deviations from the mean (NaN values are ignored).
    """

    primitive = CountOutsideNthSTD

    def test_normal_distribution(self):
        # Roughly normal sample with three obvious outliers (10, 20, 763).
        x = pd.Series(
            [
                10, 386, 479, 627, 20, 523, 482, 483, 542, 699,
                535, 617, 577, 471, 615, 583, 441, 562, 563, 527,
                453, 530, 433, 541, 585, 704, 443, 569, 430, 637,
                331, 511, 552, 496, 484, 566, 554, 472, 335, 440,
                579, 341, 545, 615, 548, 604, 439, 556, 442, 461,
                624, 611, 444, 578, 405, 487, 490, 496, 398, 512,
                422, 455, 449, 432, 607, 679, 434, 597, 639, 565,
                415, 486, 668, 414, 665, 763, 557, 304, 404, 454,
                689, 610, 483, 441, 657, 590, 492, 476, 437, 483,
                529, 363, 711, 543,
            ],
        )
        outliers = [10, 20, 763]
        primitive_instance = self.primitive(2)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == len(outliers)

    def test_poisson_distribution(self):
        # Twenty small counts, roughly Poisson distributed.
        x = pd.Series(
            [1, 1, 3, 3, 0, 0, 1, 3, 3, 1, 2, 3, 2, 0, 1, 3, 2, 1, 0, 2],
        )
        # Half the values fall more than one standard deviation out...
        primitive_instance = self.primitive(1)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 10
        # ...but none fall more than two standard deviations out.
        primitive_instance = self.primitive(2)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 0

    def test_nan(self):
        # test if function ignores nan values
        x = pd.Series(
            [
                -76.0, 41.0, -43.0, -152.0, -89.0, 28.0, 49.0, 298.0, -132.0, 146.0,
                -107.0, -26.0, 26.0, -81.0, 116.0, -217.0, -102.0, 144.0, 120.0, -130.0,
            ],
        )
        # Bug fix: append twenty NaN values.  The original code built
        # pd.Series([np.nan * 20]) — a single-element series, because
        # np.nan * 20 is just NaN — instead of [np.nan] * 20 as used by
        # the matching TestCountInsideNthSTD.test_nan above.
        x = pd.concat([x, pd.Series([np.nan] * 20)])
        primitive_instance = self.primitive(1)
        primitive_func = primitive_instance.get_function()
        # NaNs are ignored, so the outlier count is unchanged.
        assert primitive_func(x) == 7
        # test a series with all nan values
        x = pd.Series([np.nan] * 20)
        primitive_instance = self.primitive(1)
        primitive_func = primitive_instance.get_function()
        assert primitive_func(x) == 0

    def test_negative_n(self):
        # A negative number of standard deviations is rejected at
        # construction time.
        with raises(ValueError):
            self.primitive(-1)
class TestCountOutsideRange(PrimitiveTestBase):
    """Tests for CountOutsideRange, which counts values outside [lower, upper]."""

    primitive = CountOutsideRange

    def test_integer_range(self):
        # all integers from -100 to 100
        x = pd.Series(np.arange(-100, 101, 1))
        # Use self.primitive for consistency with the sibling test classes
        # (the original called CountOutsideRange directly).
        primitive_func = self.primitive(-100, 100).get_function()
        assert primitive_func(x) == 0
        primitive_func = self.primitive(-50, 50).get_function()
        assert primitive_func(x) == 100
        # A degenerate range excludes everything but exact matches.
        primitive_func = self.primitive(1, 1).get_function()
        assert primitive_func(x) == len(x) - 1

    def test_float_range(self):
        x = pd.Series(np.linspace(-3, 3, 10))
        primitive_func = self.primitive(-3, 3).get_function()
        assert primitive_func(x) == 0
        primitive_func = self.primitive(-0.34, 1.68).get_function()
        assert primitive_func(x) == 6
        primitive_func = self.primitive(-3, -3).get_function()
        assert primitive_func(x) == 9

    def test_nan(self):
        x = pd.Series(np.linspace(-3, 3, 10))
        x = pd.concat([x, pd.Series([np.nan] * 20)])
        # NaNs are skipped by default.
        primitive_func = self.primitive(-0.34, 1.68).get_function()
        assert primitive_func(x) == 6
        # With skipna=False the presence of NaNs makes the result NaN.
        primitive_func = self.primitive(-3, 3, False).get_function()
        assert np.isnan(primitive_func(x))

    def test_inf(self):
        x = pd.Series(np.linspace(-3, 3, 10))
        num_NINF = 20
        # -np.inf instead of np.NINF: the NINF alias was removed in NumPy 2.0.
        x = pd.concat([x, pd.Series([-np.inf] * num_NINF)])
        num_inf = 10
        x = pd.concat([x, pd.Series([np.inf] * num_inf)])
        primitive_func = self.primitive(-3, 3).get_function()
        assert primitive_func(x) == num_inf + num_NINF
        primitive_func = self.primitive(-0.34, 1.68).get_function()
        assert primitive_func(x) == 6 + num_inf + num_NINF
        # An infinite bound excludes the matching infinities from the count.
        primitive_func = self.primitive(-np.inf, 3).get_function()
        assert primitive_func(x) == num_inf
        primitive_func = self.primitive(-3, np.inf).get_function()
        assert primitive_func(x) == num_NINF
| 17,616 | 29.321859 | 78 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/aggregation_primitive_tests/test_rolling_primitive.py | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import (
RollingCount,
RollingMax,
RollingMean,
RollingMin,
RollingOutlierCount,
RollingSTD,
RollingTrend,
)
from featuretools.primitives.standard.transform.time_series.utils import (
apply_rolling_agg_to_series,
)
from featuretools.tests.primitive_tests.utils import get_number_from_offset
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (5, 2),
        (5, 0),
        ("5d", "7d"),
        ("5d", "0d"),
    ],
)
@pytest.mark.parametrize("min_periods", [1, 0, 2, 5])
def test_rolling_max(min_periods, window_length, gap, window_series_pd):
    """RollingMax agrees with a manual rolling max over a uniform series."""
    gap_as_int = get_number_from_offset(gap)
    window_as_int = get_number_from_offset(window_length)
    # The fixture series is uniform, so offset strings and their numeric
    # equivalents select identical windows.
    expected = pd.Series(
        apply_rolling_agg_to_series(
            window_series_pd,
            lambda window: window.max(),
            window_as_int,
            gap=gap_as_int,
            min_periods=min_periods,
        ),
    )
    primitive = RollingMax(
        window_length=window_length,
        gap=gap,
        min_periods=min_periods,
    )
    actual = pd.Series(
        primitive.get_function()(
            window_series_pd.index,
            pd.Series(window_series_pd.values),
        ),
    )
    # A min_periods of 0 is treated the same as 1.
    assert actual.isna().sum() == gap_as_int + (min_periods or 1) - 1
    pd.testing.assert_series_equal(expected, actual)
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (5, 2),
        (5, 0),
        ("5d", "7d"),
        ("5d", "0d"),
    ],
)
@pytest.mark.parametrize("min_periods", [1, 0, 2, 5])
def test_rolling_min(min_periods, window_length, gap, window_series_pd):
    """RollingMin agrees with a manual rolling min over a uniform series."""
    gap_as_int = get_number_from_offset(gap)
    window_as_int = get_number_from_offset(window_length)
    # The fixture series is uniform, so offset strings and their numeric
    # equivalents select identical windows.
    expected = pd.Series(
        apply_rolling_agg_to_series(
            window_series_pd,
            lambda window: window.min(),
            window_as_int,
            gap=gap_as_int,
            min_periods=min_periods,
        ),
    )
    primitive = RollingMin(
        window_length=window_length,
        gap=gap,
        min_periods=min_periods,
    )
    actual = pd.Series(
        primitive.get_function()(
            window_series_pd.index,
            pd.Series(window_series_pd.values),
        ),
    )
    # A min_periods of 0 is treated the same as 1.
    assert actual.isna().sum() == gap_as_int + (min_periods or 1) - 1
    pd.testing.assert_series_equal(expected, actual)
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (5, 2),
        (5, 0),
        ("5d", "7d"),
        ("5d", "0d"),
    ],
)
@pytest.mark.parametrize("min_periods", [1, 0, 2, 5])
def test_rolling_mean(min_periods, window_length, gap, window_series_pd):
    """RollingMean agrees with a manual rolling mean over a uniform series."""
    gap_as_int = get_number_from_offset(gap)
    window_as_int = get_number_from_offset(window_length)
    # The fixture series is uniform, so offset strings and their numeric
    # equivalents select identical windows.
    expected = pd.Series(
        apply_rolling_agg_to_series(
            window_series_pd,
            np.mean,
            window_as_int,
            gap=gap_as_int,
            min_periods=min_periods,
        ),
    )
    primitive = RollingMean(
        window_length=window_length,
        gap=gap,
        min_periods=min_periods,
    )
    actual = pd.Series(
        primitive.get_function()(
            window_series_pd.index,
            pd.Series(window_series_pd.values),
        ),
    )
    # A min_periods of 0 is treated the same as 1.
    assert actual.isna().sum() == gap_as_int + (min_periods or 1) - 1
    pd.testing.assert_series_equal(expected, actual)
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (5, 2),
        (5, 0),
        ("5d", "7d"),
        ("5d", "0d"),
    ],
)
@pytest.mark.parametrize("min_periods", [1, 0, 2, 5])
def test_rolling_std(min_periods, window_length, gap, window_series_pd):
    """RollingSTD agrees with a manual rolling std over a uniform series."""
    gap_as_int = get_number_from_offset(gap)
    window_as_int = get_number_from_offset(window_length)
    # The fixture series is uniform, so offset strings and their numeric
    # equivalents select identical windows.
    expected = pd.Series(
        apply_rolling_agg_to_series(
            window_series_pd,
            lambda window: window.std(),
            window_as_int,
            gap=gap_as_int,
            min_periods=min_periods,
        ),
    )
    primitive = RollingSTD(
        window_length=window_length,
        gap=gap,
        min_periods=min_periods,
    )
    actual = pd.Series(
        primitive.get_function()(
            window_series_pd.index,
            pd.Series(window_series_pd.values),
        ),
    )
    if min_periods in (0, 1):
        # pandas' std of a single observation is NaN, so even
        # min_periods of 0/1 leaves one extra NaN after the gap.
        expected_nans = gap_as_int + 1
    else:
        expected_nans = gap_as_int + min_periods - 1
    assert actual.isna().sum() == expected_nans
    pd.testing.assert_series_equal(expected, actual)
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (5, 2),
        ("6d", "7d"),
    ],
)
def test_rolling_count(window_length, gap, window_series_pd):
    """RollingCount matches a manual rolling count beyond the NaN prefix."""
    gap_as_int = get_number_from_offset(gap)
    window_as_int = get_number_from_offset(window_length)
    expected = pd.Series(
        apply_rolling_agg_to_series(
            window_series_pd,
            lambda window: window.count(),
            window_as_int,
            gap=gap_as_int,
        ),
    )
    primitive = RollingCount(
        window_length=window_length,
        gap=gap,
        min_periods=window_as_int,
    )
    actual = pd.Series(primitive.get_function()(window_series_pd.index))
    nan_count = gap_as_int + window_as_int - 1
    assert actual.isna().sum() == nan_count
    # RollingCount handles the min_periods difference internally, so it
    # won't match roll_series_with_gap exactly; compare only the rows
    # beyond the NaN prefix.
    pd.testing.assert_series_equal(
        expected.iloc[nan_count:],
        actual.iloc[nan_count:],
    )
@pytest.mark.parametrize(
    "min_periods, expected_num_nans",
    [(0, 2), (1, 2), (3, 4), (5, 6)],  # 0 and 1 get treated the same
)
@pytest.mark.parametrize("window_length, gap", [("5d", "2d"), (5, 2)])
def test_rolling_count_primitive_min_periods_nans(
    window_length,
    gap,
    min_periods,
    expected_num_nans,
    window_series_pd,
):
    """The NaN prefix of RollingCount grows with gap plus min_periods."""
    count_func = RollingCount(
        window_length=window_length,
        gap=gap,
        min_periods=min_periods,
    ).get_function()
    result = pd.Series(count_func(window_series_pd.index))
    assert result.isna().sum() == expected_num_nans
@pytest.mark.parametrize(
    "min_periods, expected_num_nans",
    [(0, 0), (1, 0), (3, 2), (5, 4)],  # 0 and 1 get treated the same
)
@pytest.mark.parametrize("window_length, gap", [("5d", "0d"), (5, 0)])
def test_rolling_count_with_no_gap(
    window_length,
    gap,
    min_periods,
    expected_num_nans,
    window_series_pd,
):
    """With no gap, only min_periods determines the NaN prefix."""
    count_func = RollingCount(
        window_length=window_length,
        gap=gap,
        min_periods=min_periods,
    ).get_function()
    result = pd.Series(count_func(window_series_pd.index))
    assert result.isna().sum() == expected_num_nans
@pytest.mark.parametrize(
    "window_length, gap, expected_vals",
    [
        # The fixture increases by 1 per step, so each full window has
        # slope 1; the leading NaNs come from the gap and the minimum
        # of three points a trend needs.
        (3, 0, [np.nan] * 2 + [1] * 18),
        (4, 1, [np.nan] * 3 + [1] * 17),
        ("5d", "7d", [np.nan] * 9 + [1] * 11),
        ("5d", "0d", [np.nan] * 2 + [1] * 18),
    ],
)
def test_rolling_trend(window_length, gap, expected_vals, window_series_pd):
    """RollingTrend yields a NaN prefix followed by the series' slope."""
    actual = RollingTrend(window_length=window_length, gap=gap)(
        window_series_pd.index,
        window_series_pd.values,
    )
    pd.testing.assert_series_equal(pd.Series(expected_vals), pd.Series(actual))
def test_rolling_trend_window_length_less_than_three(window_series_pd):
    """A trend needs at least three points, so every output is NaN."""
    result = RollingTrend(window_length=2)(
        window_series_pd.index,
        window_series_pd.values,
    )
    assert all(np.isnan(value) for value in result)
@pytest.mark.parametrize(
    "primitive",
    [
        RollingCount,
        RollingMax,
        RollingMin,
        RollingMean,
        RollingOutlierCount,
    ],
)
def test_rolling_primitives_non_uniform(primitive):
    """Non-uniform time indexes change how many rows land in each window."""
    datetimes = (
        list(pd.date_range(start="2017-01-01", freq="1d", periods=3))
        + list(pd.date_range(start="2017-01-10", freq="2d", periods=4))
        + list(pd.date_range(start="2017-01-22", freq="1d", periods=7))
    )
    irregular_series = pd.Series(range(len(datetimes)), index=datetimes)
    # Matches RollingCount exactly; the other primitives share its NaN layout.
    expected = pd.Series(
        [None, 1, 2] + [None, 1, 1, 1] + [None, 1, 2, 3, 3, 3, 3],
    )
    instance = primitive(window_length="3d", gap="1d")
    if isinstance(instance, RollingCount):
        actual = pd.Series(instance(irregular_series.index))
        pd.testing.assert_series_equal(actual, expected)
    else:
        actual = pd.Series(
            instance(irregular_series.index, pd.Series(irregular_series.values)),
        )
        pd.testing.assert_series_equal(expected.isna(), actual.isna())
def test_rolling_std_non_uniform():
    """Rolling std over a non-uniform index, with its extra NaN per range."""
    datetimes = (
        list(pd.date_range(start="2017-01-01", freq="1d", periods=3))
        + list(pd.date_range(start="2017-01-10", freq="2d", periods=4))
        + list(pd.date_range(start="2017-01-22", freq="1d", periods=7))
    )
    irregular_series = pd.Series(range(len(datetimes)), index=datetimes)
    # Each range starts with at least two NaNs: one row lost to the gap,
    # and one because pandas' std of a single observation is NaN.  The
    # middle range has 2-day spacing, so its 3-day windows never hold
    # more than one observation and stay NaN throughout.
    expected = pd.Series(
        [None, None, 0.707107]
        + [None, None, None, None]
        + [None, None, 0.707107, 1.0, 1.0, 1.0, 1.0],
    )
    actual = pd.Series(
        RollingSTD(window_length="3d", gap="1d")(
            irregular_series.index,
            pd.Series(irregular_series.values),
        ),
    )
    pd.testing.assert_series_equal(actual, expected)
def test_rolling_trend_non_uniform():
    """Rolling trend over a non-uniform index needs three points per window."""
    datetimes = (
        list(pd.date_range(start="2017-01-01", freq="1d", periods=3))
        + list(pd.date_range(start="2017-01-10", freq="2d", periods=4))
        + list(pd.date_range(start="2017-01-22", freq="1d", periods=7))
    )
    irregular_series = pd.Series(range(len(datetimes)), index=datetimes)
    # Only the final, densely spaced range accumulates three observations
    # per window; everything earlier stays NaN.
    expected = pd.Series([None] * 10 + [1.0] * 4)
    actual = pd.Series(
        RollingTrend(window_length="3d", gap="1d")(
            irregular_series.index,
            pd.Series(irregular_series.values),
        ),
    )
    pd.testing.assert_series_equal(actual, expected)
@pytest.mark.parametrize(
    "window_length, gap",
    [
        (5, 2),
        (5, 0),
        ("5d", "7d"),
        ("5d", "0d"),
    ],
)
@pytest.mark.parametrize(
    "min_periods",
    [1, 0, 2, 5],
)
def test_rolling_outlier_count(
    min_periods,
    window_length,
    gap,
    rolling_outlier_series_pd,
):
    """RollingOutlierCount matches rolling application of its own counter."""
    primitive = RollingOutlierCount(
        window_length=window_length,
        gap=gap,
        min_periods=min_periods,
    )
    actual = pd.Series(
        primitive.get_function()(
            rolling_outlier_series_pd.index,
            pd.Series(rolling_outlier_series_pd.values),
        ),
    )
    expected = apply_rolling_agg_to_series(
        series=rolling_outlier_series_pd,
        agg_func=primitive.get_outliers_count,
        window_length=window_length,
        gap=gap,
        min_periods=min_periods,
    )
    # A min_periods of 0 is treated the same as 1.
    assert (
        actual.isna().sum()
        == get_number_from_offset(gap) + (min_periods or 1) - 1
    )
    pd.testing.assert_series_equal(actual, pd.Series(data=expected))
| 14,148 | 28.72479 | 107 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/aggregation_primitive_tests/test_max_consecutive.py | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import (
MaxConsecutiveFalse,
MaxConsecutiveNegatives,
MaxConsecutivePositives,
MaxConsecutiveTrue,
MaxConsecutiveZeros,
)
class TestMaxConsecutiveFalse:
    """Tests for the MaxConsecutiveFalse aggregation primitive."""

    def test_regular(self):
        func = MaxConsecutiveFalse().get_function()
        values = pd.Series(
            [False, False, False, True, True, False, True],
            dtype="bool",
        )
        # The longest run of False is the leading three.
        assert func(values) == 3

    def test_all_true(self):
        func = MaxConsecutiveFalse().get_function()
        values = pd.Series([True] * 4, dtype="bool")
        assert func(values) == 0

    def test_all_false(self):
        func = MaxConsecutiveFalse().get_function()
        values = pd.Series([False] * 3, dtype="bool")
        assert func(values) == 3
class TestMaxConsecutiveTrue:
    """Tests for the MaxConsecutiveTrue aggregation primitive."""

    def test_regular(self):
        func = MaxConsecutiveTrue().get_function()
        values = pd.Series(
            [True, False, True, True, True, False, True],
            dtype="bool",
        )
        # The longest run of True is the middle three.
        assert func(values) == 3

    def test_all_true(self):
        func = MaxConsecutiveTrue().get_function()
        values = pd.Series([True] * 4, dtype="bool")
        assert func(values) == 4

    def test_all_false(self):
        func = MaxConsecutiveTrue().get_function()
        values = pd.Series([False] * 3, dtype="bool")
        assert func(values) == 0
@pytest.mark.parametrize("dtype", ["float64", "int64"])
class TestMaxConsecutiveNegatives:
    """Tests for MaxConsecutiveNegatives across float64 and int64 dtypes."""

    def test_regular(self, dtype):
        if dtype == "int64":
            pytest.skip("test array contains floats which are not supported int64")
        func = MaxConsecutiveNegatives().get_function()
        values = pd.Series([1.3, -3.4, -1, -4, 10, -1.7, -4.9], dtype=dtype)
        assert func(values) == 3

    def test_all_int(self, dtype):
        func = MaxConsecutiveNegatives().get_function()
        values = pd.Series([1, -1, 2, 4, -5], dtype=dtype)
        assert func(values) == 1

    def test_all_float(self, dtype):
        if dtype == "int64":
            pytest.skip("test array contains floats which are not supported int64")
        func = MaxConsecutiveNegatives().get_function()
        values = pd.Series([1.0, -1.0, -2.0, 0.0, 5.0], dtype=dtype)
        assert func(values) == 2

    def test_with_nan(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        # NaNs are skipped by default, so -2 and -3 form one run of two.
        func = MaxConsecutiveNegatives().get_function()
        values = pd.Series([1, np.nan, -2, -3], dtype=dtype)
        assert func(values) == 2

    def test_with_nan_skipna(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        # With skipna=False the NaN splits the series into separate runs.
        func = MaxConsecutiveNegatives(skipna=False).get_function()
        values = pd.Series([-1, np.nan, -2, -3], dtype=dtype)
        assert func(values) == 2

    def test_all_nan(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        func = MaxConsecutiveNegatives().get_function()
        values = pd.Series([np.nan] * 4, dtype=dtype)
        assert np.isnan(func(values))

    def test_all_nan_skipna(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        func = MaxConsecutiveNegatives(skipna=True).get_function()
        values = pd.Series([np.nan] * 4, dtype=dtype)
        assert np.isnan(func(values))
@pytest.mark.parametrize("dtype", ["float64", "int64"])
class TestMaxConsecutivePositives:
    """Tests for MaxConsecutivePositives across float64 and int64 dtypes."""

    def test_regular(self, dtype):
        if dtype == "int64":
            pytest.skip("test array contains floats which are not supported int64")
        func = MaxConsecutivePositives().get_function()
        values = pd.Series([1.3, -3.4, 1, 4, 10, -1.7, -4.9], dtype=dtype)
        assert func(values) == 3

    def test_all_int(self, dtype):
        func = MaxConsecutivePositives().get_function()
        values = pd.Series([1, -1, 2, 4, -5], dtype=dtype)
        assert func(values) == 2

    def test_all_float(self, dtype):
        if dtype == "int64":
            pytest.skip("test array contains floats which are not supported int64")
        func = MaxConsecutivePositives().get_function()
        values = pd.Series([1.0, -1.0, 2.0, 4.0, 5.0], dtype=dtype)
        assert func(values) == 3

    def test_with_nan(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        # NaNs are skipped by default, so 1 and 2 form one run of two.
        func = MaxConsecutivePositives().get_function()
        values = pd.Series([1, np.nan, 2, -3], dtype=dtype)
        assert func(values) == 2

    def test_with_nan_skipna(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        # With skipna=False the NaN splits the series into separate runs.
        func = MaxConsecutivePositives(skipna=False).get_function()
        values = pd.Series([1, np.nan, 2, -3], dtype=dtype)
        assert func(values) == 1

    def test_all_nan(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        func = MaxConsecutivePositives().get_function()
        values = pd.Series([np.nan] * 4, dtype=dtype)
        assert np.isnan(func(values))

    def test_all_nan_skipna(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        func = MaxConsecutivePositives(skipna=True).get_function()
        values = pd.Series([np.nan] * 4, dtype=dtype)
        assert np.isnan(func(values))
@pytest.mark.parametrize("dtype", ["float64", "int64"])
class TestMaxConsecutiveZeros:
    """Tests for MaxConsecutiveZeros across float64 and int64 dtypes."""

    def test_regular(self, dtype):
        if dtype == "int64":
            pytest.skip("test array contains floats which are not supported int64")
        func = MaxConsecutiveZeros().get_function()
        values = pd.Series([1.3, -3.4, 0, 0, 0.0, 1.7, -4.9], dtype=dtype)
        assert func(values) == 3

    def test_all_int(self, dtype):
        func = MaxConsecutiveZeros().get_function()
        values = pd.Series([1, -1, 0, 0, -5], dtype=dtype)
        assert func(values) == 2

    def test_all_float(self, dtype):
        if dtype == "int64":
            pytest.skip("test array contains floats which are not supported int64")
        func = MaxConsecutiveZeros().get_function()
        values = pd.Series([1.0, 0.0, 0.0, 0.0, -5.3], dtype=dtype)
        assert func(values) == 3

    def test_with_nan(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        # NaNs are skipped by default, so the two zeros form one run.
        func = MaxConsecutiveZeros().get_function()
        values = pd.Series([0, np.nan, 0, -3], dtype=dtype)
        assert func(values) == 2

    def test_with_nan_skipna(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        # With skipna=False the NaN splits the series into separate runs.
        func = MaxConsecutiveZeros(skipna=False).get_function()
        values = pd.Series([0, np.nan, 0, -3], dtype=dtype)
        assert func(values) == 1

    def test_all_nan(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        func = MaxConsecutiveZeros().get_function()
        values = pd.Series([np.nan] * 4, dtype=dtype)
        assert np.isnan(func(values))

    def test_all_nan_skipna(self, dtype):
        if dtype == "int64":
            pytest.skip("nans not supported in int64")
        func = MaxConsecutiveZeros(skipna=True).get_function()
        values = pd.Series([np.nan] * 4, dtype=dtype)
        assert np.isnan(func(values))
| 9,320 | 40.798206 | 87 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/aggregation_primitive_tests/test_time_since.py | from datetime import datetime
from math import isnan
import numpy as np
import pandas as pd
from featuretools.primitives import (
TimeSinceLastFalse,
TimeSinceLastMax,
TimeSinceLastMin,
TimeSinceLastTrue,
)
class TestTimeSinceLastFalse:
    """Tests for the TimeSinceLastFalse aggregation primitive."""

    primitive = TimeSinceLastFalse
    cutoff_time = datetime(2011, 4, 9, 11, 31, 27)
    # Five readings six seconds apart, then four readings nine seconds apart.
    times = pd.Series(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)],
    )
    booleans = pd.Series([True] * 5 + [False] * 4)

    def test_booleans(self):
        func = self.primitive().get_function()
        # The last False is the final observation at 10:31:27.
        expected = self.cutoff_time - datetime(2011, 4, 9, 10, 31, 27)
        result = func(self.times, self.booleans, time=self.cutoff_time)
        assert result == expected.total_seconds()

    def test_booleans_reversed(self):
        func = self.primitive().get_function()
        # Reversing the booleans moves the last False to 10:30:18.
        expected = self.cutoff_time - datetime(2011, 4, 9, 10, 30, 18)
        flipped = pd.Series(self.booleans.values[::-1])
        result = func(self.times, flipped, time=self.cutoff_time)
        assert result == expected.total_seconds()

    def test_no_false(self):
        func = self.primitive().get_function()
        times = pd.Series([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)])
        all_true = pd.Series([True] * 5)
        assert isnan(func(times, all_true, time=self.cutoff_time))

    def test_nans(self):
        func = self.primitive().get_function()
        # NOTE(review): two extra times but only one extra boolean are
        # appended — confirm the length mismatch is intentional (compare
        # TestTimeSinceLastMin, which appends matching lengths).
        times = pd.concat([self.times.copy(), pd.Series([np.nan, pd.NaT])])
        booleans = pd.concat(
            [self.booleans.copy(), pd.Series([np.nan], dtype="boolean")],
        )
        times = times.reset_index(drop=True)
        booleans = booleans.reset_index(drop=True)
        expected = self.cutoff_time - datetime(2011, 4, 9, 10, 31, 27)
        result = func(times, booleans, time=self.cutoff_time)
        assert result == expected.total_seconds()

    def test_empty(self):
        func = self.primitive().get_function()
        times = pd.Series([], dtype="datetime64[ns]").reset_index(drop=True)
        booleans = pd.Series([], dtype="boolean")
        # An empty series yields a missing result rather than an error.
        assert pd.isna(func(times, booleans, time=self.cutoff_time))
class TestTimeSinceLastMax:
    """Tests for the TimeSinceLastMax aggregation primitive."""

    primitive = TimeSinceLastMax
    cutoff_time = datetime(2011, 4, 9, 11, 31, 27)
    # Five readings six seconds apart, then four readings nine seconds apart.
    times = pd.Series(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)],
    )
    numerics = pd.Series([0, 1, 2, 8, 2, 5, 1, 3, 7])
    # The maximum value (8) occurs at 10:30:18.
    actual_time_since = cutoff_time - datetime(2011, 4, 9, 10, 30, 18)
    actual_seconds = actual_time_since.total_seconds()

    def test_primitive_func_1(self):
        func = self.primitive().get_function()
        result = func(self.times, self.numerics, time=self.cutoff_time)
        assert result == self.actual_seconds

    def test_no_max(self):
        func = self.primitive().get_function()
        times = pd.Series([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)])
        constants = pd.Series([0] * 5)
        # In a constant series the first occurrence is the maximum.
        expected = self.cutoff_time - datetime(2011, 4, 9, 10, 30, 0)
        assert func(times, constants, time=self.cutoff_time) == expected.total_seconds()

    def test_nans(self):
        func = self.primitive().get_function()
        # NOTE(review): two extra times but only one extra numeric are
        # appended — confirm the length mismatch is intentional (compare
        # TestTimeSinceLastMin, which appends matching lengths).
        times = pd.concat([self.times.copy(), pd.Series([np.nan, pd.NaT])])
        numerics = pd.concat(
            [self.numerics.copy(), pd.Series([np.nan], dtype="float64")],
        )
        times = times.reset_index(drop=True)
        numerics = numerics.reset_index(drop=True)
        result = func(times, numerics, time=self.cutoff_time)
        assert result == self.actual_seconds
class TestTimeSinceLastMin:
    """Tests for the TimeSinceLastMin aggregation primitive."""

    primitive = TimeSinceLastMin
    cutoff_time = datetime(2011, 4, 9, 11, 31, 27)
    # Five readings six seconds apart, then four readings nine seconds apart.
    times = pd.Series(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)],
    )
    numerics = pd.Series([1, 0, 2, 8, 2, 5, 1, 3, 7])
    # The minimum value (0) occurs at 10:30:06.
    actual_time_since = cutoff_time - datetime(2011, 4, 9, 10, 30, 6)
    actual_seconds = actual_time_since.total_seconds()

    def test_primitive_func_1(self):
        func = self.primitive().get_function()
        result = func(self.times, self.numerics, time=self.cutoff_time)
        assert result == self.actual_seconds

    def test_no_max(self):
        func = self.primitive().get_function()
        times = pd.Series([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)])
        constants = pd.Series([0] * 5)
        # In a constant series the first occurrence is the minimum.
        expected = self.cutoff_time - datetime(2011, 4, 9, 10, 30, 0)
        assert func(times, constants, time=self.cutoff_time) == expected.total_seconds()

    def test_nans(self):
        func = self.primitive().get_function()
        # NaN entries in both series are ignored when finding the minimum.
        times = pd.concat(
            [self.times.copy(), pd.Series([np.nan, pd.NaT], dtype="datetime64[ns]")],
        )
        numerics = pd.concat(
            [self.numerics.copy(), pd.Series([np.nan, np.nan], dtype="float64")],
        )
        times = times.reset_index(drop=True)
        numerics = numerics.reset_index(drop=True)
        result = func(times, numerics, time=self.cutoff_time)
        assert result == self.actual_seconds
class TestTimeSinceLastTrue:
    """Tests for TimeSinceLastTrue: seconds between a cutoff time and the
    timestamp of the last True value."""

    primitive = TimeSinceLastTrue
    cutoff_time = datetime(2011, 4, 9, 11, 31, 27)
    times = pd.Series(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)],
    )
    booleans = pd.Series([True] * 5 + [False] * 4)
    # The last True is the fifth entry, whose timestamp is 10:30:24.
    actual_time_since = cutoff_time - datetime(2011, 4, 9, 10, 30, 24)
    actual_seconds = actual_time_since.total_seconds()

    def test_primitive_func_1(self):
        primitive_func = self.primitive().get_function()
        assert (
            primitive_func(
                self.times,
                self.booleans,
                time=self.cutoff_time,
            )
            == self.actual_seconds
        )

    def test_no_true(self):
        # With no True values there is no "last True" -> NaN result.
        primitive_func = self.primitive().get_function()
        times = pd.Series([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)])
        booleans = pd.Series([False] * 5)
        assert isnan(primitive_func(times, booleans, time=self.cutoff_time))

    def test_nans(self):
        primitive_func = self.primitive().get_function()
        times = pd.concat(
            [self.times.copy(), pd.Series([np.nan, pd.NaT], dtype="datetime64[ns]")],
        )
        # Bug fix: append two missing values (previously only one), so the
        # booleans series stays the same length as the times series.
        booleans = pd.concat(
            [self.booleans.copy(), pd.Series([np.nan, np.nan], dtype="boolean")],
        )
        times = times.reset_index(drop=True)
        booleans = booleans.reset_index(drop=True)
        assert (
            primitive_func(
                times,
                booleans,
                time=self.cutoff_time,
            )
            == self.actual_seconds
        )

    def test_no_cutofftime(self):
        # Without a cutoff time (and no True values) the result is NaN.
        primitive_func = self.primitive().get_function()
        times = pd.Series([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)])
        booleans = pd.Series([False] * 5)
        assert isnan(primitive_func(times, booleans))

    def test_empty(self):
        # Empty input series -> missing result.
        primitive_func = self.primitive().get_function()
        times = pd.Series([], dtype="datetime64[ns]")
        booleans = pd.Series([], dtype="boolean")
        times = times.reset_index(drop=True)
        answer = primitive_func(
            times,
            booleans,
            time=self.cutoff_time,
        )
        assert pd.isna(answer)
from datetime import datetime
from math import sqrt
import numpy as np
import pandas as pd
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
from pytest import raises
from featuretools.primitives import (
AverageCountPerUnique,
DateFirstEvent,
Entropy,
FirstLastTimeDelta,
HasNoDuplicates,
IsMonotonicallyDecreasing,
IsMonotonicallyIncreasing,
Kurtosis,
MaxCount,
MaxMinDelta,
MedianCount,
MinCount,
NMostCommon,
NMostCommonFrequency,
NumFalseSinceLastTrue,
NumPeaks,
NumTrueSinceLastFalse,
NumZeroCrossings,
NUniqueDays,
NUniqueDaysOfCalendarYear,
NUniqueDaysOfMonth,
NUniqueMonths,
NUniqueWeeks,
PercentTrue,
Trend,
Variance,
get_aggregation_primitives,
)
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
check_serialize,
find_applicable_primitives,
valid_dfs,
)
def test_nmostcommon_categorical():
    """NMostCommon(3) gives identical results for int64, category, and a
    categorical dtype that declares a category absent from the data."""
    n_most = NMostCommon(3)
    expected = pd.Series([1.0, 2.0, np.nan])
    values = [1, 2, 1, 1]

    assert pd.Series(n_most(pd.Series(values).astype("int64"))).equals(expected)
    assert pd.Series(n_most(pd.Series(values).astype("category"))).equals(expected)

    # Value counts include declared-but-unused categories; those zero counts
    # must not appear in the most-common output.
    dtype_with_unused = CategoricalDtype(categories=[1, 2, 3])
    assert pd.Series(n_most(pd.Series(values).astype(dtype_with_unused))).equals(
        expected,
    )
def test_agg_primitives_can_init_without_params():
    """Every registered aggregation primitive is constructible with defaults."""
    for primitive_class in get_aggregation_primitives().values():
        primitive_class()
def test_trend_works_with_different_input_dtypes():
    """Trend returns a slope of ~1 for the same data in several numeric dtypes."""
    dates = pd.to_datetime(["2020-01-01", "2020-01-02", "2020-01-03"])
    base_values = pd.Series([1, 2, 3])
    trend = Trend()
    for dtype in ("float64", "int64", "Int64"):
        assert np.isclose(trend(base_values.astype(dtype), dates), 1)
def test_percent_true_boolean():
    """PercentTrue accepts nullable-boolean input.

    Two of the four entries are True; the expected result is 0.5
    (presumably the primitive counts the pd.NA entry as False — confirm
    against PercentTrue's implementation).
    """
    booleans = pd.Series([True, False, True, pd.NA], dtype="boolean")
    pct_true = PercentTrue()
    # Bug fix: the comparison result was previously discarded (missing
    # `assert`), so this test could never fail.
    assert pct_true(booleans) == 0.5
class TestAverageCountPerUnique(PrimitiveTestBase):
    """Tests for AverageCountPerUnique: mean number of occurrences per
    unique value."""

    primitive = AverageCountPerUnique
    # 10 values over 8 unique entries -> 10 / 8 = 1.25.
    array = pd.Series([1, 1, 2, 2, 3, 4, 5, 6, 7, 8])
    def test_percent_unique(self):
        primitive_func = AverageCountPerUnique().get_function()
        assert primitive_func(self.array) == 1.25
    def test_nans(self):
        # Default: the appended NaN is skipped -> still 10 / 8.
        primitive_func = AverageCountPerUnique().get_function()
        array_nans = pd.concat([self.array.copy(), pd.Series([np.nan])])
        assert primitive_func(array_nans) == 1.25
        # skipna=False: NaN counts as its own value -> 11 values / 9 uniques.
        primitive_func = AverageCountPerUnique(skipna=False).get_function()
        array_nans = pd.concat([self.array.copy(), pd.Series([np.nan])])
        assert primitive_func(array_nans) == (11 / 9.0)
    def test_empty_string(self):
        # Empty strings are real values (12 values / 9 uniques); NaN is not.
        primitive_func = AverageCountPerUnique().get_function()
        array_empty_string = pd.concat([self.array.copy(), pd.Series([np.nan, "", ""])])
        assert primitive_func(array_empty_string) == (4 / 3.0)
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestVariance(PrimitiveTestBase):
    """Tests for the Variance aggregation primitive on arrays and Series,
    including empty, single-element, NaN-containing, and all-NaN inputs."""

    primitive = Variance

    def test_regular(self):
        compute_variance = self.primitive().get_function()
        np.testing.assert_almost_equal(compute_variance(np.array([0, 3, 4, 3])), 2.25)

    def test_single(self):
        # A single observation has zero variance.
        compute_variance = self.primitive().get_function()
        np.testing.assert_almost_equal(compute_variance(np.array([4])), 0)

    def test_double(self):
        compute_variance = self.primitive().get_function()
        np.testing.assert_almost_equal(compute_variance(np.array([3, 4])), 0.25)

    def test_empty(self):
        # No observations -> NaN.
        compute_variance = self.primitive().get_function()
        np.testing.assert_almost_equal(compute_variance(np.array([])), np.nan)

    def test_nan(self):
        # The NaN entry is excluded; variance of [0, 4, 3] remains.
        compute_variance = self.primitive().get_function()
        np.testing.assert_almost_equal(
            compute_variance(pd.Series([0, np.nan, 4, 3])),
            2.8888888888888893,
        )

    def test_allnan(self):
        compute_variance = self.primitive().get_function()
        np.testing.assert_almost_equal(
            compute_variance(pd.Series([np.nan, np.nan, np.nan])),
            np.nan,
        )
class TestFirstLastTimeDelta(PrimitiveTestBase):
    """Tests for FirstLastTimeDelta: seconds between the first and last
    timestamps of a datetime series."""

    primitive = FirstLastTimeDelta
    times = pd.Series([datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)])
    # Last minus first timestamp, in seconds.
    actual_delta = (times.iloc[-1] - times.iloc[0]).total_seconds()
    def test_first_last_time_delta(self):
        primitive_func = self.primitive().get_function()
        assert primitive_func(self.times) == self.actual_delta
    def test_with_nans(self):
        # A trailing NaN is ignored; an all-NaN series yields a missing result.
        primitive_func = self.primitive().get_function()
        times = pd.concat([self.times, pd.Series([np.nan])])
        assert primitive_func(times) == self.actual_delta
        assert pd.isna(primitive_func(pd.Series([np.nan])))
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestEntropy(PrimitiveTestBase):
    """Tests for the Entropy aggregation primitive across category, object,
    and string dtypes, including its ``dropna`` and ``base`` arguments."""

    primitive = Entropy
    @pytest.mark.parametrize(
        "dtype",
        ["category", "object", "string"],
    )
    def test_regular(self, dtype):
        # Distribution {1: 1, 2: 2, 3: 1} -> entropy ~1.03.
        data = pd.Series([1, 2, 3, 2], dtype=dtype)
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert np.isclose(given_answer, 1.03, atol=0.01)
    @pytest.mark.parametrize(
        "dtype",
        ["category", "object", "string"],
    )
    def test_empty(self, dtype):
        # An empty series has zero entropy.
        data = pd.Series([], dtype=dtype)
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert given_answer == 0.0
    @pytest.mark.parametrize(
        "dtype",
        ["category", "object", "string"],
    )
    def test_args(self, dtype):
        # With dropna=True the two missing values are excluded; base=2 gives
        # the entropy in bits (~1.5 for {1: 1, 2: 2, 3: 1}).
        data = pd.Series([1, 2, 3, 2], dtype=dtype)
        if dtype == "string":
            data = pd.concat([data, pd.Series([pd.NA, pd.NA], dtype=dtype)])
        else:
            data = pd.concat([data, pd.Series([np.nan, np.nan], dtype=dtype)])
        primitive_func = self.primitive(dropna=True, base=2).get_function()
        given_answer = primitive_func(data)
        assert np.isclose(given_answer, 1.5, atol=0.001)
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive, max_depth=2)
class TestKurtosis(PrimitiveTestBase):
    """Tests for the Kurtosis aggregation primitive, including its
    ``nan_policy`` argument and non-finite input handling."""

    primitive = Kurtosis

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "float64"],
    )
    def test_regular(self, dtype):
        data = pd.Series([1, 2, 3, 4, 5], dtype=dtype)
        answer = -1.3
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert np.isclose(answer, given_answer, atol=0.01)
        data = pd.Series([1, 2, 3, 4, 5, 6], dtype=dtype)
        answer = -1.26
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert np.isclose(answer, given_answer, atol=0.01)
        data = pd.Series([x * x for x in list(range(100))], dtype=dtype)
        answer = -0.85
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert np.isclose(answer, given_answer, atol=0.01)
        if dtype == "float64":
            # Series contains floating point values - only check with float dtype
            data = pd.Series([sqrt(x) for x in list(range(100))], dtype=dtype)
            answer = -0.46
            primitive_func = self.primitive().get_function()
            given_answer = primitive_func(data)
            assert np.isclose(answer, given_answer, atol=0.01)

    def test_nan(self):
        # Default policy: a NaN in the input yields a missing result.
        data = pd.Series([np.nan, 5, 3], dtype="float64")
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert pd.isna(given_answer)

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "float64"],
    )
    def test_empty(self, dtype):
        data = pd.Series([], dtype=dtype)
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert pd.isna(given_answer)

    def test_inf(self):
        # Infinite values make kurtosis undefined.
        data = pd.Series([1, np.inf], dtype="float64")
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert pd.isna(given_answer)
        # Bug fix: np.NINF was removed in NumPy 2.0; -np.inf is the same value.
        data = pd.Series([-np.inf, 1, np.inf], dtype="float64")
        primitive_func = self.primitive().get_function()
        given_answer = primitive_func(data)
        assert pd.isna(given_answer)

    def test_arg(self):
        data = pd.Series([1, 2, 3, 4, 5, np.nan, np.nan], dtype="float64")
        answer = -1.3
        primitive_func = self.primitive(nan_policy="omit").get_function()
        given_answer = primitive_func(data)
        assert answer == given_answer
        primitive_func = self.primitive(nan_policy="propagate").get_function()
        given_answer = primitive_func(data)
        assert np.isnan(given_answer)
        primitive_func = self.primitive(nan_policy="raise").get_function()
        with raises(ValueError):
            primitive_func(data)

    def test_error(self):
        # Unrecognized nan_policy values must be rejected.
        with raises(ValueError):
            self.primitive(nan_policy="invalid_policy").get_function()

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNumZeroCrossings(PrimitiveTestBase):
    """Tests for NumZeroCrossings: the number of sign changes in a series."""

    primitive = NumZeroCrossings

    def test_nan(self):
        # NaNs are skipped; only the transition 0 -> -2 counts as a crossing.
        data = pd.Series([3, np.nan, 5, 3, np.nan, 0, np.nan, 0, np.nan, -2])
        answer = 1
        # Fixed local-name typo: primtive_func -> primitive_func.
        primitive_func = self.primitive().get_function()
        assert primitive_func(data) == answer

    def test_empty(self):
        # An empty series has no crossings.
        data = pd.Series([], dtype="int64")
        primitive_func = self.primitive().get_function()
        assert primitive_func(data) == 0

    def test_inf(self):
        # Infinities carry sign, so they participate in crossings.
        primitive_func = self.primitive().get_function()
        assert primitive_func(pd.Series([-1, np.inf])) == 1
        # Bug fix: np.NINF was removed in NumPy 2.0; -np.inf is the same value.
        assert primitive_func(pd.Series([-np.inf, 1, np.inf])) == 1

    def test_zeros(self):
        primitive_func = self.primitive().get_function()
        # Sign alternates through each zero: three crossings.
        assert primitive_func(pd.Series([1, 0, -1, 0, 1, 0, -1])) == 3
        # Sign never actually changes (always returns to positive): zero.
        assert primitive_func(pd.Series([1, 0, 1, 0, 1])) == 0

    def test_regular(self):
        primitive_func = self.primitive().get_function()
        assert primitive_func(pd.Series([1, 2, 3, 4, 5])) == 0
        assert primitive_func(pd.Series([1, -1, 2, -2, 3, -3])) == 5

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNumTrueSinceLastFalse(PrimitiveTestBase):
    """Tests for NumTrueSinceLastFalse: count of True values after the most
    recent False."""

    primitive = NumTrueSinceLastFalse
    def test_regular(self):
        primitive_func = self.primitive().get_function()
        bools = pd.Series([False, True, False, True, True])
        answer = primitive_func(bools)
        correct_answer = 2
        assert answer == correct_answer
    def test_regular_end_in_false(self):
        # A trailing False resets the count to zero.
        primitive_func = self.primitive().get_function()
        bools = pd.Series([False, True, False, True, True, False])
        answer = primitive_func(bools)
        correct_answer = 0
        assert answer == correct_answer
    def test_no_false(self):
        # With no False anchor the result is missing.
        primitive_func = self.primitive().get_function()
        bools = pd.Series([True] * 5)
        assert pd.isna(primitive_func(bools))
    def test_all_false(self):
        primitive_func = self.primitive().get_function()
        bools = pd.Series([False, False, False])
        answer = primitive_func(bools)
        correct_answer = 0
        assert answer == correct_answer
    def test_nan(self):
        # NaN is truthy here: three truthy values follow the single False.
        primitive_func = self.primitive().get_function()
        bools = pd.Series([False, True, np.nan, True, True])
        answer = primitive_func(bools)
        correct_answer = 3
        assert answer == correct_answer
    def test_all_nan(self):
        primitive_func = self.primitive().get_function()
        bools = pd.Series([np.nan, np.nan, np.nan])
        assert pd.isna(primitive_func(bools))
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNumFalseSinceLastTrue(PrimitiveTestBase):
    """Tests for NumFalseSinceLastTrue: count of False values after the most
    recent True."""

    primitive = NumFalseSinceLastTrue
    def test_regular(self):
        primitive_func = self.primitive().get_function()
        bools = pd.Series([True, False, True, False, False])
        answer = primitive_func(bools)
        correct_answer = 2
        assert answer == correct_answer
    def test_regular_end_in_true(self):
        # A trailing True resets the count to zero.
        primitive_func = self.primitive().get_function()
        bools = pd.Series([True, False, True, False, False, True])
        answer = primitive_func(bools)
        correct_answer = 0
        assert answer == correct_answer
    def test_no_true(self):
        # With no True anchor the result is missing.
        primitive_func = self.primitive().get_function()
        bools = pd.Series([False] * 5)
        assert pd.isna(primitive_func(bools))
    def test_all_true(self):
        primitive_func = self.primitive().get_function()
        bools = pd.Series([True, True, True])
        answer = primitive_func(bools)
        correct_answer = 0
        assert answer == correct_answer
    def test_nan(self):
        # The NaN entry counts among the falsy values after the lone True.
        primitive_func = self.primitive().get_function()
        bools = pd.Series([True, False, np.nan, False, False])
        answer = primitive_func(bools)
        correct_answer = 3
        assert answer == correct_answer
    def test_all_nan(self):
        primitive_func = self.primitive().get_function()
        bools = pd.Series([np.nan, np.nan, np.nan])
        assert pd.isna(primitive_func(bools))
    def test_numeric_and_string_input(self):
        # Truthiness applies: "10" is the last truthy value, followed by "".
        primitive_func = self.primitive().get_function()
        bools = pd.Series([True, 0, 1, "10", ""])
        answer = primitive_func(bools)
        correct_answer = 1
        assert answer == correct_answer
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNumPeaks(PrimitiveTestBase):
    """Tests for NumPeaks: counts local maxima; a plateau at the top counts
    as a single peak and missing values are ignored."""

    primitive = NumPeaks

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "float64", "Int64"],
    )
    def test_negative_and_positive_nums(self, dtype):
        get_peaks = self.primitive().get_function()
        series = pd.Series([-5, 0, 10, 0, 10, -5, -4, -5, 10, 0], dtype=dtype)
        assert get_peaks(series) == 4

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "float64", "Int64"],
    )
    def test_plateu(self, dtype):
        get_peaks = self.primitive().get_function()
        # A flat run at the top is still one peak.
        assert get_peaks(pd.Series([1, 2, 3, 3, 3, 3, 3, 2, 1], dtype=dtype)) == 1
        assert get_peaks(pd.Series([1, 2, 3, 3, 3, 4, 3, 3, 3, 2, 1], dtype=dtype)) == 1
        plateau_then_peak = [5, 4, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 3, 3, 3, 3, 4]
        assert get_peaks(pd.Series(plateau_then_peak, dtype=dtype)) == 1
        three_flat_peaks = [1, 2, 3, 3, 3, 2, 1, 2, 3, 3, 3, 2, 5, 5, 5, 2]
        assert get_peaks(pd.Series(three_flat_peaks, dtype=dtype)) == 3

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "float64", "Int64"],
    )
    def test_regular(self, dtype):
        get_peaks = self.primitive().get_function()
        assert get_peaks(pd.Series([1, 7, 3, 8, 2, 3, 4, 3, 4, 2, 4], dtype=dtype)) == 4
        assert get_peaks(pd.Series([1, 2, 3, 2, 1], dtype=dtype)) == 1

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "float64", "Int64"],
    )
    def test_no_peak(self, dtype):
        get_peaks = self.primitive().get_function()
        assert get_peaks(pd.Series([1, 2, 3], dtype=dtype)) == 0
        assert get_peaks(pd.Series([3, 2, 2, 2, 2, 1], dtype=dtype)) == 0

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "float64", "Int64"],
    )
    def test_too_small_data(self, dtype):
        # Fewer than three points can never form a peak.
        get_peaks = self.primitive().get_function()
        assert get_peaks(pd.Series([], dtype=dtype)) == 0
        assert get_peaks(pd.Series([1])) == 0
        assert get_peaks(pd.Series([1, 1])) == 0
        assert get_peaks(pd.Series([1, 2])) == 0
        assert get_peaks(pd.Series([2, 1])) == 0

    @pytest.mark.parametrize(
        "dtype",
        ["int64", "float64", "Int64"],
    )
    def test_nans(self, dtype):
        get_peaks = self.primitive().get_function()
        values = [0, 5, 10, 15, 20, 0, 1, 2, 3, 0, 0, 5, 0, 7, 14]
        array = pd.Series(values, dtype=dtype)
        # Trailing missing values must not change the count.
        if dtype == "float64":
            array = pd.concat([array, pd.Series([np.nan, np.nan])])
        elif dtype == "Int64":
            array = pd.concat([array, pd.Series([pd.NA, pd.NA])])
        array = array.astype(dtype=dtype)
        assert get_peaks(array) == 3

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        aggregation.append(self.primitive())
        valid_dfs(es, aggregation, transform, self.primitive)
class TestDateFirstEvent(PrimitiveTestBase):
    """Tests for DateFirstEvent: the earliest timestamp in a datetime series."""

    primitive = DateFirstEvent
    def test_regular(self):
        primitive_func = self.primitive().get_function()
        case = pd.Series(
            [
                "2011-04-09 10:30:00",
                "2011-04-09 10:30:06",
                "2011-04-09 10:30:12",
                "2011-04-09 10:30:18",
            ],
            dtype="datetime64[ns]",
        )
        answer = pd.Timestamp("2011-04-09 10:30:00")
        given_answer = primitive_func(case)
        assert given_answer == answer
    def test_nat(self):
        # Leading NaT entries are skipped; the first valid timestamp wins.
        primitive_func = self.primitive().get_function()
        case = pd.Series(
            [
                pd.NaT,
                pd.NaT,
                "2011-04-09 10:30:12",
                "2011-04-09 10:30:18",
            ],
            dtype="datetime64[ns]",
        )
        answer = pd.Timestamp("2011-04-09 10:30:12")
        given_answer = primitive_func(case)
        assert given_answer == answer
    def test_empty(self):
        primitive_func = self.primitive().get_function()
        case = pd.Series([], dtype="datetime64[ns]")
        given_answer = primitive_func(case)
        assert pd.isna(given_answer)
    def test_with_featuretools(self, pd_es):
        # Uses the pandas-only entityset fixture (pd_es).
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(pd_es, aggregation, transform, self.primitive)
    def test_serialize(self, es):
        check_serialize(self.primitive, es, target_dataframe_name="sessions")
class TestMinCount(PrimitiveTestBase):
    """Tests for MinCount: the number of occurrences of the minimum value."""

    primitive = MinCount

    def test_nan(self):
        # All-NaN input yields a missing result.
        data = pd.Series([np.nan, np.nan, np.nan])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert pd.isna(answer)

    def test_inf(self):
        # +inf values do not affect the count of the (finite) minimum.
        data = pd.Series([5, 10, 10, np.inf, np.inf, np.inf])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert answer == 1

    def test_regular(self):
        data = pd.Series([1, 2, 2, 2, 3, 4, 4, 4, 5])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert answer == 1
        data = pd.Series([2, 2, 2, 3, 4, 4, 4])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert answer == 3

    def test_skipna(self):
        # With skipna=False a NaN in the input propagates to the result.
        data = pd.Series([1, 1, 2, 3, 4, 4, np.nan, 5])
        primitive_func = self.primitive(skipna=False).get_function()
        answer = primitive_func(data)
        assert pd.isna(answer)

    def test_ninf(self):
        # Bug fix: np.NINF was removed in NumPy 2.0; -np.inf is the same value.
        data = pd.Series([-np.inf, -np.inf, np.nan])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert answer == 2

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestMaxCount(PrimitiveTestBase):
    """Tests for MaxCount: the number of occurrences of the maximum value."""

    primitive = MaxCount

    def test_nan(self):
        # All-NaN input yields a missing result.
        data = pd.Series([np.nan, np.nan, np.nan])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert pd.isna(answer)

    def test_inf(self):
        # +inf is the maximum here and appears three times.
        data = pd.Series([5, 10, 10, np.inf, np.inf, np.inf])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert answer == 3

    def test_regular(self):
        data = pd.Series([1, 1, 2, 3, 4, 4, 4, 5])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert answer == 1
        data = pd.Series([1, 1, 2, 3, 4, 4, 4])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert answer == 3

    def test_skipna(self):
        # With skipna=False a NaN in the input propagates to the result.
        data = pd.Series([1, 1, 2, 3, 4, 4, np.nan, 5])
        primitive_func = self.primitive(skipna=False).get_function()
        answer = primitive_func(data)
        assert pd.isna(answer)

    def test_ninf(self):
        # Bug fix: np.NINF was removed in NumPy 2.0; -np.inf is the same value.
        data = pd.Series([-np.inf, -np.inf, np.nan])
        primitive_func = self.primitive().get_function()
        answer = primitive_func(data)
        assert answer == 2

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestMaxMinDelta(PrimitiveTestBase):
    """Tests for MaxMinDelta: the difference between the maximum and
    minimum values of a series."""

    primitive = MaxMinDelta
    # max 8, min 1 -> delta of 7.
    array = pd.Series([1, 1, 2, 2, 3, 4, 5, 6, 7, 8])

    def test_max_min_delta(self):
        compute_delta = self.primitive().get_function()
        assert compute_delta(self.array) == 7.0

    def test_nans(self):
        # Default: NaN is skipped. skipna=False: NaN propagates.
        with_nan = pd.concat([self.array, pd.Series([np.nan])])
        compute_delta = self.primitive().get_function()
        assert compute_delta(with_nan) == 7.0
        strict_delta = self.primitive(skipna=False).get_function()
        with_nan = pd.concat([self.array, pd.Series([np.nan])])
        assert pd.isna(strict_delta(with_nan))

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        aggregation.append(self.primitive())
        valid_dfs(es, aggregation, transform, self.primitive)
class TestMedianCount(PrimitiveTestBase):
    """Tests for MedianCount: the number of occurrences of the median value."""

    primitive = MedianCount

    def test_regular(self):
        count_median = self.primitive().get_function()
        # Median of [1, 3, 5, 7] is 4, which never occurs in the data.
        assert count_median(pd.Series([1, 3, 5, 7])) == 0

    def test_nans(self):
        values = pd.Series([1, 3, 4, 4, 4, 5, 7, np.nan, np.nan])
        # Default: NaNs are skipped; the median 4 appears three times.
        count_median = self.primitive().get_function()
        assert count_median(values) == 3
        # skipna=False: NaN propagates to the result.
        strict_count = self.primitive(skipna=False).get_function()
        assert pd.isna(strict_count(values))

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        aggregation.append(self.primitive())
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNMostCommonFrequency(PrimitiveTestBase):
    """Tests for NMostCommonFrequency: the counts of the n most common
    values, padded with NaN when fewer than n distinct values exist."""

    primitive = NMostCommonFrequency

    def test_regular(self):
        get_frequencies = self.primitive(3).get_function()
        cases = [
            pd.Series([8, 7, 10, 10, 10, 3, 4, 5, 10, 8, 7]),
            pd.Series([7, 7, 7, 6, 6, 5, 4]),
            pd.Series([4, 5, 6, 6, 7, 7, 7]),
        ]
        expected = [
            pd.Series([4, 2, 2]),
            pd.Series([3, 2, 1]),
            pd.Series([3, 2, 1]),
        ]
        for case, answer in zip(cases, expected):
            result = get_frequencies(case).reset_index(drop=True)
            assert result.equals(answer)

    def test_n_larger_than_len(self):
        # Fewer distinct values than n -> trailing NaN padding.
        get_frequencies = self.primitive(5).get_function()
        cases = [
            pd.Series(["red", "red", "blue", "green"]),
            pd.Series(["red", "red", "red", "blue", "green"]),
            pd.Series(["red", "blue", "green", "orange"]),
        ]
        expected = [
            pd.Series([2, 1, 1, np.nan, np.nan]),
            pd.Series([3, 1, 1, np.nan, np.nan]),
            pd.Series([1, 1, 1, 1, np.nan]),
        ]
        for case, answer in zip(cases, expected):
            result = get_frequencies(case).reset_index(drop=True)
            assert result.equals(answer)

    def test_skipna(self):
        # With skipna=False the missing values form their own group of two.
        values = pd.Series(["red", "red", "blue", "green", np.nan, np.nan])
        get_frequencies = self.primitive(5, skipna=False).get_function()
        result = get_frequencies(values).reset_index(drop=True)
        assert result.equals(pd.Series([2, 2, 1, 1, np.nan]))

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        aggregation.append(self.primitive(5))
        valid_dfs(
            es,
            aggregation,
            transform,
            self.primitive,
            target_dataframe_name="customers",
            multi_output=True,
        )

    def test_with_featuretools_args(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        aggregation.append(self.primitive(5, skipna=False))
        valid_dfs(
            es,
            aggregation,
            transform,
            self.primitive,
            target_dataframe_name="customers",
            multi_output=True,
        )

    def test_serialize(self, es):
        check_serialize(
            primitive=self.primitive,
            es=es,
            target_dataframe_name="customers",
        )
class TestNUniqueDays(PrimitiveTestBase):
    """Tests for NUniqueDays: the number of distinct calendar dates."""

    primitive = NUniqueDays
    def test_two_years(self):
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2011-12-31"))
        assert primitive_func(array) == 365 * 2
    def test_leap_year(self):
        # 2016 is a leap year, adding one day.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2016-01-01", "2017-12-31"))
        assert primitive_func(array) == 365 * 2 + 1
    def test_ten_years(self):
        # 2012 and 2016 are leap years within the range.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2019-12-31"))
        assert primitive_func(array) == 365 * 10 + 1 + 1
    def test_distinct_dt(self):
        # Timestamps on the same date collapse to one day (4 distinct dates).
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                datetime(2019, 2, 21),
                datetime(2019, 2, 1, 1, 20, 0),
                datetime(2019, 2, 1, 1, 30, 0),
                datetime(2018, 2, 1),
                datetime(2019, 1, 1),
            ],
        )
        assert primitive_func(array) == 4
    def test_NaT(self):
        # NaT entries do not contribute to the count.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2011-12-31"))
        NaT_array = pd.Series([pd.NaT] * 100)
        assert primitive_func(pd.concat([array, NaT_array])) == 365 * 2
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNUniqueDaysOfCalendarYear(PrimitiveTestBase):
    """Tests for NUniqueDaysOfCalendarYear: distinct (month, day) pairs,
    capped at 366 regardless of how many years the data spans."""

    primitive = NUniqueDaysOfCalendarYear
    def test_two_years(self):
        # Neither 2010 nor 2011 is a leap year -> 365 distinct calendar days.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2011-12-31"))
        assert primitive_func(array) == 365
    def test_leap_year(self):
        # 2016 contributes Feb 29 -> 366.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2016-01-01", "2017-12-31"))
        assert primitive_func(array) == 366
    def test_ten_years(self):
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2019-12-31"))
        assert primitive_func(array) == 366
    def test_distinct_dt(self):
        # Feb 21, Feb 1 (shared across years and times), Jan 1 -> 3 distinct.
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                datetime(2019, 2, 21),
                datetime(2019, 2, 1, 1, 20, 0),
                datetime(2019, 2, 1, 1, 30, 0),
                datetime(2018, 2, 1),
                datetime(2019, 1, 1),
            ],
        )
        assert primitive_func(array) == 3
    def test_NaT(self):
        # NaT entries do not contribute to the count.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2011-12-31"))
        NaT_array = pd.Series([pd.NaT] * 100)
        assert primitive_func(pd.concat([array, NaT_array])) == 365
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNUniqueDaysOfMonth(PrimitiveTestBase):
    """Tests for NUniqueDaysOfMonth: distinct day-of-month values (1-31)."""

    primitive = NUniqueDaysOfMonth
    def test_two_days(self):
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2010-01-02"))
        assert primitive_func(array) == 2
    def test_one_year(self):
        # A full year covers every possible day-of-month.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2010-12-31"))
        assert primitive_func(array) == 31
    def test_leap_year(self):
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2016-01-01", "2017-12-31"))
        assert primitive_func(array) == 31
    def test_distinct_dt(self):
        # Only days 21 and 1 occur -> 2 distinct days of month.
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                datetime(2019, 2, 21),
                datetime(2019, 2, 1, 1, 20, 0),
                datetime(2019, 2, 1, 1, 30, 0),
                datetime(2018, 2, 1),
                datetime(2019, 1, 1),
            ],
        )
        assert primitive_func(array) == 2
    def test_NaT(self):
        # NaT entries do not contribute to the count.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2010-12-31"))
        NaT_array = pd.Series([pd.NaT] * 100)
        assert primitive_func(pd.concat([array, NaT_array])) == 31
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNUniqueMonths(PrimitiveTestBase):
    """Tests for NUniqueMonths: distinct (year, month) pairs — months in
    different years count separately."""

    primitive = NUniqueMonths
    def test_two_days(self):
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2010-01-02"))
        assert primitive_func(array) == 1
    def test_ten_years(self):
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2019-12-31"))
        assert primitive_func(array) == 12 * 10
    def test_distinct_dt(self):
        # Feb 2019, Feb 2018, and Jan 2019 -> 3 distinct year-months.
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                datetime(2019, 2, 21),
                datetime(2019, 2, 1, 1, 20, 0),
                datetime(2019, 2, 1, 1, 30, 0),
                datetime(2018, 2, 1),
                datetime(2019, 1, 1),
            ],
        )
        assert primitive_func(array) == 3
    def test_NaT(self):
        # NaT entries do not contribute to the count.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2011-12-31"))
        NaT_array = pd.Series([pd.NaT] * 100)
        assert primitive_func(pd.concat([array, NaT_array])) == 12 * 2
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNUniqueWeeks(PrimitiveTestBase):
    """Tests for NUniqueWeeks: distinct calendar weeks (weeks in different
    years count separately)."""

    primitive = NUniqueWeeks
    def test_same_week(self):
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2019-01-01", "2019-01-02"))
        assert primitive_func(array) == 1
    def test_ten_years(self):
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2010-01-01", "2019-12-31"))
        assert primitive_func(array) == 523
    def test_distinct_dt(self):
        # Four distinct weeks across the listed timestamps.
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                datetime(2019, 2, 21),
                datetime(2019, 2, 1, 1, 20, 0),
                datetime(2019, 2, 1, 1, 30, 0),
                datetime(2018, 2, 2),
                datetime(2019, 2, 3, 1, 30, 0),
                datetime(2019, 1, 1),
            ],
        )
        assert primitive_func(array) == 4
    def test_NaT(self):
        # NaT entries do not contribute to the count.
        primitive_func = self.primitive().get_function()
        array = pd.Series(pd.date_range("2019-01-01", "2019-01-02"))
        NaT_array = pd.Series([pd.NaT] * 100)
        assert primitive_func(pd.concat([array, NaT_array])) == 1
    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        aggregation.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestHasNoDuplicates(PrimitiveTestBase):
    primitive = HasNoDuplicates

    def test_regular(self):
        """The primitive returns a plain bool flagging duplicate-free input."""
        func = self.primitive().get_function()
        cases = [
            ([1, 1, 2], False),
            ([1, 2, 3], True),
            ([1, 2, 4], True),
            (["red", "blue", "orange"], True),
        ]
        for values, expected in cases:
            result = func(pd.Series(values))
            assert bool(result) == expected
            assert isinstance(result, bool)
        assert not func(pd.Series(["red", "blue", "red"]))

    def test_nan(self):
        """NaNs are dropped by default; skipna=False treats them as values."""
        func = self.primitive().get_function()
        result = func(pd.Series([np.nan, 1, 2, 3]))
        assert result
        assert isinstance(result, bool)
        # Both NaNs are dropped, so only a single value remains.
        result = func(pd.Series([np.nan, np.nan, 1]))
        assert result is True
        assert isinstance(result, bool)
        # With skipna=False the repeated NaNs count as duplicates.
        strict = self.primitive(skipna=False).get_function()
        data = pd.Series([np.nan, np.nan, 1])
        assert strict(data) is False
        assert isinstance(strict(data), bool)

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        aggregation.append(self.primitive())
        valid_dfs(
            es,
            aggregation,
            transform,
            self.primitive,
            target_dataframe_name="customers",
            instance_ids=[0, 1, 2],
        )
class TestIsMonotonicallyDecreasing(PrimitiveTestBase):
    primitive = IsMonotonicallyDecreasing

    def _run(self, values):
        """Apply the primitive to a raw list of values."""
        return self.primitive().get_function()(pd.Series(values))

    def test_monotonically_decreasing(self):
        assert self._run([9, 5, 3, 1, -1]) is True

    def test_monotonically_increasing(self):
        assert self._run([-1, 1, 3, 5, 9]) is False

    def test_non_monotonic(self):
        assert self._run([-1, 1, 3, 2, 5]) is False

    def test_weakly_decreasing(self):
        # Equal neighbors still count as decreasing.
        assert self._run([9, 3, 3, 1, -1]) is True

    def test_nan(self):
        # NaNs do not break an otherwise monotonic sequence.
        assert self._run([9, 5, 3, np.nan, 1, -1]) is True
        assert self._run([-1, 1, 3, np.nan, 5, 9]) is False

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        aggregation.append(self.primitive())
        valid_dfs(es, aggregation, transform, self.primitive)
class TestIsMonotonicallyIncreasing(PrimitiveTestBase):
    primitive = IsMonotonicallyIncreasing

    def _run(self, values):
        """Apply the primitive to a raw list of values."""
        return self.primitive().get_function()(pd.Series(values))

    def test_monotonically_increasing(self):
        assert self._run([-1, 1, 3, 5, 9]) is True

    def test_monotonically_decreasing(self):
        assert self._run([9, 5, 3, 1, -1]) is False

    def test_non_monotonic(self):
        assert self._run([-1, 1, 3, 2, 5]) is False

    def test_weakly_increasing(self):
        # Equal neighbors still count as increasing.
        assert self._run([-1, 1, 3, 3, 9]) is True

    def test_nan(self):
        # NaNs do not break an otherwise monotonic sequence.
        assert self._run([-1, 1, 3, np.nan, 5, 9]) is True
        assert self._run([9, 5, 3, np.nan, 1, -1]) is False

    def test_with_featuretools(self, es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        aggregation.append(self.primitive())
        valid_dfs(es, aggregation, transform, self.primitive)
| 42,360 | 33.524042 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/aggregation_primitive_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_percent_unique.py | import numpy as np
import pandas as pd
from featuretools.primitives import PercentUnique
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
)
class TestPercentUnique(PrimitiveTestBase):
    # Ten entries, eight distinct values.
    array = pd.Series([1, 1, 2, 2, 3, 4, 5, 6, 7, 8])
    primitive = PercentUnique

    def test_percent_unique(self):
        func = self.primitive().get_function()
        assert func(self.array) == (8 / 10.0)

    def test_nans(self):
        """A NaN is excluded by default; skipna=False counts it as unique."""
        extended = pd.concat([self.array.copy(), pd.Series([np.nan])])
        assert self.primitive().get_function()(extended) == (8 / 11.0)
        assert self.primitive(skipna=False).get_function()(extended) == (9 / 11.0)

    def test_multiple_nans(self):
        """Repeated NaNs still count as a single unique value."""
        extended = pd.concat([self.array.copy(), pd.Series([np.nan] * 3)])
        assert self.primitive().get_function()(extended) == (8 / 13.0)
        assert self.primitive(skipna=False).get_function()(extended) == (9 / 13.0)

    def test_empty_string(self):
        """Empty strings are a real value distinct from NaN."""
        extended = pd.concat([self.array.copy(), pd.Series([np.nan, "", ""])])
        assert self.primitive().get_function()(extended) == (9 / 13.0)
| 1,380 | 37.361111 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_latlong_primitives.py | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import CityblockDistance, GeoMidpoint, IsInGeoBox
def test_cityblock():
    """Cityblock distance in miles (the default) and in kilometers."""
    points_a = pd.Series([(i, i) for i in range(3)])
    points_b = pd.Series([(i, i) for i in range(3, 6)])
    miles = CityblockDistance()(points_a, points_b)
    expected_miles = pd.Series([414.56051391, 414.52893691, 414.43421555])
    np.testing.assert_allclose(miles, expected_miles, rtol=1e-09)
    km = CityblockDistance(unit="kilometers")(points_a, points_b)
    expected_km = pd.Series([667.1704814, 667.11966315, 666.96722389])
    np.testing.assert_allclose(expected_km, km, rtol=1e-09)
def test_cityblock_nans():
    """A NaN in either coordinate of either point yields a NaN distance."""
    prim = CityblockDistance()
    points_a = [(i, i) for i in range(2)]
    points_b = [(i, i) for i in range(2, 4)]
    points_a += [(1, 1), (np.nan, 3), (4, np.nan), (np.nan, np.nan)]
    points_b += [(np.nan, np.nan), (np.nan, 5), (6, np.nan), (np.nan, np.nan)]
    expected = pd.Series([276.37367594, 276.35262728] + [np.nan] * 4)
    np.testing.assert_allclose(expected, prim(points_a, points_b), rtol=1e-09)
def test_cityblock_error():
    """An unrecognized unit is rejected at construction time."""
    with pytest.raises(ValueError, match="Invalid unit given"):
        CityblockDistance(unit="invalid")
def test_midpoint():
    """The midpoint of opposite corner coordinates is the origin."""
    first = pd.Series([(-90, -180), (90, 180)])
    second = pd.Series([(90, 180), (-90, -180)])
    for lat, lon in GeoMidpoint().get_function()(first, second):
        assert lat == 0.0
        assert lon == 0.0
def test_midpoint_floating():
    """Floating-point opposites also meet at the origin."""
    first = pd.Series([(-45.5, -100.5), (45.5, 100.5)])
    second = pd.Series([(45.5, 100.5), (-45.5, -100.5)])
    for lat, lon in GeoMidpoint().get_function()(first, second):
        assert lat == 0.0
        assert lon == 0.0
def test_midpoint_zeros():
    """The midpoint of two origin points is still the origin."""
    first = pd.Series([(0, 0), (0, 0)])
    second = pd.Series([(0, 0), (0, 0)])
    for lat, lon in GeoMidpoint().get_function()(first, second):
        assert lat == 0.0
        assert lon == 0.0
def test_midpoint_nan():
    """A fully-NaN latlong propagates NaN into both midpoint components."""
    nans = pd.Series([(np.nan, np.nan), (np.nan, np.nan)])
    origins = pd.Series([(0, 0), (0, 0)])
    for lat, lon in GeoMidpoint().get_function()(nans, origins):
        assert np.isnan(lat)
        assert np.isnan(lon)
def test_isingeobox():
    """Points inside the box (boundary included) are True; NaNs are False."""
    points = pd.Series(
        [
            (1, 2),
            (5, 7),
            (-5, 4),
            (2, 3),
            (0, 0),
            (np.nan, np.nan),
            (-2, np.nan),
            (np.nan, 1),
        ],
    )
    func = IsInGeoBox((-5, -5), (5, 5)).get_function()
    expected = pd.Series([True, False, True, True, True, False, False, False])
    assert np.array_equal(func(points), expected)
def test_boston():
    """Only the Boston-area city lies inside the Lynn-to-Dedham box."""
    nyc = (40.7128, -74.0060)
    sf = (37.7749, -122.4194)
    somerville = (42.3876, -71.0995)
    beijing = (39.9042, 116.4074)
    cape_town = (-33.9249, 18.4241)
    cities = pd.Series([nyc, sf, somerville, beijing, cape_town])
    lynn_ma = (42.4668, -70.9495)
    dedham_ma = (42.2436, -71.1677)
    func = IsInGeoBox(lynn_ma, dedham_ma).get_function()
    expected = pd.Series([False, False, True, False, False])
    assert np.array_equal(func(cities), expected)
| 3,786 | 31.930435 | 82 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_cumulative_time_since.py | from datetime import datetime
import numpy as np
import pandas as pd
from featuretools.primitives import (
CumulativeTimeSinceLastFalse,
CumulativeTimeSinceLastTrue,
)
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestCumulativeTimeSinceLastTrue(PrimitiveTestBase):
    primitive = CumulativeTimeSinceLastTrue
    booleans = pd.Series([False, True, False, True, False, False])
    # One reading every six seconds.
    datetimes = pd.Series(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(len(booleans))],
    )
    answer = pd.Series([np.nan, 0, 6, 0, 6, 12])

    def test_regular(self):
        """Seconds since the most recent True, restarting at each True."""
        func = self.primitive().get_function()
        assert func(self.datetimes, self.booleans).equals(self.answer)

    def test_all_false(self):
        """With no True present every cumulative time is NaN."""
        func = self.primitive().get_function()
        flags = pd.Series([False] * 3)
        times = pd.Series(
            [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(len(flags))],
        )
        assert func(times, flags).equals(pd.Series([np.nan] * 3))

    def test_all_nan(self):
        func = self.primitive().get_function()
        times = pd.Series([np.nan] * 4)
        flags = pd.Series([np.nan] * 4)
        assert func(times, flags).equals(pd.Series([np.nan] * 4))

    def test_some_nans(self):
        """NaN timestamps at either end produce NaN results there."""
        func = self.primitive().get_function()
        flags = pd.Series(
            [
                False,
                True,
                False,
                True,
                False,
                False,
                True,
                True,
                False,
                False,
            ],
        )
        # Pad the class-level datetimes with NaN rows on both ends.
        times = pd.concat(
            [pd.Series([np.nan] * 2), self.datetimes, pd.Series([np.nan] * 2)],
        ).reset_index(drop=True)
        expected = pd.Series(
            [
                np.nan,
                np.nan,
                np.nan,
                0,
                6,
                12,
                0,
                0,
                np.nan,
                np.nan,
            ],
        )
        assert func(times, flags).equals(expected)

    def test_with_featuretools(self, pd_es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        transform.append(self.primitive())
        valid_dfs(pd_es, aggregation, transform, self.primitive)
class TestCumulativeTimeSinceLastFalse(PrimitiveTestBase):
    primitive = CumulativeTimeSinceLastFalse
    booleans = pd.Series([True, False, True, False, True, True])
    # One reading every six seconds.
    datetimes = pd.Series(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(len(booleans))],
    )
    answer = pd.Series([np.nan, 0, 6, 0, 6, 12])

    def test_regular(self):
        """Seconds since the most recent False, restarting at each False."""
        func = self.primitive().get_function()
        assert func(self.datetimes, self.booleans).equals(self.answer)

    def test_all_true(self):
        """With no False present every cumulative time is NaN."""
        func = self.primitive().get_function()
        flags = pd.Series([True] * 3)
        times = pd.Series(
            [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(len(flags))],
        )
        assert func(times, flags).equals(pd.Series([np.nan] * 3))

    def test_all_nan(self):
        func = self.primitive().get_function()
        times = pd.Series([np.nan] * 4)
        flags = pd.Series([np.nan] * 4)
        assert func(times, flags).equals(pd.Series([np.nan] * 4))

    def test_some_nans(self):
        """NaN timestamps at either end produce NaN results there."""
        func = self.primitive().get_function()
        flags = pd.Series(
            [
                True,
                False,
                True,
                False,
                True,
                True,
                False,
                False,
                True,
                True,
            ],
        )
        # Pad the class-level datetimes with NaN rows on both ends.
        times = pd.concat(
            [pd.Series([np.nan] * 2), self.datetimes, pd.Series([np.nan] * 2)],
        ).reset_index(drop=True)
        expected = pd.Series(
            [
                np.nan,
                np.nan,
                np.nan,
                0,
                6,
                12,
                0,
                0,
                np.nan,
                np.nan,
            ],
        )
        assert func(times, flags).equals(expected)

    def test_with_featuretools(self, pd_es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        transform.append(self.primitive())
        valid_dfs(pd_es, aggregation, transform, self.primitive)
| 5,347 | 31.412121 | 80 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_percent_change.py | import numpy as np
import pandas as pd
from pytest import raises
from featuretools.primitives import PercentChange
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestPercentChange(PrimitiveTestBase):
    primitive = PercentChange

    def test_regular(self):
        """Element-wise percent change relative to the previous value."""
        data = pd.Series([2, 5, 15, 3, 3, 9, 4.5])
        answer = pd.Series([np.nan, 1.5, 2.0, -0.8, 0, 2.0, -0.5])
        primitive_func = self.primitive().get_function()
        np.testing.assert_array_equal(primitive_func(data), answer)

    def test_raises(self):
        """An unrecognized fill_method is rejected at construction time."""
        with raises(ValueError):
            self.primitive(fill_method="invalid")

    def test_period(self):
        """``periods`` controls the shift distance; ``limit`` caps filling."""
        data = pd.Series([2, 4, 8])
        answer = pd.Series([np.nan, np.nan, 3])
        primitive_func = self.primitive(periods=2).get_function()
        np.testing.assert_array_equal(primitive_func(data), answer)
        # limit=2 fills at most two of the trailing NaNs before differencing.
        # (The original test built an unused periods=2 function here; the
        # dead assignment has been removed.)
        data = pd.Series([2, 4, 8] + [np.nan] * 4)
        primitive_func = self.primitive(limit=2).get_function()
        answer = pd.Series([np.nan, 1, 1, 0, 0, np.nan, np.nan])
        np.testing.assert_array_equal(primitive_func(data), answer)

    def test_nan(self):
        """NaNs are forward-filled by default before the change is computed."""
        data = pd.Series([np.nan, 5, 10, 20, np.nan, 10, np.nan])
        answer = pd.Series([np.nan, np.nan, 1, 1, 0, -0.5, 0])
        primitive_func = self.primitive().get_function()
        np.testing.assert_array_equal(primitive_func(data), answer)

    def test_zero(self):
        """Changes from zero yield +/-inf; zero-to-zero yields NaN."""
        data = pd.Series([2, 0, 0, 5, 0, -4])
        # -np.inf instead of np.NINF: the NINF alias was removed in NumPy 2.0.
        answer = pd.Series([np.nan, -1, np.nan, np.inf, -1, -np.inf])
        primitive_func = self.primitive().get_function()
        np.testing.assert_array_equal(primitive_func(data), answer)

    def test_inf(self):
        """Infinite inputs propagate; inf-to-inf transitions are NaN."""
        data = pd.Series([0, np.inf, 0, 5, -np.inf, np.inf, -np.inf])
        answer = pd.Series([np.nan, np.inf, -1, np.inf, -np.inf, np.nan, np.nan])
        primitive_func = self.primitive().get_function()
        np.testing.assert_array_equal(primitive_func(data), answer)

    def test_freq(self):
        """A DateOffset ``freq`` computes change against the shifted index."""
        dates = pd.DatetimeIndex(
            ["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-05"],
        )
        data = pd.Series([1, 2, 3, 4], index=dates)
        answer = pd.Series([np.nan, 1.0, 0.5, np.nan])
        date_offset = pd.tseries.offsets.DateOffset(days=1)
        primitive_func = self.primitive(freq=date_offset).get_function()
        np.testing.assert_array_equal(primitive_func(data), answer)

    def test_with_featuretools(self, pd_es):
        # Append an instance (not the class), consistent with sibling tests.
        transform, aggregation = find_applicable_primitives(self.primitive)
        transform.append(self.primitive())
        valid_dfs(pd_es, aggregation, transform, self.primitive)
| 3,077 | 38.974026 | 81 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_distancetoholiday_primitive.py | from datetime import datetime
import holidays
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse
from featuretools.primitives import DistanceToHoliday
def test_distanceholiday():
    """Signed day counts to the nearest New Year's Day (US default)."""
    prim = DistanceToHoliday("New Year's Day")
    dates = pd.Series(
        [
            datetime(2010, 1, 1),
            datetime(2012, 5, 31),
            datetime(2017, 7, 31),
            datetime(2020, 12, 31),
        ],
    )
    np.testing.assert_array_equal(prim(dates).tolist(), [0, -151, 154, 1])
def test_holiday_out_of_range():
    """Distances near the edges of the supported holiday calendar.

    Expected values depend on the installed ``holidays`` package version:
    newer releases changed the observed Boxing Day date and extended the
    calendar's year range, so each expectation is gated on the version.
    """
    date_to_holiday = DistanceToHoliday("Boxing Day", country="Canada")
    array = pd.Series(
        [
            datetime(2010, 1, 1),
            datetime(2012, 5, 31),
            datetime(2017, 7, 31),
            datetime(2020, 12, 31),
        ],
    )
    # holidays >= 0.15 moved the reference Boxing Day occurrence.
    days_to_boxing_day = -157 if parse(holidays.__version__) >= parse("0.15.0") else 209
    # holidays >= 0.17 covers years that were previously out of range,
    # turning the formerly-NaN edge cases into real distances.
    edge_case_first_day_of_year = (
        -6 if parse(holidays.__version__) >= parse("0.17.0") else np.nan
    )
    edge_case_last_day_of_year = (
        -5 if parse(holidays.__version__) >= parse("0.17.0") else np.nan
    )
    answer = pd.Series(
        [
            edge_case_first_day_of_year,
            days_to_boxing_day,
            148,
            edge_case_last_day_of_year,
        ],
    )
    pd.testing.assert_series_equal(date_to_holiday(array), answer, check_names=False)
def test_unknown_country_error():
    """An unrecognized country code is rejected at construction time."""
    with pytest.raises(ValueError, match=r"must be one of the available countries.*"):
        DistanceToHoliday("Victoria Day", country="UNK")
def test_unknown_holiday_error():
    """An unrecognized holiday name is rejected at construction time."""
    with pytest.raises(ValueError, match=r"must be one of the available holidays.*"):
        DistanceToHoliday("Alteryx Day")
def test_nat():
    """NaT inputs produce NaN distances."""
    prim = DistanceToHoliday("New Year's Day")
    dates = pd.Series(
        [
            "2010-01-01",
            "NaT",
            "2012-05-31",
            "NaT",
        ],
    ).astype("datetime64[ns]")
    expected = [0, np.nan, -151, np.nan]
    np.testing.assert_array_equal(prim(dates).astype("float"), expected)
def test_valid_country():
    """Country-specific holidays resolve when a valid country is given."""
    prim = DistanceToHoliday("Victoria Day", country="Canada")
    dates = pd.Series(
        [
            "2010-01-01",
            "2012-05-31",
            "2017-07-31",
            "2020-12-31",
        ],
    ).astype("datetime64[ns]")
    np.testing.assert_array_equal(prim(dates).astype("float"), [143, -10, -70, 144])
def test_with_timezone_aware_datetimes():
    """Timezone-aware and naive datetimes give identical distances."""
    columns = {
        "non_timezone_aware_with_time": pd.date_range(
            "2018-07-03 09:00",
            periods=3,
        ),
        "non_timezone_aware_no_time": pd.date_range("2018-07-03", periods=3),
        "timezone_aware_with_time": pd.date_range(
            "2018-07-03 09:00",
            periods=3,
        ).tz_localize(tz="US/Eastern"),
        "timezone_aware_no_time": pd.date_range(
            "2018-07-03",
            periods=3,
        ).tz_localize(tz="US/Eastern"),
    }
    df = pd.DataFrame(columns)
    prim = DistanceToHoliday("Independence Day", country="US")
    for col in df.columns:
        np.testing.assert_array_equal(prim(df[col]), [1, 0, -1])
| 3,580 | 28.113821 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_postal_primitives.py | import pandas as pd
from featuretools.primitives.standard.transform.postal import (
OneDigitPostalCode,
TwoDigitPostalCode,
)
from featuretools.tests.testing_utils.es_utils import to_pandas
def test_one_digit_postal_code(postal_code_dataframe):
    """First character of each postal code; missing values stay missing."""
    func = OneDigitPostalCode().get_function()
    for col in postal_code_dataframe:
        codes = postal_code_dataframe[col]
        expected = to_pandas(
            codes.apply(lambda c: str(c)[0] if pd.notna(c) else pd.NA),
        )
        pd.testing.assert_series_equal(to_pandas(func(codes)), expected)
def test_two_digit_postal_code(postal_code_dataframe):
    """First two characters of each postal code; missing values stay missing."""
    func = TwoDigitPostalCode().get_function()
    for col in postal_code_dataframe:
        codes = postal_code_dataframe[col]
        expected = to_pandas(
            codes.apply(lambda c: str(c)[:2] if pd.notna(c) else pd.NA),
        )
        pd.testing.assert_series_equal(to_pandas(func(codes)), expected)
| 1,006 | 32.566667 | 73 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_expanding_primitives.py | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives.standard.transform.time_series.expanding import (
ExpandingCount,
ExpandingMax,
ExpandingMean,
ExpandingMin,
ExpandingSTD,
ExpandingTrend,
)
from featuretools.primitives.standard.transform.time_series.utils import (
_apply_gap_for_expanding_primitives,
)
from featuretools.utils import calculate_trend
@pytest.mark.parametrize(
    "min_periods, gap",
    [
        (5, 2),
        (5, 0),
        (0, 0),
    ],
)
def test_expanding_count_series(window_series_pd, min_periods, gap):
    """ExpandingCount on a datetime index matches pandas expanding().count()
    computed on the gap-shifted series.
    """
    test = window_series_pd.shift(gap)
    expected = test.expanding(min_periods=min_periods).count()
    # Rows that cannot yet hold a full window are masked to NaN.
    # (range() of a negative count is empty, so this is a no-op when
    # min_periods=0 and gap=0.)
    num_nans = gap + min_periods - 1
    expected[range(num_nans)] = np.nan
    primitive_instance = ExpandingCount(min_periods=min_periods, gap=gap).get_function()
    actual = primitive_instance(window_series_pd.index)
    pd.testing.assert_series_equal(pd.Series(actual), expected)
@pytest.mark.parametrize(
    "min_periods, gap",
    [
        (5, 2),
        (5, 0),
        (0, 0),
        (0, 1),
    ],
)
def test_expanding_count_date_range(window_date_range_pd, min_periods, gap):
    """ExpandingCount on a DatetimeIndex input matches pandas
    expanding().count() after applying the shared gap helper.
    """
    test = _apply_gap_for_expanding_primitives(gap=gap, x=window_date_range_pd)
    expected = test.expanding(min_periods=min_periods).count()
    # Rows that cannot yet hold a full window are masked to NaN.
    num_nans = gap + min_periods - 1
    expected[range(num_nans)] = np.nan
    primitive_instance = ExpandingCount(min_periods=min_periods, gap=gap).get_function()
    actual = primitive_instance(window_date_range_pd)
    pd.testing.assert_series_equal(pd.Series(actual), expected)
@pytest.mark.parametrize(
    "min_periods, gap",
    [
        (5, 2),
        (5, 0),
        (0, 0),
        (0, 1),
    ],
)
def test_expanding_min(window_series_pd, min_periods, gap):
    """ExpandingMin matches pandas expanding().min() on the gap-shifted series."""
    test = window_series_pd.shift(gap)
    expected = test.expanding(min_periods=min_periods).min().values
    primitive_instance = ExpandingMin(min_periods=min_periods, gap=gap).get_function()
    actual = primitive_instance(
        numeric=window_series_pd,
        datetime=window_series_pd.index,
    )
    pd.testing.assert_series_equal(pd.Series(actual), pd.Series(expected))
@pytest.mark.parametrize(
    "min_periods, gap",
    [
        (5, 2),
        (5, 0),
        (0, 0),
        (0, 1),
    ],
)
def test_expanding_max(window_series_pd, min_periods, gap):
    """ExpandingMax matches pandas expanding().max() on the gap-shifted series."""
    test = window_series_pd.shift(gap)
    expected = test.expanding(min_periods=min_periods).max().values
    primitive_instance = ExpandingMax(min_periods=min_periods, gap=gap).get_function()
    actual = primitive_instance(
        numeric=window_series_pd,
        datetime=window_series_pd.index,
    )
    pd.testing.assert_series_equal(pd.Series(actual), pd.Series(expected))
@pytest.mark.parametrize(
    "min_periods, gap",
    [
        (5, 2),
        (5, 0),
        (0, 0),
        (0, 1),
    ],
)
def test_expanding_std(window_series_pd, min_periods, gap):
    """ExpandingSTD matches pandas expanding().std() on the gap-shifted series."""
    test = window_series_pd.shift(gap)
    expected = test.expanding(min_periods=min_periods).std().values
    primitive_instance = ExpandingSTD(min_periods=min_periods, gap=gap).get_function()
    actual = primitive_instance(
        numeric=window_series_pd,
        datetime=window_series_pd.index,
    )
    pd.testing.assert_series_equal(pd.Series(actual), pd.Series(expected))
@pytest.mark.parametrize(
    "min_periods, gap",
    [
        (5, 2),
        (5, 0),
        (0, 0),
        (0, 1),
    ],
)
def test_expanding_mean(window_series_pd, min_periods, gap):
    """ExpandingMean matches pandas expanding().mean() on the gap-shifted series."""
    test = window_series_pd.shift(gap)
    expected = test.expanding(min_periods=min_periods).mean().values
    primitive_instance = ExpandingMean(min_periods=min_periods, gap=gap).get_function()
    actual = primitive_instance(
        numeric=window_series_pd,
        datetime=window_series_pd.index,
    )
    pd.testing.assert_series_equal(pd.Series(actual), pd.Series(expected))
@pytest.mark.parametrize(
    "min_periods, gap",
    [
        (5, 2),
        (5, 0),
        (0, 0),
        (0, 1),
    ],
)
def test_expanding_trend(window_series_pd, min_periods, gap):
    """ExpandingTrend matches pandas expanding().aggregate(calculate_trend)
    on the gap-shifted series.
    """
    test = window_series_pd.shift(gap)
    expected = test.expanding(min_periods=min_periods).aggregate(calculate_trend).values
    primitive_instance = ExpandingTrend(min_periods=min_periods, gap=gap).get_function()
    actual = primitive_instance(
        numeric=window_series_pd,
        datetime=window_series_pd.index,
    )
    pd.testing.assert_series_equal(pd.Series(actual), pd.Series(expected))
@pytest.mark.parametrize(
    "primitive",
    [
        ExpandingMax,
        ExpandingMean,
        ExpandingMin,
        ExpandingSTD,
        ExpandingTrend,
    ],
)
def test_expanding_primitives_throw_error_when_given_string_offset(
    window_series_pd,
    primitive,
):
    """Each expanding primitive rejects a string offset gap with a TypeError."""
    expected_msg = (
        "String offsets are not supported for the gap parameter in Expanding primitives"
    )
    with pytest.raises(TypeError, match=expected_msg):
        primitive(gap="2H").get_function()(
            numeric=window_series_pd,
            datetime=window_series_pd.index,
        )
def test_apply_gap_for_expanding_primitives_throws_error_when_given_string_offset(
    window_series_pd,
):
    """The shared gap helper also rejects string offsets."""
    expected_msg = (
        "String offsets are not supported for the gap parameter in Expanding primitives"
    )
    with pytest.raises(TypeError, match=expected_msg):
        _apply_gap_for_expanding_primitives(window_series_pd, gap="2H")
@pytest.mark.parametrize(
    "gap",
    [
        2,
        5,
        3,
        0,
    ],
)
def test_apply_gap_for_expanding_primitives(window_series_pd, gap):
    """For integer gaps the helper is equivalent to Series.shift."""
    result = _apply_gap_for_expanding_primitives(window_series_pd, gap).values
    shifted = window_series_pd.shift(gap).values
    pd.testing.assert_series_equal(pd.Series(result), pd.Series(shifted))
@pytest.mark.parametrize(
    "gap",
    [
        2,
        5,
        3,
        0,
    ],
)
def test_apply_gap_for_expanding_primitives_handles_date_range(
    window_date_range_pd,
    gap,
):
    """DatetimeIndex input is shifted the same way as its Series equivalent."""
    result = pd.Series(
        _apply_gap_for_expanding_primitives(window_date_range_pd, gap).values,
    )
    expected = pd.Series(window_date_range_pd.to_series().shift(gap).values)
    pd.testing.assert_series_equal(result, expected)
| 6,255 | 27.053812 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_full_name_primitives.py | import numpy as np
import pandas as pd
from featuretools.primitives import (
FullNameToFirstName,
FullNameToLastName,
FullNameToTitle,
)
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestFullNameToFirstName(PrimitiveTestBase):
    primitive = FullNameToFirstName

    def _assert_names(self, full_names, expected):
        """Run the primitive over ``full_names`` and compare to ``expected``."""
        func = self.primitive().get_function()
        result = func(pd.Series(full_names))
        answer = pd.Series(expected, dtype=object)
        pd.testing.assert_series_equal(result, answer, check_names=False)

    def test_urls(self):
        # Note: this implementation incorrectly identifies the first
        # name for 'Oliva y Ocana, Dona. Fermina'.
        self._assert_names(
            [
                "Spector, Mr. Woolf",
                "Oliva y Ocana, Dona. Fermina",
                "Saether, Mr. Simon Sivertsen",
                "Ware, Mr. Frederick",
                "Peter, Master. Michael J",
            ],
            ["Woolf", "Oliva", "Simon", "Frederick", "Michael"],
        )

    def test_no_title(self):
        self._assert_names(
            ["Peter, Michael J", "James Masters", "Kate Elizabeth Brown-Jones"],
            ["Michael", "James", "Kate"],
        )

    def test_empty_string(self):
        self._assert_names(
            ["Peter, Michael J", "", "Kate Elizabeth Brown-Jones"],
            ["Michael", np.nan, "Kate"],
        )

    def test_single_name(self):
        self._assert_names(
            ["Peter, Michael J", "James", "Kate Elizabeth Brown-Jones"],
            ["Michael", "James", "Kate"],
        )

    def test_nan(self):
        self._assert_names(
            ["Mr. James Brown", np.nan, None],
            ["James", np.nan, np.nan],
        )

    def test_with_featuretools(self, pd_es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        transform.append(self.primitive())
        valid_dfs(pd_es, aggregation, transform, self.primitive)
class TestFullNameToLastName(PrimitiveTestBase):
    primitive = FullNameToLastName

    def _assert_names(self, full_names, expected):
        """Run the primitive over ``full_names`` and compare to ``expected``."""
        func = self.primitive().get_function()
        result = func(pd.Series(full_names))
        answer = pd.Series(expected, dtype=object)
        pd.testing.assert_series_equal(result, answer, check_names=False)

    def test_urls(self):
        self._assert_names(
            [
                "Spector, Mr. Woolf",
                "Oliva y Ocana, Dona. Fermina",
                "Saether, Mr. Simon Sivertsen",
                "Ware, Mr. Frederick",
                "Peter, Master. Michael J",
            ],
            ["Spector", "Oliva y Ocana", "Saether", "Ware", "Peter"],
        )

    def test_no_title(self):
        self._assert_names(
            ["Peter, Michael J", "James Masters", "Kate Elizabeth Brown-Jones"],
            ["Peter", "Masters", "Brown-Jones"],
        )

    def test_empty_string(self):
        self._assert_names(
            ["Peter, Michael J", "", "Kate Elizabeth Brown-Jones"],
            ["Peter", np.nan, "Brown-Jones"],
        )

    def test_single_name(self):
        # A bare single name has no identifiable last name.
        self._assert_names(
            ["Peter, Michael J", "James", "Kate Elizabeth Brown-Jones"],
            ["Peter", np.nan, "Brown-Jones"],
        )

    def test_nan(self):
        self._assert_names(
            ["Mr. James Brown", np.nan, None],
            ["Brown", np.nan, np.nan],
        )

    def test_with_featuretools(self, pd_es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        transform.append(self.primitive())
        valid_dfs(pd_es, aggregation, transform, self.primitive)
class TestFullNameToTitle(PrimitiveTestBase):
    primitive = FullNameToTitle

    def _assert_titles(self, full_names, expected):
        """Run the primitive over ``full_names`` and compare to ``expected``."""
        func = self.primitive().get_function()
        result = func(pd.Series(full_names))
        answer = pd.Series(expected, dtype=object)
        pd.testing.assert_series_equal(result, answer, check_names=False)

    def test_urls(self):
        self._assert_titles(
            [
                "Spector, Mr. Woolf",
                "Oliva y Ocana, Dona. Fermina",
                "Saether, Mr. Simon Sivertsen",
                "Ware, Mr. Frederick",
                "Peter, Master. Michael J",
                "Mr. Brown",
            ],
            ["Mr", "Dona", "Mr", "Mr", "Master", "Mr"],
        )

    def test_no_title(self):
        # Titles are only recognized in the leading "Title." position.
        self._assert_titles(
            ["Peter, Michael J", "James Master.", "Mrs Brown", ""],
            [np.nan, np.nan, np.nan, np.nan],
        )

    def test_nan(self):
        self._assert_titles(
            ["Mr. Brown", np.nan, None],
            ["Mr", np.nan, np.nan],
        )

    def test_with_featuretools(self, pd_es):
        transform, aggregation = find_applicable_primitives(self.primitive)
        transform.append(self.primitive())
        valid_dfs(pd_es, aggregation, transform, self.primitive)
| 6,951 | 35.208333 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_datetoholiday_primitive.py | from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import DateToHoliday
def test_datetoholiday():
    """Known US holidays map to their names; ordinary dates map to NaN."""
    prim = DateToHoliday()
    dates = pd.Series(
        [
            datetime(2016, 1, 1),
            datetime(2016, 2, 27),
            datetime(2017, 5, 29, 10, 30, 5),
            datetime(2018, 7, 4),
        ],
    )
    names = prim(dates).tolist()
    assert names[0] == "New Year's Day"
    assert np.isnan(names[1])
    assert names[2] == "Memorial Day"
    assert names[3] == "Independence Day"
def test_datetoholiday_error():
    """An unrecognized country code is rejected at construction time."""
    with pytest.raises(ValueError, match=r"must be one of the available countries.*"):
        DateToHoliday(country="UNK")
def test_nat():
    """NaT entries produce NaN while valid dates still resolve to holidays."""
    case = pd.Series(["2019-10-14", "NaT", "2016-02-15", "NaT"]).astype(
        "datetime64[ns]",
    )
    actual = DateToHoliday()(case).astype("str")
    expected = ["Columbus Day", np.nan, "Washington's Birthday", np.nan]
    np.testing.assert_array_equal(actual, expected)
def test_valid_country():
    """Holidays are looked up against the configured country's calendar."""
    case = pd.Series(["2016-07-01", "2016-11-11", "2017-12-26", "2018-09-03"]).astype(
        "datetime64[ns]",
    )
    actual = DateToHoliday(country="Canada")(case).astype("str")
    expected = ["Canada Day", np.nan, "Boxing Day", "Labour Day"]
    np.testing.assert_array_equal(actual, expected)
def test_multiple_countries():
    """A variety of country identifiers are accepted and produce holidays."""
    cases = [
        ("Mexico", [datetime(2000, 9, 16), datetime(2005, 1, 1)]),
        ("IND", [datetime(2048, 1, 1), datetime(2048, 10, 2)]),
        ("UK", [datetime(2048, 3, 17), datetime(2048, 4, 6)]),
    ]
    for country, dates in cases:
        assert len(DateToHoliday(country=country)(pd.Series(dates))) > 1
    # Constructing the primitive with each supported identifier must not raise.
    countries = [
        "Argentina", "AU", "Austria", "BY", "Belgium", "Brazil", "Canada",
        "Colombia", "Croatia", "England", "Finland", "FRA", "Germany",
        "Germany", "Italy", "NewZealand", "PortugalExt", "PTE", "Spain",
        "ES", "Switzerland", "UnitedStates", "US", "UK", "UA", "CH", "SE", "ZA",
    ]
    for country in countries:
        DateToHoliday(country=country)
def test_with_timezone_aware_datetimes():
    """Timezone-aware and naive datetimes give the same holiday results."""
    with_time = pd.date_range("2018-07-03 09:00", periods=3)
    no_time = pd.date_range("2018-07-03", periods=3)
    df = pd.DataFrame(
        {
            "non_timezone_aware_with_time": with_time,
            "non_timezone_aware_no_time": no_time,
            "timezone_aware_with_time": with_time.tz_localize(tz="US/Eastern"),
            "timezone_aware_no_time": no_time.tz_localize(tz="US/Eastern"),
        },
    )
    date_to_holiday = DateToHoliday(country="US")
    expected = [np.nan, "Independence Day", np.nan]
    for col in df.columns:
        np.testing.assert_array_equal(date_to_holiday(df[col]).astype("str"), expected)
| 3,591 | 25.028986 | 81 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_savgol_filter.py | from math import floor
import numpy as np
import pandas as pd
from pytest import raises
from featuretools.primitives import SavgolFilter
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
class TestSavgolFilter(PrimitiveTestBase):
    """Tests for the ``SavgolFilter`` smoothing primitive."""

    primitive = SavgolFilter

    # Noisy, roughly linear signal the filter is applied to (100 points).
    data = pd.Series(
        [
            0, 1, 1, 2, 3, 4, 5, 7, 8, 7,
            9, 9, 12, 11, 12, 14, 15, 17, 17, 17,
            20, 21, 20, 20, 22, 21, 25, 25, 26, 29,
            30, 30, 28, 26, 34, 35, 33, 31, 38, 34,
            39, 37, 42, 35, 36, 44, 46, 43, 39, 39,
            44, 49, 45, 44, 44, 52, 50, 47, 58, 59,
            60, 55, 57, 63, 61, 65, 66, 57, 65, 61,
            60, 71, 64, 62, 70, 65, 67, 77, 68, 75,
            72, 69, 82, 66, 84, 80, 76, 87, 77, 73,
            90, 91, 92, 93, 78, 76, 82, 96, 91, 94,
        ],
    )

    # Precomputed smoothed output for window_length=21, polyorder=3 over ``data``.
    expected_output = pd.Series(
        [
            -0.24600037643516087, 0.6354225484660259, 1.518717742974036,
            2.405318302343475, 3.296657321828948, 4.1941678966850615,
            5.099283122166421, 6.0134360935276305, 6.938059906023296,
            7.874587654908025, 8.824452435436303, 9.786858450473883,
            10.923177508989724, 12.025171624713803, 13.009153318077633,
            14.08041843739766, 14.900621118012227, 15.796338672768673,
            16.77084014383764, 17.662961752206375, 18.472703497874882,
            19.451454723765682, 20.530565544295253, 21.849950964367157,
            22.478260869564927, 23.15233736515171, 24.12356979405003,
            25.23962079110788, 26.000980712650854, 27.082379862699877,
            27.787839163124843, 28.879045439685797, 29.762994442627924,
            31.067342268714864, 32.11147433801854, 32.666557698593884,
            33.06864988558309, 34.00098071265075, 35.134030728995945,
            36.15135665250035, 36.945733899966825, 37.56227525335028,
            38.55769859431137, 39.3975155279498, 39.87054593004198,
            40.304347826086435, 41.11670480549146, 42.00948022229432,
            41.982674076495044, 42.62798300098016, 43.15887544949274,
            44.53481529911678, 45.680614579927486, 46.93886891140834,
            47.98300098071202, 48.80549199084604, 50.28244524354299,
            52.66851912389601, 54.28604118993064, 55.81529911735788,
            57.10297482837455, 57.82641386073805, 59.45276234063342,
            60.77280156913945, 61.23667865315383, 61.81660673422607,
            62.60281137626594, 62.54004576658957, 62.78653154625613,
            63.23046747302958, 64.09087937234307, 65.25661981039471,
            65.19385420071833, 66.34161490683144, 66.65021248774022,
            67.38280483818154, 68.8126838836212, 69.79470415168265,
            70.943772474664, 72.74076495586698, 73.04020921869797,
            73.3586139261187, 74.67734553775647, 75.71559333115299,
            77.51814318404607, 79.62471395880902, 80.60150375939745,
            80.61163779012645, 81.89342922523593, 82.41124550506593,
            83.19293292519846, 83.97174920172642, 84.7620599588564,
            85.57823082079385, 86.4346274117442, 87.34561535591293,
            88.32556027750543, 89.38882780072717, 90.54978354978357,
            91.82279314888011,
        ],
    )

    def test_error(self):
        """Invalid constructor arguments raise informative ValueErrors."""
        with raises(ValueError, match="polyorder must be less than window_length."):
            self.primitive(1, 3)
        both_required = (
            "Both window_length and polyorder must be defined if you define one."
        )
        with raises(ValueError, match=both_required):
            self.primitive(window_length=1)
        with raises(ValueError, match=both_required):
            self.primitive(polyorder=3)
        with raises(
            ValueError,
            match="mode must be 'mirror', 'constant', 'nearest', 'wrap' or 'interp'.",
        ):
            self.primitive(window_length=1, polyorder=3, mode="incorrect")

    def test_less_window_size(self):
        """Series shorter than the default window pass through unchanged."""
        primitive_func = self.primitive().get_function()
        for length in range(20):
            short = pd.Series(list(range(length)), dtype="float64")
            assert short.equals(primitive_func(short))

    def test_regular(self):
        """A window sized from the data reproduces the precomputed output."""
        window_length = floor(len(self.data) / 10) * 2 + 1
        primitive_func = self.primitive(window_length, 3).get_function()
        for expected, actual in zip(self.expected_output, primitive_func(self.data)):
            assert np.isclose(expected, actual)

    def test_nans(self):
        """Appended NaNs spread through the filter window."""
        primitive_func = self.primitive().get_function()
        padded = pd.concat(
            [self.data.copy(), pd.Series([np.nan] * 5, dtype="float64")],
        )
        # More than the 5 appended NaNs are NaN because the window spreads them.
        assert sum(np.isnan(primitive_func(padded))) == 15

    def test_with_featuretools(self, es):
        """The primitive runs end-to-end through DFS on the test entityset."""
        transform, aggregation = find_applicable_primitives(self.primitive)
        transform.append(self.primitive())
        valid_dfs(es, aggregation, transform, self.primitive)
| 7,200 | 25.09058 | 88 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_exponential_primitives.py | import numpy as np
import pandas as pd
from featuretools.primitives import (
ExponentialWeightedAverage,
ExponentialWeightedSTD,
ExponentialWeightedVariance,
)
def test_regular_com_avg():
    """EW average parameterized by center of mass."""
    func = ExponentialWeightedAverage(com=0.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series([1.0, 1.75, 5.384615384615384, 5.125])
    pd.testing.assert_series_equal(actual, expected)
def test_regular_span_avg():
    """EW average parameterized by span."""
    func = ExponentialWeightedAverage(span=1.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series([1.0, 1.8333333333333335, 6.0, 5.198717948717948])
    pd.testing.assert_series_equal(actual, expected)
def test_regular_halflife_avg():
    """EW average parameterized by halflife."""
    func = ExponentialWeightedAverage(halflife=2.7).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series(
        [1.0, 1.563830114594977, 3.8556233149044865, 4.2592901785684205],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_regular_alpha_avg():
    """EW average parameterized directly by smoothing factor alpha."""
    func = ExponentialWeightedAverage(alpha=0.8).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series([1.0, 1.8333333333333335, 6.0, 5.198717948717948])
    pd.testing.assert_series_equal(actual, expected)
def test_na_avg():
    """By default a NaN carries the previous EW average forward."""
    func = ExponentialWeightedAverage(com=0.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, np.nan, 5])))
    expected = pd.Series(
        [1.0, 1.75, 5.384615384615384, 5.384615384615384, 5.053191489361702],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_ignorena_true_avg():
    """With ignore_na=True the NaN is excluded from the weighting."""
    func = ExponentialWeightedAverage(com=0.5, ignore_na=True).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, np.nan, 5])))
    expected = pd.Series(
        [1.0, 1.75, 5.384615384615384, 5.384615384615384, 5.125],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_regular_com_std():
    """EW standard deviation parameterized by center of mass."""
    func = ExponentialWeightedSTD(com=0.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series(
        [np.nan, 0.7071067811865475, 3.584153156068229, 2.0048019276803304],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_regular_span_std():
    """EW standard deviation parameterized by span."""
    func = ExponentialWeightedSTD(span=1.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series(
        [np.nan, 0.7071067811865476, 3.6055512754639887, 1.7311551816712718],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_regular_halflife_std():
    """EW standard deviation parameterized by halflife."""
    func = ExponentialWeightedSTD(halflife=2.7).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series(
        [np.nan, 0.7071067811865475, 3.3565236098585416, 2.631776826295855],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_regular_alpha_std():
    """EW standard deviation parameterized by alpha."""
    func = ExponentialWeightedSTD(alpha=0.8).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series(
        [np.nan, 0.7071067811865476, 3.6055512754639887, 1.7311551816712718],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_na_std():
    """By default a NaN carries the previous EW std forward."""
    func = ExponentialWeightedSTD(com=0.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, np.nan, 5])))
    expected = pd.Series(
        [
            np.nan,
            0.7071067811865475,
            3.584153156068229,
            3.5841531560682287,
            1.8408520483016189,
        ],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_ignorena_true_std():
    """With ignore_na=True the NaN is excluded from the EW std weighting."""
    func = ExponentialWeightedSTD(com=0.5, ignore_na=True).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, np.nan, 5])))
    expected = pd.Series(
        [
            np.nan,
            0.7071067811865475,
            3.584153156068229,
            3.584153156068229,
            2.0048019276803304,
        ],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_regular_com_var():
    """EW variance parameterized by center of mass."""
    func = ExponentialWeightedVariance(com=0.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series(
        [np.nan, 0.49999999999999983, 12.846153846153847, 4.019230769230769],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_regular_span_var():
    """EW variance parameterized by span."""
    func = ExponentialWeightedVariance(span=1.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series([np.nan, 0.5, 12.999999999999996, 2.996898263027294])
    pd.testing.assert_series_equal(actual, expected)
def test_regular_halflife_var():
    """EW variance parameterized by halflife."""
    func = ExponentialWeightedVariance(halflife=2.7).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series(
        [np.nan, 0.49999999999999994, 11.266250743537816, 6.926249263427883],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_regular_alpha_var():
    """EW variance parameterized by alpha."""
    func = ExponentialWeightedVariance(alpha=0.8).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, 5])))
    expected = pd.Series([np.nan, 0.5, 12.999999999999996, 2.996898263027294])
    pd.testing.assert_series_equal(actual, expected)
def test_na_var():
    """By default a NaN carries the previous EW variance forward."""
    func = ExponentialWeightedVariance(com=0.5).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, np.nan, 5])))
    expected = pd.Series(
        [
            np.nan,
            0.49999999999999983,
            12.846153846153847,
            12.846153846153843,
            3.3887362637362655,
        ],
    )
    pd.testing.assert_series_equal(actual, expected)
def test_ignorena_true_var():
    """With ignore_na=True the NaN is excluded from the EW variance weighting."""
    func = ExponentialWeightedVariance(com=0.5, ignore_na=True).get_function()
    actual = pd.Series(func(pd.Series([1, 2, 7, np.nan, 5])))
    expected = pd.Series(
        [
            np.nan,
            0.49999999999999983,
            12.846153846153847,
            12.846153846153847,
            4.019230769230769,
        ],
    )
    pd.testing.assert_series_equal(actual, expected)
| 7,700 | 33.846154 | 84 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_transform_primitive.py | from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from pytz import timezone
from featuretools.primitives import (
Age,
DateToTimeZone,
DayOfYear,
DaysInMonth,
EmailAddressToDomain,
FileExtension,
IsFirstWeekOfMonth,
IsFreeEmailDomain,
IsLeapYear,
IsLunchTime,
IsMonthEnd,
IsMonthStart,
IsQuarterEnd,
IsQuarterStart,
IsWorkingHours,
IsYearEnd,
IsYearStart,
Lag,
NthWeekOfMonth,
NumericLag,
PartOfDay,
Quarter,
RateOfChange,
TimeSince,
URLToDomain,
URLToProtocol,
URLToTLD,
Week,
get_transform_primitives,
)
from featuretools.tests.primitive_tests.utils import (
PrimitiveTestBase,
find_applicable_primitives,
valid_dfs,
)
def test_time_since():
    """TimeSince converts datetime deltas to the requested (plural) unit."""
    times = pd.Series(
        [
            datetime(2019, 3, 1, 0, 0, 0, 1),
            datetime(2019, 3, 1, 0, 0, 1, 0),
            datetime(2019, 3, 1, 0, 2, 0, 0),
        ],
    )
    cutoff_time = datetime(2019, 3, 1, 0, 0, 0, 0)

    assert list(map(int, TimeSince()(array=times, time=cutoff_time))) == [0, -1, -120]
    nanos = TimeSince(unit="nanoseconds")(array=times, time=cutoff_time)
    assert list(map(round, nanos)) == [-1000, -1000000000, -120000000000]
    millis = TimeSince(unit="milliseconds")(array=times, time=cutoff_time)
    assert list(map(int, millis)) == [0, -1000, -120000]
    # Unit matching is case-insensitive.
    millis = TimeSince(unit="Milliseconds")(array=times, time=cutoff_time)
    assert list(map(int, millis)) == [0, -1000, -120000]
    assert list(map(int, TimeSince(unit="Years")(array=times, time=cutoff_time))) == [
        0,
        0,
        0,
    ]

    times_y = pd.Series(
        [
            datetime(2019, 3, 1, 0, 0, 0, 1),
            datetime(2020, 3, 1, 0, 0, 1, 0),
            datetime(2017, 3, 1, 0, 0, 0, 0),
        ],
    )
    years = TimeSince(unit="Years")(array=times_y, time=cutoff_time)
    assert list(map(int, years)) == [0, -1, 1]

    with pytest.raises(ValueError, match="Invalid unit given, make sure it is plural"):
        TimeSince(unit="na")(array=times, time=cutoff_time)
def test_age():
    """Age in fractional years relative to the supplied cutoff time."""
    ages = Age()(pd.Series(datetime(2010, 2, 26)), time=datetime(2020, 2, 26))
    # The extra .005 comes from the leap days within the 10-year span.
    np.testing.assert_array_almost_equal(ages, [10.005], decimal=3)
def test_age_two_years_quarterly():
    """Quarter-end birth dates across two years yield decreasing ages."""
    # NOTE(review): freq="Q" is deprecated in newer pandas (use "QE") — confirm
    # against the pinned pandas version before changing.
    dates = pd.Series(pd.date_range("2010-01-01", "2011-12-31", freq="Q"))
    ages = Age()(dates, time=datetime(2020, 2, 26))
    expected = [9.915, 9.666, 9.414, 9.162, 8.915, 8.666, 8.414, 8.162]
    np.testing.assert_array_almost_equal(ages, expected, decimal=3)
def test_age_leap_year():
    """Ages spanning or starting on leap days are computed correctly."""
    age = Age()
    ages = age(pd.Series([datetime(2016, 1, 1)]), time=datetime(2016, 3, 1))
    np.testing.assert_array_almost_equal(ages, [(31 + 29) / 365.0], decimal=3)
    # Born on a leap day.
    ages = age(pd.Series([datetime(2016, 2, 29)]), time=datetime(2020, 2, 29))
    # The extra .0027 comes from the leap day within the span.
    np.testing.assert_array_almost_equal(ages, [4.0027], decimal=3)
def test_age_nan():
    """NaN birth dates produce NaN ages."""
    dates = pd.Series([datetime(2010, 1, 1), np.nan, datetime(2012, 1, 1)])
    ages = Age()(dates, time=datetime(2020, 2, 26))
    np.testing.assert_array_almost_equal(ages, [10.159, np.nan, 8.159], decimal=3)
def test_day_of_year():
    """Dec 31 is day 365 (366 in a leap year); NaN passes through."""
    dates = pd.Series([datetime(2019, 12, 31), np.nan, datetime(2020, 12, 31)])
    np.testing.assert_array_equal(DayOfYear()(dates), [365, np.nan, 366])
def test_days_in_month():
    """Month lengths respect leap-year February; NaN passes through."""
    dates = pd.Series(
        [datetime(2010, 1, 1), datetime(2019, 2, 1), np.nan, datetime(2020, 2, 1)],
    )
    np.testing.assert_array_equal(DaysInMonth()(dates), [31, 28, np.nan, 29])
def test_is_leap_year():
    """2020 is a leap year; 2021 is not."""
    dates = pd.Series([datetime(2020, 1, 1), datetime(2021, 1, 1)])
    np.testing.assert_array_equal(IsLeapYear()(dates), [True, False])
def test_is_month_end():
    """Month-end detection accounts for leap-year February."""
    dates = pd.Series(
        [datetime(2019, 3, 1), datetime(2021, 2, 28), datetime(2020, 2, 29)],
    )
    np.testing.assert_array_equal(IsMonthEnd()(dates), [False, True, True])
def test_is_month_start():
    """Only the first of the month is a month start."""
    dates = pd.Series(
        [datetime(2019, 3, 1), datetime(2020, 2, 28), datetime(2020, 2, 29)],
    )
    np.testing.assert_array_equal(IsMonthStart()(dates), [True, False, False])
def test_is_quarter_end():
    """March 31 ends a quarter; January 1 does not."""
    dates = pd.Series([datetime(2020, 1, 1), datetime(2021, 3, 31)])
    np.testing.assert_array_equal(IsQuarterEnd()(dates), [False, True])
def test_is_quarter_start():
    """January 1 starts a quarter; March 31 does not."""
    dates = pd.Series([datetime(2020, 1, 1), datetime(2021, 3, 31)])
    np.testing.assert_array_equal(IsQuarterStart()(dates), [True, False])
def test_is_lunch_time_default():
    """The default lunch hour is 12; NaN maps to False."""
    dates = pd.Series(
        [
            datetime(2022, 6, 26, 12, 12, 12),
            datetime(2022, 6, 28, 12, 3, 4),
            datetime(2022, 6, 28, 11, 3, 4),
            np.nan,
        ],
    )
    np.testing.assert_array_equal(IsLunchTime()(dates), [True, True, False, False])
def test_is_lunch_time_configurable():
    """A custom lunch hour replaces the default one."""
    dates = pd.Series(
        [
            datetime(2022, 6, 26, 12, 12, 12),
            datetime(2022, 6, 28, 14, 3, 4),
            datetime(2022, 6, 28, 11, 3, 4),
            np.nan,
        ],
    )
    np.testing.assert_array_equal(IsLunchTime(14)(dates), [False, True, False, False])
def test_is_working_hours_standard_hours():
    """Default working hours include noon and 16:00 but not 04:00."""
    dates = pd.Series(
        [
            datetime(2022, 6, 21, 16, 3, 3),
            datetime(2019, 1, 3, 4, 4, 4),
            datetime(2022, 1, 1, 12, 1, 2),
        ],
    )
    actual = IsWorkingHours()(dates).tolist()
    np.testing.assert_array_equal(actual, [True, False, True])
def test_is_working_hours_configured_hours():
    """Custom start/end hours shift the working-hours window."""
    dates = pd.Series(
        [
            datetime(2022, 6, 21, 16, 3, 3),
            datetime(2022, 6, 26, 14, 4, 4),
            datetime(2022, 1, 1, 12, 1, 2),
        ],
    )
    actual = IsWorkingHours(15, 18)(dates).tolist()
    np.testing.assert_array_equal(actual, [True, False, False])
def test_part_of_day():
    """Hours map onto named parts of the day; NaN passes through."""
    dates = pd.Series(
        [
            datetime(2020, 1, 11, 0, 2, 1),
            datetime(2020, 1, 11, 1, 2, 1),
            datetime(2021, 3, 31, 4, 2, 1),
            datetime(2020, 3, 4, 6, 2, 1),
            datetime(2020, 3, 4, 8, 2, 1),
            datetime(2020, 3, 4, 11, 2, 1),
            datetime(2020, 3, 4, 14, 2, 3),
            datetime(2020, 3, 4, 17, 2, 3),
            datetime(2020, 2, 2, 20, 2, 2),
            np.nan,
        ],
    )
    expected = pd.Series(
        [
            "midnight", "midnight", "dawn", "early morning", "late morning",
            "noon", "afternoon", "evening", "night", np.nan,
        ],
    )
    pd.testing.assert_series_equal(expected, PartOfDay()(dates))
def test_is_year_end():
    """Only December 31 is a year end; NaN maps to False."""
    dates = pd.Series([datetime(2020, 12, 31), np.nan, datetime(2020, 1, 1)])
    np.testing.assert_array_equal(IsYearEnd()(dates), [True, False, False])
def test_is_year_start():
    """Only January 1 is a year start; NaN maps to False."""
    dates = pd.Series([datetime(2020, 12, 31), np.nan, datetime(2020, 1, 1)])
    np.testing.assert_array_equal(IsYearStart()(dates), [False, False, True])
def test_quarter_regular():
    """The first day of each quarter maps to quarters 1 through 4."""
    dates = pd.Series(
        [
            pd.to_datetime("2018-01-01"),
            pd.to_datetime("2018-04-01"),
            pd.to_datetime("2018-07-01"),
            pd.to_datetime("2018-10-01"),
        ],
    )
    np.testing.assert_array_equal(Quarter()(dates), pd.Series([1, 2, 3, 4]))
def test_quarter_leap_year():
    """A leap day still lands in the first quarter."""
    dates = pd.Series(
        [
            pd.to_datetime("2016-02-29"),
            pd.to_datetime("2018-04-01"),
            pd.to_datetime("2018-07-01"),
            pd.to_datetime("2018-10-01"),
        ],
    )
    np.testing.assert_array_equal(Quarter()(dates), pd.Series([1, 2, 3, 4]))
def test_quarter_nan_and_nat_input():
    """NaN and NaT entries map to NaN quarters."""
    dates = pd.Series(
        [
            pd.to_datetime("2016-02-29"),
            np.nan,
            np.datetime64("NaT"),
            pd.to_datetime("2018-10-01"),
        ],
    )
    np.testing.assert_array_equal(Quarter()(dates), pd.Series([1, np.nan, np.nan, 4]))
def test_quarter_year_before_1970():
    """Dates before the Unix epoch are handled correctly."""
    dates = pd.Series(
        [
            pd.to_datetime("2018-01-01"),
            pd.to_datetime("1950-04-01"),
            pd.to_datetime("1874-07-01"),
            pd.to_datetime("2018-10-01"),
        ],
    )
    np.testing.assert_array_equal(Quarter()(dates), pd.Series([1, 2, 3, 4]))
def test_quarter_year_after_2038():
    """Dates past the 32-bit epoch rollover are handled correctly."""
    dates = pd.Series(
        [
            pd.to_datetime("2018-01-01"),
            pd.to_datetime("2050-04-01"),
            pd.to_datetime("2174-07-01"),
            pd.to_datetime("2018-10-01"),
        ],
    )
    np.testing.assert_array_equal(Quarter()(dates), pd.Series([1, 2, 3, 4]))
def test_quarter():
    """Quarter also accepts a plain list of datetimes."""
    dates = [datetime(2019, 12, 1), datetime(2019, 1, 3), datetime(2020, 2, 1)]
    np.testing.assert_array_equal(Quarter()(dates), [4, 1, 1])
def test_week_no_deprecation_message():
    """Week emits no warnings (e.g. the pandas ``Series.dt.week`` deprecation).

    ``pytest.warns(None)`` was deprecated in pytest 6.2 and removed in pytest 8,
    so the warnings are recorded with the standard library instead.
    """
    import warnings

    dates = [
        datetime(2019, 1, 3),
        datetime(2019, 6, 17, 11, 10, 50),
        datetime(2019, 11, 30, 19, 45, 15),
    ]
    with warnings.catch_warnings(record=True) as record:
        # Ensure every warning is recorded rather than filtered away.
        warnings.simplefilter("always")
        week = Week()
        week(dates).tolist()
    assert not record
def test_url_to_domain_urls():
    """Domains are extracted regardless of scheme, subdomain, or credentials."""
    cases = [
        ("https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22", "play.google.com"),
        ("http://mplay.google.co.in/sadfask/asdkfals?dk=10", "mplay.google.co.in"),
        ("http://lplay.google.co.in/sadfask/asdkfals?dk=10", "lplay.google.co.in"),
        ("http://play.google.co.in/sadfask/asdkfals?dk=10", "play.google.co.in"),
        ("http://tplay.google.co.in/sadfask/asdkfals?dk=10", "tplay.google.co.in"),
        ("http://www.google.co.in/sadfask/asdkfals?dk=10", "google.co.in"),
        ("www.google.co.in/sadfask/asdkfals?dk=10", "google.co.in"),
        ("http://user:pass@google.com/?a=b#asdd", "google.com"),
        ("https://www.compzets.com?asd=10", "compzets.com"),
        ("www.compzets.com?asd=10", "compzets.com"),
        ("facebook.com", "facebook.com"),
        ("https://www.compzets.net?asd=10", "compzets.net"),
        ("http://www.featuretools.org", "featuretools.org"),
    ]
    urls = pd.Series([url for url, _ in cases])
    expected = [domain for _, domain in cases]
    np.testing.assert_array_equal(URLToDomain()(urls), expected)
def test_url_to_domain_long_url():
    """URLToDomain finds the domain in a very long, parameter-heavy URL."""
    url_to_domain = URLToDomain()
    # NOTE: the backslash continuations below embed literal whitespace inside
    # the URL string; the primitive should still extract the domain.
    urls = pd.Series(
        [
            "http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
            100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
            000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
            2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
            6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
            adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
            Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
            %7Cadb%7Csollections%7Cactivity|Chart",
        ],
    )
    correct_urls = ["chart.apis.google.com"]
    results = url_to_domain(urls)
    np.testing.assert_array_equal(results, correct_urls)
def test_url_to_domain_nan():
    """NaN inputs pass through unchanged."""
    urls = pd.Series(["www.featuretools.com", np.nan], dtype="object")
    expected = pd.Series(["featuretools.com", np.nan], dtype="object")
    pd.testing.assert_series_equal(URLToDomain()(urls), expected)
def test_url_to_protocol_urls():
    """The scheme is extracted when present; scheme-less URLs give NaN."""
    cases = [
        ("https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22", "https"),
        ("http://mplay.google.co.in/sadfask/asdkfals?dk=10", "http"),
        ("http://lplay.google.co.in/sadfask/asdkfals?dk=10", "http"),
        ("www.google.co.in/sadfask/asdkfals?dk=10", np.nan),
        ("http://user:pass@google.com/?a=b#asdd", "http"),
        ("https://www.compzets.com?asd=10", "https"),
        ("www.compzets.com?asd=10", np.nan),
        ("facebook.com", np.nan),
        ("https://www.compzets.net?asd=10", "https"),
        ("http://www.featuretools.org", "http"),
        ("https://featuretools.com", "https"),
    ]
    urls = pd.Series([url for url, _ in cases])
    expected = pd.Series([protocol for _, protocol in cases])
    pd.testing.assert_series_equal(URLToProtocol()(urls), expected)
def test_url_to_protocol_long_url():
    """URLToProtocol finds the scheme in a very long, parameter-heavy URL."""
    url_to_protocol = URLToProtocol()
    # NOTE: the backslash continuations below embed literal whitespace inside
    # the URL string; the primitive should still extract the protocol.
    urls = pd.Series(
        [
            "http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
            100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
            000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
            2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
            6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
            adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
            Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
            %7Cadb%7Csollections%7Cactivity|Chart",
        ],
    )
    correct_urls = ["http"]
    results = url_to_protocol(urls)
    np.testing.assert_array_equal(results, correct_urls)
def test_url_to_protocol_nan():
    """Scheme-less, NaN, and empty inputs all give NaN."""
    urls = pd.Series(["www.featuretools.com", np.nan, ""], dtype="object")
    expected = pd.Series([np.nan, np.nan, np.nan], dtype="object")
    pd.testing.assert_series_equal(URLToProtocol()(urls), expected)
def test_url_to_tld_urls():
    """The top-level domain is extracted from a variety of URL shapes."""
    cases = [
        ("https://play.google.com/store/apps/details?id=com.skgames.trafficracer%22", "com"),
        ("http://mplay.google.co.in/sadfask/asdkfals?dk=10", "in"),
        ("http://lplay.google.co.in/sadfask/asdkfals?dk=10", "in"),
        ("http://play.google.co.in/sadfask/asdkfals?dk=10", "in"),
        ("http://tplay.google.co.in/sadfask/asdkfals?dk=10", "in"),
        ("http://www.google.co.in/sadfask/asdkfals?dk=10", "in"),
        ("www.google.co.in/sadfask/asdkfals?dk=10", "in"),
        ("http://user:pass@google.com/?a=b#asdd", "com"),
        ("https://www.compzets.dev?asd=10", "dev"),
        ("www.compzets.com?asd=10", "com"),
        ("https://www.compzets.net?asd=10", "net"),
        ("http://www.featuretools.org", "org"),
        ("featuretools.org", "org"),
    ]
    urls = pd.Series([url for url, _ in cases])
    expected = [tld for _, tld in cases]
    np.testing.assert_array_equal(URLToTLD()(urls), expected)
def test_url_to_tld_long_url():
    """URLToTLD finds the TLD in a very long, parameter-heavy URL."""
    url_to_tld = URLToTLD()
    # NOTE: the backslash continuations below embed literal whitespace inside
    # the URL string; the primitive should still extract the TLD.
    urls = pd.Series(
        [
            "http://chart.apis.google.com/chart?chs=500x500&chma=0,0,100, \
            100&cht=p&chco=FF0000%2CFFFF00%7CFF8000%2C00FF00%7C00FF00%2C0 \
            000FF&chd=t%3A122%2C42%2C17%2C10%2C8%2C7%2C7%2C7%2C7%2C6%2C6% \
            2C6%2C6%2C5%2C5&chl=122%7C42%7C17%7C10%7C8%7C7%7C7%7C7%7C7%7C \
            6%7C6%7C6%7C6%7C5%7C5&chdl=android%7Cjava%7Cstack-trace%7Cbro \
            adcastreceiver%7Candroid-ndk%7Cuser-agent%7Candroid-webview%7 \
            Cwebview%7Cbackground%7Cmultithreading%7Candroid-source%7Csms \
            %7Cadb%7Csollections%7Cactivity|Chart",
        ],
    )
    correct_urls = ["com"]
    np.testing.assert_array_equal(url_to_tld(urls), correct_urls)
def test_url_to_tld_nan():
    """NaN, TLD-less, and empty inputs all give NaN."""
    urls = pd.Series(
        ["www.featuretools.com", np.nan, "featuretools", ""],
        dtype="object",
    )
    expected = pd.Series(["com", np.nan, np.nan, np.nan], dtype="object")
    pd.testing.assert_series_equal(URLToTLD()(urls), expected, check_names=False)
def test_is_free_email_domain_valid_addresses():
    """Free providers return True; other domains return False."""
    emails = pd.Series(
        [
            "test@hotmail.com",
            "name@featuretools.com",
            "nobody@yahoo.com",
            "free@gmail.com",
        ],
    )
    actual = pd.Series(IsFreeEmailDomain()(emails))
    pd.testing.assert_series_equal(actual, pd.Series([True, False, True, True]))
def test_is_free_email_domain_valid_addresses_whitespace():
    """Leading and trailing whitespace around addresses is ignored."""
    emails = pd.Series(
        [
            " test@hotmail.com",
            " name@featuretools.com",
            "nobody@yahoo.com ",
            " free@gmail.com ",
        ],
    )
    actual = pd.Series(IsFreeEmailDomain()(emails))
    pd.testing.assert_series_equal(actual, pd.Series([True, False, True, True]))
def test_is_free_email_domain_nan():
    """NaN inputs pass through as NaN."""
    emails = pd.Series([np.nan, "name@featuretools.com", "nobody@yahoo.com"])
    actual = pd.Series(IsFreeEmailDomain()(emails))
    pd.testing.assert_series_equal(actual, pd.Series([np.nan, False, True]))
def test_is_free_email_domain_empty_string():
    """An empty string is not a valid address and yields NaN."""
    emails = pd.Series(["", "name@featuretools.com", "nobody@yahoo.com"])
    actual = pd.Series(IsFreeEmailDomain()(emails))
    pd.testing.assert_series_equal(actual, pd.Series([np.nan, False, True]))
def test_is_free_email_domain_empty_series():
    """An empty categorical series stays an empty categorical series."""
    emails = pd.Series([], dtype="category")
    actual = pd.Series(IsFreeEmailDomain()(emails))
    pd.testing.assert_series_equal(actual, pd.Series([], dtype="category"))
def test_is_free_email_domain_invalid_email():
    """Non-address values (text, numbers, booleans) all yield NaN."""
    emails = pd.Series(
        [
            np.nan,
            "this is not an email address",
            "name@featuretools.com",
            "nobody@yahoo.com",
            1234,
            1.23,
            True,
        ],
    )
    actual = pd.Series(IsFreeEmailDomain()(emails))
    expected = pd.Series([np.nan, np.nan, False, True, np.nan, np.nan, np.nan])
    pd.testing.assert_series_equal(actual, expected)
def test_is_free_email_domain_all_nan():
    """An all-NaN input produces an all-NaN object series."""
    actual = pd.Series(IsFreeEmailDomain()(pd.Series([np.nan, np.nan])))
    expected = pd.Series([np.nan, np.nan], dtype=object)
    pd.testing.assert_series_equal(actual, expected)
def test_email_address_to_domain_valid_addresses():
    """The part after the @ is returned for valid addresses."""
    emails = pd.Series(
        [
            "test@hotmail.com",
            "name@featuretools.com",
            "nobody@yahoo.com",
            "free@gmail.com",
        ],
    )
    actual = pd.Series(EmailAddressToDomain()(emails))
    expected = pd.Series(["hotmail.com", "featuretools.com", "yahoo.com", "gmail.com"])
    pd.testing.assert_series_equal(actual, expected)
def test_email_address_to_domain_valid_addresses_whitespace():
    """Leading and trailing whitespace around addresses is ignored."""
    emails = pd.Series(
        [
            " test@hotmail.com",
            " name@featuretools.com",
            "nobody@yahoo.com ",
            " free@gmail.com ",
        ],
    )
    actual = pd.Series(EmailAddressToDomain()(emails))
    expected = pd.Series(["hotmail.com", "featuretools.com", "yahoo.com", "gmail.com"])
    pd.testing.assert_series_equal(actual, expected)
def test_email_address_to_domain_nan():
    """NaN inputs pass through as NaN."""
    emails = pd.Series([np.nan, "name@featuretools.com", "nobody@yahoo.com"])
    actual = pd.Series(EmailAddressToDomain()(emails))
    expected = pd.Series([np.nan, "featuretools.com", "yahoo.com"])
    pd.testing.assert_series_equal(actual, expected)
def test_email_address_to_domain_empty_string():
    """An empty string is treated as an invalid address and maps to NaN."""
    primitive = EmailAddressToDomain()
    emails = pd.Series(["", "name@featuretools.com", "nobody@yahoo.com"])
    result = pd.Series(primitive(emails))
    expected = pd.Series([np.nan, "featuretools.com", "yahoo.com"])
    pd.testing.assert_series_equal(result, expected)
def test_email_address_to_domain_empty_series():
    """An empty categorical series produces an empty categorical result."""
    email_address_to_domain = EmailAddressToDomain()
    array = pd.Series([], dtype="category")
    answers = pd.Series(email_address_to_domain(array))
    correct_answers = pd.Series([], dtype="category")
    pd.testing.assert_series_equal(answers, correct_answers)
def test_email_address_to_domain_invalid_email():
    """Non-string and malformed entries map to NaN; valid addresses are converted."""
    email_address_to_domain = EmailAddressToDomain()
    array = pd.Series(
        [
            np.nan,
            "this is not an email address",
            "name@featuretools.com",
            "nobody@yahoo.com",
            1234,
            1.23,
            True,
        ],
    )
    answers = pd.Series(email_address_to_domain(array))
    correct_answers = pd.Series(
        [np.nan, np.nan, "featuretools.com", "yahoo.com", np.nan, np.nan, np.nan],
    )
    pd.testing.assert_series_equal(answers, correct_answers)
def test_email_address_to_domain_all_nan():
    """All-NaN input yields an all-NaN object-dtype result."""
    primitive = EmailAddressToDomain()
    emails = pd.Series([np.nan] * 2)
    result = pd.Series(primitive(emails))
    expected = pd.Series([np.nan] * 2, dtype=object)
    pd.testing.assert_series_equal(result, expected)
def test_trans_primitives_can_init_without_params():
    """Every registered transform primitive must be constructible with defaults."""
    trans_primitives = get_transform_primitives().values()
    for trans_primitive in trans_primitives:
        trans_primitive()
def test_numeric_lag_future_warning():
    """Constructing the deprecated NumericLag primitive emits a FutureWarning."""
    warning_text = "NumericLag is deprecated and will be removed in a future version. Please use the 'Lag' primitive instead."
    with pytest.warns(FutureWarning, match=warning_text):
        NumericLag()
def test_lag_regular():
    """Default Lag shifts values forward one period, inserting NaN at the front."""
    func = Lag().get_function()
    values = pd.Series([1, 2, 3, 4])
    timestamps = pd.Series(pd.date_range(start="2020-01-01", periods=4, freq="D"))
    result = pd.Series(func(values, timestamps))
    expected = pd.Series([np.nan, 1, 2, 3])
    pd.testing.assert_series_equal(result, expected)
def test_lag_period():
    """periods=3 shifts values three rows forward, padding the front with NaN."""
    func = Lag(periods=3).get_function()
    values = pd.Series([1, 2, 3, 4])
    timestamps = pd.Series(pd.date_range(start="2020-01-01", periods=4, freq="D"))
    result = pd.Series(func(values, timestamps))
    expected = pd.Series([np.nan, np.nan, np.nan, 1])
    pd.testing.assert_series_equal(result, expected)
def test_lag_negative_period():
    """A negative period shifts values backward, padding the end with NaN."""
    func = Lag(periods=-2).get_function()
    values = pd.Series([1, 2, 3, 4])
    timestamps = pd.Series(pd.date_range(start="2020-01-01", periods=4, freq="D"))
    result = pd.Series(func(values, timestamps))
    expected = pd.Series([3, 4, np.nan, np.nan])
    pd.testing.assert_series_equal(result, expected)
def test_lag_starts_with_nan():
    """A leading NaN in the input is shifted along with the data."""
    primitive_instance = Lag()
    primitive_func = primitive_instance.get_function()
    array = pd.Series([np.nan, 2, 3, 4])
    time_array = pd.Series(pd.date_range(start="2020-01-01", periods=4, freq="D"))
    answer = pd.Series(primitive_func(array, time_array))
    correct_answer = pd.Series([np.nan, np.nan, 2, 3])
    pd.testing.assert_series_equal(answer, correct_answer)
def test_lag_ends_with_nan():
    """A trailing NaN is shifted off the end; earlier values move down one row."""
    primitive_instance = Lag()
    primitive_func = primitive_instance.get_function()
    array = pd.Series([1, 2, 3, np.nan])
    time_array = pd.Series(pd.date_range(start="2020-01-01", periods=4, freq="D"))
    answer = pd.Series(primitive_func(array, time_array))
    correct_answer = pd.Series([np.nan, 1, 2, 3])
    pd.testing.assert_series_equal(answer, correct_answer)
@pytest.mark.parametrize(
    "input_array,expected_output",
    [
        # Nullable extension dtypes (string, category, boolean, Int64) keep
        # their dtype after the shift.
        (
            pd.Series(["hello", "world", "foo", "bar"], dtype="string"),
            pd.Series([np.nan, "hello", "world", "foo"], dtype="string"),
        ),
        (
            pd.Series(["cow", "cow", "pig", "pig"], dtype="category"),
            pd.Series([np.nan, "cow", "cow", "pig"], dtype="category"),
        ),
        # Non-nullable bool cannot hold NaN, so the result upcasts to object.
        (
            pd.Series([True, False, True, False], dtype="bool"),
            pd.Series([np.nan, True, False, True], dtype="object"),
        ),
        (
            pd.Series([True, False, True, False], dtype="boolean"),
            pd.Series([np.nan, True, False, True], dtype="boolean"),
        ),
        (
            pd.Series([1.23, 2.45, 3.56, 4.98], dtype="float"),
            pd.Series([np.nan, 1.23, 2.45, 3.56], dtype="float"),
        ),
        (
            pd.Series([1, 2, 3, 4], dtype="Int64"),
            pd.Series([np.nan, 1, 2, 3], dtype="Int64"),
        ),
        # Non-nullable int64 upcasts to float64 to accommodate the NaN.
        (
            pd.Series([1, 2, 3, 4], dtype="int64"),
            pd.Series([np.nan, 1, 2, 3], dtype="float64"),
        ),
    ],
)
def test_lag_with_different_dtypes(input_array, expected_output):
    """Lag preserves nullable dtypes and upcasts non-nullable ones to hold NaN."""
    primitive_instance = Lag()
    primitive_func = primitive_instance.get_function()
    time_array = pd.Series(pd.date_range(start="2020-01-01", periods=4, freq="D"))
    answer = pd.Series(primitive_func(input_array, time_array))
    pd.testing.assert_series_equal(answer, expected_output)
def test_date_to_time_zone_primitive():
    """DateToTimeZone returns each datetime's timezone name as a string."""
    primitive_func = DateToTimeZone().get_function()
    x = pd.Series(
        [
            datetime(2010, 1, 1, tzinfo=timezone("America/Los_Angeles")),
            datetime(2010, 1, 10, tzinfo=timezone("Singapore")),
            datetime(2020, 1, 1, tzinfo=timezone("UTC")),
            datetime(2010, 1, 1, tzinfo=timezone("Europe/London")),
        ],
    )
    answer = pd.Series(["America/Los_Angeles", "Singapore", "UTC", "Europe/London"])
    pd.testing.assert_series_equal(primitive_func(x), answer)
def test_date_to_time_zone_datetime64():
    """A tz-localized datetime64 series reports the localized zone for every row."""
    func = DateToTimeZone().get_function()
    dates = pd.Series(
        [
            datetime(2010, 1, 1),
            datetime(2010, 1, 10),
            datetime(2020, 1, 1),
        ],
    ).astype("datetime64[ns]")
    localized = dates.dt.tz_localize("America/Los_Angeles")
    expected = pd.Series(["America/Los_Angeles"] * 3)
    pd.testing.assert_series_equal(func(localized), expected)
def test_date_to_time_zone_naive_dates():
    """Timezone-naive datetimes have no zone to report and map to NaN."""
    primitive_func = DateToTimeZone().get_function()
    x = pd.Series(
        [
            datetime(2010, 1, 1, tzinfo=timezone("America/Los_Angeles")),
            datetime(2010, 1, 1),
            datetime(2010, 1, 2),
        ],
    )
    answer = pd.Series(["America/Los_Angeles", np.nan, np.nan])
    pd.testing.assert_series_equal(primitive_func(x), answer)
def test_date_to_time_zone_nan():
    """Missing datetimes (NaT / NaN) produce NaN in the output."""
    func = DateToTimeZone().get_function()
    dates = pd.Series(
        [
            datetime(2010, 1, 1, tzinfo=timezone("America/Los_Angeles")),
            pd.NaT,
            np.nan,
        ],
    )
    expected = pd.Series(["America/Los_Angeles", np.nan, np.nan])
    pd.testing.assert_series_equal(func(dates), expected)
def test_rate_of_change_primitive_regular_interval():
    """With evenly spaced timestamps, the rate is delta-value per elapsed second."""
    primitive = RateOfChange()
    timestamps = pd.date_range(start="2019-01-01", freq="2s", periods=5)
    measurements = [0, 30, 180, -90, 0]
    expected = pd.Series([np.nan, 15, 75, -135, 45])
    result = primitive(measurements, timestamps)
    pd.testing.assert_series_equal(result, expected)
def test_rate_of_change_primitive_uneven_interval():
    """Rates use each consecutive pair's actual elapsed seconds when spacing is uneven."""
    rate_of_change = RateOfChange()
    times = pd.to_datetime(
        [
            "2019-01-01 00:00:00",
            "2019-01-01 00:00:01",
            "2019-01-01 00:00:03",
            "2019-01-01 00:00:07",
            "2019-01-01 00:00:08",
        ],
    )
    values = [0, 30, 180, -90, 0]
    expected = pd.Series([np.nan, 30, 75, -67.5, 90])
    actual = rate_of_change(values, times)
    pd.testing.assert_series_equal(actual, expected)
def test_rate_of_change_primitive_with_nan():
    """A NaN measurement invalidates the rates on both sides of it."""
    primitive = RateOfChange()
    timestamps = pd.date_range(start="2019-01-01", freq="2s", periods=5)
    measurements = [0, 30, np.nan, -90, 0]
    expected = pd.Series([np.nan, 15, np.nan, np.nan, 45])
    result = primitive(measurements, timestamps)
    pd.testing.assert_series_equal(result, expected)
class TestFileExtension(PrimitiveTestBase):
    """Tests for the FileExtension primitive."""
    primitive = FileExtension
    def test_filepaths(self):
        """Extensions are extracted (lower-cased) from a variety of path styles."""
        primitive_func = FileExtension().get_function()
        array = pd.Series(
            [
                "doc.txt",
                "~/documents/data.json",
                "data.JSON",
                "C:\\Projects\\apilibrary\\apilibrary.sln",
            ],
            dtype="string",
        )
        answer = pd.Series([".txt", ".json", ".json", ".sln"], dtype="string")
        pd.testing.assert_series_equal(primitive_func(array), answer)
    def test_invalid(self):
        """Paths without an extension and NaN inputs map to NaN."""
        primitive_func = FileExtension().get_function()
        array = pd.Series(["doc.txt", "~/documents/data", np.nan], dtype="string")
        answer = pd.Series([".txt", np.nan, np.nan], dtype="string")
        pd.testing.assert_series_equal(primitive_func(array), answer)
    def test_with_featuretools(self, es):
        """The primitive runs end-to-end inside DFS against the test entityset."""
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        transform.append(primitive_instance)
        valid_dfs(
            es,
            aggregation,
            transform,
            self.primitive,
            target_dataframe_name="sessions",
        )
class TestIsFirstWeekOfMonth(PrimitiveTestBase):
    """Tests for the IsFirstWeekOfMonth primitive."""
    primitive = IsFirstWeekOfMonth
    def test_valid_dates(self):
        """Only dates falling in a month's first week return True."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("03/01/2019"),
                pd.to_datetime("03/03/2019"),
                pd.to_datetime("03/31/2019"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array).tolist()
        correct_answers = [True, False, False, False]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_leap_year(self):
        """Feb 29 of a leap year is handled (not in the first week)."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("03/01/2019"),
                pd.to_datetime("02/29/2016"),
                pd.to_datetime("03/31/2019"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array).tolist()
        correct_answers = [True, False, False, False]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_year_before_1970(self):
        """Dates before the Unix epoch are handled correctly."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("06/01/1965"),
                pd.to_datetime("03/02/2019"),
                pd.to_datetime("03/31/2019"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array).tolist()
        correct_answers = [True, True, False, False]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_year_after_2038(self):
        """Dates past the 32-bit time_t horizon (2038) are handled correctly."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("12/31/2040"),
                pd.to_datetime("01/01/2040"),
                pd.to_datetime("03/31/2019"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array).tolist()
        correct_answers = [False, True, False, False]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_nan_input(self):
        """NaN and NaT inputs propagate as NaN in the output."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("03/01/2019"),
                np.nan,
                np.datetime64("NaT"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array).tolist()
        correct_answers = [True, np.nan, np.nan, False]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_with_featuretools(self, es):
        """The primitive runs end-to-end inside DFS against the test entityset."""
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        transform.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
class TestNthWeekOfMonth(PrimitiveTestBase):
    """Tests for the NthWeekOfMonth primitive (1-based week-of-month index)."""
    primitive = NthWeekOfMonth
    def test_valid_dates(self):
        """Week numbers are computed for ordinary dates, including a sixth week."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("03/01/2019"),
                pd.to_datetime("03/03/2019"),
                pd.to_datetime("03/31/2019"),
                pd.to_datetime("03/30/2019"),
                pd.to_datetime("09/01/2019"),
            ],
        )
        answers = primitive_func(array)
        correct_answers = [1, 2, 6, 5, 1]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_leap_year(self):
        """Feb 29 of a leap year falls in the fifth week."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("03/01/2019"),
                pd.to_datetime("02/29/2016"),
                pd.to_datetime("03/31/2019"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array)
        correct_answers = [1, 5, 6, 5]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_year_before_1970(self):
        """Dates before the Unix epoch are handled correctly."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("06/06/1965"),
                pd.to_datetime("03/02/2019"),
                pd.to_datetime("03/31/2019"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array)
        correct_answers = [2, 1, 6, 5]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_year_after_2038(self):
        """Dates past the 32-bit time_t horizon (2038) are handled correctly."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("12/31/2040"),
                pd.to_datetime("01/01/2001"),
                pd.to_datetime("03/31/2019"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array)
        correct_answers = [6, 1, 6, 5]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_nan_input(self):
        """NaN and NaT inputs propagate as NaN in the output."""
        primitive_func = self.primitive().get_function()
        array = pd.Series(
            [
                pd.to_datetime("03/01/2019"),
                np.nan,
                np.datetime64("NaT"),
                pd.to_datetime("03/30/2019"),
            ],
        )
        answers = primitive_func(array)
        correct_answers = [1, np.nan, np.nan, 5]
        np.testing.assert_array_equal(answers, correct_answers)
    def test_with_featuretools(self, es):
        """The primitive runs end-to-end inside DFS against the test entityset."""
        transform, aggregation = find_applicable_primitives(self.primitive)
        primitive_instance = self.primitive()
        transform.append(primitive_instance)
        valid_dfs(es, aggregation, transform, self.primitive)
| 37,994 | 31.810881 | 126 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_season.py | from datetime import datetime
import pandas as pd
from featuretools.primitives import Season
class TestSeason:
    """Tests for the Season primitive (month -> northern-hemisphere season name)."""
    def test_regular(self):
        """Each month of 2019 maps to the expected season string."""
        primitive_instance = Season()
        primitive_func = primitive_instance.get_function()
        case = pd.date_range(start="2019-01", periods=12, freq="m").to_series()
        answer = pd.Series(
            [
                "winter",
                "winter",
                "spring",
                "spring",
                "spring",
                "summer",
                "summer",
                "summer",
                "fall",
                "fall",
                "fall",
                "winter",
            ],
            dtype="string",
        )
        given_answer = primitive_func(case)
        pd.testing.assert_series_equal(
            given_answer.reset_index(drop=True),
            answer.reset_index(drop=True),
        )
    def test_nat(self):
        """NaT inputs map to pd.NA in the string output."""
        primitive_instance = Season()
        primitive_func = primitive_instance.get_function()
        case = pd.Series(
            [
                "NaT",
                "2019-02",
                "2019-03",
                "NaT",
            ],
        ).astype("datetime64[ns]")
        answer = pd.Series([pd.NA, "winter", "winter", pd.NA], dtype="string")
        given_answer = pd.Series(primitive_func(case))
        pd.testing.assert_series_equal(given_answer, answer)
    def test_datetime(self):
        """Plain datetime objects work, including the leap day."""
        primitive_instance = Season()
        primitive_func = primitive_instance.get_function()
        case = pd.Series(
            [
                datetime(2011, 3, 1),
                datetime(2011, 6, 1),
                datetime(2011, 9, 1),
                datetime(2011, 12, 1),
                # leap year
                datetime(2020, 2, 29),
            ],
        )
        answer = pd.Series(
            ["winter", "spring", "summer", "fall", "winter"],
            dtype="string",
        )
        given_answer = primitive_func(case)
        pd.testing.assert_series_equal(given_answer, answer)
| 2,059 | 28.428571 | 79 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_is_federal_holiday.py | from datetime import datetime
import numpy as np
import pandas as pd
from pytest import raises
from featuretools.primitives import IsFederalHoliday
def test_regular():
    """Known US federal holidays return True; a non-holiday returns False."""
    primitive_instance = IsFederalHoliday()
    primitive_func = primitive_instance.get_function()
    case = pd.Series(
        [
            "2016-01-01",
            "2016-02-29",
            "2017-05-29",
            datetime(2019, 7, 4, 10, 0, 30),
        ],
    ).astype("datetime64[ns]")
    answer = pd.Series([True, False, True, True])
    given_answer = pd.Series(primitive_func(case))
    assert given_answer.equals(answer)
def test_nat():
    """NaT rows propagate as NaN in the holiday indicator."""
    func = IsFederalHoliday().get_function()
    dates = pd.Series(
        [
            "2019-10-14",
            "NaT",
            "2016-02-29",
            "NaT",
        ],
    ).astype("datetime64[ns]")
    expected = pd.Series([True, np.nan, False, np.nan])
    result = pd.Series(func(dates))
    assert result.equals(expected)
def test_valid_country():
    """country='Canada' switches to the Canadian holiday calendar."""
    primitive_instance = IsFederalHoliday(country="Canada")
    primitive_func = primitive_instance.get_function()
    case = pd.Series(
        [
            "2016-07-01",
            "2016-11-11",
            "2017-12-26",
            "2018-09-03",
        ],
    ).astype("datetime64[ns]")
    answer = pd.Series([True, False, True, True])
    given_answer = pd.Series(primitive_func(case))
    assert given_answer.equals(answer)
def test_invalid_country():
    """An unrecognized country string raises a ValueError at construction."""
    with raises(ValueError, match="must be one of the available countries"):
        IsFederalHoliday(country="")
def test_multiple_countries():
    """IsFederalHoliday supports many countries and aliases.

    Exercises Mexico, India, and the UK end-to-end on sample dates, then
    verifies that construction succeeds for a broad list of supported
    country names/aliases.
    """
    primitive_mexico = IsFederalHoliday(country="Mexico")
    primitive_func = primitive_mexico.get_function()
    case = pd.Series([datetime(2000, 9, 16), datetime(2005, 1, 1)])
    assert len(primitive_func(case)) > 1
    # Fix: the original re-fetched Mexico's function here and immediately
    # overwrote it with India's, leaving a dead (and confusing) assignment.
    primitive_india = IsFederalHoliday(country="IND")
    primitive_func = primitive_india.get_function()
    case = pd.Series([datetime(2048, 1, 1), datetime(2048, 10, 2)])
    assert len(primitive_func(case)) > 1
    primitive_uk = IsFederalHoliday(country="UK")
    primitive_func = primitive_uk.get_function()
    case = pd.Series([datetime(2048, 3, 17), datetime(2048, 4, 6)])
    assert len(primitive_func(case)) > 1
    # "Germany" appeared twice in the original list; the duplicate is removed.
    countries = [
        "Argentina",
        "AU",
        "Austria",
        "BY",
        "Belgium",
        "Brazil",
        "Canada",
        "Colombia",
        "Croatia",
        "England",
        "Finland",
        "FRA",
        "Germany",
        "Italy",
        "NewZealand",
        "PortugalExt",
        "PTE",
        "Spain",
        "ES",
        "Switzerland",
        "UnitedStates",
        "US",
        "UK",
        "UA",
        "CH",
        "SE",
        "ZA",
    ]
    for x in countries:
        IsFederalHoliday(country=x)
| 2,971 | 26.018182 | 67 | py |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/primitive_tests/transform_primitive_tests/test_same_as_previous.py | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import SameAsPrevious
class TestSameAsPrevious:
    """Tests for the SameAsPrevious primitive (row equals previous row)."""
    def test_ints(self):
        """Adjacent equal int64 values flag True; the first row is always False."""
        primitive_func = SameAsPrevious().get_function()
        array = pd.Series([1, 2, 2, 3, 2], dtype="int64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, False, True, False, False])
        pd.testing.assert_series_equal(answer, correct_answer)
    def test_int64(self):
        """Nullable Int64 input produces a nullable boolean result."""
        primitive_func = SameAsPrevious().get_function()
        array = pd.Series([1, 2, 2, 3, 2], dtype="Int64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, False, True, False, False], dtype="boolean")
        pd.testing.assert_series_equal(answer, correct_answer)
    def test_floats(self):
        """Float values compare the same way as ints."""
        primitive_func = SameAsPrevious().get_function()
        array = pd.Series([1.0, 2.5, 2.5, 3.0, 2.0], dtype="float64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, False, True, False, False])
        pd.testing.assert_series_equal(answer, correct_answer)
    def test_mixed(self):
        """Mixed int/float literals coerced to float64 behave consistently."""
        primitive_func = SameAsPrevious().get_function()
        array = pd.Series([1, 2, 2.0, 3, 2.0], dtype="float64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, False, True, False, False])
        np.testing.assert_array_equal(answer, correct_answer)
    def test_nan(self):
        """Interior NaNs: the expected flags per the default fill behavior."""
        primitive_instance = SameAsPrevious()
        primitive_func = primitive_instance.get_function()
        array = pd.Series([1, np.nan, 3, np.nan, 2], dtype="float64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, True, False, True, False])
        np.testing.assert_array_equal(answer, correct_answer)
    def test_all_nan(self):
        """An all-NaN series yields all False."""
        primitive_instance = SameAsPrevious()
        primitive_func = primitive_instance.get_function()
        array = pd.Series([np.nan, np.nan, np.nan, np.nan], dtype="float64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, False, False, False])
        np.testing.assert_array_equal(answer, correct_answer)
    def test_inf(self):
        """Isolated infinities never match their (finite) neighbors."""
        primitive_instance = SameAsPrevious()
        primitive_func = primitive_instance.get_function()
        array = pd.Series([1, np.inf, 3, np.inf, 2], dtype="float64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, False, False, False, False])
        np.testing.assert_array_equal(answer, correct_answer)
    def test_all_inf(self):
        """Consecutive equal infinities compare equal to each other."""
        primitive_instance = SameAsPrevious()
        primitive_func = primitive_instance.get_function()
        array = pd.Series([np.inf, np.inf, np.inf, np.inf], dtype="float64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, True, True, True])
        np.testing.assert_array_equal(answer, correct_answer)
    def test_fill_method_bfill(self):
        """fill_method='bfill' backfills the NaN before comparing rows."""
        primitive_instance = SameAsPrevious(fill_method="bfill")
        primitive_func = primitive_instance.get_function()
        array = pd.Series([1, np.nan, 3, 2, 2], dtype="float64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, False, True, False, True])
        np.testing.assert_array_equal(answer, correct_answer)
    def test_fill_method_bfill_with_limit(self):
        """limit caps how many consecutive NaNs are backfilled."""
        primitive_instance = SameAsPrevious(fill_method="bfill", limit=2)
        primitive_func = primitive_instance.get_function()
        array = pd.Series([1, np.nan, np.nan, np.nan, 2, 3], dtype="float64")
        answer = primitive_func(array)
        correct_answer = pd.Series([False, False, False, True, True, False])
        np.testing.assert_array_equal(answer, correct_answer)
    def test_raises(self):
        """An unknown fill_method raises a ValueError."""
        with pytest.raises(ValueError):
            SameAsPrevious(fill_method="invalid")
| 3,877 | 43.068182 | 87 | py |
featuretools | featuretools-main/featuretools/tests/integration_data/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/computational_backend/test_calculate_feature_matrix.py | import logging
import os
import re
import shutil
from datetime import datetime
from itertools import combinations
from random import randint
import numpy as np
import pandas as pd
import psutil
import pytest
from tqdm import tqdm
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import (
Age,
AgeNullable,
Boolean,
BooleanNullable,
Integer,
IntegerNullable,
)
from featuretools import (
EntitySet,
Feature,
GroupByTransformFeature,
Timedelta,
calculate_feature_matrix,
dfs,
)
from featuretools.computational_backends import utils
from featuretools.computational_backends.calculate_feature_matrix import (
FEATURE_CALCULATION_PERCENTAGE,
_chunk_dataframe_groups,
_handle_chunk_size,
scatter_warning,
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
n_jobs_to_workers,
)
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
FeatureOutputSlice,
IdentityFeature,
)
from featuretools.primitives import (
Count,
Max,
Min,
Negate,
NMostCommon,
Percentile,
Sum,
TransformPrimitive,
)
from featuretools.tests.testing_utils import (
backward_path,
get_mock_client_cluster,
to_pandas,
)
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
def test_scatter_warning(caplog):
    """scatter_warning logs when the entityset reached only some workers."""
    logger = logging.getLogger("featuretools")
    match = "EntitySet was only scattered to {} out of {} workers"
    warning_message = match.format(1, 2)
    # Temporarily enable propagation so caplog can capture the library logger.
    logger.propagate = True
    scatter_warning(1, 2)
    logger.propagate = False
    assert warning_message in caplog.text
# TODO: final assert fails w/ Dask
def test_calc_feature_matrix(es):
    """End-to-end calculate_feature_matrix behavior on per-instance cutoff times.

    Covers: correct label computation, validation errors for bad `features`
    and `cutoff_time` arguments, rejection of duplicated cutoff rows, and
    preservation of a non-chronological cutoff-time row order in the output.
    """
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Distributed dataframe result not ordered")
    times = list(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)],
    )
    instances = range(17)
    cutoff_time = pd.DataFrame({"time": times, es["log"].ww.index: instances})
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
    property_feature = Feature(es["log"].ww["value"]) > 10
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        es,
        cutoff_time=cutoff_time,
        verbose=True,
    )
    assert (feature_matrix[property_feature.get_name()] == labels).values.all()
    # Invalid `features` arguments must be rejected up front.
    error_text = "features must be a non-empty list of features"
    with pytest.raises(AssertionError, match=error_text):
        feature_matrix = calculate_feature_matrix(
            "features",
            es,
            cutoff_time=cutoff_time,
        )
    with pytest.raises(AssertionError, match=error_text):
        feature_matrix = calculate_feature_matrix([], es, cutoff_time=cutoff_time)
    with pytest.raises(AssertionError, match=error_text):
        feature_matrix = calculate_feature_matrix(
            [1, 2, 3],
            es,
            cutoff_time=cutoff_time,
        )
    # Non-datetime scalar cutoff times must be rejected.
    error_text = (
        "cutoff_time times must be datetime type: try casting via "
        "pd\\.to_datetime\\(\\)"
    )
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix(
            [property_feature],
            es,
            instance_ids=range(17),
            cutoff_time=17,
        )
    error_text = "cutoff_time must be a single value or DataFrame"
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix(
            [property_feature],
            es,
            instance_ids=range(17),
            cutoff_time=times,
        )
    # Duplicated (instance, time) rows in the cutoff frame must be rejected.
    cutoff_times_dup = pd.DataFrame(
        {
            "time": [datetime(2018, 3, 1), datetime(2018, 3, 1)],
            es["log"].ww.index: [1, 1],
        },
    )
    error_text = "Duplicated rows in cutoff time dataframe."
    with pytest.raises(AssertionError, match=error_text):
        feature_matrix = calculate_feature_matrix(
            [property_feature],
            entityset=es,
            cutoff_time=cutoff_times_dup,
        )
    cutoff_reordered = cutoff_time.iloc[[-1, 10, 1]]  # 3 ids not ordered by cutoff time
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        es,
        cutoff_time=cutoff_reordered,
        verbose=True,
    )
    assert all(feature_matrix.index == cutoff_reordered["id"].values)
# fails with Dask and Spark entitysets, cutoff time not reordered; cannot verify out of order
# - can't tell if wrong/different all are false so can't check positional
def test_cfm_warns_dask_cutoff_time(es):
    """Passing a Dask DataFrame as cutoff_time warns that it will be computed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    times = list(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)],
    )
    instances = range(17)
    cutoff_time = pd.DataFrame({"time": times, es["log"].ww.index: instances})
    cutoff_time = dd.from_pandas(cutoff_time, npartitions=4)
    property_feature = Feature(es["log"].ww["value"]) > 10
    match = (
        "cutoff_time should be a Pandas DataFrame: "
        "computing cutoff_time, this may take a while"
    )
    with pytest.warns(UserWarning, match=match):
        calculate_feature_matrix([property_feature], es, cutoff_time=cutoff_time)
def test_cfm_compose(es, lt):
    """Feature values computed at composed label cutoff times match the labels."""
    boolean_feature = Feature(es["log"].ww["value"]) > 10
    fm = calculate_feature_matrix(
        [boolean_feature],
        es,
        cutoff_time=lt,
        verbose=True,
    )
    fm = to_pandas(fm, index="id", sort_index=True)
    matches = fm[boolean_feature.get_name()] == fm["label_func"]
    assert matches.values.all()
def test_cfm_compose_approximate(es, lt):
    """Approximate feature calculation still matches the composed labels.

    Skipped on non-pandas backends because `approximate` is unsupported there.
    """
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("dask does not support approximate")
    property_feature = Feature(es["log"].ww["value"]) > 10
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        es,
        cutoff_time=lt,
        approximate="1s",
        verbose=True,
    )
    # isinstance is the idiomatic type check and uses the public pd.DataFrame
    # alias instead of the private pd.core.frame module path.
    assert isinstance(feature_matrix, pd.DataFrame)
    feature_matrix = to_pandas(feature_matrix, index="id", sort_index=True)
    assert (
        feature_matrix[property_feature.get_name()] == feature_matrix["label_func"]
    ).values.all()
def test_cfm_dask_compose(dask_es, lt):
    """Composed labels match computed features on a Dask-backed entityset."""
    property_feature = Feature(dask_es["log"].ww["value"]) > 10
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        dask_es,
        cutoff_time=lt,
        verbose=True,
    )
    # Materialize the lazy Dask result before comparing.
    feature_matrix = feature_matrix.compute()
    assert (
        feature_matrix[property_feature.get_name()] == feature_matrix["label_func"]
    ).values.all()
# tests approximate, skip for dask/spark
def test_cfm_approximate_correct_ordering():
    """Approximation must not change the output row ordering or values.

    Builds a synthetic flights entityset, computes the feature matrix with and
    without `approximate`, and verifies both preserve the cutoff-time row order
    and agree cell-for-cell (treating NaN == NaN).
    """
    trips = {
        "trip_id": [i for i in range(1000)],
        "flight_time": [datetime(1998, 4, 2) for i in range(350)]
        + [datetime(1997, 4, 3) for i in range(650)],
        "flight_id": [randint(1, 25) for i in range(1000)],
        "trip_duration": [randint(1, 999) for i in range(1000)],
    }
    df = pd.DataFrame.from_dict(trips)
    es = EntitySet("flights")
    es.add_dataframe(
        dataframe_name="trips",
        dataframe=df,
        index="trip_id",
        time_index="flight_time",
    )
    es.normalize_dataframe(
        base_dataframe_name="trips",
        new_dataframe_name="flights",
        index="flight_id",
        make_time_index=True,
    )
    features = dfs(entityset=es, target_dataframe_name="trips", features_only=True)
    # Only direct features of parent-level aggregations are affected by
    # approximation, so restrict the comparison to those.
    flight_features = [
        feature
        for feature in features
        if isinstance(feature, DirectFeature)
        and isinstance(feature.base_features[0], AggregationFeature)
    ]
    property_feature = IdentityFeature(es["trips"].ww["trip_id"])
    cutoff_time = pd.DataFrame.from_dict(
        {"instance_id": df["trip_id"], "time": df["flight_time"]},
    )
    time_feature = IdentityFeature(es["trips"].ww["flight_time"])
    feature_matrix = calculate_feature_matrix(
        flight_features + [property_feature, time_feature],
        es,
        cutoff_time_in_index=True,
        cutoff_time=cutoff_time,
    )
    feature_matrix.index.names = ["instance", "time"]
    assert np.all(
        feature_matrix.reset_index("time").reset_index()[["instance", "time"]].values
        == feature_matrix[["trip_id", "flight_time"]].values,
    )
    feature_matrix_2 = calculate_feature_matrix(
        flight_features + [property_feature, time_feature],
        es,
        cutoff_time=cutoff_time,
        cutoff_time_in_index=True,
        approximate=Timedelta(2, "d"),
    )
    feature_matrix_2.index.names = ["instance", "time"]
    assert np.all(
        feature_matrix_2.reset_index("time").reset_index()[["instance", "time"]].values
        == feature_matrix_2[["trip_id", "flight_time"]].values,
    )
    for column in feature_matrix:
        for x, y in zip(feature_matrix[column], feature_matrix_2[column]):
            assert (pd.isnull(x) and pd.isnull(y)) or (x == y)
# uses approximate, skip for dask/spark entitysets
def test_cfm_no_cutoff_time_index(pd_es):
    """With cutoff_time_in_index=False the output is indexed by instance id only.

    Also checks approximated direct-feature values at two different
    approximation frequencies against known expected counts.
    """
    agg_feat = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    agg_feat4 = Feature(agg_feat, parent_dataframe_name="customers", primitive=Sum)
    dfeat = DirectFeature(agg_feat4, "sessions")
    cutoff_time = pd.DataFrame(
        {
            "time": [datetime(2013, 4, 9, 10, 31, 19), datetime(2013, 4, 9, 11, 0, 0)],
            "instance_id": [0, 2],
        },
    )
    feature_matrix = calculate_feature_matrix(
        [dfeat, agg_feat],
        pd_es,
        cutoff_time_in_index=False,
        approximate=Timedelta(12, "s"),
        cutoff_time=cutoff_time,
    )
    assert feature_matrix.index.name == "id"
    assert feature_matrix.index.tolist() == [0, 2]
    assert feature_matrix[dfeat.get_name()].tolist() == [10, 10]
    assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
    # Earlier cutoffs with a finer approximation window change the
    # approximated direct-feature value for instance 0.
    cutoff_time = pd.DataFrame(
        {
            "time": [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
            "instance_id": [0, 2],
        },
    )
    feature_matrix_2 = calculate_feature_matrix(
        [dfeat, agg_feat],
        pd_es,
        cutoff_time_in_index=False,
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_time,
    )
    assert feature_matrix_2.index.name == "id"
    assert feature_matrix_2.index.tolist() == [0, 2]
    assert feature_matrix_2[dfeat.get_name()].tolist() == [7, 10]
    assert feature_matrix_2[agg_feat.get_name()].tolist() == [5, 1]
# TODO: fails with dask entitysets
# TODO: fails with spark entitysets
def test_cfm_duplicated_index_in_cutoff_time(es):
    """Duplicate index labels in the cutoff frame still yield one row per cutoff row."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Distributed results not ordered, missing duplicates")
    times = [
        datetime(2011, 4, 1),
        datetime(2011, 5, 1),
        datetime(2011, 4, 1),
        datetime(2011, 5, 1),
    ]
    instances = [1, 1, 2, 2]
    property_feature = Feature(es["log"].ww["value"]) > 10
    cutoff_time = pd.DataFrame({"id": instances, "time": times}, index=[1, 1, 1, 1])
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        es,
        cutoff_time=cutoff_time,
        chunk_size=1,
    )
    assert feature_matrix.shape[0] == cutoff_time.shape[0]
# TODO: fails with Dask, Spark
def test_saveprogress(es, tmp_path):
    """save_progress writes one per-cutoff CSV whose union equals the normal result."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("saveprogress fails with distributed entitysets")
    times = list(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)],
    )
    cutoff_time = pd.DataFrame({"time": times, "instance_id": range(17)})
    property_feature = Feature(es["log"].ww["value"]) > 10
    save_progress = str(tmp_path)
    fm_save = calculate_feature_matrix(
        [property_feature],
        es,
        cutoff_time=cutoff_time,
        save_progress=save_progress,
    )
    _, _, files = next(os.walk(save_progress))
    files = [os.path.join(save_progress, file) for file in files]
    # there are 17 datetime files created above
    assert len(files) == 17
    list_df = []
    for file_ in files:
        df = pd.read_csv(file_, index_col="id", header=0)
        list_df.append(df)
    merged_df = pd.concat(list_df)
    # Re-attach the cutoff times as a second index level to match fm_save.
    merged_df.set_index(pd.DatetimeIndex(times), inplace=True, append=True)
    fm_no_save = calculate_feature_matrix(
        [property_feature],
        es,
        cutoff_time=cutoff_time,
    )
    assert np.all((merged_df.sort_index().values) == (fm_save.sort_index().values))
    assert np.all((fm_no_save.sort_index().values) == (fm_save.sort_index().values))
    assert np.all((fm_no_save.sort_index().values) == (merged_df.sort_index().values))
    shutil.rmtree(save_progress)
def test_cutoff_time_correctly(es):
    """Feature values must reflect only events at or before each instance's cutoff."""
    count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    cutoff_time = pd.DataFrame(
        {
            "time": [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)],
            "instance_id": [0, 1, 2],
        },
    )
    fm = calculate_feature_matrix(
        [count_feat],
        es,
        cutoff_time=cutoff_time,
    )
    fm = to_pandas(fm, index="id", sort_index=True)
    expected = [10, 5, 0]
    assert (fm[count_feat.get_name()] == expected).values.all()
def test_cutoff_time_binning():
    """bin_cutoff_times should round each cutoff down to the start of its bin.

    Bins are aligned on fixed-size windows (a 4-hour bin containing 12:31
    starts at 12:00, a 25-hour bin containing it starts at 22:00 the day
    before, etc.).  Relative units such as months have no fixed length, so
    binning with them must raise a ValueError.
    """
    cutoff_time = pd.DataFrame(
        {
            "time": [
                datetime(2011, 4, 9, 12, 31),
                datetime(2011, 4, 10, 11),
                datetime(2011, 4, 10, 13, 10, 1),
            ],
            "instance_id": [1, 2, 3],
        },
    )
    cutoff_time.ww.init()
    binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(4, "h"))
    labels = [
        datetime(2011, 4, 9, 12),
        datetime(2011, 4, 10, 8),
        datetime(2011, 4, 10, 12),
    ]
    for i in binned_cutoff_times.index:
        assert binned_cutoff_times["time"][i] == labels[i]
    binned_cutoff_times = bin_cutoff_times(cutoff_time, Timedelta(25, "h"))
    labels = [
        datetime(2011, 4, 8, 22),
        datetime(2011, 4, 9, 23),
        datetime(2011, 4, 9, 23),
    ]
    for i in binned_cutoff_times.index:
        assert binned_cutoff_times["time"][i] == labels[i]
    error_text = "Unit is relative"
    with pytest.raises(ValueError, match=error_text):
        # return value intentionally discarded; only the raised error matters
        bin_cutoff_times(cutoff_time, Timedelta(1, "mo"))
def test_training_window_fails_dask(dask_es):
    """training_window is unsupported for Dask-backed entitysets and must raise."""
    count_feature = Feature(
        dask_es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    expected_msg = "Using training_window is not supported with Dask dataframes"
    with pytest.raises(ValueError, match=expected_msg):
        calculate_feature_matrix(
            [count_feature],
            dask_es,
            training_window="2 hours",
        )
def test_cutoff_time_columns_order(es):
    """The instance-id and time columns should be recognized by name — either
    "instance_id"/"time" or the target dataframe's index/time_index names —
    regardless of their position among unrelated columns.
    """
    property_feature = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
    id_col_names = ["instance_id", es["customers"].ww.index]
    time_col_names = ["time", es["customers"].ww.time_index]
    for id_col in id_col_names:
        for time_col in time_col_names:
            # dummy columns surround the id/time columns to exercise ordering
            cutoff_time = pd.DataFrame(
                {
                    "dummy_col_1": [1, 2, 3],
                    id_col: [0, 1, 2],
                    "dummy_col_2": [True, False, False],
                    time_col: times,
                },
            )
            feature_matrix = calculate_feature_matrix(
                [property_feature],
                es,
                cutoff_time=cutoff_time,
            )
            labels = [10, 5, 0]
            feature_matrix = to_pandas(feature_matrix, index="id", sort_index=True)
            assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_cutoff_time_df_redundant_column_names(es):
    """A cutoff-time frame containing both "instance_id" and the target index
    name (or both "time" and the target time-index name) is ambiguous and
    must raise an AttributeError.
    """
    property_feature = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    times = [datetime(2011, 4, 10), datetime(2011, 4, 11), datetime(2011, 4, 7)]
    # both "instance_id" and the customers index column present
    cutoff_time = pd.DataFrame(
        {
            es["customers"].ww.index: [0, 1, 2],
            "instance_id": [0, 1, 2],
            "dummy_col": [True, False, False],
            "time": times,
        },
    )
    err_msg = (
        'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column'
        " with the same name as the target dataframe index"
    )
    with pytest.raises(AttributeError, match=err_msg):
        calculate_feature_matrix([property_feature], es, cutoff_time=cutoff_time)
    # both "time" and the customers time-index column present
    cutoff_time = pd.DataFrame(
        {
            es["customers"].ww.time_index: [0, 1, 2],
            "instance_id": [0, 1, 2],
            "dummy_col": [True, False, False],
            "time": times,
        },
    )
    err_msg = (
        'Cutoff time DataFrame cannot contain both a column named "time" and a column'
        " with the same name as the target dataframe time index"
    )
    with pytest.raises(AttributeError, match=err_msg):
        calculate_feature_matrix([property_feature], es, cutoff_time=cutoff_time)
def test_training_window(pd_es):
    """training_window limits each calculation to data within the window before
    the cutoff; include_cutoff_time controls whether rows exactly at the cutoff
    are counted.  Also checks the last-time-index warning and that windows in
    "observations" units are rejected.
    """
    property_feature = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    top_level_agg = Feature(
        pd_es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    # make sure features that have a direct to a higher level agg
    # so we have multiple "filter eids" in get_pandas_data_slice,
    # and we go through the loop to pull data with a training_window param more than once
    dagg = DirectFeature(top_level_agg, "customers")
    # for now, warns if last_time_index not present
    times = [
        datetime(2011, 4, 9, 12, 31),
        datetime(2011, 4, 10, 11),
        datetime(2011, 4, 10, 13, 10),
    ]
    cutoff_time = pd.DataFrame({"time": times, "instance_id": [0, 1, 2]})
    warn_text = (
        "Using training_window but last_time_index is not set for dataframe customers"
    )
    with pytest.warns(UserWarning, match=warn_text):
        feature_matrix = calculate_feature_matrix(
            [property_feature, dagg],
            pd_es,
            cutoff_time=cutoff_time,
            training_window="2 hours",
        )
    pd_es.add_last_time_indexes()
    # windows measured in "observations" are not supported
    error_text = "Training window cannot be in observations"
    with pytest.raises(AssertionError, match=error_text):
        feature_matrix = calculate_feature_matrix(
            [property_feature],
            pd_es,
            cutoff_time=cutoff_time,
            training_window=Timedelta(2, "observations"),
        )
    # Case1. include_cutoff_time = True
    feature_matrix = calculate_feature_matrix(
        [property_feature, dagg],
        pd_es,
        cutoff_time=cutoff_time,
        training_window="2 hours",
        include_cutoff_time=True,
    )
    prop_values = [4, 5, 1]
    dagg_values = [3, 2, 1]
    assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
    assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
    # Case2. include_cutoff_time = False
    feature_matrix = calculate_feature_matrix(
        [property_feature, dagg],
        pd_es,
        cutoff_time=cutoff_time,
        training_window="2 hours",
        include_cutoff_time=False,
    )
    prop_values = [5, 5, 2]
    dagg_values = [3, 2, 1]
    assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
    assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
    # Case3. include_cutoff_time = False with single cutoff time value
    feature_matrix = calculate_feature_matrix(
        [property_feature, dagg],
        pd_es,
        cutoff_time=pd.to_datetime("2011-04-09 10:40:00"),
        training_window="9 minutes",
        include_cutoff_time=False,
    )
    prop_values = [0, 4, 0]
    dagg_values = [3, 3, 3]
    assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
    assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
    # Case4. include_cutoff_time = True with single cutoff time value
    feature_matrix = calculate_feature_matrix(
        [property_feature, dagg],
        pd_es,
        cutoff_time=pd.to_datetime("2011-04-10 10:40:00"),
        training_window="2 days",
        include_cutoff_time=True,
    )
    prop_values = [0, 10, 1]
    dagg_values = [3, 3, 3]
    assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
    assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
def test_training_window_overlap(pd_es):
    """With two cutoffs for the same instance whose training windows abut,
    include_cutoff_time decides whether a row exactly on the window boundary
    is counted (1 vs 0 for the first cutoff).
    """
    pd_es.add_last_time_indexes()
    count_log = Feature(
        Feature(pd_es["log"].ww["id"]),
        parent_dataframe_name="customers",
        primitive=Count,
    )
    cutoff_time = pd.DataFrame(
        {
            "id": [0, 0],
            "time": ["2011-04-09 10:30:00", "2011-04-09 10:40:00"],
        },
    ).astype({"time": "datetime64[ns]"})
    # Case1. include_cutoff_time = True
    actual = calculate_feature_matrix(
        features=[count_log],
        entityset=pd_es,
        cutoff_time=cutoff_time,
        cutoff_time_in_index=True,
        training_window="10 minutes",
        include_cutoff_time=True,
    )
    actual = actual["COUNT(log)"]
    np.testing.assert_array_equal(actual.values, [1, 9])
    # Case2. include_cutoff_time = False
    actual = calculate_feature_matrix(
        features=[count_log],
        entityset=pd_es,
        cutoff_time=cutoff_time,
        cutoff_time_in_index=True,
        training_window="10 minutes",
        include_cutoff_time=False,
    )
    actual = actual["COUNT(log)"]
    np.testing.assert_array_equal(actual.values, [0, 9])
def test_include_cutoff_time_without_training_window(es):
    """include_cutoff_time also applies when no training window is set, for
    both DataFrame cutoffs and a single scalar cutoff value.
    """
    es.add_last_time_indexes()
    count_log = Feature(
        base=Feature(es["log"].ww["id"]),
        parent_dataframe_name="customers",
        primitive=Count,
    )
    cutoff_time = pd.DataFrame(
        {
            "id": [0, 0],
            "time": ["2011-04-09 10:30:00", "2011-04-09 10:31:00"],
        },
    ).astype({"time": "datetime64[ns]"})
    # Case1. include_cutoff_time = True
    actual = calculate_feature_matrix(
        features=[count_log],
        entityset=es,
        cutoff_time=cutoff_time,
        cutoff_time_in_index=True,
        include_cutoff_time=True,
    )
    actual = to_pandas(actual)["COUNT(log)"]
    np.testing.assert_array_equal(actual.values, [1, 6])
    # Case2. include_cutoff_time = False
    actual = calculate_feature_matrix(
        features=[count_log],
        entityset=es,
        cutoff_time=cutoff_time,
        cutoff_time_in_index=True,
        include_cutoff_time=False,
    )
    actual = to_pandas(actual)["COUNT(log)"]
    np.testing.assert_array_equal(actual.values, [0, 5])
    # Case3. include_cutoff_time = True with single cutoff time value
    actual = calculate_feature_matrix(
        features=[count_log],
        entityset=es,
        cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
        instance_ids=[0],
        cutoff_time_in_index=True,
        include_cutoff_time=True,
    )
    actual = to_pandas(actual)["COUNT(log)"]
    np.testing.assert_array_equal(actual.values, [6])
    # Case4. include_cutoff_time = False with single cutoff time value
    actual = calculate_feature_matrix(
        features=[count_log],
        entityset=es,
        cutoff_time=pd.to_datetime("2011-04-09 10:31:00"),
        instance_ids=[0],
        cutoff_time_in_index=True,
        include_cutoff_time=False,
    )
    actual = to_pandas(actual)["COUNT(log)"]
    np.testing.assert_array_equal(actual.values, [5])
def test_approximate_dfeat_of_agg_on_target_include_cutoff_time(pd_es):
    """include_cutoff_time interacts with approximate: the approximated direct
    feature is evaluated at the binned cutoff, so a log event landing exactly
    on the bin boundary is excluded (5) or included (6) accordingly, while the
    non-approximated aggregation is unaffected.
    """
    agg_feat = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    agg_feat2 = Feature(agg_feat, parent_dataframe_name="customers", primitive=Sum)
    dfeat = DirectFeature(agg_feat2, "sessions")
    cutoff_time = pd.DataFrame(
        {"time": [datetime(2011, 4, 9, 10, 31, 19)], "instance_id": [0]},
    )
    feature_matrix = calculate_feature_matrix(
        [dfeat, agg_feat2, agg_feat],
        pd_es,
        approximate=Timedelta(20, "s"),
        cutoff_time=cutoff_time,
        include_cutoff_time=False,
    )
    # binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
    # log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
    # excluded due to approximate cutoff time point
    assert feature_matrix[dfeat.get_name()].tolist() == [5]
    assert feature_matrix[agg_feat.get_name()].tolist() == [5]
    feature_matrix = calculate_feature_matrix(
        [dfeat, agg_feat],
        pd_es,
        approximate=Timedelta(20, "s"),
        cutoff_time=cutoff_time,
        include_cutoff_time=True,
    )
    # binned cutoff_time will be datetime(2011, 4, 9, 10, 31, 0) and
    # log event 5 at datetime(2011, 4, 9, 10, 31, 0) will be
    # included due to approximate cutoff time point
    assert feature_matrix[dfeat.get_name()].tolist() == [6]
    assert feature_matrix[agg_feat.get_name()].tolist() == [5]
def test_training_window_recent_time_index(pd_es):
    """A customer with no sessions (added here) must still appear in the
    result when training windows are used, with a 0 count and the correct
    region-level aggregation.
    """
    # customer with no sessions
    row = {
        "id": [3],
        "age": [73],
        "région_id": ["United States"],
        "cohort": [1],
        "cancel_reason": ["Lost interest"],
        "loves_ice_cream": [True],
        "favorite_quote": ["Don't look back. Something might be gaining on you."],
        "signup_date": [datetime(2011, 4, 10)],
        "upgrade_date": [datetime(2011, 4, 12)],
        "cancel_date": [datetime(2011, 5, 13)],
        "birthday": [datetime(1938, 2, 1)],
        "engagement_level": [2],
    }
    to_add_df = pd.DataFrame(row)
    to_add_df.index = range(3, 4)
    # have to convert category to int in order to concat
    old_df = pd_es["customers"]
    old_df.index = old_df.index.astype("int")
    old_df["id"] = old_df["id"].astype(int)
    df = pd.concat([old_df, to_add_df], sort=True)
    # convert back after
    df.index = df.index.astype("category")
    df["id"] = df["id"].astype("category")
    pd_es.replace_dataframe(
        dataframe_name="customers",
        df=df,
        recalculate_last_time_indexes=False,
    )
    pd_es.add_last_time_indexes()
    property_feature = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    top_level_agg = Feature(
        pd_es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dagg = DirectFeature(top_level_agg, "customers")
    instance_ids = [0, 1, 2, 3]
    times = [
        datetime(2011, 4, 9, 12, 31),
        datetime(2011, 4, 10, 11),
        datetime(2011, 4, 10, 13, 10, 1),
        datetime(2011, 4, 10, 1, 59, 59),
    ]
    cutoff_time = pd.DataFrame({"time": times, "instance_id": instance_ids})
    # Case1. include_cutoff_time = True
    feature_matrix = calculate_feature_matrix(
        [property_feature, dagg],
        pd_es,
        cutoff_time=cutoff_time,
        training_window="2 hours",
        include_cutoff_time=True,
    )
    prop_values = [4, 5, 1, 0]
    assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
    dagg_values = [3, 2, 1, 3]
    feature_matrix.sort_index(inplace=True)
    assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
    # Case2. include_cutoff_time = False
    feature_matrix = calculate_feature_matrix(
        [property_feature, dagg],
        pd_es,
        cutoff_time=cutoff_time,
        training_window="2 hours",
        include_cutoff_time=False,
    )
    prop_values = [5, 5, 1, 0]
    assert (feature_matrix[property_feature.get_name()] == prop_values).values.all()
    dagg_values = [3, 2, 1, 3]
    feature_matrix.sort_index(inplace=True)
    assert (feature_matrix[dagg.get_name()] == dagg_values).values.all()
# TODO: add test to fail w/ spark
def test_approximate_fails_dask(dask_es):
    """approximate is unsupported for Dask-backed entitysets and must raise."""
    session_count = Feature(
        dask_es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    expected_msg = "Using approximate is not supported with Dask dataframes"
    with pytest.raises(ValueError, match=expected_msg):
        calculate_feature_matrix(
            [session_count],
            dask_es,
            approximate=Timedelta(1, "week"),
        )
def test_approximate_multiple_instances_per_cutoff_time(pd_es):
    """Multiple instances per cutoff produce one row each, and the
    non-approximated aggregation is still computed per instance."""
    session_count = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    customer_total = Feature(
        session_count,
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    dfeat = DirectFeature(customer_total, "sessions")
    cutoff_time = pd.DataFrame(
        {
            "time": [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
            "instance_id": [0, 2],
        },
    )
    fm = calculate_feature_matrix(
        [dfeat, session_count],
        pd_es,
        approximate=Timedelta(1, "week"),
        cutoff_time=cutoff_time,
    )
    assert fm.shape[0] == 2
    assert fm[session_count.get_name()].tolist() == [5, 1]
def test_approximate_with_multiple_paths(pd_diamond_es):
    """Approximation works when multiple relationship paths exist (diamond schema)."""
    es = pd_diamond_es
    backward = backward_path(es, ["regions", "customers", "transactions"])
    region_count = AggregationFeature(
        Feature(es["transactions"].ww["id"]),
        parent_dataframe_name="regions",
        relationship_path=backward,
        primitive=Count,
    )
    direct = DirectFeature(region_count, "customers")
    cutoff_time = pd.DataFrame(
        {
            "time": [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
            "instance_id": [0, 2],
        },
    )
    fm = calculate_feature_matrix(
        [direct],
        es,
        approximate=Timedelta(1, "week"),
        cutoff_time=cutoff_time,
    )
    assert fm[direct.get_name()].tolist() == [6, 2]
def test_approximate_dfeat_of_agg_on_target(pd_es):
    """An approximated direct feature of a customer-level sum coexists with an
    exact session-level count for the same instances."""
    session_count = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    customer_total = Feature(
        session_count,
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    dfeat = DirectFeature(customer_total, "sessions")
    cutoff_time = pd.DataFrame(
        {
            "time": [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
            "instance_id": [0, 2],
        },
    )
    fm = calculate_feature_matrix(
        [dfeat, session_count],
        pd_es,
        instance_ids=[0, 2],
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_time,
    )
    assert fm[dfeat.get_name()].tolist() == [7, 10]
    assert fm[session_count.get_name()].tolist() == [5, 1]
def test_approximate_dfeat_of_need_all_values(pd_es):
    """Direct features built on a uses-full-dataframe primitive (Percentile)
    must be evaluated with the approximate (binned) cutoff times, while the
    plain aggregation uses the exact cutoffs.

    Expected values are recomputed by hand from the raw log dataframe at both
    the exact cutoffs and the 10-second-binned approximate cutoffs.
    """
    p = Feature(pd_es["log"].ww["value"], primitive=Percentile)
    agg_feat = Feature(p, parent_dataframe_name="sessions", primitive=Sum)
    agg_feat2 = Feature(agg_feat, parent_dataframe_name="customers", primitive=Sum)
    dfeat = DirectFeature(agg_feat2, "sessions")
    times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
    cutoff_time = pd.DataFrame({"time": times, "instance_id": [0, 2]})
    feature_matrix = calculate_feature_matrix(
        [dfeat, agg_feat],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time_in_index=True,
        cutoff_time=cutoff_time,
    )
    log_df = pd_es["log"]
    instances = [0, 2]
    cutoffs = [pd.Timestamp("2011-04-09 10:31:19"), pd.Timestamp("2011-04-09 11:00:00")]
    approxes = [
        pd.Timestamp("2011-04-09 10:31:10"),
        pd.Timestamp("2011-04-09 11:00:00"),
    ]
    true_vals = []
    true_vals_approx = []
    for instance, cutoff, approx in zip(instances, cutoffs, approxes):
        # copy() so assigning the "percentile" column below operates on an
        # independent frame instead of a slice of log_df (avoids pandas
        # SettingWithCopyWarning)
        log_data_cutoff = log_df[log_df["datetime"] < cutoff].copy()
        log_data_cutoff["percentile"] = log_data_cutoff["value"].rank(pct=True)
        true_agg = (
            log_data_cutoff.loc[log_data_cutoff["session_id"] == instance, "percentile"]
            .fillna(0)
            .sum()
        )
        true_vals.append(round(true_agg, 3))
        log_data_approx = log_df[log_df["datetime"] < approx].copy()
        log_data_approx["percentile"] = log_data_approx["value"].rank(pct=True)
        true_agg_approx = (
            log_data_approx.loc[
                log_data_approx["session_id"].isin([0, 1, 2]),
                "percentile",
            ]
            .fillna(0)
            .sum()
        )
        true_vals_approx.append(round(true_agg_approx, 3))
    lapprox = [round(x, 3) for x in feature_matrix[dfeat.get_name()].tolist()]
    test_list = [round(x, 3) for x in feature_matrix[agg_feat.get_name()].tolist()]
    assert lapprox == true_vals_approx
    assert test_list == true_vals
def test_uses_full_dataframe_feat_of_approximate(pd_es):
    """Only features not feeding a uses-full-dataframe primitive may be
    approximated: dfeat2 is approximated, while p/dfeat/agg_feat must come out
    identical for any approximate window (or none at all).
    """
    agg_feat = Feature(
        pd_es["log"].ww["value"],
        parent_dataframe_name="sessions",
        primitive=Sum,
    )
    agg_feat2 = Feature(agg_feat, parent_dataframe_name="customers", primitive=Sum)
    agg_feat3 = Feature(agg_feat, parent_dataframe_name="customers", primitive=Max)
    dfeat = DirectFeature(agg_feat2, "sessions")
    dfeat2 = DirectFeature(agg_feat3, "sessions")
    p = Feature(dfeat, primitive=Percentile)
    times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
    cutoff_time = pd.DataFrame({"time": times, "instance_id": [0, 2]})
    # only dfeat2 should be approximated
    # because Percentile needs all values
    feature_matrix_only_dfeat2 = calculate_feature_matrix(
        [dfeat2],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time_in_index=True,
        cutoff_time=cutoff_time,
    )
    assert feature_matrix_only_dfeat2[dfeat2.get_name()].tolist() == [50, 50]
    feature_matrix_approx = calculate_feature_matrix(
        [p, dfeat, dfeat2, agg_feat],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time_in_index=True,
        cutoff_time=cutoff_time,
    )
    assert (
        feature_matrix_only_dfeat2[dfeat2.get_name()].tolist()
        == feature_matrix_approx[dfeat2.get_name()].tolist()
    )
    feature_matrix_small_approx = calculate_feature_matrix(
        [p, dfeat, dfeat2, agg_feat],
        pd_es,
        approximate=Timedelta(10, "ms"),
        cutoff_time_in_index=True,
        cutoff_time=cutoff_time,
    )
    feature_matrix_no_approx = calculate_feature_matrix(
        [p, dfeat, dfeat2, agg_feat],
        pd_es,
        cutoff_time_in_index=True,
        cutoff_time=cutoff_time,
    )
    # the non-approximated features agree across all three runs, pairwise
    for f in [p, dfeat, agg_feat]:
        for fm1, fm2 in combinations(
            [
                feature_matrix_approx,
                feature_matrix_small_approx,
                feature_matrix_no_approx,
            ],
            2,
        ):
            assert fm1[f.get_name()].tolist() == fm2[f.get_name()].tolist()
def test_approximate_dfeat_of_dfeat_of_agg_on_target(pd_es):
    """Stacked direct features (customers -> sessions -> log) are
    approximated through both hops."""
    session_count = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    customer_total = Feature(
        session_count,
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    log_dfeat = DirectFeature(Feature(customer_total, "sessions"), "log")
    cutoff_time = pd.DataFrame(
        {
            "time": [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)],
            "instance_id": [0, 2],
        },
    )
    fm = calculate_feature_matrix(
        [log_dfeat],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_time,
    )
    assert fm[log_dfeat.get_name()].tolist() == [7, 10]
def test_empty_path_approximate_full(pd_es):
    """Sessions with a NaN customer_id have no path to the customer-level
    aggregation, so the approximated direct feature falls back to 0 for them
    while the session-level count is unaffected.
    """
    pd_es["sessions"].ww["customer_id"] = pd.Series(
        [np.nan, np.nan, np.nan, 1, 1, 2],
        dtype="category",
    )
    # Need to reassign the `foreign_key` tag as the column reassignment above removes it
    pd_es["sessions"].ww.set_types(semantic_tags={"customer_id": "foreign_key"})
    agg_feat = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    agg_feat2 = Feature(agg_feat, parent_dataframe_name="customers", primitive=Sum)
    dfeat = DirectFeature(agg_feat2, "sessions")
    times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
    cutoff_time = pd.DataFrame({"time": times, "instance_id": [0, 2]})
    feature_matrix = calculate_feature_matrix(
        [dfeat, agg_feat],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_time,
    )
    vals1 = feature_matrix[dfeat.get_name()].tolist()
    assert vals1[0] == 0
    assert vals1[1] == 0
    assert feature_matrix[agg_feat.get_name()].tolist() == [5, 1]
def test_approx_base_feature_is_also_first_class_feature(pd_es):
    """A feature that is both requested directly (agg_feat) and used as the
    base of an approximated direct feature must be computed correctly in both
    roles.
    """
    log_to_products = DirectFeature(Feature(pd_es["products"].ww["rating"]), "log")
    # This should still be computed properly
    agg_feat = Feature(log_to_products, parent_dataframe_name="sessions", primitive=Min)
    customer_agg_feat = Feature(
        agg_feat,
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    # This is to be approximated
    sess_to_cust = DirectFeature(customer_agg_feat, "sessions")
    times = [datetime(2011, 4, 9, 10, 31, 19), datetime(2011, 4, 9, 11, 0, 0)]
    cutoff_time = pd.DataFrame({"time": times, "instance_id": [0, 2]})
    feature_matrix = calculate_feature_matrix(
        [sess_to_cust, agg_feat],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_time,
    )
    vals1 = feature_matrix[sess_to_cust.get_name()].tolist()
    assert vals1 == [8.5, 7]
    vals2 = feature_matrix[agg_feat.get_name()].tolist()
    assert vals2 == [4, 1.5]
def test_approximate_time_split_returns_the_same_result(pd_es):
    """Calculating all cutoff rows at once must equal concatenating the
    results of calculating each cutoff row separately, even when the rows'
    indexes differ.
    """
    agg_feat = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    agg_feat2 = Feature(agg_feat, parent_dataframe_name="customers", primitive=Sum)
    dfeat = DirectFeature(agg_feat2, "sessions")
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-09 10:07:30"),
                pd.Timestamp("2011-04-09 10:07:40"),
            ],
            "instance_id": [0, 0],
        },
    )
    feature_matrix_at_once = calculate_feature_matrix(
        [dfeat, agg_feat],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_df,
    )
    divided_matrices = []
    separate_cutoff = [cutoff_df.iloc[0:1], cutoff_df.iloc[1:]]
    # Make sure indexes are different
    # Note that this step is unnecessary and done to showcase the issue here
    separate_cutoff[0].index = [0]
    separate_cutoff[1].index = [1]
    for ct in separate_cutoff:
        fm = calculate_feature_matrix(
            [dfeat, agg_feat],
            pd_es,
            approximate=Timedelta(10, "s"),
            cutoff_time=ct,
        )
        divided_matrices.append(fm)
    feature_matrix_from_split = pd.concat(divided_matrices)
    assert feature_matrix_from_split.shape == feature_matrix_at_once.shape
    # element-wise comparison that treats NaN == NaN as equal
    for i1, i2 in zip(feature_matrix_at_once.index, feature_matrix_from_split.index):
        assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
    for c in feature_matrix_from_split:
        for i1, i2 in zip(feature_matrix_at_once[c], feature_matrix_from_split[c]):
            assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
def test_approximate_returns_correct_empty_default_values(pd_es):
    """A cutoff before any data exists yields the Count default of 0, while a
    later cutoff for the same instance picks up the real value."""
    customer_count = Feature(
        pd_es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    session_dfeat = DirectFeature(customer_count, "sessions")
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-08 11:00:00"),
                pd.Timestamp("2011-04-09 11:00:00"),
            ],
            "instance_id": [0, 0],
        },
    )
    fm = calculate_feature_matrix(
        [session_dfeat],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_df,
    )
    assert fm[session_dfeat.get_name()].tolist() == [0, 10]
def test_approximate_child_aggs_handled_correctly(pd_es):
    """Adding a non-approximated child aggregation (agg_feat_2) to the request
    must not change the approximated direct feature's values.
    """
    agg_feat = Feature(
        pd_es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dfeat = DirectFeature(agg_feat, "customers")
    agg_feat_2 = Feature(
        pd_es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-08 10:30:00"),
                pd.Timestamp("2011-04-09 10:30:06"),
            ],
            "instance_id": [0, 0],
        },
    )
    fm = calculate_feature_matrix(
        [dfeat],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_df,
    )
    fm_2 = calculate_feature_matrix(
        [dfeat, agg_feat_2],
        pd_es,
        approximate=Timedelta(10, "s"),
        cutoff_time=cutoff_df,
    )
    assert fm[dfeat.get_name()].tolist() == [2, 3]
    assert fm_2[agg_feat_2.get_name()].tolist() == [0, 5]
def test_cutoff_time_naming(es):
    """The id column may be named "instance_id" or the target index name (same
    result either way); any other id or time column name must raise an
    AttributeError.
    """
    agg_feat = Feature(
        es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dfeat = DirectFeature(agg_feat, "customers")
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-08 10:30:00"),
                pd.Timestamp("2011-04-09 10:30:06"),
            ],
            "instance_id": [0, 0],
        },
    )
    cutoff_df_index_name = cutoff_df.rename(columns={"instance_id": "id"})
    cutoff_df_wrong_index_name = cutoff_df.rename(columns={"instance_id": "wrong_id"})
    cutoff_df_wrong_time_name = cutoff_df.rename(columns={"time": "cutoff_time"})
    fm1 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
    fm1 = to_pandas(fm1, index="id", sort_index=True)
    fm2 = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_index_name)
    fm2 = to_pandas(fm2, index="id", sort_index=True)
    assert all((fm1 == fm2.values).values)
    error_text = (
        "Cutoff time DataFrame must contain a column with either the same name"
        ' as the target dataframe index or a column named "instance_id"'
    )
    with pytest.raises(AttributeError, match=error_text):
        calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_index_name)
    time_error_text = (
        "Cutoff time DataFrame must contain a column with either the same name"
        ' as the target dataframe time_index or a column named "time"'
    )
    with pytest.raises(AttributeError, match=time_error_text):
        calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df_wrong_time_name)
# TODO: order doesn't match, but output matches
def test_cutoff_time_extra_columns(es):
    """Extra cutoff-time columns (e.g. labels) are appended to the end of the
    feature matrix in the original row order.
    """
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Distributed result not ordered")
    agg_feat = Feature(
        es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dfeat = DirectFeature(agg_feat, "customers")
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-09 10:30:06"),
                pd.Timestamp("2011-04-09 10:30:03"),
                pd.Timestamp("2011-04-08 10:30:00"),
            ],
            "instance_id": [0, 1, 0],
            "label": [True, True, False],
        },
        columns=["time", "instance_id", "label"],
    )
    fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
    # check column was added to end of matrix
    assert "label" == fm.columns[-1]
    assert (fm["label"].values == cutoff_df["label"].values).all()
def test_cutoff_time_extra_columns_approximate(pd_es):
    """Extra cutoff-time columns are carried through unchanged, in the
    original row order, when approximate is used."""
    region_count = Feature(
        pd_es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dfeat = DirectFeature(region_count, "customers")
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-09 10:30:06"),
                pd.Timestamp("2011-04-09 10:30:03"),
                pd.Timestamp("2011-04-08 10:30:00"),
            ],
            "instance_id": [0, 1, 0],
            "label": [True, True, False],
        },
        columns=["time", "instance_id", "label"],
    )
    fm = calculate_feature_matrix(
        [dfeat],
        pd_es,
        cutoff_time=cutoff_df,
        approximate="2 days",
    )
    assert "label" in fm.columns
    assert (fm["label"].values == cutoff_df["label"].values).all()
def test_cutoff_time_extra_columns_same_name(es):
    """An extra cutoff-time column whose name collides with a requested
    feature's name must keep its own (pass-through) values in the result.
    """
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Distributed result not ordered")
    agg_feat = Feature(
        es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dfeat = DirectFeature(agg_feat, "customers")
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-09 10:30:06"),
                pd.Timestamp("2011-04-09 10:30:03"),
                pd.Timestamp("2011-04-08 10:30:00"),
            ],
            "instance_id": [0, 1, 0],
            "régions.COUNT(customers)": [False, False, True],
        },
        columns=["time", "instance_id", "régions.COUNT(customers)"],
    )
    fm = calculate_feature_matrix([dfeat], es, cutoff_time=cutoff_df)
    assert (
        fm["régions.COUNT(customers)"].values
        == cutoff_df["régions.COUNT(customers)"].values
    ).all()
def test_cutoff_time_extra_columns_same_name_approximate(pd_es):
    """Same name-collision pass-through as above, but with approximate set."""
    agg_feat = Feature(
        pd_es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dfeat = DirectFeature(agg_feat, "customers")
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-09 10:30:06"),
                pd.Timestamp("2011-04-09 10:30:03"),
                pd.Timestamp("2011-04-08 10:30:00"),
            ],
            "instance_id": [0, 1, 0],
            "régions.COUNT(customers)": [False, False, True],
        },
        columns=["time", "instance_id", "régions.COUNT(customers)"],
    )
    fm = calculate_feature_matrix(
        [dfeat],
        pd_es,
        cutoff_time=cutoff_df,
        approximate="2 days",
    )
    assert (
        fm["régions.COUNT(customers)"].values
        == cutoff_df["régions.COUNT(customers)"].values
    ).all()
def test_instances_after_cutoff_time_removed(es):
    """Instances whose time index falls after a scalar cutoff are dropped."""
    count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    fm = calculate_feature_matrix(
        [count_feat],
        es,
        cutoff_time=datetime(2011, 4, 8),
        cutoff_time_in_index=True,
    )
    fm = to_pandas(fm, index="id", sort_index=True)
    if isinstance(fm.index, pd.MultiIndex):
        actual_ids = [instance for (instance, _) in fm.index]
    else:
        actual_ids = fm.index
    # Customer with id 1 should be removed
    assert set(actual_ids) == {0, 2}
# TODO: Dask and Spark do not keep instance_id after cutoff
def test_instances_with_id_kept_after_cutoff(es):
    """Explicitly requested instance_ids are kept even when their time index
    is after the cutoff.
    """
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail("Distributed result not ordered, missing extra instances")
    property_feature = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    cutoff_time = datetime(2011, 4, 8)
    fm = calculate_feature_matrix(
        [property_feature],
        es,
        instance_ids=[0, 1, 2],
        cutoff_time=cutoff_time,
        cutoff_time_in_index=True,
    )
    # Customer #1 is after cutoff, but since it is included in instance_ids it
    # should be kept.
    actual_ids = (
        [id for (id, _) in fm.index]
        if isinstance(fm.index, pd.MultiIndex)
        else fm.index
    )
    assert set(actual_ids) == set([0, 1, 2])
# TODO: Fails with Dask
# TODO: Fails with Spark
def test_cfm_returns_original_time_indexes(es):
    """With cutoff_time_in_index=True the matrix multi-index must carry the
    original (instance_id, time) pairs in their original order.
    """
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Distributed result not ordered, indexes are lost due to not multiindexing",
        )
    agg_feat = Feature(
        es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dfeat = DirectFeature(agg_feat, "customers")
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-09 10:30:06"),
                pd.Timestamp("2011-04-09 10:30:03"),
                pd.Timestamp("2011-04-08 10:30:00"),
            ],
            "instance_id": [0, 1, 0],
        },
    )
    fm = calculate_feature_matrix(
        [dfeat],
        es,
        cutoff_time=cutoff_df,
        cutoff_time_in_index=True,
    )
    instance_level_vals = fm.index.get_level_values(0).values
    time_level_vals = fm.index.get_level_values(1).values
    assert (instance_level_vals == cutoff_df["instance_id"].values).all()
    assert (time_level_vals == cutoff_df["time"].values).all()
def test_cfm_returns_original_time_indexes_approximate(pd_es):
    """The original (instance_id, time) multi-index must survive approximation,
    whether the cutoffs fall in different bins or in the same bin, and whether
    or not non-approximated aggregations are present.
    """
    agg_feat = Feature(
        pd_es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    dfeat = DirectFeature(agg_feat, "customers")
    agg_feat_2 = Feature(
        pd_es["sessions"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    cutoff_df = pd.DataFrame(
        {
            "time": [
                pd.Timestamp("2011-04-09 10:30:06"),
                pd.Timestamp("2011-04-09 10:30:03"),
                pd.Timestamp("2011-04-08 10:30:00"),
            ],
            "instance_id": [0, 1, 0],
        },
    )
    # approximate, in different windows, no unapproximated aggs
    fm = calculate_feature_matrix(
        [dfeat],
        pd_es,
        cutoff_time=cutoff_df,
        cutoff_time_in_index=True,
        approximate="1 m",
    )
    instance_level_vals = fm.index.get_level_values(0).values
    time_level_vals = fm.index.get_level_values(1).values
    assert (instance_level_vals == cutoff_df["instance_id"].values).all()
    assert (time_level_vals == cutoff_df["time"].values).all()
    # approximate, in different windows, unapproximated aggs
    fm = calculate_feature_matrix(
        [dfeat, agg_feat_2],
        pd_es,
        cutoff_time=cutoff_df,
        cutoff_time_in_index=True,
        approximate="1 m",
    )
    instance_level_vals = fm.index.get_level_values(0).values
    time_level_vals = fm.index.get_level_values(1).values
    assert (instance_level_vals == cutoff_df["instance_id"].values).all()
    assert (time_level_vals == cutoff_df["time"].values).all()
    # approximate, in same window, no unapproximated aggs
    fm2 = calculate_feature_matrix(
        [dfeat],
        pd_es,
        cutoff_time=cutoff_df,
        cutoff_time_in_index=True,
        approximate="2 d",
    )
    instance_level_vals = fm2.index.get_level_values(0).values
    time_level_vals = fm2.index.get_level_values(1).values
    assert (instance_level_vals == cutoff_df["instance_id"].values).all()
    assert (time_level_vals == cutoff_df["time"].values).all()
    # approximate, in same window, unapproximated aggs
    fm3 = calculate_feature_matrix(
        [dfeat, agg_feat_2],
        pd_es,
        cutoff_time=cutoff_df,
        cutoff_time_in_index=True,
        approximate="2 d",
    )
    instance_level_vals = fm3.index.get_level_values(0).values
    time_level_vals = fm3.index.get_level_values(1).values
    assert (instance_level_vals == cutoff_df["instance_id"].values).all()
    assert (time_level_vals == cutoff_df["time"].values).all()
def test_dask_kwargs(pd_es, dask_cluster):
    """Feature calculation run on a Dask cluster (via ``dask_kwargs``) yields the same boolean labels as a local run."""
    # 17 cutoff timestamps spread over two days, one per instance id
    times = (
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)]
    )
    # expected truth values of "value > 10" per instance, in cutoff order
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
    cutoff_time = pd.DataFrame({"time": times, "instance_id": range(17)})
    property_feature = IdentityFeature(pd_es["log"].ww["value"]) > 10
    # point the computation at the fixture cluster's scheduler
    dkwargs = {"cluster": dask_cluster.scheduler.address}
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        entityset=pd_es,
        cutoff_time=cutoff_time,
        verbose=True,
        chunk_size=0.13,
        dask_kwargs=dkwargs,
        approximate="1 hour",
    )
    assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_dask_persisted_es(pd_es, capsys, dask_cluster):
    """A second distributed run against the same cluster reuses the EntitySet already persisted there (verified via stdout)."""
    times = (
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)]
    )
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
    cutoff_time = pd.DataFrame({"time": times, "instance_id": range(17)})
    property_feature = IdentityFeature(pd_es["log"].ww["value"]) > 10
    dkwargs = {"cluster": dask_cluster.scheduler.address}
    # first run persists the EntitySet on the cluster
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        entityset=pd_es,
        cutoff_time=cutoff_time,
        verbose=True,
        chunk_size=0.13,
        dask_kwargs=dkwargs,
        approximate="1 hour",
    )
    assert (feature_matrix[property_feature.get_name()] == labels).values.all()
    # second run should detect and reuse the persisted copy
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        entityset=pd_es,
        cutoff_time=cutoff_time,
        verbose=True,
        chunk_size=0.13,
        dask_kwargs=dkwargs,
        approximate="1 hour",
    )
    captured = capsys.readouterr()
    # captured[0] is stdout; the reuse message is printed during the second run
    assert "Using EntitySet persisted on the cluster as dataset " in captured[0]
    assert (feature_matrix[property_feature.get_name()] == labels).values.all()
class TestCreateClientAndCluster(object):
    """Tests for ``create_client_and_cluster`` using a mocked cluster factory.

    ``get_mock_client_cluster`` is patched in so no real Dask cluster is
    started; the mock returns its arguments, letting the assertions inspect
    exactly what the function would have created.
    """

    def test_user_cluster_as_string(self, monkeypatch):
        """A user-supplied cluster address string is passed through unchanged."""
        monkeypatch.setattr(utils, "get_client_cluster", get_mock_client_cluster)
        # cluster in dask_kwargs case
        client, cluster = create_client_and_cluster(
            n_jobs=2,
            dask_kwargs={"cluster": "tcp://127.0.0.1:54321"},
            entityset_size=1,
        )
        assert cluster == "tcp://127.0.0.1:54321"

    def test_cluster_creation(self, monkeypatch):
        """Worker count and per-worker memory limit are derived from n_jobs, CPU count, and total memory."""
        total_memory = psutil.virtual_memory().total
        monkeypatch.setattr(utils, "get_client_cluster", get_mock_client_cluster)
        try:
            cpus = len(psutil.Process().cpu_affinity())
        except AttributeError:  # pragma: no cover
            # cpu_affinity is not available on all platforms (e.g. macOS)
            cpus = psutil.cpu_count()
        # jobs < tasks case
        client, cluster = create_client_and_cluster(
            n_jobs=2,
            dask_kwargs={},
            entityset_size=1,
        )
        num_workers = min(cpus, 2)
        memory_limit = int(total_memory / float(num_workers))
        assert cluster == (min(cpus, 2), 1, None, memory_limit)
        # jobs > tasks case: worker count is capped at available CPUs with a warning
        match = r".*workers requested, but only .* workers created"
        with pytest.warns(UserWarning, match=match) as record:
            client, cluster = create_client_and_cluster(
                n_jobs=1000,
                dask_kwargs={"diagnostics_port": 8789},
                entityset_size=1,
            )
        assert len(record) == 1
        num_workers = cpus
        memory_limit = int(total_memory / float(num_workers))
        assert cluster == (num_workers, 1, 8789, memory_limit)
        # dask_kwargs sets memory limit explicitly, overriding the computed value
        client, cluster = create_client_and_cluster(
            n_jobs=2,
            dask_kwargs={"diagnostics_port": 8789, "memory_limit": 1000},
            entityset_size=1,
        )
        num_workers = min(cpus, 2)
        assert cluster == (num_workers, 1, 8789, 1000)

    def test_not_enough_memory(self, monkeypatch):
        """Raises only when a single worker cannot hold the EntitySet at all."""
        total_memory = psutil.virtual_memory().total
        monkeypatch.setattr(utils, "get_client_cluster", get_mock_client_cluster)
        # errors if not enough memory for each worker to store the entityset
        with pytest.raises(ValueError, match=""):
            create_client_and_cluster(
                n_jobs=1,
                dask_kwargs={},
                entityset_size=total_memory * 2,
            )
        # does not error even if worker memory is less than 2x entityset size
        create_client_and_cluster(
            n_jobs=1,
            dask_kwargs={},
            entityset_size=total_memory * 0.75,
        )
@pytest.mark.skipif("not dd")
def test_parallel_failure_raises_correct_error(pd_es):
    """Requesting zero workers (``n_jobs=0``) fails fast with an AssertionError."""
    times = (
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)]
    )
    cutoff_time = pd.DataFrame({"time": times, "instance_id": range(17)})
    property_feature = IdentityFeature(pd_es["log"].ww["value"]) > 10
    error_text = "Need at least one worker"
    with pytest.raises(AssertionError, match=error_text):
        calculate_feature_matrix(
            [property_feature],
            entityset=pd_es,
            cutoff_time=cutoff_time,
            verbose=True,
            chunk_size=0.13,
            n_jobs=0,
            approximate="1 hour",
        )
def test_warning_not_enough_chunks(
    pd_es,
    capsys,
    three_worker_dask_cluster,
):  # pragma: no cover
    """A chunk size yielding fewer chunks than workers prints a warning suggesting a smaller chunk size."""
    property_feature = IdentityFeature(pd_es["log"].ww["value"]) > 10
    dkwargs = {"cluster": three_worker_dask_cluster.scheduler.address}
    # chunk_size=0.5 produces at most 2 chunks, fewer than the 3 workers
    calculate_feature_matrix(
        [property_feature],
        entityset=pd_es,
        chunk_size=0.5,
        verbose=True,
        dask_kwargs=dkwargs,
    )
    captured = capsys.readouterr()
    pattern = r"Fewer chunks \([0-9]+\), than workers \([0-9]+\) consider reducing the chunk size"
    assert re.search(pattern, captured.out) is not None
def test_n_jobs():
    """``n_jobs_to_workers`` maps n_jobs to a worker count; negative values count back from the CPU total."""
    try:
        cpus = len(psutil.Process().cpu_affinity())
    except AttributeError:  # pragma: no cover
        # cpu_affinity is not available on all platforms (e.g. macOS)
        cpus = psutil.cpu_count()
    assert n_jobs_to_workers(1) == 1
    # -1 means "all CPUs"
    assert n_jobs_to_workers(-1) == cpus
    assert n_jobs_to_workers(cpus) == cpus
    # most-negative valid value still yields one worker
    assert n_jobs_to_workers((cpus + 1) * -1) == 1
    if cpus > 1:
        assert n_jobs_to_workers(-2) == cpus - 1
    error_text = "Need at least one worker"
    with pytest.raises(AssertionError, match=error_text):
        n_jobs_to_workers(0)
def test_parallel_cutoff_time_column_pass_through(pd_es, dask_cluster):
    """Extra cutoff-time columns (``labels``) survive a distributed calculation and line up with computed features."""
    times = (
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)]
    )
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
    # "labels" is a pass-through column, not consumed by the calculation
    cutoff_time = pd.DataFrame(
        {"time": times, "instance_id": range(17), "labels": labels},
    )
    property_feature = IdentityFeature(pd_es["log"].ww["value"]) > 10
    dkwargs = {"cluster": dask_cluster.scheduler.address}
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        entityset=pd_es,
        cutoff_time=cutoff_time,
        verbose=True,
        dask_kwargs=dkwargs,
        approximate="1 hour",
    )
    # each feature row must agree with the label passed through for it
    assert (
        feature_matrix[property_feature.get_name()] == feature_matrix["labels"]
    ).values.all()
def test_integer_time_index(int_es):
    """Integer cutoff times work with an integer time index and are preserved in the output index."""
    if int_es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask and Spark do not retain time column")
    # skip 18 so the cutoff values are not perfectly contiguous
    times = list(range(8, 18)) + list(range(19, 26))
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
    cutoff_df = pd.DataFrame({"time": times, "instance_id": range(17)})
    property_feature = IdentityFeature(int_es["log"].ww["value"]) > 10
    feature_matrix = calculate_feature_matrix(
        [property_feature],
        int_es,
        cutoff_time=cutoff_df,
        cutoff_time_in_index=True,
    )
    time_level_vals = feature_matrix.index.get_level_values(1).values
    # the output index is sorted by (time, instance_id); mergesort keeps it stable
    sorted_df = cutoff_df.sort_values(["time", "instance_id"], kind="mergesort")
    assert (time_level_vals == sorted_df["time"].values).all()
    assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_integer_time_index_single_cutoff_value(int_es):
    """A single scalar cutoff (int, numpy int, float, or numpy float) is applied to every instance."""
    if int_es.dataframe_type != Library.PANDAS:
        pytest.xfail("Dask and Spark do not retain time column")
    labels = [False] * 3 + [True] * 2 + [False] * 4
    property_feature = IdentityFeature(int_es["log"].ww["value"]) > 10
    # scalar cutoffs in several numeric representations; pd.Series(...)[0]
    # yields the numpy scalar equivalents of the plain Python values
    cutoff_times = [16, pd.Series([16])[0], 16.0, pd.Series([16.0])[0]]
    for cutoff_time in cutoff_times:
        feature_matrix = calculate_feature_matrix(
            [property_feature],
            int_es,
            cutoff_time=cutoff_time,
            cutoff_time_in_index=True,
        )
        time_level_vals = feature_matrix.index.get_level_values(1).values
        # every row shares the single cutoff value
        assert (time_level_vals == [16] * 9).all()
        assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_integer_time_index_datetime_cutoffs(int_es):
    """Datetime cutoffs are rejected when the entityset uses an integer time index."""
    cutoff_df = pd.DataFrame(
        {"time": [datetime.now()] * 17, "instance_id": range(17)},
    )
    feat = IdentityFeature(int_es["log"].ww["value"]) > 10
    expected_msg = (
        "cutoff_time times must be numeric: try casting via pd\\.to_numeric\\(\\)"
    )
    with pytest.raises(TypeError, match=expected_msg):
        calculate_feature_matrix(
            [feat],
            int_es,
            cutoff_time=cutoff_df,
            cutoff_time_in_index=True,
        )
def test_integer_time_index_passes_extra_columns(int_es):
    """Extra cutoff-time columns pass through unchanged with integer times, even when input rows are unordered."""
    # last three times/instances are deliberately out of order
    times = list(range(8, 18)) + list(range(19, 23)) + [25, 24, 23]
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
    instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
    cutoff_df = pd.DataFrame(
        {"time": times, "instance_id": instances, "labels": labels},
    )
    cutoff_df = cutoff_df[["time", "instance_id", "labels"]]
    property_feature = IdentityFeature(int_es["log"].ww["value"]) > 10
    fm = calculate_feature_matrix(
        [property_feature],
        int_es,
        cutoff_time=cutoff_df,
        cutoff_time_in_index=True,
    )
    fm = to_pandas(fm)
    # pass-through label must still be aligned with its instance's feature value
    assert (fm[property_feature.get_name()] == fm["labels"]).all()
def test_integer_time_index_mixed_cutoff(int_es):
    """Mixed-type cutoff time columns are rejected for an integer time index.

    Each variant injects one non-integer value (datetime, arbitrary string,
    date string, numeric string) into an otherwise-integer time column and
    expects a TypeError directing the user to ``pd.to_numeric``.
    """
    times_dt = list(range(8, 17)) + [datetime(2011, 1, 1), 19, 20, 21, 22, 25, 24, 23]
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
    instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
    cutoff_df = pd.DataFrame(
        {"time": times_dt, "instance_id": instances, "labels": labels},
    )
    cutoff_df = cutoff_df[["time", "instance_id", "labels"]]
    property_feature = IdentityFeature(int_es["log"].ww["value"]) > 10
    error_text = "cutoff_time times must be.*try casting via.*"
    # datetime mixed into integer times
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix([property_feature], int_es, cutoff_time=cutoff_df)
    # arbitrary string mixed into integer times
    times_str = list(range(8, 17)) + ["foobar", 19, 20, 21, 22, 25, 24, 23]
    cutoff_df["time"] = times_str
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix([property_feature], int_es, cutoff_time=cutoff_df)
    # date-like string mixed into integer times
    times_date_str = list(range(8, 17)) + ["2018-04-02", 19, 20, 21, 22, 25, 24, 23]
    cutoff_df["time"] = times_date_str
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix([property_feature], int_es, cutoff_time=cutoff_df)
    # numeric string mixed into integer times: the column is still object
    # dtype, so it is rejected rather than silently converted
    times_int_str = list(range(8, 17)) + ["17", 19, 20, 21, 22, 25, 24, 23]
    cutoff_df["time"] = times_int_str
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix([property_feature], int_es, cutoff_time=cutoff_df)
def test_datetime_index_mixed_cutoff(es):
    """Mixed-type cutoff time columns are rejected for a datetime time index."""
    # one integer (17) injected into an otherwise-datetime list
    times = list(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [17]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)],
    )
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [False] * 2 + [True]
    instances = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16, 15, 14]
    cutoff_df = pd.DataFrame(
        {"time": times, "instance_id": instances, "labels": labels},
    )
    cutoff_df = cutoff_df[["time", "instance_id", "labels"]]
    property_feature = IdentityFeature(es["log"].ww["value"]) > 10
    error_text = "cutoff_time times must be.*try casting via.*"
    # integer mixed into datetimes
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix([property_feature], es, cutoff_time=cutoff_df)
    # arbitrary string mixed into datetimes
    times[9] = "foobar"
    cutoff_df["time"] = times
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix([property_feature], es, cutoff_time=cutoff_df)
    # numeric string mixed into datetimes
    times[9] = "17"
    cutoff_df["time"] = times
    with pytest.raises(TypeError, match=error_text):
        calculate_feature_matrix([property_feature], es, cutoff_time=cutoff_df)
# TODO: Dask version fails (feature matrix is empty)
# TODO: Spark version fails (spark groupby agg doesn't support custom functions)
def test_no_data_for_cutoff_time(mock_customer):
    """With no child data before the cutoff, Count falls back to 0 and Max to NaN."""
    if mock_customer.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Dask fails because returned feature matrix is empty; Spark doesn't support custom agg functions",
        )
    es = mock_customer
    cutoff_times = pd.DataFrame(
        {"customer_id": [4], "time": pd.Timestamp("2011-04-08 20:08:13")},
    )
    trans_per_session = Feature(
        es["transactions"].ww["transaction_id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    trans_per_customer = Feature(
        es["transactions"].ww["transaction_id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    max_count = Feature(
        trans_per_session,
        parent_dataframe_name="customers",
        primitive=Max,
    )
    features = [trans_per_customer, max_count]
    fm = calculate_feature_matrix(features, entityset=es, cutoff_time=cutoff_times)
    # due to default values for each primitive
    # count will be 0, but max will nan
    answer = pd.DataFrame(
        {
            trans_per_customer.get_name(): pd.Series([0], dtype="Int64"),
            max_count.get_name(): pd.Series([np.nan], dtype="float"),
        },
    )
    for column in fm.columns:
        pd.testing.assert_series_equal(
            fm[column],
            answer[column],
            check_index=False,
            check_names=False,
        )
# adding missing instances not supported in Dask or Spark
def test_instances_not_in_data(pd_es):
    """Instance ids absent from the data still appear in the output, with all features null."""
    # generate ids guaranteed not to exist in the log dataframe
    last_instance = max(pd_es["log"].index.values)
    instances = list(range(last_instance + 1, last_instance + 11))
    identity_feature = IdentityFeature(pd_es["log"].ww["value"])
    property_feature = identity_feature > 10
    agg_feat = AggregationFeature(
        Feature(pd_es["log"].ww["value"]),
        parent_dataframe_name="sessions",
        primitive=Max,
    )
    direct_feature = DirectFeature(agg_feat, "log")
    features = [identity_feature, property_feature, direct_feature]
    fm = calculate_feature_matrix(features, entityset=pd_es, instance_ids=instances)
    assert all(fm.index.values == instances)
    for column in fm.columns:
        assert fm[column].isnull().all()
    # same expectation when approximation is enabled
    fm = calculate_feature_matrix(
        features,
        entityset=pd_es,
        instance_ids=instances,
        approximate="730 days",
    )
    assert all(fm.index.values == instances)
    for column in fm.columns:
        assert fm[column].isnull().all()
def test_some_instances_not_in_data(pd_es):
    """Rows for missing instances are null while present instances are still calculated correctly."""
    a_time = datetime(2011, 4, 10, 10, 41, 9)  # only valid data
    b_time = datetime(2011, 4, 10, 11, 10, 5)  # some missing data
    c_time = datetime(2011, 4, 10, 12, 0, 0)  # all missing data
    times = [a_time, b_time, a_time, a_time, b_time, b_time] + [c_time] * 4
    # instance ids 12-21; the higher ids do not exist in the log data
    cutoff_time = pd.DataFrame({"instance_id": list(range(12, 22)), "time": times})
    identity_feature = IdentityFeature(pd_es["log"].ww["value"])
    property_feature = identity_feature > 10
    agg_feat = AggregationFeature(
        Feature(pd_es["log"].ww["value"]),
        parent_dataframe_name="sessions",
        primitive=Max,
    )
    direct_feature = DirectFeature(agg_feat, "log")
    features = [identity_feature, property_feature, direct_feature]
    fm = calculate_feature_matrix(features, entityset=pd_es, cutoff_time=cutoff_time)
    # expected per-row values; NaN / pd.NA for the missing instances
    ifeat_answer = pd.Series([0, 7, 14, np.nan] + [np.nan] * 6)
    prop_answer = pd.Series([0, 0, 1, pd.NA, 0] + [pd.NA] * 5, dtype="boolean")
    dfeat_answer = pd.Series([14, 14, 14, np.nan] + [np.nan] * 6)
    assert all(fm.index.values == cutoff_time["instance_id"].values)
    for x, y in zip(fm.columns, [ifeat_answer, prop_answer, dfeat_answer]):
        pd.testing.assert_series_equal(fm[x], y, check_index=False, check_names=False)
    # with approximation, some aggregations are computed at earlier times
    fm = calculate_feature_matrix(
        features,
        entityset=pd_es,
        cutoff_time=cutoff_time,
        approximate="5 seconds",
    )
    dfeat_answer[0] = 7  # approximate calculated before 14 appears
    dfeat_answer[2] = 7  # approximate calculated before 14 appears
    prop_answer[3] = False  # no_unapproximated_aggs code ignores cutoff time
    assert all(fm.index.values == cutoff_time["instance_id"].values)
    for x, y in zip(fm.columns, [ifeat_answer, prop_answer, dfeat_answer]):
        pd.testing.assert_series_equal(fm[x], y, check_index=False, check_names=False)
def test_missing_instances_with_categorical_index(pd_es):
    """A categorical index is preserved even when some requested ids are absent from the data."""
    requested_ids = ["coke zero", "car", 3, "taco clock"]
    feature_defs = dfs(
        entityset=pd_es,
        target_dataframe_name="products",
        features_only=True,
    )
    matrix = calculate_feature_matrix(
        entityset=pd_es,
        features=feature_defs,
        instance_ids=requested_ids,
    )
    # output rows appear in the requested order, on a categorical index
    assert matrix.index.values.to_list() == requested_ids
    assert isinstance(matrix.index, pd.CategoricalIndex)
def test_handle_chunk_size():
    """``_handle_chunk_size`` interprets None, fractions (of total), and absolute chunk sizes."""
    total_size = 100
    # user provides no chunk size
    assert _handle_chunk_size(None, total_size) is None
    # user provides fractional size
    assert _handle_chunk_size(0.1, total_size) == total_size * 0.1
    assert _handle_chunk_size(0.001, total_size) == 1  # rounds up
    assert _handle_chunk_size(0.345, total_size) == 35  # rounds up
    # user provides absolute size
    assert _handle_chunk_size(1, total_size) == 1
    assert _handle_chunk_size(100, total_size) == 100
    # whole-number floats are normalized to int
    assert isinstance(_handle_chunk_size(100.0, total_size), int)
    # test invalid cases
    with pytest.raises(AssertionError, match="Chunk size must be greater than 0"):
        _handle_chunk_size(0, total_size)
    with pytest.raises(AssertionError, match="Chunk size must be greater than 0"):
        _handle_chunk_size(-1, total_size)
def test_chunk_dataframe_groups():
    """``_chunk_dataframe_groups`` splits oversized groups but never merges distinct groups into one chunk."""
    df = pd.DataFrame({"group": [1, 1, 1, 1, 2, 2, 3]})
    grouped = df.groupby("group")
    chunked_grouped = _chunk_dataframe_groups(grouped, 2)
    # test group larger than chunk size gets split up
    first = next(chunked_grouped)
    assert first[0] == 1 and first[1].shape[0] == 2
    second = next(chunked_grouped)
    assert second[0] == 1 and second[1].shape[0] == 2
    # test that equal to and less than chunk size stays together
    third = next(chunked_grouped)
    assert third[0] == 2 and third[1].shape[0] == 2
    fourth = next(chunked_grouped)
    assert fourth[0] == 3 and fourth[1].shape[0] == 1
def test_calls_progress_callback(mock_customer):
    """The progress callback is invoked with updates that sum to 100% for both default and cutoff-time runs."""

    class MockProgressCallback:
        """Records every progress update passed to the callback."""

        def __init__(self):
            self.progress_history = []
            self.total_update = 0
            self.total_progress_percent = 0

        def __call__(self, update, progress_percent, time_elapsed):
            self.total_update += update
            self.total_progress_percent = progress_percent
            self.progress_history.append(progress_percent)

    mock_progress_callback = MockProgressCallback()
    es = mock_customer
    # make sure to calculate features that have different paths to same base feature
    trans_per_session = Feature(
        es["transactions"].ww["transaction_id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    trans_per_customer = Feature(
        es["transactions"].ww["transaction_id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    features = [trans_per_session, Feature(trans_per_customer, "sessions")]
    calculate_feature_matrix(
        features,
        entityset=es,
        progress_callback=mock_progress_callback,
    )
    # second to last entry is the last update from feature calculation
    assert np.isclose(
        mock_progress_callback.progress_history[-2],
        FEATURE_CALCULATION_PERCENTAGE * 100,
    )
    assert np.isclose(mock_progress_callback.total_update, 100.0)
    assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
    # test with cutoff time dataframe
    mock_progress_callback = MockProgressCallback()
    cutoff_time = pd.DataFrame(
        {
            "instance_id": [1, 2, 3],
            "time": [
                pd.to_datetime("2014-01-01 01:00:00"),
                pd.to_datetime("2014-01-01 02:00:00"),
                pd.to_datetime("2014-01-01 03:00:00"),
            ],
        },
    )
    calculate_feature_matrix(
        features,
        entityset=es,
        cutoff_time=cutoff_time,
        progress_callback=mock_progress_callback,
    )
    assert np.isclose(
        mock_progress_callback.progress_history[-2],
        FEATURE_CALCULATION_PERCENTAGE * 100,
    )
    assert np.isclose(mock_progress_callback.total_update, 100.0)
    assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
def test_calls_progress_callback_cluster(pd_mock_customer, dask_cluster):
    """Progress updates also sum to 100% when the calculation runs on a Dask cluster."""

    class MockProgressCallback:
        """Records every progress update passed to the callback."""

        def __init__(self):
            self.progress_history = []
            self.total_update = 0
            self.total_progress_percent = 0

        def __call__(self, update, progress_percent, time_elapsed):
            self.total_update += update
            self.total_progress_percent = progress_percent
            self.progress_history.append(progress_percent)

    mock_progress_callback = MockProgressCallback()
    trans_per_session = Feature(
        pd_mock_customer["transactions"].ww["transaction_id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    trans_per_customer = Feature(
        pd_mock_customer["transactions"].ww["transaction_id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    features = [trans_per_session, Feature(trans_per_customer, "sessions")]
    dkwargs = {"cluster": dask_cluster.scheduler.address}
    calculate_feature_matrix(
        features,
        entityset=pd_mock_customer,
        progress_callback=mock_progress_callback,
        dask_kwargs=dkwargs,
    )
    assert np.isclose(mock_progress_callback.total_update, 100.0)
    assert np.isclose(mock_progress_callback.total_progress_percent, 100.0)
def test_closes_tqdm(es):
    """The tqdm progress bar is closed after both successful and failing verbose runs."""

    class ErrorPrim(TransformPrimitive):
        """A primitive whose function raises an error"""

        name = "error_prim"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = "Numeric"
        compatibility = [Library.PANDAS, Library.DASK, Library.SPARK]

        def get_function(self):
            def error(s):
                raise RuntimeError("This primitive has errored")

            return error

    value = Feature(es["log"].ww["value"])
    property_feature = value > 10
    error_feature = Feature(value, primitive=ErrorPrim)
    # successful run: no tqdm instances should be left open
    calculate_feature_matrix([property_feature], es, verbose=True)
    assert len(tqdm._instances) == 0
    # failing run: the bar must still be closed after the exception
    match = "This primitive has errored"
    with pytest.raises(RuntimeError, match=match):
        calculate_feature_matrix([value, error_feature], es, verbose=True)
    assert len(tqdm._instances) == 0
def test_approximate_with_single_cutoff_warns(pd_es):
    """Using ``approximate`` with a single (or no) cutoff warns, but the result is still correct."""
    features = dfs(
        entityset=pd_es,
        target_dataframe_name="customers",
        features_only=True,
        ignore_dataframes=["cohorts"],
        agg_primitives=["sum"],
    )
    match = (
        "Using approximate with a single cutoff_time value or no cutoff_time "
        "provides no computational efficiency benefit"
    )
    # test warning with single cutoff time
    with pytest.warns(UserWarning, match=match):
        calculate_feature_matrix(
            features,
            pd_es,
            cutoff_time=pd.to_datetime("2020-01-01"),
            approximate="1 day",
        )
    # test warning with no cutoff time
    with pytest.warns(UserWarning, match=match):
        calculate_feature_matrix(features, pd_es, approximate="1 day")
    # check proper handling of approximate
    feature_matrix = calculate_feature_matrix(
        features,
        pd_es,
        cutoff_time=pd.to_datetime("2011-04-09 10:31:30"),
        approximate="1 minute",
    )
    expected_values = [50, 50, 50]
    assert (feature_matrix["régions.SUM(log.value)"] == expected_values).values.all()
def test_calc_feature_matrix_with_cutoff_df_and_instance_ids(es):
    """``instance_ids`` is ignored (with a warning) when a cutoff-time dataframe is also supplied."""
    times = list(
        [datetime(2011, 4, 9, 10, 30, i * 6) for i in range(5)]
        + [datetime(2011, 4, 9, 10, 31, i * 9) for i in range(4)]
        + [datetime(2011, 4, 9, 10, 40, 0)]
        + [datetime(2011, 4, 10, 10, 40, i) for i in range(2)]
        + [datetime(2011, 4, 10, 10, 41, i * 3) for i in range(3)]
        + [datetime(2011, 4, 10, 11, 10, i * 3) for i in range(2)],
    )
    instances = range(17)
    cutoff_time = pd.DataFrame({"time": times, es["log"].ww.index: instances})
    labels = [False] * 3 + [True] * 2 + [False] * 9 + [True] + [False] * 2
    property_feature = Feature(es["log"].ww["value"]) > 10
    match = "Passing 'instance_ids' is valid only if 'cutoff_time' is a single value or None - ignoring"
    with pytest.warns(UserWarning, match=match):
        feature_matrix = calculate_feature_matrix(
            [property_feature],
            es,
            cutoff_time=cutoff_time,
            instance_ids=[1, 3, 5],
            verbose=True,
        )
    feature_matrix = to_pandas(feature_matrix)
    # all 17 instances from the cutoff dataframe are computed, not just [1, 3, 5]
    assert (feature_matrix[property_feature.get_name()] == labels).values.all()
def test_calculate_feature_matrix_returns_default_values(default_value_es):
    """A stacked direct-of-aggregation feature fills in default values where data is missing."""
    per_session_sum = Feature(
        default_value_es["transactions"].ww["value"],
        parent_dataframe_name="sessions",
        primitive=Sum,
    )
    stacked = Feature(per_session_sum, "transactions")
    matrix = calculate_feature_matrix(
        features=[stacked],
        entityset=default_value_es,
    )
    matrix = to_pandas(matrix, index="id", sort_index=True)
    # last row has no session data, so Sum's default of 0.0 is used
    expected = [2.0, 2.0, 1.0, 0.0]
    assert (matrix[stacked.get_name()] == expected).values.all()
def test_dataframes_relationships(dataframes, relationships):
    """Passing raw dataframes/relationships to calculate_feature_matrix matches the dfs result."""
    dfs_matrix, feature_defs = dfs(
        dataframes=dataframes,
        relationships=relationships,
        target_dataframe_name="transactions",
    )
    cfm_matrix = calculate_feature_matrix(
        features=feature_defs,
        dataframes=dataframes,
        relationships=relationships,
    )
    dfs_matrix = to_pandas(dfs_matrix, index="id", sort_index=True)
    cfm_matrix = to_pandas(cfm_matrix, index="id", sort_index=True)
    assert dfs_matrix.equals(cfm_matrix)
def test_no_dataframes(dataframes, relationships):
    """Calling calculate_feature_matrix with neither an entityset nor dataframes raises TypeError."""
    feature_defs = dfs(
        dataframes=dataframes,
        relationships=relationships,
        target_dataframe_name="transactions",
        features_only=True,
    )
    expected_msg = "No dataframes or valid EntitySet provided"
    with pytest.raises(TypeError, match=expected_msg):
        calculate_feature_matrix(features=feature_defs, dataframes=None, relationships=None)
def test_no_relationships(dataframes):
    """With relationships=None, calculate_feature_matrix still matches the dfs result."""
    dfs_matrix, feature_defs = dfs(
        dataframes=dataframes,
        relationships=None,
        target_dataframe_name="transactions",
    )
    cfm_matrix = calculate_feature_matrix(
        features=feature_defs,
        dataframes=dataframes,
        relationships=None,
    )
    dfs_matrix = to_pandas(dfs_matrix, index="id")
    cfm_matrix = to_pandas(cfm_matrix, index="id")
    assert dfs_matrix.equals(cfm_matrix)
def test_cfm_with_invalid_time_index(es):
    """A time index whose type disagrees with the rest of the entityset is rejected."""
    feature_defs = dfs(entityset=es, target_dataframe_name="customers", features_only=True)
    # retype one time index to numeric while the others stay datetime
    es["customers"].ww.set_types(logical_types={"signup_date": "integer"})
    expected_msg = (
        "customers time index is numeric type "
        "which differs from other entityset time indexes"
    )
    with pytest.raises(TypeError, match=expected_msg):
        calculate_feature_matrix(features=feature_defs, entityset=es)
def test_cfm_introduces_nan_values_in_direct_feats(es):
    """Columns gaining NaNs in the feature matrix are upgraded to nullable logical types, without mutating the entityset."""
    es["customers"].ww.set_types(
        logical_types={"age": "Age", "engagement_level": "Integer"},
    )
    age_feat = Feature(es["customers"].ww["age"])
    engagement_feat = Feature(es["customers"].ww["engagement_level"])
    loves_ice_cream_feat = Feature(es["customers"].ww["loves_ice_cream"])
    features = [age_feat, engagement_feat, loves_ice_cream_feat]
    # early cutoff with a single instance forces missing values in the output
    fm = calculate_feature_matrix(
        features=features,
        entityset=es,
        cutoff_time=pd.Timestamp("2010-04-08 04:00"),
        instance_ids=[1],
    )
    # source entityset keeps its non-nullable types...
    assert isinstance(es["customers"].ww.logical_types["age"], Age)
    assert isinstance(es["customers"].ww.logical_types["engagement_level"], Integer)
    assert isinstance(es["customers"].ww.logical_types["loves_ice_cream"], Boolean)
    # ...while the feature matrix uses the nullable equivalents
    assert isinstance(fm.ww.logical_types["age"], AgeNullable)
    assert isinstance(fm.ww.logical_types["engagement_level"], IntegerNullable)
    assert isinstance(fm.ww.logical_types["loves_ice_cream"], BooleanNullable)
def test_feature_origins_present_on_all_fm_cols(pd_es):
    """Every feature-matrix column carries a Woodwork origin of either 'base' or 'engineered'."""

    class MultiCumSum(TransformPrimitive):
        """Test primitive producing three outputs per input column."""

        name = "multi_cum_sum"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 3

        def get_function(self):
            def multi_cum_sum(x):
                return x.cumsum(), x.cummax(), x.cummin()

            return multi_cum_sum

    feature_matrix, _ = dfs(
        entityset=pd_es,
        target_dataframe_name="log",
        trans_primitives=[MultiCumSum],
    )
    for col in feature_matrix.columns:
        origin = feature_matrix.ww[col].ww.origin
        assert origin in ["base", "engineered"]
def test_renamed_features_have_expected_column_names_in_feature_matrix(pd_es):
    """Custom names set via set_feature_names appear as feature-matrix columns, including in stacked features."""

    class MultiCumulative(TransformPrimitive):
        """Test primitive producing three outputs per input column."""

        name = "multi_cum_sum"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        number_output_features = 3

        def get_function(self):
            def multi_cum_sum(x):
                return x.cumsum(), x.cummax(), x.cummin()

            return multi_cum_sum

    multi_output_trans_feat = Feature(
        pd_es["log"].ww["value"],
        primitive=MultiCumulative,
    )
    groupby_trans_feat = GroupByTransformFeature(
        pd_es["log"].ww["value"],
        primitive=MultiCumulative,
        groupby=pd_es["log"].ww["product_id"],
    )
    multi_output_agg_feat = Feature(
        pd_es["log"].ww["product_id"],
        parent_dataframe_name="customers",
        primitive=NMostCommon(n=2),
    )
    # slice out the second output and stack Negate on top of it
    slice = FeatureOutputSlice(multi_output_trans_feat, 1)
    stacked_feat = Feature(slice, primitive=Negate)
    multi_output_trans_names = ["cumulative_sum", "cumulative_max", "cumulative_min"]
    multi_output_trans_feat.set_feature_names(multi_output_trans_names)
    groupby_trans_feat_names = ["grouped_sum", "grouped_max", "grouped_min"]
    groupby_trans_feat.set_feature_names(groupby_trans_feat_names)
    agg_names = ["first_most_common", "second_most_common"]
    multi_output_agg_feat.set_feature_names(agg_names)
    features = [
        multi_output_trans_feat,
        multi_output_agg_feat,
        stacked_feat,
        groupby_trans_feat,
    ]
    feature_matrix = calculate_feature_matrix(entityset=pd_es, features=features)
    expected_names = multi_output_trans_names + agg_names + groupby_trans_feat_names
    for renamed_col in expected_names:
        assert renamed_col in feature_matrix.columns
    # the stacked feature's name is built from the renamed slice output
    expected_stacked_name = "-(cumulative_max)"
    assert expected_stacked_name in feature_matrix.columns
# --- end of featuretools/tests/computational_backend/test_calculate_feature_matrix.py ---
# --- featuretools/tests/computational_backend/test_feature_set_calculator.py ---
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Categorical, Datetime, Double, Integer
from featuretools import (
AggregationFeature,
EntitySet,
Feature,
Timedelta,
calculate_feature_matrix,
)
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator,
)
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import DirectFeature, IdentityFeature
from featuretools.primitives import (
And,
Count,
CumSum,
EqualScalar,
GreaterThanEqualToScalar,
GreaterThanScalar,
LessThanEqualToScalar,
LessThanScalar,
Mean,
Min,
Mode,
Negate,
NMostCommon,
NotEqualScalar,
NumTrue,
Sum,
TimeSinceLast,
Trend,
)
from featuretools.primitives.base import AggregationPrimitive
from featuretools.tests.testing_utils import backward_path, to_pandas
from featuretools.utils import Trie
from featuretools.utils.gen_utils import Library, import_or_none, is_instance
dd = import_or_none("dask.dataframe")
def test_make_identity(es):
    """An identity feature returns the raw column value for the requested instance."""
    identity = IdentityFeature(es["log"].ww["datetime"])
    calc = FeatureSetCalculator(es, time_last=None, feature_set=FeatureSet([identity]))
    result = to_pandas(calc.run(np.array([0])))
    assert result[identity.get_name()][0] == datetime(2011, 4, 9, 10, 30, 0)
def test_make_dfeat(es):
    """A direct feature pulls the parent customer's age onto the session row."""
    direct = DirectFeature(
        Feature(es["customers"].ww["age"]),
        child_dataframe_name="sessions",
    )
    calc = FeatureSetCalculator(es, time_last=None, feature_set=FeatureSet([direct]))
    result = to_pandas(calc.run(np.array([0])))
    assert result[direct.get_name()][0] == 33
def test_make_agg_feat_of_identity_column(es):
    """Sum of log values aggregated up to the session level."""
    session_sum = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="sessions",
        primitive=Sum,
    )
    calc = FeatureSetCalculator(
        es,
        time_last=None,
        feature_set=FeatureSet([session_sum]),
    )
    result = to_pandas(calc.run(np.array([0])))
    assert result[session_sum.get_name()][0] == 50
# full_dataframe not supported with Dask
def test_full_dataframe_trans_of_agg(pd_es):
    """A full-dataframe transform (CumSum) stacked on an aggregation computes across all rows, not just the requested one."""
    agg_feat = Feature(
        pd_es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    trans_feat = Feature(agg_feat, primitive=CumSum)
    feature_set = FeatureSet([trans_feat])
    calculator = FeatureSetCalculator(pd_es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([1]))
    # cumulative sum over customer sums, evaluated at customer 1
    v = df[trans_feat.get_name()].values[0]
    assert v == 82
def test_full_dataframe_error_dask(dask_es):
    """Full-dataframe primitives such as CumSum are rejected on Dask entitysets."""
    customer_sum = Feature(
        dask_es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    cumulative = Feature(customer_sum, primitive=CumSum)
    calc = FeatureSetCalculator(
        dask_es,
        time_last=None,
        feature_set=FeatureSet([cumulative]),
    )
    expected_msg = "Cannot use primitives that require full dataframe with Dask"
    with pytest.raises(ValueError, match=expected_msg):
        calc.run(np.array([1]))
def test_make_agg_feat_of_identity_index_column(es):
    """Count can aggregate the child dataframe's own index column."""
    agg_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    feature_set = FeatureSet([agg_feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = to_pandas(calculator.run(np.array([0])))
    v = df[agg_feat.get_name()][0]
    # Session 0 has 5 log rows.
    assert v == 5
def test_make_agg_feat_where_count(es):
    """Count with a where clause should only count rows matching the condition."""
    agg_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        where=IdentityFeature(es["log"].ww["product_id"]) == "coke zero",
        primitive=Count,
    )
    feature_set = FeatureSet([agg_feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = to_pandas(calculator.run(np.array([0])))
    v = df[agg_feat.get_name()][0]
    # 3 of session 0's log rows have product_id "coke zero".
    assert v == 3
def test_make_agg_feat_using_prev_time(es):
    """A time-based use_previous window should limit counting to recent rows."""
    agg_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        use_previous=Timedelta(10, "s"),
        primitive=Count,
    )
    feature_set = FeatureSet([agg_feat])
    # With time_last at 10:30:10, two log rows fall in the trailing 10 seconds.
    calculator = FeatureSetCalculator(
        es,
        time_last=datetime(2011, 4, 9, 10, 30, 10),
        feature_set=feature_set,
    )
    df = to_pandas(calculator.run(np.array([0])))
    v = df[agg_feat.get_name()][0]
    assert v == 2
    # Moving time_last to 10:30:30 leaves only one row in the window.
    calculator = FeatureSetCalculator(
        es,
        time_last=datetime(2011, 4, 9, 10, 30, 30),
        feature_set=feature_set,
    )
    df = to_pandas(calculator.run(np.array([0])))
    v = df[agg_feat.get_name()][0]
    assert v == 1
def test_make_agg_feat_using_prev_n_events(es):
    """Observation-based use_previous windows should limit the rows aggregated.

    Also verifies that two features differing only in window size are given
    distinct generated names.
    """
    if es.dataframe_type != Library.PANDAS:
        # Fixed typo in the skip reason ("Distrubuted" -> "Distributed").
        pytest.xfail("Distributed entitysets do not support use_previous")
    agg_feat_1 = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="sessions",
        use_previous=Timedelta(1, "observations"),
        primitive=Min,
    )
    agg_feat_2 = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="sessions",
        use_previous=Timedelta(3, "observations"),
        primitive=Min,
    )
    assert (
        agg_feat_1.get_name() != agg_feat_2.get_name()
    ), "Features should have different names based on use_previous"
    feature_set = FeatureSet([agg_feat_1, agg_feat_2])
    calculator = FeatureSetCalculator(
        es,
        time_last=datetime(2011, 4, 9, 10, 30, 6),
        feature_set=feature_set,
    )
    df = calculator.run(np.array([0]))
    # time_last is included by default
    v1 = df[agg_feat_1.get_name()][0]
    v2 = df[agg_feat_2.get_name()][0]
    assert v1 == 5
    assert v2 == 0
    # Later time_last: the trailing 1- and 3-observation windows now see
    # different rows, so the minimums change.
    calculator = FeatureSetCalculator(
        es,
        time_last=datetime(2011, 4, 9, 10, 30, 30),
        feature_set=feature_set,
    )
    df = calculator.run(np.array([0]))
    v1 = df[agg_feat_1.get_name()][0]
    v2 = df[agg_feat_2.get_name()][0]
    assert v1 == 20
    assert v2 == 10
def test_make_agg_feat_multiple_dtypes(es):
    """Two where-filtered aggregations with different output dtypes (Count and
    Mode) can be computed together in one feature set."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Currently no Dask or Spark compatible agg prims that use multiple dtypes",
        )
    compare_prod = IdentityFeature(es["log"].ww["product_id"]) == "coke zero"
    agg_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        where=compare_prod,
        primitive=Count,
    )
    agg_feat2 = Feature(
        es["log"].ww["product_id"],
        parent_dataframe_name="sessions",
        where=compare_prod,
        primitive=Mode,
    )
    feature_set = FeatureSet([agg_feat, agg_feat2])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0]))
    v = df[agg_feat.get_name()][0]
    v2 = df[agg_feat2.get_name()][0]
    assert v == 3
    assert v2 == "coke zero"
def test_make_agg_feat_where_different_identity_feat(es):
    """Count with each scalar comparison primitive (<, >, <=, >=, ==, !=) as the
    where condition should filter the counted rows accordingly."""
    feats = []
    where_cmps = [
        LessThanScalar,
        GreaterThanScalar,
        LessThanEqualToScalar,
        GreaterThanEqualToScalar,
        EqualScalar,
        NotEqualScalar,
    ]
    # One Count feature per comparison primitive, each comparing value to 10.0.
    for where_cmp in where_cmps:
        feats.append(
            Feature(
                es["log"].ww["id"],
                parent_dataframe_name="sessions",
                where=Feature(
                    es["log"].ww["value"],
                    primitive=where_cmp(10.0),
                ),
                primitive=Count,
            ),
        )
    df = calculate_feature_matrix(
        entityset=es,
        features=feats,
        instance_ids=[0, 1, 2, 3],
    )
    df = to_pandas(df, index="id", sort_index=True)
    # Expected counts for sessions 0-3 under each comparison against 10.0.
    for i, where_cmp in enumerate(where_cmps):
        name = feats[i].get_name()
        instances = df[name]
        v0, v1, v2, v3 = instances[0:4]
        if where_cmp == LessThanScalar:
            assert v0 == 2
            assert v1 == 4
            assert v2 == 1
            assert v3 == 2
        elif where_cmp == GreaterThanScalar:
            assert v0 == 2
            assert v1 == 0
            assert v2 == 0
            assert v3 == 0
        elif where_cmp == LessThanEqualToScalar:
            assert v0 == 3
            assert v1 == 4
            assert v2 == 1
            assert v3 == 2
        elif where_cmp == GreaterThanEqualToScalar:
            assert v0 == 3
            assert v1 == 0
            assert v2 == 0
            assert v3 == 0
        elif where_cmp == EqualScalar:
            assert v0 == 1
            assert v1 == 0
            assert v2 == 0
            assert v3 == 0
        elif where_cmp == NotEqualScalar:
            assert v0 == 4
            assert v1 == 4
            assert v2 == 1
            assert v3 == 2
def test_make_agg_feat_of_grandchild_dataframe(es):
    """Aggregation can skip a level and roll grandchild rows up to a grandparent."""
    log_count = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    calc = FeatureSetCalculator(
        es,
        time_last=None,
        feature_set=FeatureSet([log_count]),
    )
    result = to_pandas(calc.run(np.array([0])), index="id")
    # Customer 0 has 10 log rows across all of their sessions.
    assert result[log_count.get_name()].values[0] == 10
def test_make_agg_feat_where_count_feat(es):
    """
    Feature we're creating is:
    Number of sessions for each customer where the
    session contains more than one log row
    """
    log_count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    feat = Feature(
        es["sessions"].ww["id"],
        parent_dataframe_name="customers",
        where=log_count_feat > 1,
        primitive=Count,
    )
    feature_set = FeatureSet([feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0, 1]))
    df = to_pandas(df, index="id", sort_index=True)
    name = feat.get_name()
    instances = df[name]
    v0, v1 = instances[0:2]
    # Both customers have exactly 2 sessions with more than one log row.
    assert v0 == 2
    assert v1 == 2
def test_make_compare_feat(es):
    """
    Feature we're creating is:
    Whether each session has more log rows than the
    customer's mean session log count
    """
    log_count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    mean_agg_feat = Feature(
        log_count_feat,
        parent_dataframe_name="customers",
        primitive=Mean,
    )
    # Bring the customer-level mean back down to each session for comparison.
    mean_feat = DirectFeature(mean_agg_feat, child_dataframe_name="sessions")
    feat = log_count_feat > mean_feat
    feature_set = FeatureSet([feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0, 1, 2]))
    df = to_pandas(df, index="id", sort_index=True)
    name = feat.get_name()
    instances = df[name]
    v0, v1, v2 = instances[0:3]
    assert v0
    assert v1
    assert not v2
def test_make_agg_feat_where_count_and_device_type_feat(es):
    """
    Feature we're creating is:
    Number of sessions for each customer where the
    session has exactly one log row AND device_type is 1
    """
    log_count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    compare_count = log_count_feat == 1
    compare_device_type = IdentityFeature(es["sessions"].ww["device_type"]) == 1
    and_feat = Feature([compare_count, compare_device_type], primitive=And)
    feat = Feature(
        es["sessions"].ww["id"],
        parent_dataframe_name="customers",
        where=and_feat,
        primitive=Count,
    )
    feature_set = FeatureSet([feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0]))
    df = to_pandas(df, index="id")
    name = feat.get_name()
    instances = df[name]
    assert instances.values[0] == 1
def test_make_agg_feat_where_count_or_device_type_feat(es):
    """
    Feature we're creating is:
    Number of sessions for each customer where the
    session has more than one log row OR device_type is 1
    """
    log_count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    compare_count = log_count_feat > 1
    compare_device_type = IdentityFeature(es["sessions"].ww["device_type"]) == 1
    or_feat = compare_count.OR(compare_device_type)
    feat = Feature(
        es["sessions"].ww["id"],
        parent_dataframe_name="customers",
        where=or_feat,
        primitive=Count,
    )
    feature_set = FeatureSet([feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0]))
    df = to_pandas(df, index="id", int_index=True)
    name = feat.get_name()
    instances = df[name]
    assert instances.values[0] == 3
def test_make_agg_feat_of_agg_feat(es):
    """Stacked aggregation: customers.Sum(sessions.Count(log))."""
    session_log_count = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="sessions",
        primitive=Count,
    )
    total_logs = Feature(
        session_log_count,
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    calc = FeatureSetCalculator(
        es,
        time_last=None,
        feature_set=FeatureSet([total_logs]),
    )
    result = to_pandas(calc.run(np.array([0])), index="id")
    # Customer 0's sessions contain 10 log rows in total.
    assert result[total_logs.get_name()].values[0] == 10
@pytest.fixture
def pd_df():
    """Five-row frame with three nested categorical grouping columns (e1-e3)."""
    data = {
        "id": list("abcde"),
        "e1": list("hhiij"),
        "e2": list("xxyyx"),
        "e3": list("zzzzz"),
        "val": [1] * 5,
    }
    return pd.DataFrame(data)
@pytest.fixture
def dd_df(pd_df):
    """Dask version of pd_df; skipped when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    return dd.from_pandas(pd_df, npartitions=2)
@pytest.fixture
def spark_df(pd_df):
    """Spark version of pd_df; skipped when Spark is not installed."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    return ps.from_pandas(pd_df)
@pytest.fixture(params=["pd_df", "dd_df", "spark_df"])
def df(request):
    """Parametrized fixture dispatching to the pandas/Dask/Spark frame."""
    return request.getfixturevalue(request.param)
def test_make_3_stacked_agg_feats(df):
    """
    Tests stacking 3 agg features.
    The test specifically uses non numeric indices to test how ancestor columns are handled
    as dataframes are merged together
    """
    if is_instance(df, dd, "DataFrame"):
        # Fixed typo in the xfail reason ("normalize_datdataframe").
        pytest.xfail("normalize_dataframe fails with dask DataFrame")
    es = EntitySet()
    ltypes = {"e1": Categorical, "e2": Categorical, "e3": Categorical, "val": Double}
    es.add_dataframe(
        dataframe=df,
        index="id",
        dataframe_name="e0",
        logical_types=ltypes,
    )
    # Build a three-level hierarchy e0 -> e1 -> e2 -> e3 by repeated normalization.
    es.normalize_dataframe(
        base_dataframe_name="e0",
        new_dataframe_name="e1",
        index="e1",
        additional_columns=["e2", "e3"],
    )
    es.normalize_dataframe(
        base_dataframe_name="e1",
        new_dataframe_name="e2",
        index="e2",
        additional_columns=["e3"],
    )
    es.normalize_dataframe(
        base_dataframe_name="e2",
        new_dataframe_name="e3",
        index="e3",
    )
    sum_1 = Feature(es["e0"].ww["val"], parent_dataframe_name="e1", primitive=Sum)
    sum_2 = Feature(sum_1, parent_dataframe_name="e2", primitive=Sum)
    sum_3 = Feature(sum_2, parent_dataframe_name="e3", primitive=Sum)
    feature_set = FeatureSet([sum_3])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array(["z"]))
    # All five "val" entries (each 1) roll up to the single "z" row of e3.
    v = df[sum_3.get_name()][0]
    assert v == 5
def test_make_dfeat_of_agg_feat_on_self(es):
    """
    The graph looks like this:
    R       R = Regions, a parent of customers
    |
    C       C = Customers, the dataframe we're trying to predict on
    |
    etc.
    We're trying to calculate a DFeat from C to R on an agg_feat of R on C.
    """
    customer_count_feat = Feature(
        es["customers"].ww["id"],
        parent_dataframe_name="régions",
        primitive=Count,
    )
    num_customers_feat = DirectFeature(
        customer_count_feat,
        child_dataframe_name="customers",
    )
    feature_set = FeatureSet([num_customers_feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0]))
    df = to_pandas(df, index="id")
    v = df[num_customers_feat.get_name()].values[0]
    # Customer 0's région contains 3 customers.
    assert v == 3
def test_make_dfeat_of_agg_feat_through_parent(es):
    """
    The graph looks like this:
    R       C = Customers, the dataframe we're trying to predict on
    / \\      R = Regions, a parent of customers
    S   C    S = Stores, a child of regions
    |
    etc.
    We're trying to calculate a DFeat from C to R on an agg_feat of R on S.
    """
    store_id_feat = IdentityFeature(es["stores"].ww["id"])
    store_count_feat = Feature(
        store_id_feat,
        parent_dataframe_name="régions",
        primitive=Count,
    )
    num_stores_feat = DirectFeature(store_count_feat, child_dataframe_name="customers")
    feature_set = FeatureSet([num_stores_feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0]))
    df = to_pandas(df, index="id")
    v = df[num_stores_feat.get_name()].values[0]
    # Customer 0's région contains 3 stores.
    assert v == 3
def test_make_deep_agg_feat_of_dfeat_of_agg_feat(es):
    """
    The graph looks like this (higher implies parent):
    C       C = Customers, the dataframe we're trying to predict on
    |       S = Sessions, a child of Customers
    P   S   L = Log, a child of both Sessions and Products
    \\ /    P = Products, a parent of Log which is not a descendent of customers
    L
    We're trying to calculate a DFeat from L to P on an agg_feat of P on L, and
    then aggregate it with another agg_feat of C on L.
    """
    log_count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="products",
        primitive=Count,
    )
    product_purchases_feat = DirectFeature(log_count_feat, child_dataframe_name="log")
    purchase_popularity = Feature(
        product_purchases_feat,
        parent_dataframe_name="customers",
        primitive=Mean,
    )
    feature_set = FeatureSet([purchase_popularity])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0]))
    df = to_pandas(df, index="id")
    v = df[purchase_popularity.get_name()].values[0]
    assert v == 38.0 / 10.0
def test_deep_agg_feat_chain(es):
    """
    Agg feat of agg feat:
    region.Mean(customer.Count(Log))
    """
    customer_count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    region_avg_feat = Feature(
        customer_count_feat,
        parent_dataframe_name="régions",
        primitive=Mean,
    )
    feature_set = FeatureSet([region_avg_feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array(["United States"]))
    df = to_pandas(df, index="id")
    v = df[region_avg_feat.get_name()][0]
    # 17 log rows spread over the région's 3 customers.
    assert v == 17 / 3.0
# NMostCommon not supported with Dask or Spark
def test_topn(pd_es):
    """NMostCommon should produce one output column per requested rank."""
    topn = Feature(
        pd_es["log"].ww["product_id"],
        parent_dataframe_name="customers",
        primitive=NMostCommon(n=2),
    )
    feature_set = FeatureSet([topn])
    calculator = FeatureSetCalculator(pd_es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0, 1, 2]))
    true_results = pd.DataFrame(
        [
            ["toothpaste", "coke zero"],
            ["coke zero", "Haribo sugar-free gummy bears"],
            ["taco clock", np.nan],
        ],
    )
    assert [name in df.columns for name in topn.get_feature_names()]
    for i in range(df.shape[0]):
        true = true_results.loc[i]
        actual = df.loc[i]
        if i == 0:
            # coke zero and toothpaste have same number of occurrences,
            # so compare as a set (order is not deterministic)
            assert set(true.values) == set(actual.values)
        else:
            for i1, i2 in zip(true, actual):
                assert (pd.isnull(i1) and pd.isnull(i2)) or (i1 == i2)
# Trend not supported with Dask or Spark
def test_trend(pd_es):
    """Trend over (value, datetime) should match the expected slopes per customer."""
    trend = Feature(
        [Feature(pd_es["log"].ww["value"]), Feature(pd_es["log"].ww["datetime"])],
        parent_dataframe_name="customers",
        primitive=Trend,
    )
    feature_set = FeatureSet([trend])
    calculator = FeatureSetCalculator(pd_es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0, 1, 2]))
    # Customer 2 has too few points for a trend, hence NaN.
    true_results = [-0.812730, 4.870378, np.nan]
    np.testing.assert_almost_equal(
        df[trend.get_name()].tolist(),
        true_results,
        decimal=5,
    )
def test_direct_squared(es):
    """Multiplying a feature by itself should equal squaring the raw values."""
    base = IdentityFeature(es["log"].ww["value"])
    squared = base * base
    calc = FeatureSetCalculator(
        es,
        time_last=None,
        feature_set=FeatureSet([base, squared]),
    )
    result = to_pandas(calc.run(np.array([0, 1, 2])))
    for _, row in result.iterrows():
        assert row[0] * row[0] == row[1]
def test_agg_empty_child(es):
    """Count over an empty child frame should return its default value of 0."""
    customer_count_feat = Feature(
        es["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    feature_set = FeatureSet([customer_count_feat])
    # time last before the customer had any events, so child frame is empty
    calculator = FeatureSetCalculator(
        es,
        time_last=datetime(2011, 4, 8),
        feature_set=feature_set,
    )
    df = to_pandas(calculator.run(np.array([0])), index="id")
    assert df["COUNT(log)"].iloc[0] == 0
def test_diamond_entityset(diamond_es):
    """Sums along two different backward paths from regions to transactions
    (via customers vs. via stores) should stay distinct."""
    es = diamond_es
    amount = IdentityFeature(es["transactions"].ww["amount"])
    path = backward_path(es, ["regions", "customers", "transactions"])
    through_customers = AggregationFeature(
        amount,
        "regions",
        primitive=Sum,
        relationship_path=path,
    )
    path = backward_path(es, ["regions", "stores", "transactions"])
    through_stores = AggregationFeature(
        amount,
        "regions",
        primitive=Sum,
        relationship_path=path,
    )
    feature_set = FeatureSet([through_customers, through_stores])
    calculator = FeatureSetCalculator(
        es,
        time_last=datetime(2011, 4, 8),
        feature_set=feature_set,
    )
    df = calculator.run(np.array([0, 1, 2]))
    df = to_pandas(df, index="id", sort_index=True)
    assert (df["SUM(stores.transactions.amount)"] == [94, 261, 128]).all()
    assert (df["SUM(customers.transactions.amount)"] == [72, 411, 0]).all()
def test_two_relationships_to_single_dataframe(games_es):
    """Two relationships between the same pair of dataframes (home/away team)
    should be disambiguated by their explicit relationship paths."""
    es = games_es
    home_team, away_team = es.relationships
    path = RelationshipPath([(False, home_team)])
    mean_at_home = AggregationFeature(
        Feature(es["games"].ww["home_team_score"]),
        "teams",
        relationship_path=path,
        primitive=Mean,
    )
    path = RelationshipPath([(False, away_team)])
    mean_at_away = AggregationFeature(
        Feature(es["games"].ww["away_team_score"]),
        "teams",
        relationship_path=path,
        primitive=Mean,
    )
    home_team_mean = DirectFeature(mean_at_home, "games", relationship=home_team)
    away_team_mean = DirectFeature(mean_at_away, "games", relationship=away_team)
    feature_set = FeatureSet([home_team_mean, away_team_mean])
    calculator = FeatureSetCalculator(
        es,
        time_last=datetime(2011, 8, 28),
        feature_set=feature_set,
    )
    df = calculator.run(np.array(range(3)))
    df = to_pandas(df, index="id", sort_index=True)
    assert (df[home_team_mean.get_name()] == [1.5, 1.5, 2.5]).all()
    assert (df[away_team_mean.get_name()] == [1, 0.5, 2]).all()
@pytest.fixture
def pd_parent_child():
    """Single-row parent frame paired with a three-row time-indexed child frame."""
    parent_df = pd.DataFrame({"id": [1]})
    child_df = pd.DataFrame(
        {
            "id": [1, 2, 3],
            "parent_id": [1] * 3,
            "time_index": pd.date_range(start="1/1/2018", periods=3),
            "value": [10, 5, 2],
            "cat": list("aab"),
        },
    ).astype({"cat": "category"})
    return parent_df, child_df
@pytest.fixture
def dd_parent_child(pd_parent_child):
    """Dask version of pd_parent_child; skipped when Dask is not installed."""
    dd = pytest.importorskip("dask.dataframe", reason="Dask not installed, skipping")
    parent_df, child_df = pd_parent_child
    parent_df = dd.from_pandas(parent_df, npartitions=2)
    child_df = dd.from_pandas(child_df, npartitions=2)
    return (parent_df, child_df)
@pytest.fixture
def spark_parent_child(pd_parent_child):
    """Spark version of pd_parent_child; skipped when Spark is not installed."""
    ps = pytest.importorskip("pyspark.pandas", reason="Spark not installed, skipping")
    parent_df, child_df = pd_parent_child
    parent_df = ps.from_pandas(parent_df)
    child_df = ps.from_pandas(child_df)
    return (parent_df, child_df)
@pytest.fixture(params=["pd_parent_child", "dd_parent_child", "spark_parent_child"])
def parent_child(request):
    """Parametrized fixture dispatching to the pandas/Dask/Spark parent-child frames."""
    return request.getfixturevalue(request.param)
def test_empty_child_dataframe(parent_child):
    """Aggregations over an empty child frame (filtered out by cutoff time or by
    a where clause) should return each primitive's default/missing value.

    Bug fix: the second scenario previously asserted against ``fm`` (the first
    feature matrix) instead of ``fm2``, so the results computed with the later
    cutoff time were never actually checked.
    """
    parent_df, child_df = parent_child
    child_ltypes = {
        "parent_id": Integer,
        "time_index": Datetime,
        "value": Double,
        "cat": Categorical,
    }
    es = EntitySet(id="blah")
    es.add_dataframe(dataframe_name="parent", dataframe=parent_df, index="id")
    es.add_dataframe(
        dataframe_name="child",
        dataframe=child_df,
        index="id",
        time_index="time_index",
        logical_types=child_ltypes,
    )
    es.add_relationship("parent", "id", "child", "parent_id")
    # create regular agg
    count = Feature(
        es["child"].ww["id"],
        parent_dataframe_name="parent",
        primitive=Count,
    )
    # create agg feature that requires multiple arguments
    trend = Feature(
        [Feature(es["child"].ww["value"]), Feature(es["child"].ww["time_index"])],
        parent_dataframe_name="parent",
        primitive=Trend,
    )
    # create multi-output agg feature
    n_most_common = Feature(
        es["child"].ww["cat"],
        parent_dataframe_name="parent",
        primitive=NMostCommon,
    )
    # create aggs with where
    where = Feature(es["child"].ww["value"]) == 1
    count_where = Feature(
        es["child"].ww["id"],
        parent_dataframe_name="parent",
        where=where,
        primitive=Count,
    )
    trend_where = Feature(
        [Feature(es["child"].ww["value"]), Feature(es["child"].ww["time_index"])],
        parent_dataframe_name="parent",
        where=where,
        primitive=Trend,
    )
    n_most_common_where = Feature(
        es["child"].ww["cat"],
        parent_dataframe_name="parent",
        where=where,
        primitive=NMostCommon,
    )
    # Only pandas supports Trend and NMostCommon here.
    if isinstance(parent_df, pd.DataFrame):
        features = [
            count,
            count_where,
            trend,
            trend_where,
            n_most_common,
            n_most_common_where,
        ]
        data = {
            count.get_name(): pd.Series([0], dtype="Int64"),
            count_where.get_name(): pd.Series([0], dtype="Int64"),
            trend.get_name(): pd.Series([np.nan], dtype="float"),
            trend_where.get_name(): pd.Series([np.nan], dtype="float"),
        }
        for name in n_most_common.get_feature_names():
            data[name] = pd.Series([np.nan], dtype="category")
        for name in n_most_common_where.get_feature_names():
            data[name] = pd.Series([np.nan], dtype="category")
    else:
        features = [count, count_where]
        data = {
            count.get_name(): pd.Series([0], dtype="Int64"),
            count_where.get_name(): pd.Series([0], dtype="Int64"),
        }
    answer = pd.DataFrame(data)
    # cutoff time before all rows
    fm = calculate_feature_matrix(
        entityset=es,
        features=features,
        cutoff_time=pd.Timestamp("12/31/2017"),
    )
    fm = to_pandas(fm)
    for column in data.keys():
        pd.testing.assert_series_equal(
            fm[column],
            answer[column],
            check_names=False,
            check_index=False,
        )
    # cutoff time after all rows, but where clause filters all rows
    if isinstance(parent_df, pd.DataFrame):
        features = [count_where, trend_where, n_most_common_where]
        data = {
            count_where.get_name(): pd.Series([0], dtype="Int64"),
            trend_where.get_name(): pd.Series([np.nan], dtype="float"),
        }
        for name in n_most_common_where.get_feature_names():
            data[name] = pd.Series([np.nan], dtype="category")
    else:
        features = [count_where]
        data = {count_where.get_name(): pd.Series([0], dtype="Int64")}
    answer = pd.DataFrame(data)
    fm2 = calculate_feature_matrix(
        entityset=es,
        features=features,
        cutoff_time=pd.Timestamp("1/4/2018"),
    )
    fm2 = to_pandas(fm2)
    for column in data.keys():
        # Check fm2 (not fm) so the second scenario's output is actually verified.
        pd.testing.assert_series_equal(
            fm2[column],
            answer[column],
            check_names=False,
            check_index=False,
        )
def test_with_features_built_from_es_metadata(es):
    """Features defined against an entityset's metadata can be calculated
    against the real entityset."""
    metadata = es.metadata
    agg_feat = Feature(
        metadata["log"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    feature_set = FeatureSet([agg_feat])
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    df = calculator.run(np.array([0]))
    df = to_pandas(df, index="id")
    v = df[agg_feat.get_name()].values[0]
    assert v == 10
# TODO: Fails with Dask and Spark (conflicting aggregation primitives)
def test_handles_primitive_function_name_uniqueness(es):
    """Primitives that share a function or differ only in init args should not
    collide: same primitive with different args, different primitives returning
    the same function, and multiple primitives wrapping np.sum must all produce
    independent, correct columns."""
    if es.dataframe_type != Library.PANDAS:
        pytest.xfail(
            "Fails with Dask and Spark due conflicting aggregation primitive names",
        )
    class SumTimesN(AggregationPrimitive):
        # Parameterized primitive: sum multiplied by a constructor-supplied n.
        name = "sum_times_n"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def __init__(self, n):
            self.n = n
        def get_function(self, agg_type="pandas"):
            def my_function(values):
                return values.sum() * self.n
            return my_function
    # works as expected
    f1 = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=SumTimesN(n=1),
    )
    fm = calculate_feature_matrix(features=[f1], entityset=es)
    value_sum = pd.Series([56, 26, 0])
    assert all(fm[f1.get_name()].sort_index() == value_sum)
    # works as expected
    f2 = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=SumTimesN(n=2),
    )
    fm = calculate_feature_matrix(features=[f2], entityset=es)
    double_value_sum = pd.Series([112, 52, 0])
    assert all(fm[f2.get_name()].sort_index() == double_value_sum)
    # same primitive, same column, different args
    fm = calculate_feature_matrix(features=[f1, f2], entityset=es)
    assert all(fm[f1.get_name()].sort_index() == value_sum)
    assert all(fm[f2.get_name()].sort_index() == double_value_sum)
    # different primitives, same function returned by get_function,
    # different base features
    f3 = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum,
    )
    f4 = Feature(
        es["log"].ww["purchased"],
        parent_dataframe_name="customers",
        primitive=NumTrue,
    )
    fm = calculate_feature_matrix(features=[f3, f4], entityset=es)
    purchased_sum = pd.Series([10, 1, 1])
    assert all(fm[f3.get_name()].sort_index() == value_sum)
    assert all(fm[f4.get_name()].sort_index() == purchased_sum)
    # different primitives, same function returned by get_function,
    # same base feature
    class Sum1(AggregationPrimitive):
        """Sums elements of a numeric or boolean feature."""
        name = "sum1"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        stack_on_self = False
        stack_on_exclude = [Count]
        default_value = 0
        def get_function(self, agg_type="pandas"):
            return np.sum
    class Sum2(AggregationPrimitive):
        """Sums elements of a numeric or boolean feature."""
        name = "sum2"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        stack_on_self = False
        stack_on_exclude = [Count]
        default_value = 0
        def get_function(self, agg_type="pandas"):
            return np.sum
    class Sum3(AggregationPrimitive):
        """Sums elements of a numeric or boolean feature."""
        name = "sum3"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        stack_on_self = False
        stack_on_exclude = [Count]
        default_value = 0
        def get_function(self, agg_type="pandas"):
            return np.sum
    f5 = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum1,
    )
    f6 = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum2,
    )
    f7 = Feature(
        es["log"].ww["value"],
        parent_dataframe_name="customers",
        primitive=Sum3,
    )
    fm = calculate_feature_matrix(features=[f5, f6, f7], entityset=es)
    assert all(fm[f5.get_name()].sort_index() == value_sum)
    assert all(fm[f6.get_name()].sort_index() == value_sum)
    assert all(fm[f7.get_name()].sort_index() == value_sum)
# No order guarantees w/ Dask
def test_returns_order_of_instance_ids(pd_es):
    """Output rows should follow the requested instance id order, not storage order."""
    feature_set = FeatureSet([Feature(pd_es["customers"].ww["age"])])
    calculator = FeatureSetCalculator(pd_es, time_last=None, feature_set=feature_set)
    instance_ids = [0, 1, 2]
    # Sanity check: storage order differs from the requested order.
    assert list(pd_es["customers"]["id"]) != instance_ids
    df = calculator.run(np.array(instance_ids))
    assert list(df.index) == instance_ids
def test_calls_progress_callback(es):
    """Progress callback updates across all feature types should sum to 1."""
    # call with all feature types. make sure progress callback calls sum to 1
    identity = Feature(es["customers"].ww["age"])
    direct = Feature(es["cohorts"].ww["cohort_name"], "customers")
    agg = Feature(
        es["sessions"].ww["id"],
        parent_dataframe_name="customers",
        primitive=Count,
    )
    agg_apply = Feature(
        es["log"].ww["datetime"],
        parent_dataframe_name="customers",
        primitive=TimeSinceLast,
    )  # this feature is handled differently than simple features
    trans = Feature(agg, primitive=Negate)
    trans_full = Feature(agg, primitive=CumSum)
    groupby_trans = Feature(
        agg,
        primitive=CumSum,
        groupby=Feature(es["customers"].ww["cohort"]),
    )
    # Full-dataframe and groupby transforms are pandas-only.
    if es.dataframe_type != Library.PANDAS:
        all_features = [identity, direct, agg, trans]
    else:
        all_features = [
            identity,
            direct,
            agg,
            agg_apply,
            trans,
            trans_full,
            groupby_trans,
        ]
    feature_set = FeatureSet(all_features)
    calculator = FeatureSetCalculator(es, time_last=None, feature_set=feature_set)
    class MockProgressCallback:
        # Accumulates the fractional progress updates it receives.
        def __init__(self):
            self.total = 0
        def __call__(self, update):
            self.total += update
    mock_progress_callback = MockProgressCallback()
    instance_ids = [0, 1, 2]
    calculator.run(np.array(instance_ids), mock_progress_callback)
    assert np.isclose(mock_progress_callback.total, 1)
    # testing again with a time_last with no data
    feature_set = FeatureSet(all_features)
    calculator = FeatureSetCalculator(
        es,
        time_last=pd.Timestamp("1950"),
        feature_set=feature_set,
    )
    mock_progress_callback = MockProgressCallback()
    calculator.run(np.array(instance_ids), mock_progress_callback)
    assert np.isclose(mock_progress_callback.total, 1)
# precalculated_features is only used with approximate
def test_precalculated_features(pd_es):
    """Precalculated features should be read from the supplied trie instead of
    being computed; running without them must hit the erroring primitive."""
    error_msg = (
        "This primitive should never be used because the features are precalculated"
    )
    class ErrorPrim(AggregationPrimitive):
        """A primitive whose function raises an error."""
        name = "error_prim"
        input_types = [ColumnSchema(semantic_tags={"numeric"})]
        return_type = ColumnSchema(semantic_tags={"numeric"})
        def get_function(self, agg_type="pandas"):
            def error(s):
                raise RuntimeError(error_msg)
            return error
    value = Feature(pd_es["log"].ww["value"])
    agg = Feature(value, parent_dataframe_name="sessions", primitive=ErrorPrim)
    agg2 = Feature(agg, parent_dataframe_name="customers", primitive=ErrorPrim)
    direct = Feature(agg2, dataframe_name="sessions")
    # Set up a FeatureSet which knows which features are precalculated.
    precalculated_feature_trie = Trie(default=set, path_constructor=RelationshipPath)
    precalculated_feature_trie.get_node(direct.relationship_path).value.add(
        agg2.unique_name(),
    )
    feature_set = FeatureSet(
        [direct],
        approximate_feature_trie=precalculated_feature_trie,
    )
    # Fake precalculated data.
    values = [0, 1, 2]
    parent_fm = pd.DataFrame({agg2.get_name(): values})
    precalculated_fm_trie = Trie(path_constructor=RelationshipPath)
    precalculated_fm_trie.get_node(direct.relationship_path).value = parent_fm
    calculator = FeatureSetCalculator(
        pd_es,
        feature_set=feature_set,
        precalculated_features=precalculated_fm_trie,
    )
    instance_ids = [0, 2, 3, 5]
    fm = calculator.run(np.array(instance_ids))
    assert list(fm[direct.get_name()]) == [values[0], values[0], values[1], values[2]]
    # Calculating without precalculated features should error.
    with pytest.raises(RuntimeError, match=error_msg):
        # Pass the ids as an ndarray for consistency with every other run() call.
        FeatureSetCalculator(pd_es, feature_set=FeatureSet([direct])).run(
            np.array(instance_ids),
        )
| 38,668 | 28.837191 | 91 | py |
featuretools | featuretools-main/featuretools/tests/computational_backend/test_feature_set.py | from featuretools import (
AggregationFeature,
DirectFeature,
IdentityFeature,
TransformFeature,
primitives,
)
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.entityset.relationship import RelationshipPath
from featuretools.tests.testing_utils import backward_path
from featuretools.utils import Trie
def test_feature_trie_without_needs_full_dataframe(diamond_es):
    """The feature trie should group features by relationship path, with the
    needs-full-dataframe flag False everywhere when no full-frame primitives are used."""
    es = diamond_es
    country_name = IdentityFeature(es["countries"].ww["name"])
    direct_name = DirectFeature(country_name, "regions")
    amount = IdentityFeature(es["transactions"].ww["amount"])
    path_through_customers = backward_path(es, ["regions", "customers", "transactions"])
    through_customers = AggregationFeature(
        amount,
        "regions",
        primitive=primitives.Mean,
        relationship_path=path_through_customers,
    )
    path_through_stores = backward_path(es, ["regions", "stores", "transactions"])
    through_stores = AggregationFeature(
        amount,
        "regions",
        primitive=primitives.Mean,
        relationship_path=path_through_stores,
    )
    customers_to_transactions = backward_path(es, ["customers", "transactions"])
    customers_mean = AggregationFeature(
        amount,
        "customers",
        primitive=primitives.Mean,
        relationship_path=customers_to_transactions,
    )
    negation = TransformFeature(customers_mean, primitives.Negate)
    regions_to_customers = backward_path(es, ["regions", "customers"])
    mean_of_mean = AggregationFeature(
        negation,
        "regions",
        primitive=primitives.Mean,
        relationship_path=regions_to_customers,
    )
    features = [direct_name, through_customers, through_stores, mean_of_mean]
    feature_set = FeatureSet(features)
    trie = feature_set.feature_trie
    # Each node's value is (needs_full_dataframe, full_features, other_features).
    assert trie.value == (False, set(), {f.unique_name() for f in features})
    assert trie.get_node(direct_name.relationship_path).value == (
        False,
        set(),
        {country_name.unique_name()},
    )
    assert trie.get_node(regions_to_customers).value == (
        False,
        set(),
        {negation.unique_name(), customers_mean.unique_name()},
    )
    regions_to_stores = backward_path(es, ["regions", "stores"])
    assert trie.get_node(regions_to_stores).value == (False, set(), set())
    assert trie.get_node(path_through_customers).value == (
        False,
        set(),
        {amount.unique_name()},
    )
    assert trie.get_node(path_through_stores).value == (
        False,
        set(),
        {amount.unique_name()},
    )
def test_feature_trie_with_needs_full_dataframe(diamond_es):
    """Features built on full-dataframe transforms (CumSum) should set the
    needs-full-dataframe flag on the appropriate trie nodes."""
    pd_es = diamond_es
    amount = IdentityFeature(pd_es["transactions"].ww["amount"])
    path_through_customers = backward_path(
        pd_es,
        ["regions", "customers", "transactions"],
    )
    agg = AggregationFeature(
        amount,
        "regions",
        primitive=primitives.Mean,
        relationship_path=path_through_customers,
    )
    # Transform of an aggregation: the flag is needed at the root.
    trans_of_agg = TransformFeature(agg, primitives.CumSum)
    path_through_stores = backward_path(pd_es, ["regions", "stores", "transactions"])
    # Aggregation of a transform: the flag is needed at the transform's dataframe.
    trans = TransformFeature(amount, primitives.CumSum)
    agg_of_trans = AggregationFeature(
        trans,
        "regions",
        primitive=primitives.Mean,
        relationship_path=path_through_stores,
    )
    features = [agg, trans_of_agg, agg_of_trans]
    feature_set = FeatureSet(features)
    trie = feature_set.feature_trie
    assert trie.value == (
        True,
        {agg.unique_name(), trans_of_agg.unique_name()},
        {agg_of_trans.unique_name()},
    )
    assert trie.get_node(path_through_customers).value == (
        True,
        {amount.unique_name()},
        set(),
    )
    # The flag propagates through the intermediate node on the customers path...
    assert trie.get_node(path_through_customers[:1]).value == (True, set(), set())
    assert trie.get_node(path_through_stores).value == (
        True,
        {amount.unique_name(), trans.unique_name()},
        set(),
    )
    # ...but not through the intermediate node on the stores path.
    assert trie.get_node(path_through_stores[:1]).value == (False, set(), set())
def test_feature_trie_with_needs_full_dataframe_direct(es):
    """A full-dataframe transform of a direct feature should mark the parent
    dataframe's node (and the aggregations it pulls in) as needing the full
    dataframe, while the independently requested agg stays unaffected."""
    value = IdentityFeature(es["log"].ww["value"])
    agg = AggregationFeature(value, "sessions", primitive=primitives.Mean)
    agg_of_agg = AggregationFeature(agg, "customers", primitive=primitives.Sum)
    direct = DirectFeature(agg_of_agg, "sessions")
    trans = TransformFeature(direct, primitives.CumSum)
    features = [trans, agg]
    feature_set = FeatureSet(features)
    trie = feature_set.feature_trie
    assert trie.value == (
        True,
        {direct.unique_name(), trans.unique_name()},
        {agg.unique_name()},
    )
    # agg was requested directly, so its own path does not need the full dataframe.
    assert trie.get_node(agg.relationship_path).value == (
        False,
        set(),
        {value.unique_name()},
    )
    parent_node = trie.get_node(direct.relationship_path)
    assert parent_node.value == (True, {agg_of_agg.unique_name()}, set())
    # Everything beneath the direct feature inherits the full-dataframe flag.
    child_through_parent_node = parent_node.get_node(agg_of_agg.relationship_path)
    assert child_through_parent_node.value == (True, {agg.unique_name()}, set())
    assert child_through_parent_node.get_node(agg.relationship_path).value == (
        True,
        {value.unique_name()},
        set(),
    )
def test_feature_trie_ignores_approximate_features(es):
    """Features listed in approximate_feature_trie (and their dependencies)
    must be left out of the feature trie."""
    value = IdentityFeature(es["log"].ww["value"])
    agg = AggregationFeature(value, "sessions", primitive=primitives.Mean)
    agg_of_agg = AggregationFeature(agg, "customers", primitive=primitives.Sum)
    direct = DirectFeature(agg_of_agg, "sessions")
    requested = [direct, agg]
    # Declare agg_of_agg as approximated along the direct feature's path.
    approx_trie = Trie(default=list, path_constructor=RelationshipPath)
    approx_trie.get_node(direct.relationship_path).value = [agg_of_agg]
    feature_set = FeatureSet(requested, approximate_feature_trie=approx_trie)
    trie = feature_set.feature_trie
    # Since agg_of_agg is ignored, neither it nor anything it depends on
    # should appear anywhere below the direct feature's path.
    for _path, (_, _, node_features) in trie.get_node(direct.relationship_path):
        assert not node_features
    assert trie.value == (False, set(), {direct.unique_name(), agg.unique_name()})
    assert trie.get_node(agg.relationship_path).value == (
        False,
        set(),
        {value.unique_name()},
    )
| 6,385 | 32.260417 | 88 | py |
featuretools | featuretools-main/featuretools/tests/computational_backend/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/computational_backend/test_dask_features.py | import pandas as pd
import pytest
from featuretools.tests.testing_utils import make_ecommerce_entityset
def test_tokenize_entityset(pd_es, pd_int_es):
    """Dask tokenization of EntitySets: equal sets hash equally; structural or
    dtype differences change the token."""
    pytest.importorskip("dask", reason="Dask not installed, skipping")
    from dask.base import tokenize
    dupe = make_ecommerce_entityset()
    # check identical entitysets hash to same token
    assert tokenize(pd_es) == tokenize(dupe)
    # not same if product relationship is missing
    productless = make_ecommerce_entityset()
    productless.relationships.pop()
    assert tokenize(pd_es) != tokenize(productless)
    # not same if integer entityset
    assert tokenize(pd_es) != tokenize(pd_int_es)
    # add row to cohorts
    cohorts_df = dupe["cohorts"]
    new_row = pd.DataFrame(
        data={
            "cohort": [2],
            "cohort_name": None,
            "cohort_end": [pd.Timestamp("2011-04-08 12:00:00")],
        },
        columns=["cohort", "cohort_name", "cohort_end"],
        index=[2],
    )
    more_cohorts = pd.concat([cohorts_df, new_row])
    dupe.replace_dataframe(dataframe_name="cohorts", df=more_cohorts)
    # NOTE(review): the token is expected to stay equal even after adding a
    # data row — presumably tokenization covers only schema/metadata, not
    # row contents; confirm against EntitySet's __dask_tokenize__.
    assert tokenize(pd_es) == tokenize(dupe)
| 1,156 | 29.447368 | 70 | py |
featuretools | featuretools-main/featuretools/tests/computational_backend/test_utils.py | import numpy as np
from featuretools import dfs
from featuretools.computational_backends import replace_inf_values
from featuretools.primitives import DivideByFeature, DivideNumericScalar
from featuretools.tests.testing_utils import to_pandas
def test_replace_inf_values(divide_by_zero_es):
    """replace_inf_values swaps +/-inf produced by divide-by-zero features for
    NaN by default, or for a caller-supplied replacement value.

    Fix: the -inf assertion after the custom-value replacement previously
    re-checked ``replaced_fm`` (already asserted above) instead of
    ``custom_value_fm``, leaving the custom replacement's -inf handling
    untested.
    """
    div_by_scalar = DivideNumericScalar(value=0)
    div_by_feature = DivideByFeature(value=1)
    div_by_feature_neg = DivideByFeature(value=-1)
    for primitive in [
        "divide_numeric",
        div_by_scalar,
        div_by_feature,
        div_by_feature_neg,
    ]:
        fm, _ = dfs(
            entityset=divide_by_zero_es,
            target_dataframe_name="zero",
            trans_primitives=[primitive],
        )
        # Each primitive must actually produce an infinity for the test to be
        # meaningful.
        assert np.inf in to_pandas(fm).values or -np.inf in to_pandas(fm).values
        replaced_fm = replace_inf_values(fm)
        replaced_fm = to_pandas(replaced_fm)
        assert np.inf not in replaced_fm.values
        assert -np.inf not in replaced_fm.values
        custom_value_fm = replace_inf_values(fm, replacement_value="custom_val")
        custom_value_fm = to_pandas(custom_value_fm)
        assert np.inf not in custom_value_fm.values
        assert -np.inf not in custom_value_fm.values
        assert "custom_val" in custom_value_fm.values
def test_replace_inf_values_specify_cols(divide_by_zero_es):
    """Only the columns named in ``columns`` have their infinities replaced."""
    primitive = DivideNumericScalar(value=0)
    feature_matrix, _ = dfs(
        entityset=divide_by_zero_es,
        target_dataframe_name="zero",
        trans_primitives=[primitive],
    )
    assert np.inf in to_pandas(feature_matrix["col1 / 0"]).values
    result = to_pandas(replace_inf_values(feature_matrix, columns=["col1 / 0"]))
    # The targeted column is cleaned; the untouched one keeps its infinities.
    assert np.inf not in result["col1 / 0"].values
    assert np.inf in result["col2 / 0"].values
| 1,804 | 35.1 | 80 | py |
featuretools | featuretools-main/featuretools/tests/config_tests/test_config.py | from featuretools import config
def test_get_default_config_does_not_change():
    """Resetting to defaults must discard a value set by the user."""
    saved = config.get_all()
    key = "primitive_data_folder"
    sentinel = "This is an example string"
    config.set({key: sentinel})
    config.set_to_default()
    assert config.get(key) != sentinel
    # Restore the configuration captured at the start so other tests see it.
    config.set(saved)
def test_set_and_get_config():
    """A value stored with config.set is returned by config.get."""
    key = "primitive_data_folder"
    previous = config.get(key)
    sentinel = "This is an example string"
    config.set({key: sentinel})
    assert config.get(key) == sentinel
    # Put the original value back to avoid leaking state across tests.
    config.set({key: previous})
def test_get_all():
    """get_all should expose the full underlying configuration mapping."""
    assert config.get_all() == config._data
| 617 | 19.6 | 46 | py |
featuretools | featuretools-main/featuretools/tests/config_tests/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/selection/test_selection.py | import numpy as np
import pandas as pd
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, NaturalLanguage
from featuretools import EntitySet, Feature, dfs
from featuretools.selection import (
remove_highly_correlated_features,
remove_highly_null_features,
remove_low_information_features,
remove_single_value_features,
)
from featuretools.tests.testing_utils import make_ecommerce_entityset
@pytest.fixture
def feature_matrix():
    """Small matrix covering null, constant, and informative columns."""
    data = {
        "test": [0, 1, 2],
        "no_null": [np.nan, 0, 0],
        "some_null": [np.nan, 0, 0],
        "all_null": [np.nan, np.nan, np.nan],
        "many_value": [1, 2, 3],
        "dup_value": [1, 1, 2],
        "one_value": [1, 1, 1],
    }
    return pd.DataFrame(data)
@pytest.fixture
def test_es(pd_es, feature_matrix):
    """Extend the shared pandas EntitySet with the feature_matrix fixture as a
    "test" dataframe, indexed by its "test" column."""
    pd_es.add_dataframe(dataframe_name="test", dataframe=feature_matrix, index="test")
    return pd_es
# remove low information features not supported in Dask
def test_remove_low_information_feature_names(feature_matrix):
    """Columns carrying no information (all-null or single-valued) are dropped."""
    trimmed = remove_low_information_features(feature_matrix)
    assert trimmed.shape == (3, 5)
    assert "one_value" not in trimmed.columns
    assert "all_null" not in trimmed.columns
# remove low information features not supported in Dask
def test_remove_low_information_features(test_es, feature_matrix):
    """Feature definitions are filtered alongside their matrix columns."""
    feature_defs = [Feature(test_es["test"].ww[col]) for col in test_es["test"].columns]
    fm, kept = remove_low_information_features(feature_matrix, feature_defs)
    assert fm.shape == (3, 5)
    assert len(kept) == 5
    # Every surviving feature still has a matching column in the matrix.
    assert all(f.get_name() in fm.columns for f in kept)
    assert "one_value" not in fm.columns
    assert "all_null" not in fm.columns
def test_remove_highly_null_features():
    """remove_highly_null_features validates its threshold, drops columns whose
    null fraction meets/exceeds it, and filters feature definitions in step."""
    nulls_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "half_nulls": [None, None, 88, 99],
            "all_nulls": [None, None, None, None],
            "quarter": ["a", "b", None, "c"],
            "vals": [True, True, False, False],
        },
    )
    es = EntitySet("data", {"nulls": (nulls_df, "id")})
    es["nulls"].ww.set_types(
        logical_types={"all_nulls": "categorical", "quarter": "categorical"},
    )
    fm, features = dfs(
        entityset=es,
        target_dataframe_name="nulls",
        trans_primitives=["is_null"],
        max_depth=2,
    )
    # Out-of-range thresholds (either side) must be rejected.
    with pytest.raises(
        ValueError,
        match="pct_null_threshold must be a float between 0 and 1, inclusive.",
    ):
        remove_highly_null_features(fm, pct_null_threshold=1.1)
    with pytest.raises(
        ValueError,
        match="pct_null_threshold must be a float between 0 and 1, inclusive.",
    ):
        remove_highly_null_features(fm, pct_null_threshold=-0.1)
    # Default threshold only drops the fully-null column.
    no_thresh = remove_highly_null_features(fm)
    no_thresh_cols = set(no_thresh.columns)
    diff = set(fm.columns) - no_thresh_cols
    assert len(diff) == 1
    assert "all_nulls" not in no_thresh_cols
    # At 0.5 the half-null column is dropped as well.
    half = remove_highly_null_features(fm, pct_null_threshold=0.5)
    half_cols = set(half.columns)
    diff = set(fm.columns) - half_cols
    assert len(diff) == 2
    assert "all_nulls" not in half_cols
    assert "half_nulls" not in half_cols
    # Threshold 0 drops every column containing any null at all.
    no_tolerance = remove_highly_null_features(fm, pct_null_threshold=0)
    no_tolerance_cols = set(no_tolerance.columns)
    diff = set(fm.columns) - no_tolerance_cols
    assert len(diff) == 3
    assert "all_nulls" not in no_tolerance_cols
    assert "half_nulls" not in no_tolerance_cols
    assert "quarter" not in no_tolerance_cols
    # Passing features returns a (matrix, features) pair consistent with the
    # default-threshold result.
    (
        with_features_param,
        with_features_param_features,
    ) = remove_highly_null_features(fm, features)
    assert len(with_features_param_features) == len(no_thresh.columns)
    for i in range(len(with_features_param_features)):
        assert with_features_param_features[i].get_name() == no_thresh.columns[i]
        assert with_features_param.columns[i] == no_thresh.columns[i]
def test_remove_single_value_features():
    """remove_single_value_features drops constant columns, optionally counting
    NaN as its own distinct value, and filters features in step."""
    same_vals_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "all_numeric": [88, 88, 88, 88],
            "with_nan": [1, 1, None, 1],
            "all_nulls": [None, None, None, None],
            "all_categorical": ["a", "a", "a", "a"],
            "all_bools": [True, True, True, True],
            "diff_vals": ["hi", "bye", "bye", "hi"],
        },
    )
    es = EntitySet("data", {"single_vals": (same_vals_df, "id")})
    es["single_vals"].ww.set_types(
        logical_types={
            "all_nulls": "categorical",
            "all_categorical": "categorical",
            "diff_vals": "categorical",
        },
    )
    fm, features = dfs(
        entityset=es,
        target_dataframe_name="single_vals",
        trans_primitives=["is_null"],
        max_depth=2,
    )
    # Default: NaN is not a value, so "with_nan" (one non-null value) is dropped.
    no_params, no_params_features = remove_single_value_features(fm, features)
    no_params_cols = set(no_params.columns)
    assert len(no_params_features) == 2
    assert "IS_NULL(with_nan)" in no_params_cols
    assert "diff_vals" in no_params_cols
    # Counting NaN as a value keeps "with_nan" (two distinct values).
    nan_as_value, nan_as_value_features = remove_single_value_features(
        fm,
        features,
        count_nan_as_value=True,
    )
    nan_cols = set(nan_as_value.columns)
    assert len(nan_as_value_features) == 3
    assert "IS_NULL(with_nan)" in nan_cols
    assert "diff_vals" in nan_cols
    assert "with_nan" in nan_cols
    # Without a features list the matrix alone is returned, matching above.
    without_features_param = remove_single_value_features(fm)
    assert len(no_params.columns) == len(without_features_param.columns)
    for i in range(len(no_params.columns)):
        assert no_params.columns[i] == without_features_param.columns[i]
        assert no_params_features[i].get_name() == without_features_param.columns[i]
def test_remove_highly_correlated_features():
    """remove_highly_correlated_features validates inputs and honors the
    features_to_check / features_to_keep / pct_corr_threshold options."""
    correlated_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "diff_ints": [34, 11, 29, 91],
            "words": ["test", "this is a short sentence", "foo bar", "baz"],
            "corr_words": [4, 24, 7, 3],
            "corr_1": [99, 88, 77, 33],
            "corr_2": [99, 88, 77, 33],
        },
    )
    es = EntitySet(
        "data",
        {"correlated": (correlated_df, "id", None, {"words": NaturalLanguage})},
    )
    fm, _ = dfs(
        entityset=es,
        target_dataframe_name="correlated",
        trans_primitives=["num_characters"],
        max_depth=2,
    )
    # Out-of-range thresholds and unknown feature names must be rejected.
    with pytest.raises(
        ValueError,
        match="pct_corr_threshold must be a float between 0 and 1, inclusive.",
    ):
        remove_highly_correlated_features(fm, pct_corr_threshold=1.1)
    with pytest.raises(
        ValueError,
        match="pct_corr_threshold must be a float between 0 and 1, inclusive.",
    ):
        remove_highly_correlated_features(fm, pct_corr_threshold=-0.1)
    with pytest.raises(
        AssertionError,
        match="feature named not_a_feature is not in feature matrix",
    ):
        remove_highly_correlated_features(fm, features_to_check=["not_a_feature"])
    # Only the listed columns are candidates for removal; others survive even
    # if mutually correlated (corr_1/corr_2).
    to_check = remove_highly_correlated_features(
        fm,
        features_to_check=["corr_words", "NUM_CHARACTERS(words)", "diff_ints"],
    )
    to_check_columns = set(to_check.columns)
    assert len(to_check_columns) == 4
    assert "NUM_CHARACTERS(words)" not in to_check_columns
    assert "corr_1" in to_check_columns
    assert "corr_2" in to_check_columns
    # features_to_keep are exempt from removal.
    to_keep = remove_highly_correlated_features(
        fm,
        features_to_keep=["NUM_CHARACTERS(words)"],
    )
    to_keep_names = set(to_keep.columns)
    assert len(to_keep_names) == 4
    assert "corr_words" in to_keep_names
    assert "NUM_CHARACTERS(words)" in to_keep_names
    assert "corr_2" not in to_keep_names
    # Default threshold removes both correlated duplicates.
    new_fm = remove_highly_correlated_features(fm)
    assert len(new_fm.columns) == 3
    assert "corr_2" not in new_fm.columns
    assert "NUM_CHARACTERS(words)" not in new_fm.columns
    # A lower threshold removes more columns.
    diff_threshold = remove_highly_correlated_features(fm, pct_corr_threshold=0.8)
    diff_threshold_cols = diff_threshold.columns
    assert len(diff_threshold_cols) == 2
    assert "corr_words" in diff_threshold_cols
    assert "diff_ints" in diff_threshold_cols
def test_remove_highly_correlated_features_init_woodwork():
    """The result is identical whether or not the feature matrix has had
    Woodwork initialized on it."""
    df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "diff_ints": [34, 11, 29, 91],
            "words": ["test", "this is a short sentence", "foo bar", "baz"],
            "corr_words": [4, 24, 7, 3],
            "corr_1": [99, 88, 77, 33],
            "corr_2": [99, 88, 77, 33],
        },
    )
    es = EntitySet(
        "data",
        {"correlated": (df, "id", None, {"words": NaturalLanguage})},
    )
    fm, _ = dfs(
        entityset=es,
        target_dataframe_name="correlated",
        trans_primitives=["num_characters"],
        max_depth=2,
    )
    plain_fm = fm.copy()
    woodwork_fm = fm.copy()
    woodwork_fm.ww.init()
    pd.testing.assert_frame_equal(
        remove_highly_correlated_features(plain_fm),
        remove_highly_correlated_features(woodwork_fm),
    )
def test_multi_output_selection():
    """Selection functions handle multi-output features, keeping individual
    output slices while reporting the unsliced parent features."""
    df1 = pd.DataFrame({"id": [0, 1, 2, 3]})
    df2 = pd.DataFrame(
        {
            "first_id": [0, 1, 1, 3],
            "all_nulls": [None, None, None, None],
            "quarter": ["a", "b", None, "c"],
        },
    )
    dataframes = {
        "first": (df1, "id"),
        "second": (df2, "index"),
    }
    relationships = [("first", "id", "second", "first_id")]
    es = EntitySet("data", dataframes, relationships=relationships)
    es["second"].ww.set_types(
        logical_types={"all_nulls": "categorical", "quarter": "categorical"},
    )
    fm, features = dfs(
        entityset=es,
        target_dataframe_name="first",
        trans_primitives=[],
        agg_primitives=["n_most_common"],
        max_depth=2,
    )
    # Only one slice of the multi-output feature carries information.
    multi_output, multi_output_features = remove_single_value_features(fm, features)
    assert multi_output.columns == ["N_MOST_COMMON(second.quarter)[0]"]
    assert len(multi_output_features) == 1
    assert multi_output_features[0].get_name() == multi_output.columns[0]
    # On a larger entityset: the surviving slices of each multi-output feature
    # must all be present in the filtered matrix.
    es = make_ecommerce_entityset()
    fm, features = dfs(
        entityset=es,
        target_dataframe_name="régions",
        trans_primitives=[],
        agg_primitives=["n_most_common"],
        max_depth=2,
    )
    matrix_with_slices, unsliced_features = remove_highly_null_features(fm, features)
    assert len(matrix_with_slices.columns) == 18
    assert len(unsliced_features) == 14
    matrix_columns = set(matrix_with_slices.columns)
    for f in unsliced_features:
        for f_name in f.get_feature_names():
            assert f_name in matrix_columns
def test_remove_highly_correlated_features_on_boolean_cols():
    """Boolean and BooleanNullable columns participate in correlation checks."""
    correlated_df = pd.DataFrame(
        {
            "id": [0, 1, 2, 3],
            "diff_ints": [34, 11, 29, 91],
            "corr_words": [4, 24, 7, 3],
            "bools": [True, True, False, True],
        },
    )
    es = EntitySet(
        "data",
        {"correlated": (correlated_df, "id", None, {"bools": Boolean})},
    )
    feature_matrix, features = dfs(
        entityset=es,
        target_dataframe_name="correlated",
        trans_primitives=["equal"],
        agg_primitives=[],
        max_depth=1,
        return_types=[
            ColumnSchema(logical_type=BooleanNullable),
            ColumnSchema(logical_type=Boolean),
        ],
    )
    # Confirm both boolean logical types are included so that we know we're checking the correct types
    assert {
        ltype.type_string for ltype in feature_matrix.ww.logical_types.values()
    } == {Boolean.type_string, BooleanNullable.type_string}
    to_keep = remove_highly_correlated_features(
        feature_matrix=feature_matrix,
        features=features,
        pct_corr_threshold=0.3,
    )
    # At this low threshold at least one boolean column must be removed.
    assert len(to_keep[0].columns) < len(feature_matrix.columns)
| 12,146 | 31.565684 | 102 | py |
featuretools | featuretools-main/featuretools/tests/selection/__init__.py | 0 | 0 | 0 | py | |
featuretools | featuretools-main/featuretools/tests/feature_discovery/test_convertors.py | from woodwork.logical_types import Double, NaturalLanguage
from featuretools.entityset.entityset import EntitySet
from featuretools.feature_base.feature_base import (
FeatureBase,
IdentityFeature,
TransformFeature,
)
from featuretools.feature_discovery.convertors import (
_convert_feature_to_featurebase,
convert_feature_list_to_featurebase_list,
convert_featurebase_list_to_feature_list,
)
from featuretools.feature_discovery.feature_discovery import (
generate_features_from_primitives,
schema_to_features,
)
from featuretools.feature_discovery.LiteFeature import (
LiteFeature,
)
from featuretools.primitives import Absolute, AddNumeric, Lag
from featuretools.synthesis import dfs
from featuretools.tests.feature_discovery.test_feature_discovery import (
TestMultiOutputPrimitive,
)
from featuretools.tests.testing_utils.generate_fake_dataframe import (
generate_fake_dataframe,
)
def test_convert_featurebase_list_to_feature_list():
    """FeatureBase objects produced by dfs convert to the equivalent set of
    LiteFeatures, including multi-output features linked via related_features."""
    col_defs = [
        ("idx", "Integer", {"index"}),
        ("f_1", "Double"),
        ("f_2", "Double"),
        ("f_3", "NaturalLanguage"),
    ]
    df = generate_fake_dataframe(
        col_defs=col_defs,
    )
    es = EntitySet(id="es")
    es.add_dataframe(df, df.ww.name, index="idx")
    fdefs = dfs(
        entityset=es,
        target_dataframe_name=df.ww.name,
        trans_primitives=[AddNumeric, TestMultiOutputPrimitive],
        features_only=True,
    )
    assert isinstance(fdefs, list)
    assert isinstance(fdefs[0], FeatureBase)
    converted_features = set(convert_featurebase_list_to_feature_list(fdefs))
    # Build the expected LiteFeatures by hand for comparison.
    f1 = LiteFeature("f_1", Double)
    f2 = LiteFeature("f_2", Double)
    f3 = LiteFeature("f_3", NaturalLanguage)
    fadd = LiteFeature(
        name="f_1 + f_2",
        tags={"numeric"},
        primitive=AddNumeric(),
        base_features=[f1, f2],
    )
    fmo0 = LiteFeature(
        name="TEST_MO(f_3)[0]",
        tags={"numeric"},
        primitive=TestMultiOutputPrimitive(),
        base_features=[f3],
        idx=0,
    )
    fmo1 = LiteFeature(
        name="TEST_MO(f_3)[1]",
        tags={"numeric"},
        primitive=TestMultiOutputPrimitive(),
        base_features=[f3],
        idx=1,
    )
    # Multi-output slices must reference each other.
    fmo0.related_features = {fmo1}
    fmo1.related_features = {fmo0}
    orig_features = set([f1, f2, fadd, fmo0, fmo1])
    # Symmetric difference empty <=> converted set matches exactly.
    assert len(orig_features.symmetric_difference(converted_features)) == 0
def test_origin_feature_to_featurebase():
    """An origin LiteFeature converts to an IdentityFeature; aliases carry
    through to the converted feature's name."""
    df = generate_fake_dataframe(
        col_defs=[("idx", "Double", {"index"}), ("f_1", "Double")],
    )
    es = EntitySet(id="test")
    es.add_dataframe(df, df.ww.name)
    matches = [f for f in schema_to_features(df.ww.schema) if f.name == "f_1"]
    feature = matches[0]
    converted = _convert_feature_to_featurebase(feature, df, {})
    assert isinstance(converted, IdentityFeature)
    assert converted.get_name() == "f_1"
    # Aliasing the feature (and renaming the column to match) is honored.
    feature.set_alias("new name")
    df.ww.rename({"f_1": "new name"}, inplace=True)
    converted = _convert_feature_to_featurebase(feature, df, {})
    assert isinstance(converted, IdentityFeature)
    assert converted.get_name() == "new name"
def test_stacked_feature_to_featurebase():
    """A LiteFeature stacked on an origin feature converts to a
    TransformFeature whose base is the origin column; aliases are honored."""
    df = generate_fake_dataframe(
        col_defs=[("idx", "Double", {"index"}), ("f_1", "Double")],
    )
    es = EntitySet(id="test")
    es.add_dataframe(df, df.ww.name)
    origin_features = schema_to_features(df.ww.schema)
    f_1 = [f for f in origin_features if f.name == "f_1"][0]
    features = generate_features_from_primitives([f_1], [Absolute()])
    f_2 = [f for f in features if f.name == "ABSOLUTE(f_1)"][0]
    fb = _convert_feature_to_featurebase(f_2, df, {})
    assert isinstance(fb, TransformFeature)
    assert fb.get_name() == "ABSOLUTE(f_1)"
    assert len(fb.base_features) == 1
    assert fb.base_features[0].get_name() == "f_1"
    # An alias replaces the generated name on conversion.
    f_2.set_alias("f_2")
    fb = _convert_feature_to_featurebase(f_2, df, {})
    assert isinstance(fb, TransformFeature)
    assert fb.get_name() == "f_2"
    assert len(fb.base_features) == 1
    assert fb.base_features[0].get_name() == "f_1"
def test_multi_output_to_featurebase():
    """Multi-output LiteFeatures convert to a single multi-output FeatureBase,
    and converting the list consolidates slices into one entry."""
    df = generate_fake_dataframe(
        col_defs=[
            ("idx", "Double", {"index"}),
            ("f_1", "NaturalLanguage"),
        ],
    )
    es = EntitySet(id="test")
    es.add_dataframe(df, df.ww.name)
    origin_features = schema_to_features(df.ww.schema)
    f_1 = [f for f in origin_features if f.name == "f_1"][0]
    features = generate_features_from_primitives([f_1], [TestMultiOutputPrimitive()])
    lsa_features = [f for f in features if f.get_primitive_name() == "test_mo"]
    assert len(lsa_features) == 2
    # Test Single LiteFeature
    fb = _convert_feature_to_featurebase(lsa_features[0], df, {})
    assert isinstance(fb, TransformFeature)
    assert fb.get_name() == "TEST_MO(f_1)"
    assert len(fb.base_features) == 1
    assert set(fb.get_feature_names()) == set(["TEST_MO(f_1)[0]", "TEST_MO(f_1)[1]"])
    assert fb.base_features[0].get_name() == "f_1"
    # Test that feature gets consolidated
    fb_list = convert_feature_list_to_featurebase_list(lsa_features, df)
    assert len(fb_list) == 1
    assert fb_list[0].get_name() == "TEST_MO(f_1)"
    assert len(fb_list[0].base_features) == 1
    assert set(fb_list[0].get_feature_names()) == set(
        ["TEST_MO(f_1)[0]", "TEST_MO(f_1)[1]"],
    )
    assert fb_list[0].base_features[0].get_name() == "f_1"
    # Aliases on each output slice replace the generated slice names.
    lsa_features[0].set_alias("f_2")
    lsa_features[1].set_alias("f_3")
    fb = _convert_feature_to_featurebase(lsa_features[0], df, {})
    assert isinstance(fb, TransformFeature)
    assert len(fb.base_features) == 1
    assert set(fb.get_feature_names()) == set(["f_2", "f_3"])
    assert fb.base_features[0].get_name() == "f_1"
    # Test that feature gets consolidated
    fb_list = convert_feature_list_to_featurebase_list(lsa_features, df)
    assert len(fb_list) == 1
    assert len(fb_list[0].base_features) == 1
    assert set(fb_list[0].get_feature_names()) == set(["f_2", "f_3"])
    assert fb_list[0].base_features[0].get_name() == "f_1"
def test_stacking_on_multioutput_to_featurebase():
    """Features stacked on individual slices of a multi-output feature convert
    to separate TransformFeatures, with slice aliases reflected in names."""
    col_defs = [
        ("idx", "Double", {"index"}),
        ("t_idx", "Datetime", {"time_index"}),
        ("f_1", "NaturalLanguage"),
    ]
    df = generate_fake_dataframe(
        col_defs=col_defs,
    )
    es = EntitySet(id="test")
    es.add_dataframe(df, df.ww.name)
    origin_features = schema_to_features(df.ww.schema)
    time_index_feature = [f for f in origin_features if f.name == "t_idx"][0]
    f_1 = [f for f in origin_features if f.name == "f_1"][0]
    features = generate_features_from_primitives([f_1], [TestMultiOutputPrimitive()])
    lsa_features = [f for f in features if f.get_primitive_name() == "test_mo"]
    assert len(lsa_features) == 2
    # Lag each multi-output slice against the time index.
    features = generate_features_from_primitives(
        lsa_features + [time_index_feature],
        [Lag(periods=2)],
    )
    lag_features = [f for f in features if f.get_primitive_name() == "lag"]
    assert len(lag_features) == 2
    fb_list = convert_feature_list_to_featurebase_list(lag_features, df)
    assert len(fb_list) == 2
    assert isinstance(fb_list[0], TransformFeature)
    assert set([x.get_name() for x in fb_list]) == set(
        [
            "LAG(TEST_MO(f_1)[0], t_idx, periods=2)",
            "LAG(TEST_MO(f_1)[1], t_idx, periods=2)",
        ],
    )
    # Aliased slice names propagate into the stacked features' names.
    lsa_features[0].set_alias("f_2")
    lsa_features[1].set_alias("f_3")
    features = generate_features_from_primitives(
        lsa_features + [time_index_feature],
        [Lag(periods=2)],
    )
    lag_features = [f for f in features if f.get_primitive_name() == "lag"]
    assert len(lag_features) == 2
    fb_list = convert_feature_list_to_featurebase_list(lag_features, df)
    assert len(fb_list) == 2
    assert isinstance(fb_list[0], TransformFeature)
    assert set([x.get_name() for x in fb_list]) == set(
        ["LAG(f_2, t_idx, periods=2)", "LAG(f_3, t_idx, periods=2)"],
    )
| 7,999 | 31.786885 | 85 | py |
featuretools | featuretools-main/featuretools/tests/feature_discovery/test_feature_discovery.py | from unittest.mock import patch
import pytest
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import (
Boolean,
BooleanNullable,
Datetime,
Double,
NaturalLanguage,
Ordinal,
)
from featuretools.entityset.entityset import EntitySet
from featuretools.feature_discovery.feature_discovery import (
_get_features,
_get_matching_features,
_index_column_set,
generate_features_from_primitives,
schema_to_features,
)
from featuretools.feature_discovery.FeatureCollection import FeatureCollection
from featuretools.feature_discovery.LiteFeature import (
LiteFeature,
)
from featuretools.feature_discovery.utils import column_schema_to_keys
from featuretools.primitives import (
Absolute,
AddNumeric,
Count,
DateFirstEvent,
Equal,
Lag,
MultiplyNumericBoolean,
NumUnique,
TransformPrimitive,
)
from featuretools.primitives.utils import get_transform_primitives
from featuretools.synthesis import dfs
from featuretools.tests.testing_utils.generate_fake_dataframe import (
generate_fake_dataframe,
)
# Maps a semantic tag to a default Woodwork logical type.
DEFAULT_LT_FOR_TAG = {
    "category": Ordinal,
    "numeric": Double,
    "time_index": Datetime,
}
class TestMultiOutputPrimitive(TransformPrimitive):
    """Stub transform primitive that maps a NaturalLanguage column to two
    numeric outputs, for exercising multi-output feature handling.

    NOTE(review): the ``Test``-prefixed name may trigger pytest's class
    collection warning — confirm this is suppressed elsewhere.
    """

    name = "test_mo"
    input_types = [ColumnSchema(logical_type=NaturalLanguage)]
    return_type = ColumnSchema(semantic_tags={"numeric"})
    number_output_features = 2
class TestDoublePrimitive(TransformPrimitive):
    """Stub transform primitive mapping a Double column to a Double output."""

    name = "test_double"
    input_types = [ColumnSchema(logical_type=Double)]
    return_type = ColumnSchema(logical_type=Double)
@pytest.mark.parametrize(
    "column_schema, expected",
    [
        (ColumnSchema(logical_type=Double), "Double"),
        (ColumnSchema(semantic_tags={"index"}), "index"),
        (
            ColumnSchema(logical_type=Double, semantic_tags={"index", "other"}),
            "Double,index,other",
        ),
    ],
)
def test_column_schema_to_keys(column_schema, expected):
    """column_schema_to_keys renders a schema as its logical type plus tags."""
    actual = column_schema_to_keys(column_schema)
    # NOTE(review): set() over strings compares *characters*, making this an
    # order-insensitive but fairly loose comparison — confirm intended.
    assert set(actual) == set(expected)
@pytest.mark.parametrize(
    "column_list, expected",
    [
        ([ColumnSchema(logical_type=Boolean)], [("Boolean", 1)]),
        ([ColumnSchema()], [("ANY", 1)]),
        (
            [
                ColumnSchema(logical_type=Boolean),
                ColumnSchema(logical_type=Boolean),
            ],
            [("Boolean", 2)],
        ),
    ],
)
def test_index_input_set(column_list, expected):
    """_index_column_set counts input schemas per key ("ANY" when untyped)."""
    actual = _index_column_set(column_list)
    assert actual == expected
@pytest.mark.parametrize(
    "feature_args, input_set, commutative, expected",
    [
        (
            [("f1", Boolean), ("f2", Boolean), ("f3", Boolean)],
            [ColumnSchema(logical_type=Boolean)],
            False,
            [["f1"], ["f2"], ["f3"]],
        ),
        # Non-commutative two-input set yields both orderings...
        (
            [("f1", Boolean), ("f2", Boolean)],
            [ColumnSchema(logical_type=Boolean), ColumnSchema(logical_type=Boolean)],
            False,
            [["f1", "f2"], ["f2", "f1"]],
        ),
        # ...while a commutative one yields a single combination.
        (
            [("f1", Boolean), ("f2", Boolean)],
            [ColumnSchema(logical_type=Boolean), ColumnSchema(logical_type=Boolean)],
            True,
            [["f1", "f2"]],
        ),
        (
            [("f1", Datetime, {"time_index"})],
            [ColumnSchema(logical_type=Datetime, semantic_tags={"time_index"})],
            False,
            [["f1"]],
        ),
        (
            [("f1", Double, {"other", "index"})],
            [ColumnSchema(logical_type=Double, semantic_tags={"index", "other"})],
            False,
            [["f1"]],
        ),
        # Mixed-type commutative case: all Double pairs crossed with each Boolean.
        (
            [
                ("f1", Double),
                ("f2", Boolean),
                ("f3", Double),
                ("f4", Boolean),
                ("f5", Double),
            ],
            [
                ColumnSchema(logical_type=Double),
                ColumnSchema(logical_type=Double),
                ColumnSchema(logical_type=Boolean),
            ],
            True,
            [
                ["f1", "f3", "f2"],
                ["f1", "f3", "f4"],
                ["f1", "f5", "f2"],
                ["f1", "f5", "f4"],
                ["f3", "f5", "f2"],
                ["f3", "f5", "f4"],
            ],
        ),
    ],
)
# Stub the hash so each feature's id is simply its name, making the expected
# combinations readable.
@patch.object(LiteFeature, "_generate_hash", lambda x: x.name)
def test_get_features(feature_args, input_set, commutative, expected):
    """_get_features enumerates feature combinations matching an input set."""
    features = [LiteFeature(*args) for args in feature_args]
    feature_collection = FeatureCollection(features).reindex()
    column_keys = _index_column_set(input_set)
    actual = _get_features(feature_collection, tuple(column_keys), commutative)
    # Compare as sets of id-tuples so result ordering doesn't matter.
    assert set([tuple([y.id for y in x]) for x in actual]) == set(
        [tuple(x) for x in expected],
    )
@pytest.mark.parametrize(
    "feature_args, primitive, expected",
    [
        (
            [("f1", Double), ("f2", Double), ("f3", Double)],
            AddNumeric,
            [["f1", "f2"], ["f1", "f3"], ["f2", "f3"]],
        ),
        # No numeric inputs available -> no matches.
        (
            [("f1", Boolean), ("f2", Boolean), ("f3", Boolean)],
            AddNumeric,
            [],
        ),
        (
            [("f7", Double), ("f8", Boolean)],
            MultiplyNumericBoolean,
            [["f7", "f8"]],
        ),
        # DateFirstEvent requires the time_index tag, not just Datetime.
        (
            [("f9", Datetime)],
            DateFirstEvent,
            [],
        ),
        (
            [("f10", Datetime, {"time_index"})],
            DateFirstEvent,
            [["f10"]],
        ),
        # NumUnique needs a category column.
        (
            [("f11", Datetime, {"time_index"}), ("f12", Double)],
            NumUnique,
            [],
        ),
        (
            [("f13", Datetime, {"time_index"}), ("f14", Double), ("f15", Ordinal)],
            NumUnique,
            [["f15"]],
        ),
        (
            [("f16", Datetime, {"time_index"}), ("f17", Double), ("f18", Ordinal)],
            Equal,
            [["f16", "f17"], ["f16", "f18"], ["f17", "f18"]],
        ),
        # Lag pairs every laggable column with the time index.
        (
            [
                ("t_idx", Datetime, {"time_index"}),
                ("f19", Ordinal),
                ("f20", Double),
                ("f21", Boolean),
                ("f22", BooleanNullable),
            ],
            Lag,
            [["f19", "t_idx"], ["f20", "t_idx"], ["f21", "t_idx"], ["f22", "t_idx"]],
        ),
        # Index columns match Count but are excluded from transforms like AddNumeric.
        (
            [
                ("idx", Double, {"index"}),
                ("f23", Double),
            ],
            Count,
            [["idx"]],
        ),
        (
            [
                ("idx", Double, {"index"}),
                ("f23", Double),
            ],
            AddNumeric,
            [],
        ),
    ],
)
# Order features by name so the expected lists are deterministic.
@patch.object(LiteFeature, "__lt__", lambda x, y: x.name < y.name)
def test_get_matching_features(feature_args, primitive, expected):
    """_get_matching_features returns the input combinations a primitive accepts."""
    features = [LiteFeature(*args) for args in feature_args]
    feature_collection = FeatureCollection(features).reindex()
    actual = _get_matching_features(feature_collection, primitive())
    assert [[y.name for y in x] for x in actual] == expected
@pytest.mark.parametrize(
    "col_defs, primitives, expected",
    [
        (
            [
                ("f_1", "Double"),
                ("f_2", "Double"),
                ("f_3", "Boolean"),
                ("f_4", "Double"),
            ],
            [AddNumeric],
            {"f_1 + f_2", "f_1 + f_4", "f_2 + f_4"},
        ),
        (
            [
                ("f_1", "Double"),
                ("f_2", "Double"),
            ],
            [Absolute],
            {"ABSOLUTE(f_1)", "ABSOLUTE(f_2)"},
        ),
    ],
)
@patch.object(LiteFeature, "__lt__", lambda x, y: x.name < y.name)
def test_generate_features_from_primitives(col_defs, primitives, expected):
    """New features generated from primitives carry the expected names."""
    input_feature_names = set([x[0] for x in col_defs])
    df = generate_fake_dataframe(
        col_defs=col_defs,
    )
    origin_features = schema_to_features(df.ww.schema)
    features = generate_features_from_primitives(origin_features, primitives)
    # Strip the origin columns so only the newly generated names remain.
    new_feature_names = set([x.name for x in features]) - input_feature_names
    assert new_feature_names == expected
# Every registered transform primitive, used for the dfs parity check below.
ALL_TRANSFORM_PRIMITIVES = list(get_transform_primitives().values())
@pytest.mark.parametrize(
    "col_defs, primitives",
    [
        (
            [
                ("idx", "Double", {"index"}),
                ("t_idx", "Datetime", {"time_index"}),
                ("f_3", "Boolean"),
                ("f_4", "Boolean"),
                ("f_5", "BooleanNullable"),
                ("f_6", "BooleanNullable"),
                ("f_7", "Categorical"),
                ("f_8", "Categorical"),
                ("f_9", "Datetime"),
                ("f_10", "Datetime"),
                ("f_11", "Double"),
                ("f_12", "Double"),
                ("f_13", "Integer"),
                ("f_14", "Integer"),
                ("f_15", "IntegerNullable"),
                ("f_16", "IntegerNullable"),
                ("f_17", "EmailAddress"),
                ("f_18", "EmailAddress"),
                ("f_19", "LatLong"),
                ("f_20", "LatLong"),
                ("f_21", "NaturalLanguage"),
                ("f_22", "NaturalLanguage"),
                ("f_23", "Ordinal"),
                ("f_24", "Ordinal"),
                ("f_25", "URL"),
                ("f_26", "URL"),
                ("f_27", "PostalCode"),
                ("f_28", "PostalCode"),
            ],
            ALL_TRANSFORM_PRIMITIVES,
        ),
    ],
)
# Stub the hash so feature ids equal feature names.
@patch.object(LiteFeature, "_generate_hash", lambda x: x.name)
def test_compare_dfs(col_defs, primitives):
    """Feature discovery must produce exactly the same feature names as
    classic dfs for every transform primitive over every column type."""
    input_feature_names = set([x[0] for x in col_defs])
    df = generate_fake_dataframe(
        col_defs=col_defs,
    )
    es = EntitySet(id="test")
    es.add_dataframe(df, "df", index="idx")
    features_old = dfs(
        entityset=es,
        target_dataframe_name="df",
        trans_primitives=primitives,
        features_only=True,
        return_types="all",
    )
    origin_features = schema_to_features(df.ww.schema)
    features = generate_features_from_primitives(origin_features, primitives)
    # Compare only the generated names (drop the identity/origin columns).
    feature_names_old = set([x.get_name() for x in features_old]) - input_feature_names  # type: ignore
    feature_names_new = set([x.name for x in features]) - input_feature_names
    assert feature_names_old == feature_names_new
def test_generate_features_from_primitives_inputs():
    """Invalid input_features or primitives arguments raise ValueError."""
    f1 = LiteFeature("f1", Double)

    features_message = "input_features must be an iterable of LiteFeature objects"
    primitives_message = (
        "primitives must be a list of Primitive classes or Primitive instances"
    )
    bad_inputs = [
        (f1, [Absolute], features_message),
        ([f1, "other"], [Absolute], features_message),
        ([f1], ["absolute"], primitives_message),
        ([f1], Absolute, primitives_message),
    ]
    for input_features, primitives, message in bad_inputs:
        with pytest.raises(ValueError, match=message):
            generate_features_from_primitives(input_features, primitives)
| 11,215 | 28.43832 | 103 | py |
featuretools | featuretools-main/featuretools/tests/feature_discovery/test_feature_collection.py | import pytest
from woodwork.logical_types import (
Boolean,
Double,
Ordinal,
)
from featuretools.feature_discovery.FeatureCollection import FeatureCollection
from featuretools.feature_discovery.LiteFeature import LiteFeature
from featuretools.primitives import Absolute, AddNumeric
@pytest.mark.parametrize(
    "feature_args, expected",
    [
        (
            ("idx", Double),
            ["ANY", "Double", "Double,numeric", "numeric"],
        ),
        (
            ("idx", Double, {"index"}),
            ["ANY", "Double", "Double,index", "index"],
        ),
        (
            ("idx", Double, {"other"}),
            [
                "ANY",
                "Double",
                "other",
                "numeric",
                "Double,other",
                "Double,numeric",
                "numeric,other",
                "Double,numeric,other",
            ],
        ),
        (
            ("idx", Ordinal, {"other"}),
            [
                "ANY",
                "Ordinal",
                "other",
                "category",
                "Ordinal,other",
                "Ordinal,category",
                "category,other",
                "Ordinal,category,other",
            ],
        ),
        (
            ("idx", Double, {"a", "b", "numeric"}),
            [
                "ANY",
                "Double",
                "a",
                "b",
                "numeric",
                "Double,a",
                "Double,b",
                "Double,numeric",
                "a,b",
                "a,numeric",
                "b,numeric",
                "a,b,numeric",
                "Double,a,b",
                "Double,a,numeric",
                "Double,b,numeric",
                "Double,a,b,numeric",
            ],
        ),
    ],
)
def test_to_keys_method(feature_args, expected):
    """feature_to_keys produces every logical-type/tag combination as a key,
    plus the catch-all "ANY" key, for the given feature definition.
    """
    feature = LiteFeature(*feature_args)
    keys = FeatureCollection.feature_to_keys(feature)
    assert set(keys) == set(expected)
def test_feature_collection_hashing():
    """Collections with identical features hash equal, and a reindexed
    collection supports lookups by logical type, tag, and origin feature.
    """
    f1 = LiteFeature(name="f1", logical_type=Double)
    f2 = LiteFeature(name="f2", logical_type=Double, tags={"index"})
    f3 = LiteFeature(name="f3", logical_type=Boolean, tags={"other"})
    f4 = LiteFeature(name="f4", primitive=Absolute(), base_features=[f1])
    f5 = LiteFeature(name="f5", primitive=AddNumeric(), base_features=[f1, f2])
    fc1 = FeatureCollection([f1, f2, f3, f4, f5])
    fc2 = FeatureCollection([f1, f2, f3, f4, f5])
    # Equal contents -> equal hashes, so the two collections dedupe to one.
    assert len({fc1, fc2}) == 1
    fc1.reindex()
    assert fc1.get_by_logical_type(Double) == {f1, f2}
    assert fc1.get_by_tag("index") == {f2}
    assert fc1.get_by_origin_feature(f1) == {f1, f4, f5}
    assert fc1.get_dependencies_by_origin_name("f1") == {f1, f4, f5}
    assert fc1.get_dependencies_by_origin_name("null") == set()
    assert fc1.get_by_origin_feature_name("f1") == f1
    assert fc1.get_by_origin_feature_name("null") is None
| 2,981 | 26.869159 | 79 | py |
featuretools | featuretools-main/featuretools/tests/feature_discovery/test_type_defs.py | import json
from unittest.mock import patch
import pytest
from woodwork.logical_types import Boolean, Double
from featuretools.feature_discovery.feature_discovery import (
generate_features_from_primitives,
schema_to_features,
)
from featuretools.feature_discovery.FeatureCollection import FeatureCollection
from featuretools.feature_discovery.LiteFeature import LiteFeature
from featuretools.primitives import (
Absolute,
AddNumeric,
DivideNumeric,
Lag,
MultiplyNumeric,
)
from featuretools.tests.feature_discovery.test_feature_discovery import (
TestMultiOutputPrimitive,
)
from featuretools.tests.testing_utils.generate_fake_dataframe import (
generate_fake_dataframe,
)
def test_feature_type_equality():
    """LiteFeature equality respects the commutativity of the primitive:
    commutative primitives ignore base-feature order, others do not.
    """
    f1 = LiteFeature("f1", Double)
    f2 = LiteFeature("f2", Double)
    # Add Numeric is Commutative, so should all be equal
    f3 = LiteFeature(
        name="Column 1",
        primitive=AddNumeric(),
        logical_type=Double,
        base_features=[f1, f2],
    )
    f4 = LiteFeature(
        name="Column 10",
        primitive=AddNumeric(),
        logical_type=Double,
        base_features=[f1, f2],
    )
    f5 = LiteFeature(
        name="Column 20",
        primitive=AddNumeric(),
        logical_type=Double,
        base_features=[f2, f1],
    )
    assert f3 == f4 == f5
    # Divide Numeric is not Commutative, so should not be equal
    f6 = LiteFeature(
        name="Column 1",
        primitive=DivideNumeric(),
        logical_type=Double,
        base_features=[f1, f2],
    )
    f7 = LiteFeature(
        name="Column 1",
        primitive=DivideNumeric(),
        logical_type=Double,
        base_features=[f2, f1],
    )
    assert f6 != f7
def test_feature_type_assertions():
    """A primitive-backed LiteFeature without base features is rejected."""
    expected_message = "there must be base features if given a primitive"
    with pytest.raises(ValueError, match=expected_message):
        LiteFeature(
            name="Column 1",
            primitive=AddNumeric(),
            logical_type=Double,
        )
# Hashing is patched to stable, human-readable values so the expected dict
# below can use plain names instead of opaque digests.
@patch.object(LiteFeature, "_generate_hash", lambda x: x.name)
@patch(
    "featuretools.feature_discovery.LiteFeature.hash_primitive",
    lambda x: (x.name, None),
)
def test_feature_to_dict():
    """to_dict serializes a derived feature into a JSON-compatible dict."""
    f1 = LiteFeature("f1", Double)
    f2 = LiteFeature("f2", Double)
    f = LiteFeature(
        name="Column 1",
        primitive=AddNumeric(),
        base_features=[f1, f2],
    )
    expected = {
        "name": "Column 1",
        "logical_type": None,
        "tags": ["numeric"],
        "primitive": "add_numeric",
        "base_features": ["f1", "f2"],
        "df_id": None,
        "id": "Column 1",
        "related_features": [],
        "idx": 0,
    }
    actual = f.to_dict()
    # Round-trip through json to prove the dict is JSON-serializable.
    json_str = json.dumps(actual)
    assert actual == expected
    assert json.dumps(expected) == json_str
def test_feature_hash():
    """Feature identity depends on primitive arguments and the base feature's
    dataframe id: differing ``periods`` or ``df_id`` produce unequal features.
    """
    bf1 = LiteFeature("bf", Double)
    bf2 = LiteFeature("bf", Double, df_id="df")
    p1 = Lag(periods=1)
    p2 = Lag(periods=2)
    f1 = LiteFeature(
        primitive=p1,
        logical_type=Double,
        base_features=[bf1],
    )
    f2 = LiteFeature(
        primitive=p2,
        logical_type=Double,
        base_features=[bf1],
    )
    f3 = LiteFeature(
        primitive=p2,
        logical_type=Double,
        base_features=[bf1],
    )
    f4 = LiteFeature(
        primitive=p1,
        logical_type=Double,
        base_features=[bf2],
    )
    # TODO(dreed): ensure ID is parquet and arrow acceptable, length and starting character might be problematic
    assert f1 != f2
    assert f2 == f3
    assert f1 != f4
def test_feature_forced_name():
    """An explicitly supplied name overrides the derived feature name."""
    base = LiteFeature("bf", Double)
    forced = LiteFeature(
        name="target_delay_1",
        primitive=Lag(periods=1),
        logical_type=Double,
        base_features=[base],
    )
    assert forced.name == "target_delay_1"
# Hashing is patched everywhere it is used so feature and primitive ids in
# the serialized output are readable names instead of opaque digests.
@patch.object(LiteFeature, "_generate_hash", lambda x: x.name)
@patch(
    "featuretools.feature_discovery.FeatureCollection.hash_primitive",
    lambda x: (x.name, None),
)
@patch(
    "featuretools.feature_discovery.LiteFeature.hash_primitive",
    lambda x: (x.name, None),
)
def test_feature_collection_to_dict():
    """FeatureCollection.to_dict serializes primitives, feature ids, and every
    feature (including base features) into a JSON-compatible dict.
    """
    f1 = LiteFeature("f1", Double)
    f2 = LiteFeature("f2", Double)
    f3 = LiteFeature(
        name="Column 1",
        primitive=AddNumeric(),
        base_features=[f1, f2],
    )
    fc = FeatureCollection([f3])
    expected = {
        "primitives": {
            "add_numeric": None,
        },
        "feature_ids": ["Column 1"],
        "all_features": {
            "Column 1": {
                "name": "Column 1",
                "logical_type": None,
                "tags": ["numeric"],
                "primitive": "add_numeric",
                "base_features": ["f1", "f2"],
                "df_id": None,
                "id": "Column 1",
                "related_features": [],
                "idx": 0,
            },
            "f1": {
                "name": "f1",
                "logical_type": "Double",
                "tags": ["numeric"],
                "primitive": None,
                "base_features": [],
                "df_id": None,
                "id": "f1",
                "related_features": [],
                "idx": 0,
            },
            "f2": {
                "name": "f2",
                "logical_type": "Double",
                "tags": ["numeric"],
                "primitive": None,
                "base_features": [],
                "df_id": None,
                "id": "f2",
                "related_features": [],
                "idx": 0,
            },
        },
    }
    actual = fc.to_dict()
    assert actual == expected
    # sort_keys makes the comparison independent of dict insertion order.
    assert json.dumps(expected, sort_keys=True) == json.dumps(actual, sort_keys=True)
@patch.object(LiteFeature, "_generate_hash", lambda x: x.name)
def test_feature_collection_from_dict():
    """from_dict reconstructs a collection equal to one built directly from
    LiteFeature objects, resolving primitive hashes back to instances.
    """
    f1 = LiteFeature("f1", Double)
    f2 = LiteFeature("f2", Double)
    f3 = LiteFeature(
        name="Column 1",
        primitive=AddNumeric(),
        base_features=[f1, f2],
    )
    expected = FeatureCollection([f3])
    input_dict = {
        "primitives": {
            "009da67f0a1430630c4a419c84aac270ec62337ab20c080e4495272950fd03b3": {
                "type": "AddNumeric",
                "module": "featuretools.primitives.standard.transform.binary.add_numeric",
                "arguments": {},
            },
        },
        "feature_ids": ["Column 1"],
        "all_features": {
            "f2": {
                "name": "f2",
                "logical_type": "Double",
                "tags": ["numeric"],
                "primitive": None,
                "base_features": [],
                "df_id": None,
                "id": "f2",
                "related_features": [],
                "idx": 0,
            },
            "f1": {
                "name": "f1",
                "logical_type": "Double",
                "tags": ["numeric"],
                "primitive": None,
                "base_features": [],
                "df_id": None,
                "id": "f1",
                "related_features": [],
                "idx": 0,
            },
            "Column 1": {
                "name": "Column 1",
                "logical_type": None,
                "tags": ["numeric"],
                "primitive": "009da67f0a1430630c4a419c84aac270ec62337ab20c080e4495272950fd03b3",
                "base_features": ["f1", "f2"],
                "df_id": None,
                "id": "Column 1",
                "related_features": [],
                "idx": 0,
            },
        },
    }
    actual = FeatureCollection.from_dict(input_dict)
    assert actual == expected
@patch.object(LiteFeature, "__lt__", lambda x, y: x.name < y.name)
def test_feature_collection_serialization_roundtrip():
    """A FeatureCollection survives to_dict -> JSON -> from_dict unchanged,
    including related-feature links on multi-output features.
    """
    col_defs = [
        ("idx", "Integer", {"index"}),
        ("t_idx", "Datetime", {"time_index"}),
        ("f_1", "Double"),
        ("f_2", "Double"),
        ("f_3", "Categorical"),
        ("f_4", "Boolean"),
        ("f_5", "NaturalLanguage"),
    ]
    df = generate_fake_dataframe(
        col_defs=col_defs,
    )
    origin_features = schema_to_features(df.ww.schema)
    features = generate_features_from_primitives(
        origin_features,
        [Absolute, MultiplyNumeric, TestMultiOutputPrimitive],
    )
    features = generate_features_from_primitives(features, [Lag])
    assert {x.name for x in features} == {
        "idx",
        "t_idx",
        "f_1",
        "f_2",
        "f_3",
        "f_4",
        "f_5",
        "ABSOLUTE(f_1)",
        "ABSOLUTE(f_2)",
        "f_1 * f_2",
        "TEST_MO(f_5)[0]",
        "TEST_MO(f_5)[1]",
        "LAG(f_1, t_idx)",
        "LAG(f_2, t_idx)",
        "LAG(f_3, t_idx)",
        "LAG(f_4, t_idx)",
        "LAG(ABSOLUTE(f_1), t_idx)",
        "LAG(ABSOLUTE(f_2), t_idx)",
        "LAG(f_1 * f_2, t_idx)",
        "LAG(TEST_MO(f_5)[1], t_idx)",
        "LAG(TEST_MO(f_5)[0], t_idx)",
    }
    fc = FeatureCollection(features=features)
    fc_dict = fc.to_dict()
    fc_json = json.dumps(fc_dict)
    fc2_dict = json.loads(fc_json)
    fc2 = FeatureCollection.from_dict(fc2_dict)
    assert fc == fc2
    # Multi-output features must come back with related features intact.
    lsa_features = [x for x in fc2.all_features if x.get_primitive_name() == "test_mo"]
    assert len(lsa_features[0].related_features) == 1
def test_lite_feature_assertions():
    """Constructor validation errors and attribute immutability of LiteFeature."""
    f1 = LiteFeature(name="f1", logical_type=Double)
    f2 = LiteFeature(name="f1", logical_type=Double, df_id="df1")
    # Same name and type but different dataframe ids -> distinct features.
    assert f1 != f2
    with pytest.raises(
        TypeError,
        match="Name must be given if origin feature",
    ):
        LiteFeature(logical_type=Double)
    with pytest.raises(
        TypeError,
        match="Logical Type must be given if origin feature",
    ):
        LiteFeature(name="f1")
    with pytest.raises(
        ValueError,
        match="primitive input must be of type PrimitiveBase",
    ):
        LiteFeature(name="f3", primitive="AddNumeric", base_features=[f1, f2])
    # Every public attribute of a constructed feature is read-only.
    f = LiteFeature("f4", logical_type=Double)
    with pytest.raises(AttributeError, match="name is immutable"):
        f.name = "new name"
    with pytest.raises(ValueError, match="only used on multioutput features"):
        f.non_indexed_name
    with pytest.raises(AttributeError, match="logical_type is immutable"):
        f.logical_type = Boolean
    with pytest.raises(AttributeError, match="tags is immutable"):
        f.tags = {"other"}
    with pytest.raises(AttributeError, match="primitive is immutable"):
        f.primitive = AddNumeric
    with pytest.raises(AttributeError, match="base_features are immutable"):
        f.base_features = [f1]
    with pytest.raises(AttributeError, match="df_id is immutable"):
        f.df_id = "df_id"
    with pytest.raises(AttributeError, match="id is immutable"):
        f.id = "id"
    with pytest.raises(AttributeError, match="n_output_features is immutable"):
        f.n_output_features = "n_output_features"
    with pytest.raises(AttributeError, match="depth is immutable"):
        f.depth = "depth"
    with pytest.raises(AttributeError, match="idx is immutable"):
        f.idx = "idx"
def test_lite_feature_to_column_schema():
    """column_schema reflects a feature's logical type and semantic tags."""
    origin = LiteFeature(name="f1", logical_type=Double, tags={"index", "numeric"})
    schema = origin.column_schema
    assert schema.is_numeric
    assert isinstance(schema.logical_type, Double)
    assert schema.semantic_tags == {"index", "numeric"}
    # The derived feature's schema carries only the "numeric" tag.
    derived = LiteFeature(name="f2", primitive=Absolute(), base_features=[origin])
    assert derived.column_schema.semantic_tags == {"numeric"}
def test_lite_feature_to_dependent_primitives():
    """dependent_primitives reports every primitive in the dependency chain."""
    f1 = LiteFeature(name="f1", logical_type=Double)
    f2 = LiteFeature(name="f2", primitive=Absolute(), base_features=[f1])
    f3 = LiteFeature(name="f3", primitive=AddNumeric(), base_features=[f1, f2])
    f4 = LiteFeature(name="f4", primitive=MultiplyNumeric(), base_features=[f1, f3])
    assert {x.name for x in f4.dependent_primitives()} == {
        "multiply_numeric",
        "absolute",
        "add_numeric",
    }
| 12,240 | 26.384787 | 112 | py |
featuretools | featuretools-main/featuretools/computational_backends/feature_set_calculator.py | from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from featuretools.entityset.relationship import RelationshipPath
from featuretools.exceptions import UnknownFeature
from featuretools.feature_base import (
AggregationFeature,
DirectFeature,
GroupByTransformFeature,
IdentityFeature,
TransformFeature,
)
from featuretools.utils import Trie
from featuretools.utils.gen_utils import (
Library,
get_relationship_column_id,
import_or_none,
is_instance,
)
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")
class FeatureSetCalculator(object):
"""
Calculates the values of a set of features for given instance ids.
"""
def __init__(
self,
entityset,
feature_set,
time_last=None,
training_window=None,
precalculated_features=None,
):
"""
Args:
feature_set (FeatureSet): The features to calculate values for.
time_last (pd.Timestamp, optional): Last allowed time. Data from exactly this
time not allowed.
training_window (Timedelta, optional): Window defining how much time before the cutoff time data
can be used when calculating features. If None, all data before cutoff time is used.
precalculated_features (Trie[RelationshipPath -> pd.DataFrame]):
Maps RelationshipPaths to dataframes of precalculated_features
"""
self.entityset = entityset
self.feature_set = feature_set
self.training_window = training_window
if time_last is None:
time_last = datetime.now()
self.time_last = time_last
if precalculated_features is None:
precalculated_features = Trie(path_constructor=RelationshipPath)
self.precalculated_features = precalculated_features
# total number of features (including dependencies) to be calculate
self.num_features = sum(
len(features1) + len(features2)
for _, (_, features1, features2) in self.feature_set.feature_trie
)
    def run(self, instance_ids, progress_callback=None, include_cutoff_time=True):
        """
        Calculate values of features for the given instances of the target
        dataframe.
        Summary of algorithm:
        1. Construct a trie where the edges are relationships and each node
            contains a set of features for a single dataframe. See
            FeatureSet._build_feature_trie.
        2. Initialize a trie for storing dataframes.
        3. Traverse the trie using depth first search. At each node calculate
            the features and store the resulting dataframe in the dataframe
            trie (so that its values can be used by features which depend on
            these features). See _calculate_features_for_dataframe.
        4. Get the dataframe at the root of the trie (for the target dataframe) and
            return the columns corresponding to the requested features.
        Args:
            instance_ids (np.ndarray or pd.Categorical): Instance ids for which
                to build features.
            progress_callback (callable): function to be called with incremental progress updates
            include_cutoff_time (bool): If True, data at cutoff time are included
                in calculating features.
        Returns:
            pd.DataFrame : Pandas DataFrame of calculated feature values.
                Indexed by instance_ids. Columns in same order as features
                passed in.
        """
        assert len(instance_ids) > 0, "0 instance ids provided"
        if progress_callback is None:
            # do nothing for the progress call back if not provided
            def progress_callback(*args):
                pass
        feature_trie = self.feature_set.feature_trie
        df_trie = Trie(path_constructor=RelationshipPath)
        full_dataframe_trie = Trie(path_constructor=RelationshipPath)
        target_dataframe = self.entityset[self.feature_set.target_df_name]
        self._calculate_features_for_dataframe(
            dataframe_name=self.feature_set.target_df_name,
            feature_trie=feature_trie,
            df_trie=df_trie,
            full_dataframe_trie=full_dataframe_trie,
            precalculated_trie=self.precalculated_features,
            filter_column=target_dataframe.ww.index,
            filter_values=instance_ids,
            progress_callback=progress_callback,
            include_cutoff_time=include_cutoff_time,
        )
        # The dataframe for the target dataframe should be stored at the root of
        # df_trie.
        df = df_trie.value
        # Fill in empty rows with default values. This only works for pandas dataframes
        # and is not currently supported for Dask dataframes.
        if isinstance(df, pd.DataFrame):
            # Remember the original index dtype so a categorical index can be
            # restored after concat/reindex below.
            index_dtype = df.index.dtype.name
            if df.empty:
                return self.generate_default_df(instance_ids=instance_ids)
            # Instance ids requested but absent from the result get a row of
            # per-feature default values.
            missing_ids = [
                i for i in instance_ids if i not in df[target_dataframe.ww.index]
            ]
            if missing_ids:
                default_df = self.generate_default_df(
                    instance_ids=missing_ids,
                    extra_columns=df.columns,
                )
                df = pd.concat([df, default_df], sort=True)
            df.index.name = self.entityset[self.feature_set.target_df_name].ww.index
            # Order by instance_ids
            unique_instance_ids = pd.unique(instance_ids)
            unique_instance_ids = unique_instance_ids.astype(instance_ids.dtype)
            df = df.reindex(unique_instance_ids)
            # Keep categorical index if original index was categorical
            if index_dtype == "category":
                df.index = df.index.astype("category")
        column_list = []
        for feat in self.feature_set.target_features:
            column_list.extend(feat.get_feature_names())
        if is_instance(df, (dd, ps), "DataFrame"):
            # Dask/Spark results keep the index column since rows cannot be
            # reindexed the way pandas rows are above.
            column_list.extend([target_dataframe.ww.index])
        return df[column_list]
    def _calculate_features_for_dataframe(
        self,
        dataframe_name,
        feature_trie,
        df_trie,
        full_dataframe_trie,
        precalculated_trie,
        filter_column,
        filter_values,
        parent_data=None,
        progress_callback=None,
        include_cutoff_time=True,
    ):
        """
        Generate dataframes with features calculated for this node of the trie,
        and all descendant nodes. The dataframes will be stored in df_trie.
        Args:
            dataframe_name (str): The name of the dataframe to calculate features for.
            feature_trie (Trie): the trie with sets of features to calculate.
                The root contains features for the given dataframe.
            df_trie (Trie): a parallel trie for storing dataframes. The
                dataframe with features calculated will be placed in the root.
            full_dataframe_trie (Trie): a trie storing dataframes with all dataframe
                rows, for features that are uses_full_dataframe.
            precalculated_trie (Trie): a parallel trie containing dataframes
                with precalculated features. The dataframe specified by dataframe_name
                will be at the root.
            filter_column (str): The name of the column to filter this
                dataframe by.
            filter_values (pd.Series): The values to filter the filter_column
                to.
            parent_data (tuple[Relationship, list[str], pd.DataFrame]): Data
                related to the parent of this trie. This will only be present if
                the relationship points from this dataframe to the parent dataframe. A
                3 tuple of (parent_relationship,
                ancestor_relationship_columns, parent_df).
                ancestor_relationship_columns is the names of columns which
                link the parent dataframe to its ancestors.
            progress_callback (callable): function to be called with incremental
                progress updates (also invoked with 0 to refresh timing).
            include_cutoff_time (bool): If True, data at cutoff time are included
                in calculating features.
        """
        # Step 1: Get a dataframe for the given dataframe name, filtered by the given
        # conditions.
        (
            need_full_dataframe,
            full_dataframe_features,
            not_full_dataframe_features,
        ) = feature_trie.value
        all_features = full_dataframe_features | not_full_dataframe_features
        columns = self._necessary_columns(dataframe_name, all_features)
        # If we need the full dataframe then don't filter by filter_values.
        if need_full_dataframe:
            query_column = None
            query_values = None
        else:
            query_column = filter_column
            query_values = filter_values
        df = self.entityset.query_by_values(
            dataframe_name=dataframe_name,
            instance_vals=query_values,
            column_name=query_column,
            columns=columns,
            time_last=self.time_last,
            training_window=self.training_window,
            include_cutoff_time=include_cutoff_time,
        )
        # call to update timer
        progress_callback(0)
        # Step 2: Add columns to the dataframe linking it to all ancestors.
        new_ancestor_relationship_columns = []
        if parent_data:
            parent_relationship, ancestor_relationship_columns, parent_df = parent_data
            if ancestor_relationship_columns:
                (
                    df,
                    new_ancestor_relationship_columns,
                ) = self._add_ancestor_relationship_columns(
                    df,
                    parent_df,
                    ancestor_relationship_columns,
                    parent_relationship,
                )
            # Add the column linking this dataframe to its parent, so that
            # descendants get linked to the parent.
            new_ancestor_relationship_columns.append(
                parent_relationship._child_column_name,
            )
        # call to update timer
        progress_callback(0)
        # Step 3: Recurse on children.
        # Pass filtered values, even if we are using a full df.
        if need_full_dataframe:
            if is_instance(filter_values, dd, "Series"):
                msg = "Cannot use primitives that require full dataframe with Dask EntitySets"
                raise ValueError(msg)
            filtered_df = df[df[filter_column].isin(filter_values)]
        else:
            filtered_df = df
        for edge, sub_trie in feature_trie.children():
            is_forward, relationship = edge
            # Forward edges point at the parent dataframe; backward edges point
            # at the child and carry parent_data so ancestors stay linked.
            if is_forward:
                sub_dataframe_name = relationship.parent_dataframe.ww.name
                sub_filter_column = relationship._parent_column_name
                sub_filter_values = filtered_df[relationship._child_column_name]
                parent_data = None
            else:
                sub_dataframe_name = relationship.child_dataframe.ww.name
                sub_filter_column = relationship._child_column_name
                sub_filter_values = filtered_df[relationship._parent_column_name]
                parent_data = (relationship, new_ancestor_relationship_columns, df)
            sub_df_trie = df_trie.get_node([edge])
            sub_full_dataframe_trie = full_dataframe_trie.get_node([edge])
            sub_precalc_trie = precalculated_trie.get_node([edge])
            self._calculate_features_for_dataframe(
                dataframe_name=sub_dataframe_name,
                feature_trie=sub_trie,
                df_trie=sub_df_trie,
                full_dataframe_trie=sub_full_dataframe_trie,
                precalculated_trie=sub_precalc_trie,
                filter_column=sub_filter_column,
                filter_values=sub_filter_values,
                parent_data=parent_data,
                progress_callback=progress_callback,
                include_cutoff_time=include_cutoff_time,
            )
        # Step 4: Calculate the features for this dataframe.
        #
        # All dependencies of the features for this dataframe have been calculated
        # by the above recursive calls, and their results stored in df_trie.
        # Add any precalculated features.
        precalculated_features_df = precalculated_trie.value
        if precalculated_features_df is not None:
            # Left outer merge to keep all rows of df.
            df = df.merge(
                precalculated_features_df,
                how="left",
                left_index=True,
                right_index=True,
                suffixes=("", "_precalculated"),
            )
        # call to update timer
        progress_callback(0)
        # First, calculate any features that require the full dataframe. These can
        # be calculated first because all of their dependents are included in
        # full_dataframe_features.
        if need_full_dataframe:
            df = self._calculate_features(
                df,
                full_dataframe_trie,
                full_dataframe_features,
                progress_callback,
            )
            # Store full dataframe
            full_dataframe_trie.value = df
            # Filter df so that features that don't require the full dataframe are
            # only calculated on the necessary instances.
            df = df[df[filter_column].isin(filter_values)]
        # Calculate all features that don't require the full dataframe.
        df = self._calculate_features(
            df,
            df_trie,
            not_full_dataframe_features,
            progress_callback,
        )
        # Step 5: Store the dataframe for this dataframe at the root of df_trie, so
        # that it can be accessed by the caller.
        df_trie.value = df
def _calculate_features(self, df, df_trie, features, progress_callback):
# Group the features so that each group can be calculated together.
# The groups must also be in topological order (if A is a transform of B
# then B must be in a group before A).
feature_groups = self.feature_set.group_features(features)
for group in feature_groups:
representative_feature = group[0]
handler = self._feature_type_handler(representative_feature)
df = handler(group, df, df_trie, progress_callback)
return df
def _add_ancestor_relationship_columns(
self,
child_df,
parent_df,
ancestor_relationship_columns,
relationship,
):
"""
Merge ancestor_relationship_columns from parent_df into child_df, adding a prefix to
each column name specifying the relationship.
Return the updated df and the new relationship column names.
Args:
child_df (pd.DataFrame): The dataframe to add relationship columns to.
parent_df (pd.DataFrame): The dataframe to copy relationship columns from.
ancestor_relationship_columns (list[str]): The names of
relationship columns in the parent_df to copy into child_df.
relationship (Relationship): the relationship through which the
child is connected to the parent.
"""
relationship_name = relationship.parent_name
new_relationship_columns = [
"%s.%s" % (relationship_name, col) for col in ancestor_relationship_columns
]
# create an intermediate dataframe which shares a column
# with the child dataframe and has a column with the
# original parent's id.
col_map = {relationship._parent_column_name: relationship._child_column_name}
for child_column, parent_column in zip(
new_relationship_columns,
ancestor_relationship_columns,
):
col_map[parent_column] = child_column
merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)
merge_df.index.name = None # change index name for merge
# Merge the dataframe, adding the relationship columns to the child.
# Left outer join so that all rows in child are kept (if it contains
# all rows of the dataframe then there may not be corresponding rows in the
# parent_df).
df = child_df.merge(
merge_df,
how="left",
left_on=relationship._child_column_name,
right_on=relationship._child_column_name,
)
# ensure index is maintained
# TODO: Review for dask dataframes
if isinstance(df, pd.DataFrame):
df.set_index(
relationship.child_dataframe.ww.index,
drop=False,
inplace=True,
)
return df, new_relationship_columns
def generate_default_df(self, instance_ids, extra_columns=None):
default_row = []
default_cols = []
for f in self.feature_set.target_features:
for name in f.get_feature_names():
default_cols.append(name)
default_row.append(f.default_value)
default_matrix = [default_row] * len(instance_ids)
default_df = pd.DataFrame(
default_matrix,
columns=default_cols,
index=instance_ids,
dtype="object",
)
index_name = self.entityset[self.feature_set.target_df_name].ww.index
default_df.index.name = index_name
if extra_columns is not None:
for c in extra_columns:
if c not in default_df.columns:
default_df[c] = [np.nan] * len(instance_ids)
return default_df
def _feature_type_handler(self, f):
if type(f) == TransformFeature:
return self._calculate_transform_features
elif type(f) == GroupByTransformFeature:
return self._calculate_groupby_features
elif type(f) == DirectFeature:
return self._calculate_direct_features
elif type(f) == AggregationFeature:
return self._calculate_agg_features
elif type(f) == IdentityFeature:
return self._calculate_identity_features
else:
raise UnknownFeature("{} feature unknown".format(f.__class__))
def _calculate_identity_features(self, features, df, _df_trie, progress_callback):
for f in features:
assert f.get_name() in df.columns, (
'Column "%s" missing frome dataframe' % f.get_name()
)
progress_callback(len(features) / float(self.num_features))
return df
    def _calculate_transform_features(
        self,
        features,
        frame,
        _df_trie,
        progress_callback,
    ):
        """Apply each transform feature's primitive function to its base
        columns in ``frame`` and append the resulting column(s).
        """
        frame_empty = frame.empty if isinstance(frame, pd.DataFrame) else False
        feature_values = []
        for f in features:
            # handle when no data
            if frame_empty:
                # Even though we are adding the default values here, when these new
                # features are added to the dataframe in update_feature_columns, they
                # are added as empty columns since the dataframe itself is empty.
                feature_values.append(
                    (f, [f.default_value for _ in range(f.number_output_features)]),
                )
                progress_callback(1 / float(self.num_features))
                continue
            # collect only the columns we need for this transformation
            column_data = [frame[bf.get_name()] for bf in f.base_features]
            feature_func = f.get_function()
            # apply the function to the relevant dataframe slice and add the
            # feature row to the results dataframe.
            if f.primitive.uses_calc_time:
                values = feature_func(*column_data, time=self.time_last)
            else:
                values = feature_func(*column_data)
            # if we don't get just the values, the assignment breaks when indexes don't match
            if f.number_output_features > 1:
                values = [strip_values_if_series(value) for value in values]
            else:
                values = [strip_values_if_series(values)]
            feature_values.append((f, values))
            progress_callback(1 / float(self.num_features))
        frame = update_feature_columns(feature_values, frame)
        return frame
    def _calculate_groupby_features(self, features, frame, _df_trie, progress_callback):
        """Apply each groupby-transform feature's primitive within every group
        of its groupby column, writing results back into ``frame``. Rows in
        the null group keep the feature's default value.
        """
        # set default values to handle the null group
        default_values = {}
        for f in features:
            for name in f.get_feature_names():
                default_values[name] = f.default_value
        frame = pd.concat(
            [frame, pd.DataFrame(default_values, index=frame.index)],
            axis=1,
        )
        # handle when no data
        if frame.shape[0] == 0:
            progress_callback(len(features) / float(self.num_features))
            return frame
        groupby = features[0].groupby.get_name()
        grouped = frame.groupby(groupby)
        groups = frame[
            groupby
        ].unique()  # get all the unique group name to iterate over later
        for f in features:
            feature_vals = []
            for _ in range(f.number_output_features):
                feature_vals.append([])
            for group in groups:
                # skip null key if it exists
                if pd.isnull(group):
                    continue
                column_names = [bf.get_name() for bf in f.base_features]
                # exclude the groupby column from being passed to the function
                column_data = [
                    grouped[name].get_group(group) for name in column_names[:-1]
                ]
                feature_func = f.get_function()
                # apply the function to the relevant dataframe slice and add the
                # feature row to the results dataframe.
                if f.primitive.uses_calc_time:
                    values = feature_func(*column_data, time=self.time_last)
                else:
                    values = feature_func(*column_data)
                if f.number_output_features == 1:
                    values = [values]
                # make sure index is aligned
                for i, value in enumerate(values):
                    if isinstance(value, pd.Series):
                        value.index = column_data[0].index
                    else:
                        value = pd.Series(value, index=column_data[0].index)
                    feature_vals[i].append(value)
            if any(feature_vals):
                assert len(feature_vals) == len(f.get_feature_names())
                for col_vals, name in zip(feature_vals, f.get_feature_names()):
                    # update() overwrites only the rows computed above, leaving
                    # default values in place for the null group.
                    frame[name].update(pd.concat(col_vals))
            progress_callback(1 / float(self.num_features))
        return frame
    def _calculate_direct_features(
        self,
        features,
        child_df,
        df_trie,
        progress_callback,
    ):
        """Copy each direct feature's values from the parent dataframe (stored
        in ``df_trie``) into ``child_df`` via a left merge on the relationship
        column, filling missing rows with the features' default values.
        """
        path = features[0].relationship_path
        assert len(path) == 1, "Error calculating DirectFeatures, len(path) != 1"
        parent_df = df_trie.get_node([path[0]]).value
        _is_forward, relationship = path[0]
        merge_col = relationship._child_column_name
        # generate a mapping of old column names (in the parent dataframe) to
        # new column names (in the child dataframe) for the merge
        col_map = {relationship._parent_column_name: merge_col}
        index_as_feature = None
        fillna_dict = {}
        for f in features:
            # NaN defaults are skipped so fillna below leaves those columns alone.
            feature_defaults = {
                name: f.default_value
                for name in f.get_feature_names()
                if not pd.isna(f.default_value)
            }
            fillna_dict.update(feature_defaults)
            if f.base_features[0].get_name() == relationship._parent_column_name:
                index_as_feature = f
            base_names = f.base_features[0].get_feature_names()
            for name, base_name in zip(f.get_feature_names(), base_names):
                if name in child_df.columns:
                    continue
                col_map[base_name] = name
        # merge the identity feature from the parent dataframe into the child
        merge_df = parent_df[list(col_map.keys())].rename(columns=col_map)
        if is_instance(merge_df, (dd, ps), "DataFrame"):
            new_df = child_df.merge(
                merge_df,
                left_on=merge_col,
                right_on=merge_col,
                how="left",
            )
        else:
            # For pandas, merge on the parent index; keep the index column as
            # data too when it is itself one of the requested features.
            if index_as_feature is not None:
                merge_df.set_index(
                    index_as_feature.get_name(),
                    inplace=True,
                    drop=False,
                )
            else:
                merge_df.set_index(merge_col, inplace=True)
            new_df = child_df.merge(
                merge_df,
                left_on=merge_col,
                right_index=True,
                how="left",
            )
        progress_callback(len(features) / float(self.num_features))
        return new_df.fillna(fillna_dict)
def _calculate_agg_features(self, features, frame, df_trie, progress_callback):
    """Compute AggregationFeature values by grouping the child dataframe.

    Features that can go through ``groupby().agg`` are batched together;
    features that cannot (multi-column input or time-dependent primitives)
    are computed via ``groupby().apply``. Results are merged back into
    ``frame`` and missing values are filled with each feature's default.
    """
    test_feature = features[0]
    child_dataframe = test_feature.base_features[0].dataframe
    base_frame = df_trie.get_node(test_feature.relationship_path).value
    parent_merge_col = test_feature.relationship_path[0][1]._parent_column_name
    # Sometimes approximate features get computed in a previous filter frame
    # and put in the current one dynamically,
    # so there may be existing features here
    fl = []
    for f in features:
        for ind in f.get_feature_names():
            if ind not in frame.columns:
                fl.append(f)
                break
    features = fl
    if not len(features):
        progress_callback(len(features) / float(self.num_features))
        return frame

    # handle where
    base_frame_empty = (
        base_frame.empty if isinstance(base_frame, pd.DataFrame) else False
    )
    where = test_feature.where
    if where is not None and not base_frame_empty:
        # keep only rows matching the boolean "where" feature
        base_frame = base_frame.loc[base_frame[where.get_name()]]

    # when no child data, just add all the features to frame with nan
    base_frame_empty = (
        base_frame.empty if isinstance(base_frame, pd.DataFrame) else False
    )
    if base_frame_empty:
        feature_values = []
        for f in features:
            feature_values.append((f, np.full(f.number_output_features, np.nan)))
            progress_callback(1 / float(self.num_features))
        frame = update_feature_columns(feature_values, frame)
    else:
        relationship_path = test_feature.relationship_path

        groupby_col = get_relationship_column_id(relationship_path)

        # if the use_previous property exists on this feature, include only the
        # instances from the child dataframe included in that Timedelta
        use_previous = test_feature.use_previous
        if use_previous:
            # Filter by use_previous values
            time_last = self.time_last
            if use_previous.has_no_observations():
                # absolute time window: keep rows within [time_last - window, time_last]
                time_first = time_last - use_previous
                ti = child_dataframe.ww.time_index
                if ti is not None:
                    base_frame = base_frame[base_frame[ti] >= time_first]
            else:
                # observation window: keep the last n rows per group
                n = use_previous.get_value("o")

                def last_n(df):
                    return df.iloc[-n:]

                base_frame = base_frame.groupby(
                    groupby_col,
                    observed=True,
                    sort=False,
                ).apply(last_n)

        to_agg = {}
        agg_rename = {}
        to_apply = set()
        # apply multi-column and time-dependent features as we find them, and
        # save aggregable features for later
        for f in features:
            if _can_agg(f):
                column_id = f.base_features[0].get_name()
                if column_id not in to_agg:
                    to_agg[column_id] = []
                if is_instance(base_frame, dd, "DataFrame"):
                    func = f.get_function(agg_type=Library.DASK)
                elif is_instance(base_frame, ps, "DataFrame"):
                    func = f.get_function(agg_type=Library.SPARK)
                else:
                    func = f.get_function()

                # for some reason, using the string count is significantly
                # faster than any method a primitive can return
                # https://stackoverflow.com/questions/55731149/use-a-function-instead-of-string-in-pandas-groupby-agg
                if func == pd.Series.count:
                    func = "count"

                funcname = func
                if callable(func):
                    # if the same function is being applied to the same
                    # column twice, wrap it in a partial to avoid
                    # duplicate functions
                    funcname = str(id(func))
                    if "{}-{}".format(column_id, funcname) in agg_rename:
                        func = partial(func)
                        funcname = str(id(func))

                    func.__name__ = funcname

                if dd and isinstance(func, dd.Aggregation):
                    # TODO: handle aggregation being applied to same column twice
                    # (see above partial wrapping of functions)
                    funcname = func.__name__

                to_agg[column_id].append(func)
                # this is used below to rename columns that pandas names for us
                agg_rename["{}-{}".format(column_id, funcname)] = f.get_name()
                continue

            to_apply.add(f)

        # Apply the non-aggregable functions generate a new dataframe, and merge
        # it with the existing one
        if len(to_apply):
            wrap = agg_wrapper(to_apply, self.time_last)
            # groupby_col can be both the name of the index and a column,
            # to silence pandas warning about ambiguity we explicitly pass
            # the column (in actuality grouping by both index and group would
            # work)
            to_merge = base_frame.groupby(
                base_frame[groupby_col],
                observed=True,
                sort=False,
            ).apply(wrap)
            frame = pd.merge(
                left=frame,
                right=to_merge,
                left_index=True,
                right_index=True,
                how="left",
            )

            progress_callback(len(to_apply) / float(self.num_features))

        # Apply the aggregate functions to generate a new dataframe, and merge
        # it with the existing one
        if len(to_agg):
            # groupby_col can be both the name of the index and a column,
            # to silence pandas warning about ambiguity we explicitly pass
            # the column (in actuality grouping by both index and group would
            # work)
            if is_instance(base_frame, (dd, ps), "DataFrame"):
                to_merge = base_frame.groupby(groupby_col).agg(to_agg)
            else:
                to_merge = base_frame.groupby(
                    base_frame[groupby_col],
                    observed=True,
                    sort=False,
                ).agg(to_agg)

            # rename columns to the correct feature names
            to_merge.columns = [agg_rename["-".join(x)] for x in to_merge.columns]
            to_merge = to_merge[list(agg_rename.values())]

            # Workaround for pandas bug where categories are in the wrong order
            # see: https://github.com/pandas-dev/pandas/issues/22501
            #
            # Pandas claims that bug is fixed but it still shows up in some
            # cases. More investigation needed.
            if pdtypes.is_categorical_dtype(frame.index):
                categories = pdtypes.CategoricalDtype(
                    categories=frame.index.categories,
                )
                to_merge.index = to_merge.index.astype(object).astype(categories)

            if is_instance(frame, (dd, ps), "DataFrame"):
                frame = frame.merge(
                    to_merge,
                    left_on=parent_merge_col,
                    right_index=True,
                    how="left",
                )
            else:
                frame = pd.merge(
                    left=frame,
                    right=to_merge,
                    left_index=True,
                    right_index=True,
                    how="left",
                )

            # determine number of features that were just merged
            progress_callback(len(to_merge.columns) / float(self.num_features))

    # Handle default values
    fillna_dict = {}
    for f in features:
        feature_defaults = {name: f.default_value for name in f.get_feature_names()}
        fillna_dict.update(feature_defaults)
    frame = frame.fillna(fillna_dict)

    return frame
def _necessary_columns(self, dataframe_name, feature_names):
    """Return the columns of ``dataframe_name`` needed for ``feature_names``.

    Index, foreign-key, and time-index columns are always kept because we
    don't know what forward relationships will come from this node; on top
    of those, the columns read directly by IdentityFeatures are included.
    """
    dataframe = self.entityset[dataframe_name]
    structural_tags = {"index", "foreign_key", "time_index"}
    tags = dataframe.ww.semantic_tags
    needed = {col for col in dataframe.columns if structural_tags & tags[col]}
    for name in feature_names:
        feature = self.feature_set.features_by_name[name]
        if isinstance(feature, IdentityFeature):
            needed.add(feature.column_name)
    return list(needed)
def _can_agg(feature):
    """Return True if ``feature`` can be computed via ``groupby().agg``.

    A feature is aggregable when its primitive does not depend on the
    calculation time, produces a single output column, and — after
    excluding any "where" column — operates on exactly one base feature.
    """
    assert isinstance(feature, AggregationFeature)
    primitive = feature.primitive
    if primitive.uses_calc_time:
        return False
    if primitive.number_output_features != 1:
        return False
    base_features = feature.base_features
    if feature.where is not None:
        where_name = feature.where.get_name()
        base_features = [
            bf.get_name() for bf in base_features if bf.get_name() != where_name
        ]
    return len(base_features) == 1
def agg_wrapper(feats, time_last):
    """Build a groupby-apply callable computing ``feats`` for one group.

    The returned function evaluates each feature's primitive on the group's
    base-feature columns (passing ``time=time_last`` to time-dependent
    primitives) and returns the results as a pandas Series keyed by
    feature name.
    """

    def wrap(df):
        computed = []
        for feature in feats:
            func = feature.get_function()
            args = [df[bf.get_name()] for bf in feature.base_features]
            if feature.primitive.uses_calc_time:
                values = func(*args, time=time_last)
            else:
                values = func(*args)
            # normalize single-output primitives to a one-element list
            if feature.number_output_features == 1:
                values = [values]
            computed.append((feature, values))
        return pd.Series(update_feature_columns(computed, {}))

    return wrap
def update_feature_columns(feature_data, data):
    """Add computed feature values to ``data`` under their feature names.

    ``feature_data`` is an iterable of ``(feature, values)`` pairs where
    ``values`` has one entry per output column of the feature. ``data`` may
    be a dict (updated in place and returned), a pandas DataFrame (a new
    DataFrame with the columns appended is returned), or a dask/spark
    DataFrame (columns are concatenated one at a time).
    """
    additions = {}
    for feature, values in feature_data:
        names = feature.get_feature_names()
        assert len(names) == len(values)
        additions.update(zip(names, values))

    # Handle the case where a dict is being updated
    if isinstance(data, dict):
        data.update(additions)
        return data

    # Handle pandas input
    if isinstance(data, pd.DataFrame):
        return pd.concat([data, pd.DataFrame(additions, index=data.index)], axis=1)

    # Handle dask/spark input
    for name, column in additions.items():
        column.name = name
        if is_instance(data, dd, "DataFrame"):
            data = dd.concat([data, column], axis=1)
        else:
            data = ps.concat([data, column], axis=1)
    return data
def strip_values_if_series(values):
    """Return the underlying array of a pandas Series; pass anything else through."""
    return values.values if isinstance(values, pd.Series) else values
# === featuretools/computational_backends/calculate_feature_matrix.py ===
import logging
import math
import os
import shutil
import time
import warnings
from datetime import datetime
import cloudpickle
import numpy as np
import pandas as pd
from woodwork.logical_types import (
Age,
AgeNullable,
Boolean,
BooleanNullable,
Integer,
IntegerNullable,
)
from featuretools.computational_backends.feature_set import FeatureSet
from featuretools.computational_backends.feature_set_calculator import (
FeatureSetCalculator,
)
from featuretools.computational_backends.utils import (
_check_cutoff_time_type,
_validate_cutoff_time,
bin_cutoff_times,
create_client_and_cluster,
gather_approximate_features,
gen_empty_approx_features_df,
get_ww_types_from_features,
save_csv_decorator,
)
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import AggregationFeature, FeatureBase
from featuretools.utils import Trie
from featuretools.utils.gen_utils import (
Library,
import_or_none,
import_or_raise,
is_instance,
make_tqdm_iterator,
)
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")
logger = logging.getLogger("featuretools.computational_backend")
PBAR_FORMAT = "Elapsed: {elapsed} | Progress: {l_bar}{bar}"
FEATURE_CALCULATION_PERCENTAGE = (
0.95 # make total 5% higher to allot time for wrapping up at end
)
def calculate_feature_matrix(
    features,
    entityset=None,
    cutoff_time=None,
    instance_ids=None,
    dataframes=None,
    relationships=None,
    cutoff_time_in_index=False,
    training_window=None,
    approximate=None,
    save_progress=None,
    verbose=False,
    chunk_size=None,
    n_jobs=1,
    dask_kwargs=None,
    progress_callback=None,
    include_cutoff_time=True,
):
    """Calculates a matrix for a given set of instance ids and calculation times.

    Args:
        features (list[:class:`.FeatureBase`]): Feature definitions to be calculated.

        entityset (EntitySet): An already initialized entityset. Required if `dataframes` and `relationships`
            not provided

        cutoff_time (pd.DataFrame or Datetime): Specifies times at which to calculate
            the features for each instance. The resulting feature matrix will use data
            up to and including the cutoff_time. Can either be a DataFrame or a single
            value. If a DataFrame is passed the instance ids for which to calculate features
            must be in a column with the same name as the target dataframe index or a column
            named `instance_id`. The cutoff time values in the DataFrame must be in a column with
            the same name as the target dataframe time index or a column named `time`. If the
            DataFrame has more than two columns, any additional columns will be added to the
            resulting feature matrix. If a single value is passed, this value will be used for
            all instances.

        instance_ids (list): List of instances to calculate features on. Only
            used if cutoff_time is a single datetime.

        dataframes (dict[str -> tuple(DataFrame, str, str, dict[str -> str/Woodwork.LogicalType], dict[str->str/set], boolean)]):
            Dictionary of DataFrames. Entries take the format
            {dataframe name -> (dataframe, index column, time_index, logical_types, semantic_tags, make_index)}.
            Note that only the dataframe is required. If a Woodwork DataFrame is supplied, any other parameters
            will be ignored.

        relationships (list[(str, str, str, str)]): list of relationships
            between dataframes. List items are a tuple with the format
            (parent dataframe name, parent column, child dataframe name, child column).

        cutoff_time_in_index (bool): If True, return a DataFrame with a MultiIndex
            where the second index is the cutoff time (first is instance id).
            DataFrame will be sorted by (time, instance_id).

        training_window (Timedelta or str, optional):
            Window defining how much time before the cutoff time data
            can be used when calculating features. If ``None``, all data before cutoff time is used.
            Defaults to ``None``.

        approximate (Timedelta or str): Frequency to group instances with similar
            cutoff times by for features with costly calculations. For example,
            if bucket is 24 hours, all instances with cutoff times on the same
            day will use the same calculation for expensive features.

        verbose (bool, optional): Print progress info. The time granularity is
            per chunk.

        chunk_size (int or float or None): maximum number of rows of
            output feature matrix to calculate at time. If passed an integer
            greater than 0, will try to use that many rows per chunk. If passed
            a float value between 0 and 1 sets the chunk size to that
            percentage of all rows. if None, and n_jobs > 1 it will be set to 1/n_jobs

        n_jobs (int, optional): number of parallel processes to use when
            calculating feature matrix. Requires Dask if not equal to 1.

        dask_kwargs (dict, optional): Dictionary of keyword arguments to be
            passed when creating the dask client and scheduler. Even if n_jobs
            is not set, using `dask_kwargs` will enable multiprocessing.
            Main parameters:

            cluster (str or dask.distributed.LocalCluster):
                cluster or address of cluster to send tasks to. If unspecified,
                a cluster will be created.

            diagnostics port (int):
                port number to use for web dashboard. If left unspecified, web
                interface will not be enabled.

            Valid keyword arguments for LocalCluster will also be accepted.

        save_progress (str, optional): path to save intermediate computational results.

        progress_callback (callable): function to be called with incremental progress updates.
            Has the following parameters:

                update: percentage change (float between 0 and 100) in progress since last call
                progress_percent: percentage (float between 0 and 100) of total computation completed
                time_elapsed: total time in seconds that has elapsed since start of call

        include_cutoff_time (bool): Include data at cutoff times in feature calculations. Defaults to ``True``.

    Returns:
        pd.DataFrame: The feature matrix.
    """
    assert (
        isinstance(features, list)
        and features != []
        and all([isinstance(feature, FeatureBase) for feature in features])
    ), "features must be a non-empty list of features"

    # handle loading entityset
    from featuretools.entityset.entityset import EntitySet

    if not isinstance(entityset, EntitySet):
        if dataframes is not None:
            entityset = EntitySet("entityset", dataframes, relationships)
        else:
            raise TypeError("No dataframes or valid EntitySet provided")

    # approximate and training_window are unsupported for Dask-backed entitysets
    if entityset.dataframe_type == Library.DASK:
        if approximate:
            msg = "Using approximate is not supported with Dask dataframes"
            raise ValueError(msg)
        if training_window:
            msg = "Using training_window is not supported with Dask dataframes"
            raise ValueError(msg)

    target_dataframe = entityset[features[0].dataframe_name]

    cutoff_time = _validate_cutoff_time(cutoff_time, target_dataframe)
    entityset._check_time_indexes()

    if isinstance(cutoff_time, pd.DataFrame):
        if instance_ids:
            msg = "Passing 'instance_ids' is valid only if 'cutoff_time' is a single value or None - ignoring"
            warnings.warn(msg)
        # extra cutoff_time columns are passed through to the feature matrix
        pass_columns = [
            col for col in cutoff_time.columns if col not in ["instance_id", "time"]
        ]
        # make sure dtype of instance_id in cutoff time
        # is same as column it references
        target_dataframe = features[0].dataframe
        ltype = target_dataframe.ww.logical_types[target_dataframe.ww.index]
        cutoff_time.ww.init(logical_types={"instance_id": ltype})
    else:
        pass_columns = []
        if cutoff_time is None:
            # default: "now" (or +inf for numeric time), i.e. use all data
            if entityset.time_type == "numeric":
                cutoff_time = np.inf
            else:
                cutoff_time = datetime.now()

        if instance_ids is None:
            # use every instance visible at the cutoff time
            index_col = target_dataframe.ww.index
            df = entityset._handle_time(
                dataframe_name=target_dataframe.ww.name,
                df=target_dataframe,
                time_last=cutoff_time,
                training_window=training_window,
                include_cutoff_time=include_cutoff_time,
            )
            instance_ids = df[index_col]

            if is_instance(instance_ids, dd, "Series"):
                instance_ids = instance_ids.compute()
            elif is_instance(instance_ids, ps, "Series"):
                instance_ids = instance_ids.to_pandas()

        # convert list or range object into series
        if not isinstance(instance_ids, pd.Series):
            instance_ids = pd.Series(instance_ids)

        # single cutoff times are carried as a (time, ids) tuple downstream
        cutoff_time = (cutoff_time, instance_ids)

    _check_cutoff_time_type(cutoff_time, entityset.time_type)

    # Approximate provides no benefit with a single cutoff time, so ignore it
    if isinstance(cutoff_time, tuple) and approximate is not None:
        msg = (
            "Using approximate with a single cutoff_time value or no cutoff_time "
            "provides no computational efficiency benefit"
        )
        warnings.warn(msg)
        cutoff_time = pd.DataFrame(
            {
                "instance_id": cutoff_time[1],
                "time": [cutoff_time[0]] * len(cutoff_time[1]),
            },
        )
        target_dataframe = features[0].dataframe
        ltype = target_dataframe.ww.logical_types[target_dataframe.ww.index]
        cutoff_time.ww.init(logical_types={"instance_id": ltype})

    feature_set = FeatureSet(features)

    # Get features to approximate
    if approximate is not None:
        approximate_feature_trie = gather_approximate_features(feature_set)
        # Make a new FeatureSet that ignores approximated features
        feature_set = FeatureSet(
            features,
            approximate_feature_trie=approximate_feature_trie,
        )

    # Check if there are any non-approximated aggregation features
    no_unapproximated_aggs = True
    for feature in features:
        if isinstance(feature, AggregationFeature):
            # do not need to check if feature is in to_approximate since
            # only base features of direct features can be in to_approximate
            no_unapproximated_aggs = False
            break

        if approximate is not None:
            all_approx_features = {
                f for _, feats in feature_set.approximate_feature_trie for f in feats
            }
        else:
            all_approx_features = set()
        deps = feature.get_dependencies(deep=True, ignored=all_approx_features)
        for dependency in deps:
            if isinstance(dependency, AggregationFeature):
                no_unapproximated_aggs = False
                break

    cutoff_df_time_col = "time"
    target_time = "_original_time"

    if approximate is not None:
        # If there are approximated aggs, bin times
        binned_cutoff_time = bin_cutoff_times(cutoff_time, approximate)

        # Think about collisions: what if original time is a feature
        binned_cutoff_time.ww[target_time] = cutoff_time[cutoff_df_time_col]

        cutoff_time_to_pass = binned_cutoff_time
    else:
        cutoff_time_to_pass = cutoff_time

    if isinstance(cutoff_time, pd.DataFrame):
        cutoff_time_len = cutoff_time.shape[0]
    else:
        cutoff_time_len = len(cutoff_time[1])

    chunk_size = _handle_chunk_size(chunk_size, cutoff_time_len)
    tqdm_options = {
        "total": (cutoff_time_len / FEATURE_CALCULATION_PERCENTAGE),
        "bar_format": PBAR_FORMAT,
        "disable": True,
    }

    if verbose:
        tqdm_options.update({"disable": False})
    elif progress_callback is not None:
        # allows us to utilize progress_bar updates without printing to anywhere
        tqdm_options.update({"file": open(os.devnull, "w"), "disable": False})

    with make_tqdm_iterator(**tqdm_options) as progress_bar:
        if n_jobs != 1 or dask_kwargs is not None:
            feature_matrix = parallel_calculate_chunks(
                cutoff_time=cutoff_time_to_pass,
                chunk_size=chunk_size,
                feature_set=feature_set,
                approximate=approximate,
                training_window=training_window,
                save_progress=save_progress,
                entityset=entityset,
                n_jobs=n_jobs,
                no_unapproximated_aggs=no_unapproximated_aggs,
                cutoff_df_time_col=cutoff_df_time_col,
                target_time=target_time,
                pass_columns=pass_columns,
                progress_bar=progress_bar,
                dask_kwargs=dask_kwargs or {},
                progress_callback=progress_callback,
                include_cutoff_time=include_cutoff_time,
            )
        else:
            feature_matrix = calculate_chunk(
                cutoff_time=cutoff_time_to_pass,
                chunk_size=chunk_size,
                feature_set=feature_set,
                approximate=approximate,
                training_window=training_window,
                save_progress=save_progress,
                entityset=entityset,
                no_unapproximated_aggs=no_unapproximated_aggs,
                cutoff_df_time_col=cutoff_df_time_col,
                target_time=target_time,
                pass_columns=pass_columns,
                progress_bar=progress_bar,
                progress_callback=progress_callback,
                include_cutoff_time=include_cutoff_time,
            )

        # ensure rows are sorted by input order
        if isinstance(feature_matrix, pd.DataFrame):
            if isinstance(cutoff_time, pd.DataFrame):
                feature_matrix = feature_matrix.ww.reindex(
                    pd.MultiIndex.from_frame(
                        cutoff_time[["instance_id", "time"]],
                        names=feature_matrix.index.names,
                    ),
                )
            else:
                # Maintain index dtype
                index_dtype = feature_matrix.index.get_level_values(0).dtype
                feature_matrix = feature_matrix.ww.reindex(
                    cutoff_time[1].astype(index_dtype),
                    level=0,
                )
            if not cutoff_time_in_index:
                feature_matrix.ww.reset_index(level="time", drop=True, inplace=True)

        if save_progress and os.path.exists(os.path.join(save_progress, "temp")):
            shutil.rmtree(os.path.join(save_progress, "temp"))

        # force to 100% since we saved last 5 percent
        previous_progress = progress_bar.n
        progress_bar.update(progress_bar.total - progress_bar.n)

        if progress_callback is not None:
            (
                update,
                progress_percent,
                time_elapsed,
            ) = update_progress_callback_parameters(progress_bar, previous_progress)
            progress_callback(update, progress_percent, time_elapsed)

        progress_bar.refresh()

    return feature_matrix
def calculate_chunk(
    cutoff_time,
    chunk_size,
    feature_set,
    entityset,
    approximate,
    training_window,
    save_progress,
    no_unapproximated_aggs,
    cutoff_df_time_col,
    target_time,
    pass_columns,
    progress_bar=None,
    progress_callback=None,
    include_cutoff_time=True,
    schema=None,
):
    """Calculate the feature matrix for one chunk of cutoff times.

    ``cutoff_time`` is either a ``(time, instance_ids)`` tuple (single cutoff
    time) or a DataFrame of per-instance cutoff times. The per-group results
    are collected and concatenated into a single Woodwork-initialized
    feature matrix.
    """
    if not isinstance(feature_set, FeatureSet):
        # workers receive the feature set pickled
        feature_set = cloudpickle.loads(feature_set)  # pragma: no cover

    feature_matrix = []
    if no_unapproximated_aggs and approximate is not None:
        # everything is approximated, so compute at a single group time
        if entityset.time_type == "numeric":
            group_time = np.inf
        else:
            group_time = datetime.now()

    if isinstance(cutoff_time, tuple):
        # single cutoff time for all instances
        update_progress_callback = None
        if progress_bar is not None:

            def update_progress_callback(done):
                previous_progress = progress_bar.n
                progress_bar.update(done * len(cutoff_time[1]))
                if progress_callback is not None:
                    (
                        update,
                        progress_percent,
                        time_elapsed,
                    ) = update_progress_callback_parameters(
                        progress_bar,
                        previous_progress,
                    )
                    progress_callback(update, progress_percent, time_elapsed)

        time_last = cutoff_time[0]
        ids = cutoff_time[1]
        calculator = FeatureSetCalculator(
            entityset,
            feature_set,
            time_last,
            training_window=training_window,
        )
        _feature_matrix = calculator.run(
            ids,
            progress_callback=update_progress_callback,
            include_cutoff_time=include_cutoff_time,
        )
        if isinstance(_feature_matrix, pd.DataFrame):
            time_index = pd.Index([time_last] * len(ids), name="time")
            _feature_matrix = _feature_matrix.set_index(time_index, append=True)
        feature_matrix.append(_feature_matrix)

    else:
        if schema:
            # re-attach Woodwork typing lost during serialization to a worker
            cutoff_time.ww.init_with_full_schema(schema=schema)  # pragma: no cover
        for _, group in cutoff_time.groupby(cutoff_df_time_col):
            # if approximating, calculate the approximate features
            if approximate is not None:
                group.ww.init(schema=cutoff_time.ww.schema, validate=False)
                precalculated_features_trie = approximate_features(
                    feature_set,
                    group,
                    window=approximate,
                    entityset=entityset,
                    training_window=training_window,
                    include_cutoff_time=include_cutoff_time,
                )
            else:
                precalculated_features_trie = None

            @save_csv_decorator(save_progress)
            def calc_results(
                time_last,
                ids,
                precalculated_features=None,
                training_window=None,
                include_cutoff_time=True,
            ):
                # Compute features for `ids` at `time_last`, optionally
                # saving the result to disk via save_csv_decorator.
                update_progress_callback = None

                if progress_bar is not None:

                    def update_progress_callback(done):
                        previous_progress = progress_bar.n
                        progress_bar.update(done * group.shape[0])
                        if progress_callback is not None:
                            (
                                update,
                                progress_percent,
                                time_elapsed,
                            ) = update_progress_callback_parameters(
                                progress_bar,
                                previous_progress,
                            )
                            progress_callback(update, progress_percent, time_elapsed)

                calculator = FeatureSetCalculator(
                    entityset,
                    feature_set,
                    time_last,
                    training_window=training_window,
                    precalculated_features=precalculated_features,
                )
                matrix = calculator.run(
                    ids,
                    progress_callback=update_progress_callback,
                    include_cutoff_time=include_cutoff_time,
                )
                return matrix

            # if all aggregations have been approximated, can calculate all together
            if no_unapproximated_aggs and approximate is not None:
                inner_grouped = [[group_time, group]]
            else:
                # if approximated features, set cutoff_time to unbinned time
                if precalculated_features_trie is not None:
                    group[cutoff_df_time_col] = group[target_time]

                inner_grouped = group.groupby(cutoff_df_time_col, sort=True)

            if chunk_size is not None:
                inner_grouped = _chunk_dataframe_groups(inner_grouped, chunk_size)

            for time_last, group in inner_grouped:
                # sort group by instance id
                ids = group["instance_id"].sort_values().values
                if no_unapproximated_aggs and approximate is not None:
                    window = None
                else:
                    window = training_window

                # calculate values for those instances at time time_last
                _feature_matrix = calc_results(
                    time_last,
                    ids,
                    precalculated_features=precalculated_features_trie,
                    training_window=window,
                    include_cutoff_time=include_cutoff_time,
                )

                if is_instance(_feature_matrix, (dd, ps), "DataFrame"):
                    id_name = _feature_matrix.columns[-1]
                else:
                    id_name = _feature_matrix.index.name

                # if approximate, merge feature matrix with group frame to get original
                # cutoff times and passed columns
                if approximate:
                    cols = [c for c in _feature_matrix.columns if c not in pass_columns]
                    indexer = group[["instance_id", target_time] + pass_columns]
                    _feature_matrix = _feature_matrix[cols].merge(
                        indexer,
                        right_on=["instance_id"],
                        left_index=True,
                        how="right",
                    )
                    _feature_matrix.set_index(
                        ["instance_id", target_time],
                        inplace=True,
                    )
                    _feature_matrix.index.set_names([id_name, "time"], inplace=True)
                    _feature_matrix.sort_index(level=1, kind="mergesort", inplace=True)
                else:
                    # all rows have same cutoff time. set time and add passed columns
                    num_rows = len(ids)
                    if len(pass_columns) > 0:
                        pass_through = group[
                            ["instance_id", cutoff_df_time_col] + pass_columns
                        ]
                        pass_through.rename(
                            columns={
                                "instance_id": id_name,
                                cutoff_df_time_col: "time",
                            },
                            inplace=True,
                        )
                    if isinstance(_feature_matrix, pd.DataFrame):
                        time_index = pd.Index([time_last] * num_rows, name="time")
                        _feature_matrix = _feature_matrix.set_index(
                            time_index,
                            append=True,
                        )
                        if len(pass_columns) > 0:
                            pass_through.set_index([id_name, "time"], inplace=True)
                            for col in pass_columns:
                                _feature_matrix[col] = pass_through[col]
                    elif is_instance(_feature_matrix, dd, "DataFrame") and (
                        len(pass_columns) > 0
                    ):
                        _feature_matrix["time"] = time_last
                        for col in pass_columns:
                            pass_df = dd.from_pandas(
                                pass_through[[id_name, "time", col]],
                                npartitions=_feature_matrix.npartitions,
                            )
                            _feature_matrix = _feature_matrix.merge(
                                pass_df,
                                how="outer",
                            )
                        _feature_matrix = _feature_matrix.drop(columns=["time"])
                    elif is_instance(_feature_matrix, ps, "DataFrame") and (
                        len(pass_columns) > 0
                    ):
                        _feature_matrix["time"] = time_last
                        for col in pass_columns:
                            pass_df = ps.from_pandas(
                                pass_through[[id_name, "time", col]],
                            )
                            _feature_matrix = _feature_matrix.merge(
                                pass_df,
                                how="outer",
                            )
                        _feature_matrix = _feature_matrix.drop(columns=["time"])
                feature_matrix.append(_feature_matrix)

    ww_init_kwargs = get_ww_types_from_features(
        feature_set.target_features,
        entityset,
        pass_columns,
        cutoff_time,
    )
    feature_matrix = init_ww_and_concat_fm(feature_matrix, ww_init_kwargs)
    return feature_matrix
def approximate_features(
    feature_set,
    cutoff_time,
    window,
    entityset,
    training_window=None,
    include_cutoff_time=True,
):
    """Given a set of features and cutoff_times to be passed to
    calculate_feature_matrix, calculates approximate values of some features
    to speed up calculations.  Cutoff times are sorted into
    window-sized buckets and the approximate feature values are only calculated
    at one cutoff time for each bucket.


    ..note:: this only approximates DirectFeatures of AggregationFeatures, on
        the target dataframe. In future versions, it may also be possible to
        approximate these features on other top-level dataframes

    Args:
        cutoff_time (pd.DataFrame): specifies what time to calculate
            the features for each instance at. The resulting feature matrix will use data
            up to and including the cutoff_time. A DataFrame with
            'instance_id' and 'time' columns.

        window (Timedelta or str): frequency to group instances with similar
            cutoff times by for features with costly calculations. For example,
            if bucket is 24 hours, all instances with cutoff times on the same
            day will use the same calculation for expensive features.

        entityset (:class:`.EntitySet`): An already initialized entityset.

        feature_set (:class:`.FeatureSet`): The features to be calculated.

        training_window (`Timedelta`, optional):
            Window defining how much older than the cutoff time data
            can be to be included when calculating the feature.  If None, all older data is used.

        include_cutoff_time (bool):
            If True, data at cutoff times are included in feature calculations.

    Returns:
        Trie[RelationshipPath -> pd.DataFrame]: approximate feature matrices
        keyed by the relationship path they were computed along.
    """
    approx_fms_trie = Trie(path_constructor=RelationshipPath)

    target_time_colname = "target_time"
    # preserve the original (unbinned) cutoff times before binning
    cutoff_time.ww[target_time_colname] = cutoff_time["time"]
    approx_cutoffs = bin_cutoff_times(cutoff_time, window)
    cutoff_df_time_col = "time"
    cutoff_df_instance_col = "instance_id"
    # should this order be by dependencies so that calculate_feature_matrix
    # doesn't skip approximating something?
    for relationship_path, approx_feature_names in feature_set.approximate_feature_trie:
        if not approx_feature_names:
            continue

        (
            cutoffs_with_approx_e_ids,
            new_approx_dataframe_index_col,
        ) = _add_approx_dataframe_index_col(
            entityset,
            feature_set.target_df_name,
            approx_cutoffs.copy(),
            relationship_path,
        )

        # Select only columns we care about
        columns_we_want = [
            new_approx_dataframe_index_col,
            cutoff_df_time_col,
            target_time_colname,
        ]

        cutoffs_with_approx_e_ids = cutoffs_with_approx_e_ids[columns_we_want]
        cutoffs_with_approx_e_ids = cutoffs_with_approx_e_ids.drop_duplicates()
        cutoffs_with_approx_e_ids.dropna(
            subset=[new_approx_dataframe_index_col],
            inplace=True,
        )

        approx_features = [
            feature_set.features_by_name[name] for name in approx_feature_names
        ]
        if cutoffs_with_approx_e_ids.empty:
            approx_fm = gen_empty_approx_features_df(approx_features)
        else:
            cutoffs_with_approx_e_ids.sort_values(
                [cutoff_df_time_col, new_approx_dataframe_index_col],
                inplace=True,
            )
            # CFM assumes specific column names for cutoff_time argument
            rename = {new_approx_dataframe_index_col: cutoff_df_instance_col}
            cutoff_time_to_pass = cutoffs_with_approx_e_ids.rename(columns=rename)
            cutoff_time_to_pass = cutoff_time_to_pass[
                [cutoff_df_instance_col, cutoff_df_time_col]
            ]

            cutoff_time_to_pass.drop_duplicates(inplace=True)
            # recurse into calculate_feature_matrix with approximation disabled
            approx_fm = calculate_feature_matrix(
                approx_features,
                entityset,
                cutoff_time=cutoff_time_to_pass,
                training_window=training_window,
                approximate=None,
                cutoff_time_in_index=False,
                chunk_size=cutoff_time_to_pass.shape[0],
                include_cutoff_time=include_cutoff_time,
            )

        approx_fms_trie.get_node(relationship_path).value = approx_fm

    return approx_fms_trie
def scatter_warning(num_scattered_workers, num_workers):
    """Log a warning when the EntitySet reached only a subset of the workers."""
    if num_scattered_workers == num_workers:
        return
    message = "EntitySet was only scattered to {} out of {} workers"
    logger.warning(message.format(num_scattered_workers, num_workers))
def parallel_calculate_chunks(
    cutoff_time,
    chunk_size,
    feature_set,
    approximate,
    training_window,
    save_progress,
    entityset,
    n_jobs,
    no_unapproximated_aggs,
    cutoff_df_time_col,
    target_time,
    pass_columns,
    progress_bar,
    dask_kwargs=None,
    progress_callback=None,
    include_cutoff_time=True,
):
    """Calculate feature matrix chunks in parallel on a Dask cluster.

    Scatters the EntitySet (reusing a published dataset when one with the same
    token already exists on the cluster) and the cloudpickled feature set to
    the workers, maps ``calculate_chunk`` over chunks of ``cutoff_time``,
    gathers partial feature matrices as they complete (driving ``progress_bar``
    and ``progress_callback``), and finally concatenates them with Woodwork
    typing re-applied.

    Args:
        cutoff_time (pd.DataFrame or tuple): Cutoff times to compute at; a
            dataframe is grouped by ``cutoff_df_time_col``, otherwise a
            (key, values) tuple is assumed.
        dask_kwargs (dict, optional): Options for cluster creation; may contain
            an existing "cluster", in which case it is not closed here.

    Returns:
        DataFrame: The concatenated feature matrix.

    Raises:
        ImportError: If ``distributed`` is not installed.
    """
    import_or_raise(
        "distributed",
        "Dask must be installed to calculate feature matrix with n_jobs set to anything but 1",
    )
    from dask.base import tokenize
    from distributed import Future, as_completed

    # BUG FIX: normalize the None default so the membership tests below (and
    # inside create_client_and_cluster) cannot raise
    # ``TypeError: argument of type 'NoneType' is not iterable`` -- notably in
    # the finally clause, where it would mask the original exception.
    if dask_kwargs is None:
        dask_kwargs = {}
    client = None
    cluster = None
    try:
        client, cluster = create_client_and_cluster(
            n_jobs=n_jobs,
            dask_kwargs=dask_kwargs,
            entityset_size=entityset.__sizeof__(),
        )
        # scatter the entityset
        # denote future with leading underscore
        start = time.time()
        es_token = "EntitySet-{}".format(tokenize(entityset))
        if es_token in client.list_datasets():
            msg = "Using EntitySet persisted on the cluster as dataset {}"
            progress_bar.write(msg.format(es_token))
            _es = client.get_dataset(es_token)
        else:
            _es = client.scatter([entityset])[0]
            client.publish_dataset(**{_es.key: _es})
        # save features to a tempfile and scatter it
        pickled_feats = cloudpickle.dumps(feature_set)
        _saved_features = client.scatter(pickled_feats)
        client.replicate([_es, _saved_features])
        num_scattered_workers = len(
            client.who_has([Future(es_token)]).get(es_token, []),
        )
        num_workers = len(client.scheduler_info()["workers"].values())
        schema = None
        if isinstance(cutoff_time, pd.DataFrame):
            schema = cutoff_time.ww.schema
            chunks = cutoff_time.groupby(cutoff_df_time_col)
            cutoff_time_len = cutoff_time.shape[0]
        else:
            chunks = cutoff_time
            cutoff_time_len = len(cutoff_time[1])
        if not chunk_size:
            # Default to one chunk per worker.
            chunk_size = _handle_chunk_size(1.0 / num_workers, cutoff_time_len)
        chunks = _chunk_dataframe_groups(chunks, chunk_size)
        chunks = [df for _, df in chunks]
        if len(chunks) < num_workers:  # pragma: no cover
            chunk_warning = (
                "Fewer chunks ({}), than workers ({}) consider reducing the chunk size"
            )
            warning_string = chunk_warning.format(len(chunks), num_workers)
            progress_bar.write(warning_string)
        scatter_warning(num_scattered_workers, num_workers)
        end = time.time()
        scatter_time = round(end - start)
        # if enabled, reset timer after scatter for better time remaining estimates
        if not progress_bar.disable:
            progress_bar.reset()
        scatter_string = "EntitySet scattered to {} workers in {} seconds"
        progress_bar.write(scatter_string.format(num_scattered_workers, scatter_time))
        # map chunks
        # TODO: consider handling task submission dask kwargs
        _chunks = client.map(
            calculate_chunk,
            chunks,
            feature_set=_saved_features,
            chunk_size=None,
            entityset=_es,
            approximate=approximate,
            training_window=training_window,
            save_progress=save_progress,
            no_unapproximated_aggs=no_unapproximated_aggs,
            cutoff_df_time_col=cutoff_df_time_col,
            target_time=target_time,
            pass_columns=pass_columns,
            progress_bar=None,
            progress_callback=progress_callback,
            include_cutoff_time=include_cutoff_time,
            schema=schema,
        )
        feature_matrix = []
        # Gather results as soon as they complete rather than in submit order.
        iterator = as_completed(_chunks).batches()
        for batch in iterator:
            results = client.gather(batch)
            for result in results:
                feature_matrix.append(result)
                previous_progress = progress_bar.n
                progress_bar.update(result.shape[0])
                if progress_callback is not None:
                    (
                        update,
                        progress_percent,
                        time_elapsed,
                    ) = update_progress_callback_parameters(
                        progress_bar,
                        previous_progress,
                    )
                    progress_callback(update, progress_percent, time_elapsed)
    finally:
        # Close the client always; close the cluster only if we created it
        # ourselves (a user-supplied "cluster" in dask_kwargs stays alive).
        if client is not None:
            client.close()
        if "cluster" not in dask_kwargs and cluster is not None:
            cluster.close()  # pragma: no cover
    ww_init_kwargs = get_ww_types_from_features(
        feature_set.target_features,
        entityset,
        pass_columns,
        cutoff_time,
    )
    feature_matrix = init_ww_and_concat_fm(feature_matrix, ww_init_kwargs)
    return feature_matrix
def _add_approx_dataframe_index_col(es, target_dataframe_name, cutoffs, path):
    """
    Add a column to the cutoff df linking it to the dataframe at the end of the
    path.
    Return the updated cutoff df and the name of this column. The name will
    consist of the columns which were joined through.
    """
    # NOTE(review): assumes ``path`` is non-empty -- with an empty path,
    # ``new_col_name`` would be unbound at the return statement. Confirm
    # callers never pass an empty relationship path.
    last_child_col = "instance_id"
    last_parent_col = es[target_dataframe_name].ww.index
    for _, relationship in path:
        # Pull only the join key and the child column from the child dataframe.
        child_cols = [last_parent_col, relationship._child_column_name]
        child_df = es[relationship.child_name][child_cols]
        # Rename relationship.child_column to include the columns we have
        # joined through.
        new_col_name = "%s.%s" % (last_child_col, relationship._child_column_name)
        to_rename = {relationship._child_column_name: new_col_name}
        child_df = child_df.rename(columns=to_rename)
        # Join the cutoff rows to this relationship's child dataframe,
        # carrying the renamed key column forward.
        cutoffs = cutoffs.merge(
            child_df,
            left_on=last_child_col,
            right_on=last_parent_col,
        )
        # These will be used in the next iteration.
        last_child_col = new_col_name
        last_parent_col = relationship._parent_column_name
    return cutoffs, new_col_name
def _chunk_dataframe_groups(grouped, chunk_size):
"""chunks a grouped dataframe into groups no larger than chunk_size"""
if isinstance(grouped, tuple):
for i in range(0, len(grouped[1]), chunk_size):
yield None, (grouped[0], grouped[1].iloc[i : i + chunk_size])
else:
for group_key, group_df in grouped:
for i in range(0, len(group_df), chunk_size):
yield group_key, group_df.iloc[i : i + chunk_size]
def _handle_chunk_size(chunk_size, total_size):
if chunk_size is not None:
assert chunk_size > 0, "Chunk size must be greater than 0"
if chunk_size < 1:
chunk_size = math.ceil(chunk_size * total_size)
chunk_size = int(chunk_size)
return chunk_size
def update_progress_callback_parameters(progress_bar, previous_progress):
    """Build the (delta %, total %, elapsed seconds) triple for a progress callback.

    ``progress_bar`` is a tqdm-style object exposing ``n``, ``total`` and
    ``format_dict``; ``previous_progress`` is the bar's ``n`` value before
    the most recent update.
    """
    completed = progress_bar.n
    total = progress_bar.total
    delta_percent = (completed - previous_progress) / total * 100
    total_percent = (completed / total) * 100
    elapsed = progress_bar.format_dict["elapsed"]
    return (delta_percent, total_percent, elapsed)
def init_ww_and_concat_fm(feature_matrix, ww_init_kwargs):
    """Initialize Woodwork typing on each partial feature matrix and concatenate.

    Columns typed as Age, Boolean, or Integer are upgraded to their nullable
    counterparts when nulls are present (pandas) or unconditionally for
    Dask/Spark frames, so the shared ``ww_init_kwargs`` stays valid for every
    partial frame and for the concatenated result.
    """
    cols_to_check = {
        col
        for col, ltype in ww_init_kwargs["logical_types"].items()
        if isinstance(ltype, (Age, Boolean, Integer))
    }
    # Nullable replacements keyed by the non-nullable type's type_string.
    replacement_type = {
        "age": AgeNullable(),
        "boolean": BooleanNullable(),
        "integer": IntegerNullable(),
    }
    for fm in feature_matrix:
        updated_cols = set()
        for col in cols_to_check:
            # Only convert types for pandas if null values are present
            # Always convert for Dask/Spark to avoid pulling data into memory for null check
            is_pandas_df_with_null = (
                isinstance(fm, pd.DataFrame) and fm[col].isnull().any()
            )
            is_dask_df = is_instance(fm, dd, "DataFrame")
            is_spark_df = is_instance(fm, ps, "DataFrame")
            if is_pandas_df_with_null or is_dask_df or is_spark_df:
                current_type = ww_init_kwargs["logical_types"][col].type_string
                ww_init_kwargs["logical_types"][col] = replacement_type[current_type]
                updated_cols.add(col)
        # A column upgraded once never needs re-checking for later frames.
        cols_to_check = cols_to_check - updated_cols
        fm.ww.init(**ww_init_kwargs)
    if any(is_instance(fm, dd, "DataFrame") for fm in feature_matrix):
        feature_matrix = dd.concat(feature_matrix)
    elif any(is_instance(fm, ps, "DataFrame") for fm in feature_matrix):
        feature_matrix = ps.concat(feature_matrix)
    else:
        feature_matrix = pd.concat(feature_matrix)
    # Re-apply typing: concat drops the Woodwork schema.
    feature_matrix.ww.init(**ww_init_kwargs)
    return feature_matrix
| 39,008 | 38.244467 | 129 | py |
featuretools | featuretools-main/featuretools/computational_backends/utils.py | import logging
import os
import typing
import warnings
from datetime import datetime
from functools import wraps
import numpy as np
import pandas as pd
import psutil
from woodwork.logical_types import Datetime, Double
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import AggregationFeature, DirectFeature
from featuretools.utils import Trie
from featuretools.utils.gen_utils import Library, import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _check_timedelta
dd = import_or_none("dask.dataframe")
logger = logging.getLogger("featuretools.computational_backend")
def bin_cutoff_times(cutoff_time, bin_size):
    """Round the "time" column of a cutoff-time dataframe down into bins.

    Args:
        cutoff_time (DataFrame): Woodwork-initialized cutoff-time dataframe
            with a "time" column.
        bin_size (int or Timedelta-convertible): Size of each bin. An int is
            used for numeric time indexes; anything else is converted via
            ``_check_timedelta`` and applied with ``datetime_round``.

    Returns:
        DataFrame: A copy of ``cutoff_time`` with "time" floored to bin edges.
    """
    binned_cutoff_time = cutoff_time.ww.copy()
    if type(bin_size) == int:
        # Floor-divide so each numeric time maps to the start of its bin.
        # (The previous ``x / bin_size * bin_size`` was a Python 2 idiom; under
        # Python 3 true division it was an identity and performed no binning.)
        binned_cutoff_time["time"] = binned_cutoff_time["time"].apply(
            lambda x: x // bin_size * bin_size,
        )
    else:
        bin_size = _check_timedelta(bin_size)
        binned_cutoff_time["time"] = datetime_round(
            binned_cutoff_time["time"],
            bin_size,
        )
    return binned_cutoff_time
def save_csv_decorator(save_progress=None):
    """Return a decorator that optionally persists a method's result to CSV.

    When ``save_progress`` is a directory path, the wrapped call's return
    value (a dataframe) is written to a timestamped "ft_*.csv" file inside a
    "temp" subdirectory and then renamed into ``save_progress`` so partially
    written files are never visible. When ``save_progress`` is None the
    wrapped method runs untouched.
    """
    def inner_decorator(method):
        @wraps(method)
        def wrapped(*args, **kwargs):
            if save_progress is None:
                return method(*args, **kwargs)
            # args[0] is the chunk's cutoff timestamp; it names the file.
            cutoff = args[0].to_pydatetime()
            file_name = "ft_" + cutoff.strftime("%Y_%m_%d_%I-%M-%S-%f") + ".csv"
            file_path = os.path.join(save_progress, file_name)
            temp_dir = os.path.join(save_progress, "temp")
            if not os.path.exists(temp_dir):
                os.makedirs(temp_dir)
            temp_file_path = os.path.join(temp_dir, file_name)
            result = method(*args, **kwargs)
            result.to_csv(temp_file_path)
            os.rename(temp_file_path, file_path)
            return result
        return wrapped
    return inner_decorator
def datetime_round(dt, freq):
    """
    round down Timestamp series to a specified freq

    Args:
        dt (pd.Series): Series of Timestamps (floored through the ``.dt``
            accessor).
        freq (Timedelta): featuretools Timedelta with a single absolute unit.

    Returns:
        pd.Series: ``dt`` floored to multiples of ``freq``.

    Raises:
        ValueError: If ``freq`` is relative or has multiple temporal units.
    """
    if not freq.is_absolute():
        raise ValueError("Unit is relative")
    # TODO: multitemporal units
    all_units = list(freq.times.keys())
    if len(all_units) != 1:
        # Previously this branch ran ``assert "<message>"`` -- an assert on a
        # truthy string literal which always passed, silently falling through
        # and returning None. Raise explicitly instead.
        raise ValueError("Frequency cannot have multiple temporal parameters")
    unit = all_units[0]
    value = freq.times[unit]
    # pandas frequency aliases: "m" means months there, so minutes become "t".
    if unit == "m":
        unit = "t"
    # No support for weeks in datetime.datetime
    if unit == "w":
        unit = "d"
        value = value * 7
    freq = str(value) + unit
    return dt.dt.floor(freq)
def gather_approximate_features(feature_set):
    """
    Find features which can be approximated. Returned as a trie where the values
    are sets of feature names.
    Args:
        feature_set (FeatureSet): Features to search the dependencies of for
            features to approximate.
    Returns:
        Trie[RelationshipPath, set[str]]
    """
    approximate_feature_trie = Trie(default=set, path_constructor=RelationshipPath)
    for feature in feature_set.target_features:
        # Features whose (dependents') primitives need the whole dataframe
        # cannot be approximated safely.
        if feature_set.uses_full_dataframe(feature, check_dependents=True):
            continue
        if isinstance(feature, DirectFeature):
            path = feature.relationship_path
            base_feature = feature.base_features[0]
            # Follow chains of direct features, accumulating the full
            # relationship path down to the underlying base feature.
            while isinstance(base_feature, DirectFeature):
                path = path + base_feature.relationship_path
                base_feature = base_feature.base_features[0]
            # Only aggregations reached through direct features are candidates.
            if isinstance(base_feature, AggregationFeature):
                node_feature_set = approximate_feature_trie.get_node(path).value
                node_feature_set.add(base_feature.unique_name())
    return approximate_feature_trie
def gen_empty_approx_features_df(approx_features):
    """Build an empty feature matrix frame for the given approximated features.

    The result has one column per feature name and an index named after the
    (shared) Woodwork index of the features' dataframe.
    """
    column_names = [feature.get_name() for feature in approx_features]
    empty_fm = pd.DataFrame(columns=column_names)
    empty_fm.index.name = approx_features[0].dataframe.ww.index
    return empty_fm
def n_jobs_to_workers(n_jobs):
    """Translate an sklearn-style ``n_jobs`` value into a worker count.

    Negative values count back from the number of available CPUs (so ``-1``
    means "all CPUs"); positive values are capped at that number.
    """
    try:
        cpus = len(psutil.Process().cpu_affinity())
    except AttributeError:
        # cpu_affinity is unavailable on some platforms (e.g. macOS).
        cpus = psutil.cpu_count()
    # Taken from sklearn parallel_backends code
    # https://github.com/scikit-learn/scikit-learn/blob/27bbdb570bac062c71b3bb21b0876fd78adc9f7e/sklearn/externals/joblib/_parallel_backends.py#L120
    workers = max(cpus + 1 + n_jobs, 1) if n_jobs < 0 else min(n_jobs, cpus)
    assert workers > 0, "Need at least one worker"
    return workers
def create_client_and_cluster(n_jobs, dask_kwargs, entityset_size):
    """Create (or reuse) a Dask cluster and a connected client.

    Args:
        n_jobs (int): Desired number of workers (sklearn-style; negative
            counts back from the CPU count).
        dask_kwargs (dict): May supply an existing "cluster"; otherwise the
            remaining keys (plus "diagnostics_port" / "memory_limit", which
            are popped here) are forwarded to ``LocalCluster``.
        entityset_size (int): Size in bytes of the EntitySet, used to sanity
            check per-worker memory limits.

    Returns:
        tuple: (client, cluster)

    Raises:
        ValueError: If any worker's memory limit is below ``entityset_size``.
    """
    Client, LocalCluster = get_client_cluster()
    cluster = None
    if "cluster" in dask_kwargs:
        cluster = dask_kwargs["cluster"]
    else:
        # diagnostics_port sets the default port to launch bokeh web interface
        # if it is set to None web interface will not be launched
        diagnostics_port = None
        if "diagnostics_port" in dask_kwargs:
            diagnostics_port = dask_kwargs["diagnostics_port"]
            del dask_kwargs["diagnostics_port"]
        workers = n_jobs_to_workers(n_jobs)
        if n_jobs != -1 and workers < n_jobs:
            warning_string = "{} workers requested, but only {} workers created."
            warning_string = warning_string.format(n_jobs, workers)
            warnings.warn(warning_string)
        # Distributed default memory_limit for worker is 'auto'. It calculates worker
        # memory limit as total virtual memory divided by the number
        # of cores available to the workers (alwasy 1 for featuretools setup).
        # This means reducing the number of workers does not increase the memory
        # limit for other workers. Featuretools default is to calculate memory limit
        # as total virtual memory divided by number of workers. To use distributed
        # default memory limit, set dask_kwargs['memory_limit']='auto'
        if "memory_limit" in dask_kwargs:
            memory_limit = dask_kwargs["memory_limit"]
            del dask_kwargs["memory_limit"]
        else:
            total_memory = psutil.virtual_memory().total
            memory_limit = int(total_memory / float(workers))
        cluster = LocalCluster(
            n_workers=workers,
            threads_per_worker=1,
            diagnostics_port=diagnostics_port,
            memory_limit=memory_limit,
            **dask_kwargs,
        )
        # if cluster has bokeh port, notify user if unexpected port number
        if diagnostics_port is not None:
            if hasattr(cluster, "scheduler") and cluster.scheduler:
                info = cluster.scheduler.identity()
                if "bokeh" in info["services"]:
                    msg = "Dashboard started on port {}"
                    print(msg.format(info["services"]["bokeh"]))
    client = Client(cluster)
    # Warn (once) when worker memory is tight, fail when it cannot hold the
    # EntitySet at all.
    warned_of_memory = False
    for worker in list(client.scheduler_info()["workers"].values()):
        worker_limit = worker["memory_limit"]
        if worker_limit < entityset_size:
            raise ValueError("Insufficient memory to use this many workers")
        elif worker_limit < 2 * entityset_size and not warned_of_memory:
            logger.warning(
                "Worker memory is between 1 to 2 times the memory"
                " size of the EntitySet. If errors occur that do"
                " not occur with n_jobs equals 1, this may be the "
                "cause. See https://featuretools.alteryx.com/en/stable/guides/performance.html#parallel-feature-computation"
                " for more information.",
            )
            warned_of_memory = True
    return client, cluster
def get_client_cluster():
    """
    Separated out the imports to make it easier to mock during testing
    """
    distributed = import_or_none("distributed")
    return distributed.Client, distributed.LocalCluster
# Type alias for acceptable ``cutoff_time`` arguments; includes Dask
# dataframes only when dask.dataframe is importable.
if dd:
    CutoffTimeType = typing.Union[dd.DataFrame, pd.DataFrame, str, datetime]
else:
    CutoffTimeType = typing.Union[pd.DataFrame, str, datetime]
def _validate_cutoff_time(
    cutoff_time: CutoffTimeType,
    target_dataframe,
):
    """
    Verify that the cutoff time is a single value or a pandas dataframe with the proper columns
    containing no duplicate rows

    Returns the (possibly normalized) cutoff_time: Dask frames are computed to
    pandas, dataframe columns are renamed to "instance_id"/"time", and string
    values are parsed to datetimes.
    """
    if is_instance(cutoff_time, dd, "DataFrame"):
        msg = (
            "cutoff_time should be a Pandas DataFrame: "
            "computing cutoff_time, this may take a while"
        )
        warnings.warn(msg)
        cutoff_time = cutoff_time.compute()
    if isinstance(cutoff_time, pd.DataFrame):
        cutoff_time = cutoff_time.reset_index(drop=True)
        if "instance_id" not in cutoff_time.columns:
            if target_dataframe.ww.index not in cutoff_time.columns:
                raise AttributeError(
                    "Cutoff time DataFrame must contain a column with either the same name"
                    ' as the target dataframe index or a column named "instance_id"',
                )
            # rename to instance_id
            cutoff_time.rename(
                columns={target_dataframe.ww.index: "instance_id"},
                inplace=True,
            )
        if "time" not in cutoff_time.columns:
            if (
                target_dataframe.ww.time_index
                and target_dataframe.ww.time_index not in cutoff_time.columns
            ):
                raise AttributeError(
                    "Cutoff time DataFrame must contain a column with either the same name"
                    ' as the target dataframe time_index or a column named "time"',
                )
            # rename to time
            cutoff_time.rename(
                columns={target_dataframe.ww.time_index: "time"},
                inplace=True,
            )
        # Make sure user supplies only one valid name for instance id and time columns
        if (
            "instance_id" in cutoff_time.columns
            and target_dataframe.ww.index in cutoff_time.columns
            and "instance_id" != target_dataframe.ww.index
        ):
            raise AttributeError(
                'Cutoff time DataFrame cannot contain both a column named "instance_id" and a column'
                " with the same name as the target dataframe index",
            )
        if (
            "time" in cutoff_time.columns
            and target_dataframe.ww.time_index in cutoff_time.columns
            and "time" != target_dataframe.ww.time_index
        ):
            raise AttributeError(
                'Cutoff time DataFrame cannot contain both a column named "time" and a column'
                " with the same name as the target dataframe time index",
            )
        # Each (instance, time) pair may appear at most once.
        assert (
            cutoff_time[["instance_id", "time"]].duplicated().sum() == 0
        ), "Duplicated rows in cutoff time dataframe."
    if isinstance(cutoff_time, str):
        try:
            cutoff_time = pd.to_datetime(cutoff_time)
        except ValueError as e:
            raise ValueError(f"While parsing cutoff_time: {str(e)}")
        except OverflowError as e:
            raise OverflowError(f"While parsing cutoff_time: {str(e)}")
    else:
        # Lists are ambiguous (one cutoff per instance vs. many cutoffs).
        if isinstance(cutoff_time, list):
            raise TypeError("cutoff_time must be a single value or DataFrame")
    return cutoff_time
def _check_cutoff_time_type(cutoff_time, es_time_type):
    """
    Check that the cutoff time values are of the proper type given the entityset time type

    ``cutoff_time`` is either a ``(value, instance_ids)`` tuple or a
    Woodwork-initialized dataframe with a "time" column; ``es_time_type`` is
    "numeric" or the Datetime logical type.

    Raises:
        TypeError: If the cutoff time type does not match the entityset's.
    """
    # Check that cutoff_time time type matches entityset time type
    if isinstance(cutoff_time, tuple):
        cutoff_time_value = cutoff_time[0]
        time_type = _check_time_type(cutoff_time_value)
        is_numeric = time_type == "numeric"
        is_datetime = time_type == Datetime
    else:
        cutoff_time_col = cutoff_time.ww["time"]
        is_numeric = cutoff_time_col.ww.schema.is_numeric
        is_datetime = cutoff_time_col.ww.schema.is_datetime
    if es_time_type == "numeric" and not is_numeric:
        raise TypeError(
            "cutoff_time times must be numeric: try casting " "via pd.to_numeric()",
        )
    if es_time_type == Datetime and not is_datetime:
        raise TypeError(
            "cutoff_time times must be datetime type: try casting "
            "via pd.to_datetime()",
        )
def replace_inf_values(feature_matrix, replacement_value=np.nan, columns=None):
    """Replace all ``np.inf`` values in a feature matrix with the specified replacement value.

    Args:
        feature_matrix (DataFrame): DataFrame whose columns are feature names and rows are instances
        replacement_value (int, float, str, optional): Value with which ``np.inf`` values will be replaced
        columns (list[str], optional): A list specifying which columns should have values replaced. If None,
            values will be replaced for all columns.

    Returns:
        feature_matrix
    """
    infinities = [np.inf, -np.inf]
    if columns is None:
        return feature_matrix.replace(infinities, replacement_value)
    feature_matrix[columns] = feature_matrix[columns].replace(
        infinities,
        replacement_value,
    )
    return feature_matrix
def get_ww_types_from_features(
    features,
    entityset,
    pass_columns=None,
    cutoff_time=None,
):
    """Given a list of features and entityset (and optionally a list of pass
    through columns and the cutoff time dataframe), returns the logical types,
    semantic tags,and origin of each column in the feature matrix. Both
    pass_columns and cutoff_time will need to be supplied in order to get the
    type information for the pass through columns

    Returns:
        dict: Keyword arguments ("logical_types", "semantic_tags",
        "column_origins") suitable for ``DataFrame.ww.init``.
    """
    if pass_columns is None:
        pass_columns = []
    logical_types = {}
    semantic_tags = {}
    origins = {}
    for feature in features:
        # A multi-output feature contributes several columns.
        names = feature.get_feature_names()
        for name in names:
            logical_types[name] = feature.column_schema.logical_type
            semantic_tags[name] = feature.column_schema.semantic_tags.copy()
            # Feature matrix columns are plain values, never index columns.
            semantic_tags[name] -= {"index", "time_index"}
            if logical_types[name] is None and "numeric" in semantic_tags[name]:
                logical_types[name] = Double
            # A feature with no primitive anywhere in its dependency chain is
            # raw data passed through; otherwise it was engineered.
            if all([f.primitive is None for f in feature.get_dependencies(deep=True)]):
                origins[name] = "base"
            else:
                origins[name] = "engineered"
    if pass_columns:
        # Pass-through columns take their typing from the cutoff time schema.
        cutoff_schema = cutoff_time.ww.schema
        for column in pass_columns:
            logical_types[column] = cutoff_schema.logical_types[column]
            semantic_tags[column] = cutoff_schema.semantic_tags[column]
            origins[column] = "base"
    if entityset.dataframe_type in (Library.DASK, Library.SPARK):
        # Dask/Spark feature matrices keep the target dataframe's index column.
        target_dataframe_name = features[0].dataframe_name
        table_schema = entityset[target_dataframe_name].ww.schema
        index_col = table_schema.index
        logical_types[index_col] = table_schema.logical_types[index_col]
        semantic_tags[index_col] = table_schema.semantic_tags[index_col]
        semantic_tags[index_col] -= {"index"}
        origins[index_col] = "base"
    ww_init = {
        "logical_types": logical_types,
        "semantic_tags": semantic_tags,
        "column_origins": origins,
    }
    return ww_init
| 15,370 | 36.038554 | 148 | py |
featuretools | featuretools-main/featuretools/computational_backends/api.py | # flake8: noqa
from featuretools.computational_backends.calculate_feature_matrix import (
approximate_features,
calculate_feature_matrix,
)
from featuretools.computational_backends.utils import (
bin_cutoff_times,
create_client_and_cluster,
replace_inf_values,
)
| 283 | 24.818182 | 74 | py |
featuretools | featuretools-main/featuretools/computational_backends/__init__.py | # flake8: noqa
from featuretools.computational_backends.api import *
| 69 | 22.333333 | 53 | py |
featuretools | featuretools-main/featuretools/computational_backends/feature_set.py | import itertools
import logging
from collections import defaultdict
from featuretools.entityset.relationship import RelationshipPath
from featuretools.feature_base import (
AggregationFeature,
FeatureOutputSlice,
GroupByTransformFeature,
TransformFeature,
)
from featuretools.utils import Trie
logger = logging.getLogger("featuretools.computational_backend")
class FeatureSet(object):
    """
    Represents an immutable set of features to be calculated for a single dataframe, and their
    dependencies.
    """
    def __init__(self, features, approximate_feature_trie=None):
        """
        Args:
            features (list[Feature]): Features of the target dataframe.
            approximate_feature_trie (Trie[RelationshipPath, set[str]], optional): Dependency
                features to ignore because they have already been approximated. For example, if
                one of the target features is a direct feature of a feature A and A is included in
                approximate_feature_trie then neither A nor its dependencies will appear in
                FeatureSet.feature_trie.
        """
        self.target_df_name = features[0].dataframe_name
        self.target_features = features
        self.target_feature_names = {f.unique_name() for f in features}
        if not approximate_feature_trie:
            approximate_feature_trie = Trie(
                default=list,
                path_constructor=RelationshipPath,
            )
        self.approximate_feature_trie = approximate_feature_trie
        # Maps the unique name of each feature to the actual feature. This is necessary
        # because features do not support equality and so cannot be used as
        # dictionary keys. The equality operator on features produces a new
        # feature (which will always be truthy).
        self.features_by_name = {f.unique_name(): f for f in features}
        feature_dependents = defaultdict(set)
        for f in features:
            deps = f.get_dependencies(deep=True)
            for dep in deps:
                feature_dependents[dep.unique_name()].add(f.unique_name())
                self.features_by_name[dep.unique_name()] = dep
                subdeps = dep.get_dependencies(deep=True)
                for sd in subdeps:
                    feature_dependents[sd.unique_name()].add(dep.unique_name())
        # feature names (keys) and the features that rely on them (values).
        self.feature_dependents = {
            fname: [self.features_by_name[dname] for dname in feature_dependents[fname]]
            for fname, f in self.features_by_name.items()
        }
        # Built lazily by the feature_trie property.
        self._feature_trie = None
    @property
    def feature_trie(self):
        """
        The target features and their dependencies organized into a trie by relationship path.
        This is built once when it is first called (to avoid building it if it is not needed) and
        then used for all subsequent calls.
        The edges of the trie are RelationshipPaths and the values are tuples of
        (bool, set[str], set[str]). The bool represents whether the full dataframe is needed at
        that node, the first set contains the names of features which are needed on the full
        dataframe, and the second set contains the names of the rest of the features
        Returns:
            Trie[RelationshipPath, (bool, set[str], set[str])]
        """
        if not self._feature_trie:
            self._feature_trie = self._build_feature_trie()
        return self._feature_trie
    def _build_feature_trie(self):
        """
        Build the feature trie by adding the target features and their dependencies recursively.
        """
        feature_trie = Trie(
            default=lambda: (False, set(), set()),
            path_constructor=RelationshipPath,
        )
        for f in self.target_features:
            self._add_feature_to_trie(feature_trie, f, self.approximate_feature_trie)
        return feature_trie
    def _add_feature_to_trie(
        self,
        trie,
        feature,
        approximate_feature_trie,
        ancestor_needs_full_dataframe=False,
    ):
        """
        Add the given feature to the root of the trie, and recurse on its dependencies. If it is in
        approximate_feature_trie then it will not be added and we will not recurse on its dependencies.
        """
        node_needs_full_dataframe, full_features, not_full_features = trie.value
        # A feature needs the full dataframe if it does itself or if any
        # ancestor on the way down did.
        needs_full_dataframe = (
            ancestor_needs_full_dataframe or self.uses_full_dataframe(feature)
        )
        name = feature.unique_name()
        # If this feature is ignored then don't add it or any of its dependencies.
        if name in approximate_feature_trie.value:
            return
        # Add the feature to one of the sets, depending on whether it needs the full dataframe.
        if needs_full_dataframe:
            full_features.add(name)
            # A feature may appear in at most one of the two sets.
            if name in not_full_features:
                not_full_features.remove(name)
            # Update needs_full_dataframe for this node.
            trie.value = (True, full_features, not_full_features)
            # Set every node in relationship path to needs_full_dataframe.
            sub_trie = trie
            for edge in feature.relationship_path:
                sub_trie = sub_trie.get_node([edge])
                (_, f1, f2) = sub_trie.value
                sub_trie.value = (True, f1, f2)
        else:
            if name not in full_features:
                not_full_features.add(name)
            sub_trie = trie.get_node(feature.relationship_path)
        sub_ignored_trie = approximate_feature_trie.get_node(feature.relationship_path)
        for dep_feat in feature.get_dependencies():
            # Recurse on the underlying feature of an output slice.
            if isinstance(dep_feat, FeatureOutputSlice):
                dep_feat = dep_feat.base_feature
            self._add_feature_to_trie(
                sub_trie,
                dep_feat,
                sub_ignored_trie,
                ancestor_needs_full_dataframe=needs_full_dataframe,
            )
    def group_features(self, feature_names):
        """
        Topologically sort the given features, then group by path,
        feature type, use_previous, and where.
        """
        features = [self.features_by_name[name] for name in feature_names]
        depths = self._get_feature_depths(features)
        def key_func(f):
            # Depth first so dependencies are calculated before dependents.
            return (
                depths[f.unique_name()],
                f.relationship_path_name(),
                str(f.__class__),
                _get_use_previous(f),
                _get_where(f),
                self.uses_full_dataframe(f),
                _get_groupby(f),
            )
        # Sort the list of features by the complex key function above, then
        # group them by the same key
        sort_feats = sorted(features, key=key_func)
        feature_groups = [
            list(g) for _, g in itertools.groupby(sort_feats, key=key_func)
        ]
        return feature_groups
    def _get_feature_depths(self, features):
        """
        Generate and return a mapping of {feature name -> depth} in the
        feature DAG for the given dataframe.
        """
        order = defaultdict(int)
        depths = {}
        queue = features[:]
        while queue:
            # Get the next feature.
            f = queue.pop(0)
            depths[f.unique_name()] = order[f.unique_name()]
            # Only look at dependencies if they are on the same dataframe.
            if not f.relationship_path:
                dependencies = f.get_dependencies()
                for dep in dependencies:
                    # A dependency is always at least one level shallower
                    # (more negative) than every feature that uses it.
                    order[dep.unique_name()] = min(
                        order[f.unique_name()] - 1,
                        order[dep.unique_name()],
                    )
                    queue.append(dep)
        return depths
    def uses_full_dataframe(self, feature, check_dependents=False):
        """Whether ``feature`` -- or, when ``check_dependents`` is True, any
        feature depending on it -- is a transform whose primitive requires
        the entire dataframe."""
        if (
            isinstance(feature, TransformFeature)
            and feature.primitive.uses_full_dataframe
        ):
            return True
        return check_dependents and self._dependent_uses_full_dataframe(feature)
    def _dependent_uses_full_dataframe(self, feature):
        # True if any registered dependent is a full-dataframe transform.
        for d in self.feature_dependents[feature.unique_name()]:
            if isinstance(d, TransformFeature) and d.primitive.uses_full_dataframe:
                return True
        return False
# These functions are used for sorting and grouping features
def _get_use_previous(f):
    """Grouping key for a feature's ``use_previous`` window as (unit, value).

    Features without a single-unit ``use_previous`` window collapse to the
    sentinel ``("", -1)`` so they group together.
    """
    # TODO Sort and group features for DateOffset with two different temporal values
    if not isinstance(f, AggregationFeature) or f.use_previous is None:
        return ("", -1)
    units = list(f.use_previous.times.keys())
    if len(units) > 1:
        return ("", -1)
    unit = units[0]
    return (unit, f.use_previous.times[unit])
def _get_where(f):
    """Grouping key for a feature's ``where`` clause (empty string if none)."""
    has_where = isinstance(f, AggregationFeature) and f.where is not None
    return f.where.unique_name() if has_where else ""
def _get_groupby(f):
    """Grouping key for a feature's groupby column (empty string if none)."""
    if not isinstance(f, GroupByTransformFeature):
        return ""
    return f.groupby.unique_name()
| 9,251 | 35.282353 | 103 | py |
class Relationship(object):
    """Class to represent a relationship between dataframes
    See Also:
        :class:`.EntitySet`
    """
    def __init__(
        self,
        entityset,
        parent_dataframe_name,
        parent_column_name,
        child_dataframe_name,
        child_column_name,
    ):
        """Create a relationship
        Args:
            entityset (:class:`.EntitySet`): EntitySet to which the relationship belongs
            parent_dataframe_name (str): Name of the parent dataframe in the EntitySet
            parent_column_name (str): Name of the parent column
            child_dataframe_name (str): Name of the child dataframe in the EntitySet
            child_column_name (str): Name of the child column
        Raises:
            AttributeError: If the parent column is not the parent
                dataframe's index.
        """
        self.entityset = entityset
        self._parent_dataframe_name = parent_dataframe_name
        self._child_dataframe_name = child_dataframe_name
        self._parent_column_name = parent_column_name
        self._child_column_name = child_column_name
        # A relationship must join on the parent dataframe's index column.
        if (
            self.parent_dataframe.ww.index is not None
            and self._parent_column_name != self.parent_dataframe.ww.index
        ):
            raise AttributeError(
                f"Parent column '{self._parent_column_name}' is not the index of "
                f"dataframe {self._parent_dataframe_name}",
            )
    @classmethod
    def from_dictionary(cls, arguments, es):
        # Reconstruct a relationship from its to_dictionary() form.
        parent_dataframe = arguments["parent_dataframe_name"]
        child_dataframe = arguments["child_dataframe_name"]
        parent_column = arguments["parent_column_name"]
        child_column = arguments["child_column_name"]
        return cls(es, parent_dataframe, parent_column, child_dataframe, child_column)
    def __repr__(self):
        ret = "<Relationship: %s.%s -> %s.%s>" % (
            self._child_dataframe_name,
            self._child_column_name,
            self._parent_dataframe_name,
            self._parent_column_name,
        )
        return ret
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (
            self._parent_dataframe_name == other._parent_dataframe_name
            and self._child_dataframe_name == other._child_dataframe_name
            and self._parent_column_name == other._parent_column_name
            and self._child_column_name == other._child_column_name
        )
    def __hash__(self):
        # Hash on the same four fields as __eq__ so equal relationships
        # hash equally.
        return hash(
            (
                self._parent_dataframe_name,
                self._child_dataframe_name,
                self._parent_column_name,
                self._child_column_name,
            ),
        )
    @property
    def parent_dataframe(self):
        """Parent dataframe object"""
        return self.entityset[self._parent_dataframe_name]
    @property
    def child_dataframe(self):
        """Child dataframe object"""
        return self.entityset[self._child_dataframe_name]
    @property
    def parent_column(self):
        """Column in parent dataframe"""
        return self.parent_dataframe.ww[self._parent_column_name]
    @property
    def child_column(self):
        """Column in child dataframe"""
        return self.child_dataframe.ww[self._child_column_name]
    @property
    def parent_name(self):
        """The name of the parent, relative to the child."""
        # Disambiguate with the child column when several relationships
        # connect the same pair of dataframes.
        if self._is_unique():
            return self._parent_dataframe_name
        else:
            return "%s[%s]" % (self._parent_dataframe_name, self._child_column_name)
    @property
    def child_name(self):
        """The name of the child, relative to the parent."""
        if self._is_unique():
            return self._child_dataframe_name
        else:
            return "%s[%s]" % (self._child_dataframe_name, self._child_column_name)
    def to_dictionary(self):
        # Serializable form consumed by from_dictionary().
        return {
            "parent_dataframe_name": self._parent_dataframe_name,
            "child_dataframe_name": self._child_dataframe_name,
            "parent_column_name": self._parent_column_name,
            "child_column_name": self._child_column_name,
        }
    def _is_unique(self):
        """Is there any other relationship with same parent and child dataframes?"""
        es = self.entityset
        relationships = es.get_forward_relationships(self._child_dataframe_name)
        n = len(
            [
                r
                for r in relationships
                if r._parent_dataframe_name == self._parent_dataframe_name
            ],
        )
        assert n > 0, "This relationship is missing from the entityset"
        return n == 1
class RelationshipPath(object):
    """An ordered sequence of ``(is_forward, Relationship)`` steps between dataframes."""

    def __init__(self, relationships_with_direction):
        self._relationships_with_direction = relationships_with_direction

    @property
    def name(self):
        """Dot-separated step names, each oriented by its direction."""
        parts = []
        for is_forward, relationship in self._relationships_with_direction:
            parts.append(_direction_name(is_forward, relationship))
        return ".".join(parts)

    def dataframes(self):
        """Yield the names of every dataframe along the path, start first."""
        steps = self._relationships_with_direction
        if not steps:
            return
        first_forward, first_rel = steps[0]
        yield (
            first_rel._child_dataframe_name
            if first_forward
            else first_rel._parent_dataframe_name
        )
        for is_forward, relationship in steps:
            yield (
                relationship._parent_dataframe_name
                if is_forward
                else relationship._child_dataframe_name
            )

    def __add__(self, other):
        combined = (
            self._relationships_with_direction + other._relationships_with_direction
        )
        return RelationshipPath(combined)

    def __getitem__(self, index):
        return self._relationships_with_direction[index]

    def __iter__(self):
        return iter(self._relationships_with_direction)

    def __len__(self):
        return len(self._relationships_with_direction)

    def __eq__(self, other):
        if not isinstance(other, RelationshipPath):
            return False
        return (
            self._relationships_with_direction == other._relationships_with_direction
        )

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        if not self._relationships_with_direction:
            return "<RelationshipPath []>"
        return "<RelationshipPath %s.%s>" % (next(self.dataframes()), self.name)
def _direction_name(is_forward, relationship):
if is_forward:
return relationship.parent_name
else:
return relationship.child_name
| 6,709 | 31.259615 | 88 | py |
featuretools | featuretools-main/featuretools/entityset/timedelta.py | import pandas as pd
from dateutil.relativedelta import relativedelta
class Timedelta(object):
"""Represents differences in time.
Timedeltas can be defined in multiple units. Supported units:
- "ms" : milliseconds
- "s" : seconds
- "h" : hours
- "m" : minutes
- "d" : days
- "o"/"observations" : number of individual events
- "mo" : months
- "Y" : years
Timedeltas can also be defined in terms of observations. In this case, the
Timedelta represents the period spanned by `value`.
For observation timedeltas:
>>> three_observations_log = Timedelta(3, "observations")
>>> three_observations_log.get_name()
'3 Observations'
"""
_Observations = "o"
# units for absolute times
_absolute_units = ["ms", "s", "h", "m", "d", "w"]
_relative_units = ["mo", "Y"]
_readable_units = {
"ms": "Milliseconds",
"s": "Seconds",
"h": "Hours",
"m": "Minutes",
"d": "Days",
"o": "Observations",
"w": "Weeks",
"Y": "Years",
"mo": "Months",
}
_readable_to_unit = {v.lower(): k for k, v in _readable_units.items()}
def __init__(self, value, unit=None, delta_obj=None):
"""
Args:
value (float, str, dict) : Value of timedelta, string providing
both unit and value, or a dictionary of units and times.
unit (str) : Unit of time delta.
delta_obj (pd.Timedelta or pd.DateOffset) : A time object used
internally to do time operations. If None is provided, one will
be created using the provided value and unit.
"""
self.check_value(value, unit)
self.times = self.fix_units()
if delta_obj is not None:
self.delta_obj = delta_obj
else:
self.delta_obj = self.get_unit_type()
@classmethod
def from_dictionary(cls, dictionary):
dict_units = dictionary["unit"]
dict_values = dictionary["value"]
if isinstance(dict_units, str) and isinstance(dict_values, (int, float)):
return cls({dict_units: dict_values})
else:
all_units = dict()
for i in range(len(dict_units)):
all_units[dict_units[i]] = dict_values[i]
return cls(all_units)
@classmethod
def make_singular(cls, s):
if len(s) > 1 and s.endswith("s"):
return s[:-1]
return s
@classmethod
def _check_unit_plural(cls, s):
if len(s) > 2 and not s.endswith("s"):
return (s + "s").lower()
elif len(s) > 1:
return s.lower()
return s
def get_value(self, unit=None):
if unit is not None:
return self.times[unit]
elif len(self.times.values()) == 1:
return list(self.times.values())[0]
else:
return self.times
def get_units(self):
return list(self.times.keys())
def get_unit_type(self):
all_units = self.get_units()
if self._Observations in all_units:
return None
elif self.is_absolute() and self.has_multiple_units() is False:
return pd.Timedelta(self.times[all_units[0]], all_units[0])
else:
readable_times = self.lower_readable_times()
return relativedelta(**readable_times)
def check_value(self, value, unit):
if isinstance(value, str):
from featuretools.utils.wrangle import _check_timedelta
td = _check_timedelta(value)
self.times = td.times
elif isinstance(value, dict):
self.times = value
else:
self.times = {unit: value}
def fix_units(self):
fixed_units = dict()
for unit, value in self.times.items():
unit = self._check_unit_plural(unit)
if unit in self._readable_to_unit:
unit = self._readable_to_unit[unit]
fixed_units[unit] = value
return fixed_units
def lower_readable_times(self):
readable_times = dict()
for unit, value in self.times.items():
readable_unit = self._readable_units[unit].lower()
readable_times[readable_unit] = value
return readable_times
def get_name(self):
all_units = self.get_units()
if self.has_multiple_units() is False:
return "{} {}".format(
self.times[all_units[0]],
self._readable_units[all_units[0]],
)
final_str = ""
for unit, value in self.times.items():
if value == 1:
unit = self.make_singular(unit)
final_str += "{} {} ".format(value, self._readable_units[unit])
return final_str[:-1]
def get_arguments(self):
units = list()
values = list()
for unit, value in self.times.items():
units.append(unit)
values.append(value)
if len(units) == 1:
return {"unit": units[0], "value": values[0]}
else:
return {"unit": units, "value": values}
def is_absolute(self):
for unit in self.get_units():
if unit not in self._absolute_units:
return False
return True
def has_no_observations(self):
for unit in self.get_units():
if unit in self._Observations:
return False
return True
def has_multiple_units(self):
if len(self.get_units()) > 1:
return True
else:
return False
def __eq__(self, other):
if not isinstance(other, Timedelta):
return False
return self.times == other.times
def __neg__(self):
"""Negate the timedelta"""
new_times = dict()
for unit, value in self.times.items():
new_times[unit] = -value
if self.delta_obj is not None:
return Timedelta(new_times, delta_obj=-self.delta_obj)
else:
return Timedelta(new_times)
def __radd__(self, time):
"""Add the Timedelta to a timestamp value"""
if self._Observations not in self.get_units():
return time + self.delta_obj
else:
raise Exception("Invalid unit")
def __rsub__(self, time):
"""Subtract the Timedelta from a timestamp value"""
if self._Observations not in self.get_units():
return time - self.delta_obj
else:
raise Exception("Invalid unit")
| 6,575 | 30.018868 | 81 | py |
featuretools | featuretools-main/featuretools/entityset/deserialize.py | import json
import os
import tarfile
import tempfile
import pandas as pd
import woodwork.type_sys.type_system as ww_type_system
from woodwork.deserialize import read_woodwork_table
from featuretools.entityset.relationship import Relationship
from featuretools.utils.gen_utils import Library, import_or_none
from featuretools.utils.s3_utils import get_transport_params, use_smartopen_es
from featuretools.utils.schema_utils import check_schema_version
from featuretools.utils.wrangle import _is_local_tar, _is_s3, _is_url
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")
def description_to_entityset(description, **kwargs):
    """Deserialize entityset from data description.

    Args:
        description (dict) : Description of an :class:`.EntitySet`. Likely generated using :meth:`.serialize.entityset_to_description`
        kwargs (keywords): Additional keyword arguments to pass as keywords arguments to the underlying deserialization method.

    Returns:
        entityset (EntitySet) : Instance of :class:`.EntitySet`.
    """
    # Raises if the description was produced by an incompatible schema version.
    check_schema_version(description, "entityset")
    # Imported here to avoid a circular import with featuretools.entityset.
    from featuretools.entityset import EntitySet

    # If data description was not read from disk, path is None.
    path = description.get("path")
    entityset = EntitySet(description["id"])
    for df in description["dataframes"].values():
        if path is not None:
            # Each dataframe's data lives under <path>/data/<name> as a
            # serialized Woodwork table.
            data_path = os.path.join(path, "data", df["name"])
            format = description.get("format")
            if format is not None:
                kwargs["format"] = format
                if format == "parquet" and df["loading_info"]["table_type"] == "pandas":
                    kwargs["filename"] = df["name"] + ".parquet"
            dataframe = read_woodwork_table(data_path, validate=False, **kwargs)
        else:
            # No data on disk: rebuild an empty but fully typed dataframe
            # from the stored typing metadata.
            dataframe = empty_dataframe(df, description["data_type"])
        entityset.add_dataframe(dataframe)
    for relationship in description["relationships"]:
        rel = Relationship.from_dictionary(relationship, entityset)
        entityset.add_relationship(relationship=rel)
    return entityset
def empty_dataframe(description, data_type=Library.PANDAS):
    """Deserialize empty dataframe from dataframe description.

    Args:
        description (dict) : Description of dataframe.
        data_type (Library) : Library used for the dataframe (pandas, Dask, or Spark).

    Returns:
        df (DataFrame) : Empty dataframe with Woodwork initialized.
    """
    # TODO: Can we update Woodwork to return an empty initialized dataframe from a description
    # instead of using this function? Or otherwise eliminate? Issue #1476
    # Per-column typing information to pass to Woodwork, keyed by column name.
    logical_types = {}
    semantic_tags = {}
    column_descriptions = {}
    column_metadata = {}
    use_standard_tags = {}
    category_dtypes = {}
    columns = []
    for col in description["column_typing_info"]:
        col_name = col["name"]
        columns.append(col_name)
        ltype_metadata = col["logical_type"]
        ltype = ww_type_system.str_to_logical_type(
            ltype_metadata["type"],
            params=ltype_metadata["parameters"],
        )
        tags = col["semantic_tags"]
        # Index tags are re-applied by Woodwork via the index/time_index
        # parameters below, so strip them from the raw tag set here.
        if "index" in tags:
            tags.remove("index")
        elif "time_index" in tags:
            tags.remove("time_index")
        logical_types[col_name] = ltype
        semantic_tags[col_name] = tags
        column_descriptions[col_name] = col["description"]
        column_metadata[col_name] = col["metadata"]
        use_standard_tags[col_name] = col["use_standard_tags"]
        if col["physical_type"]["type"] == "category":
            # Make sure categories are recreated properly
            cat_values = col["physical_type"]["cat_values"]
            cat_dtype = col["physical_type"]["cat_dtype"]
            cat_object = pd.CategoricalDtype(pd.Index(cat_values, dtype=cat_dtype))
            category_dtypes[col_name] = cat_object
    dataframe = pd.DataFrame(columns=columns).astype(category_dtypes)
    # Convert the empty pandas frame to the requested dataframe library.
    if data_type == Library.DASK:
        dataframe = dd.from_pandas(dataframe, npartitions=1)
    elif data_type == Library.SPARK:
        dataframe = ps.from_pandas(dataframe)
    dataframe.ww.init(
        name=description.get("name"),
        index=description.get("index"),
        time_index=description.get("time_index"),
        logical_types=logical_types,
        semantic_tags=semantic_tags,
        use_standard_tags=use_standard_tags,
        table_metadata=description.get("table_metadata"),
        column_metadata=column_metadata,
        column_descriptions=column_descriptions,
        validate=False,
    )
    return dataframe
def read_data_description(path):
    """Read data description from disk, S3 path, or URL.

    Args:
        path (str): Location on disk, S3 path, or URL to read `data_description.json`.

    Returns:
        description (dict) : Description of :class:`.EntitySet`.

    Raises:
        AssertionError: If `path` does not exist.
    """
    path = os.path.abspath(path)
    # Validate explicitly instead of with `assert` so the check still runs
    # under `python -O`. AssertionError is raised to stay backward compatible
    # with callers of the previous assert-based check.
    if not os.path.exists(path):
        raise AssertionError('"{}" does not exist'.format(path))
    filepath = os.path.join(path, "data_description.json")
    with open(filepath, "r") as file:
        description = json.load(file)
    # Record where the description was read from so data files can be located
    # relative to it during deserialization.
    description["path"] = path
    return description
def read_entityset(path, profile_name=None, **kwargs):
    """Read entityset from disk, S3 path, or URL.

    Args:
        path (str): Directory on disk, S3 path, or URL to read `data_description.json`.
        profile_name (str, bool): The AWS profile specified to write to S3. Will default to None and search for AWS credentials.
            Set to False to use an anonymous profile.
        kwargs (keywords): Additional keyword arguments to pass as keyword arguments to the underlying deserialization method.
    """
    if _is_url(path) or _is_s3(path) or _is_local_tar(str(path)):
        # Remote or archived entitysets are staged into a temporary directory
        # before deserialization.
        with tempfile.TemporaryDirectory() as tmpdir:
            local_path = path
            transport_params = None
            if _is_s3(path):
                transport_params = get_transport_params(profile_name)
            if _is_s3(path) or _is_url(path):
                # Download the archive into the temporary directory first.
                local_path = os.path.join(tmpdir, "temporary_es")
                use_smartopen_es(local_path, path, transport_params)
            with tarfile.open(str(local_path)) as tar:
                # NOTE(review): extractall trusts member paths inside the
                # archive; for untrusted archives consider the `filter`
                # argument (Python 3.12+) to guard against path traversal.
                tar.extractall(path=tmpdir)
            data_description = read_data_description(tmpdir)
            return description_to_entityset(data_description, **kwargs)
    else:
        data_description = read_data_description(path)
        return description_to_entityset(data_description, **kwargs)
| 6,508 | 35.774011 | 134 | py |
featuretools | featuretools-main/featuretools/entityset/api.py | # flake8: noqa
from featuretools.entityset.deserialize import read_entityset
from featuretools.entityset.entityset import EntitySet
from featuretools.entityset.relationship import Relationship
from featuretools.entityset.timedelta import Timedelta
| 248 | 40.5 | 61 | py |
featuretools | featuretools-main/featuretools/entityset/entityset.py | import copy
import logging
import warnings
from collections import defaultdict
import numpy as np
import pandas as pd
from woodwork import init_series
from woodwork.logical_types import Datetime, LatLong
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.feature_base.feature_base import _ES_REF
from featuretools.utils.gen_utils import Library, import_or_none, is_instance
from featuretools.utils.plot_utils import (
check_graphviz,
get_graphviz_format,
save_graph,
)
from featuretools.utils.wrangle import _check_timedelta
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger("featuretools.entityset")
LTI_COLUMN_NAME = "_ft_last_time"
WW_SCHEMA_KEY = "_ww__getstate__schemas"
class EntitySet(object):
"""
Stores all actual data and typing information for an entityset
Attributes:
id
dataframe_dict
relationships
time_type
Properties:
metadata
"""
    def __init__(self, id=None, dataframes=None, relationships=None):
        """Creates EntitySet

        Args:
            id (str) : Unique identifier to associate with this instance
            dataframes (dict[str -> tuple(DataFrame, str, str, dict[str -> str/Woodwork.LogicalType], dict[str->str/set], boolean)]):
                Dictionary of DataFrames. Entries take the format
                {dataframe name -> (dataframe, index column, time_index, logical_types, semantic_tags, make_index)}.
                Note that only the dataframe is required. If a Woodwork DataFrame is supplied, any other parameters
                will be ignored.
            relationships (list[(str, str, str, str)]): List of relationships
                between dataframes. List items are a tuple with the format
                (parent dataframe name, parent column, child dataframe name, child column).

        Example:

            .. code-block:: python

                dataframes = {
                    "cards" : (card_df, "id"),
                    "transactions" : (transactions_df, "id", "transaction_time")
                }

                relationships = [("cards", "id", "transactions", "card_id")]

                ft.EntitySet("my-entity-set", dataframes, relationships)
        """
        self.id = id
        self.dataframe_dict = {}
        self.relationships = []
        # Set lazily by add_dataframe once a time index type is known.
        self.time_type = None

        dataframes = dataframes or {}
        relationships = relationships or []
        for df_name in dataframes:
            df = dataframes[df_name][0]
            # A pre-initialized Woodwork dataframe must agree with its dict key.
            if df.ww.schema is not None and df.ww.name != df_name:
                raise ValueError(
                    f"Naming conflict in dataframes dictionary: dictionary key '{df_name}' does not match dataframe name '{df.ww.name}'",
                )
            # Unpack the optional tuple entries positionally:
            # (df, index, time_index, logical_types, semantic_tags, make_index)
            index_column = None
            time_index = None
            make_index = False
            semantic_tags = None
            logical_types = None
            if len(dataframes[df_name]) > 1:
                index_column = dataframes[df_name][1]
            if len(dataframes[df_name]) > 2:
                time_index = dataframes[df_name][2]
            if len(dataframes[df_name]) > 3:
                logical_types = dataframes[df_name][3]
            if len(dataframes[df_name]) > 4:
                semantic_tags = dataframes[df_name][4]
            if len(dataframes[df_name]) > 5:
                make_index = dataframes[df_name][5]
            self.add_dataframe(
                dataframe_name=df_name,
                dataframe=df,
                index=index_column,
                time_index=time_index,
                logical_types=logical_types,
                semantic_tags=semantic_tags,
                make_index=make_index,
            )

        # Relationships can only be added after all dataframes exist.
        for relationship in relationships:
            parent_df, parent_column, child_df, child_column = relationship
            self.add_relationship(parent_df, parent_column, child_df, child_column)
        self.reset_data_description()
        # Register this entityset in the global reference map used by
        # feature serialization to resolve entitysets by id.
        _ES_REF[self.id] = self
def __sizeof__(self):
return sum([df.__sizeof__() for df in self.dataframes])
    def __dask_tokenize__(self):
        # Deterministic token for Dask graph hashing, built from the serialized
        # description of the typing-only metadata copy (no underlying data).
        return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if self.id != other.id:
return False
if self.time_type != other.time_type:
return False
if len(self.dataframe_dict) != len(other.dataframe_dict):
return False
for df_name, df in self.dataframe_dict.items():
if df_name not in other.dataframe_dict:
return False
if not df.ww.__eq__(other[df_name].ww, deep=deep):
return False
if not len(self.relationships) == len(other.relationships):
return False
for r in self.relationships:
if r not in other.relationships:
return False
return True
    def __ne__(self, other, deep=False):
        """Inverse of :meth:`__eq__`; `deep` additionally compares underlying data."""
        return not self.__eq__(other, deep=deep)
def __getitem__(self, dataframe_name):
"""Get dataframe instance from entityset
Args:
dataframe_name (str): Name of dataframe.
Returns:
:class:`.DataFrame` : Instance of dataframe with Woodwork typing information. None if dataframe doesn't
exist on the entityset.
"""
if dataframe_name in self.dataframe_dict:
return self.dataframe_dict[dataframe_name]
name = self.id or "entity set"
raise KeyError("DataFrame %s does not exist in %s" % (dataframe_name, name))
    def __deepcopy__(self, memo):
        """Deep-copy the entityset, preserving Woodwork typing on each dataframe."""
        cls = self.__class__
        result = cls.__new__(cls)
        # Register the copy in memo before copying attributes so recursive
        # references back to this entityset resolve to the copy.
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k == "dataframe_dict":
                # Copy the DataFrames, retaining Woodwork typing information
                copied_attr = copy.copy(v)
                for df_name, df in copied_attr.items():
                    copied_attr[df_name] = df.ww.copy()
            else:
                copied_attr = copy.deepcopy(v, memo)

            setattr(result, k, copied_attr)

        # Re-point each copied dataframe's metadata references at the copy.
        for df in result.dataframe_dict.values():
            result._add_references_to_metadata(df)
        return result
    @property
    def dataframes(self):
        """A list of the dataframes stored in this entityset."""
        return list(self.dataframe_dict.values())
    @property
    def dataframe_type(self):
        """String specifying the library used for the dataframes. Null if no dataframes"""
        df_type = None

        # Checking only the first dataframe is sufficient: add_dataframe
        # rejects mixing dataframe libraries within one entityset.
        if self.dataframes:
            if isinstance(self.dataframes[0], pd.DataFrame):
                df_type = Library.PANDAS
            elif is_instance(self.dataframes[0], dd, "DataFrame"):
                df_type = Library.DASK
            elif is_instance(self.dataframes[0], ps, "DataFrame"):
                df_type = Library.SPARK

        return df_type
    @property
    def metadata(self):
        """Returns the metadata for this EntitySet. The metadata will be recomputed if it does not exist."""
        # Lazily computed and cached; invalidated via reset_data_description().
        if self._data_description is None:
            description = serialize.entityset_to_description(self)
            self._data_description = deserialize.description_to_entityset(description)

        return self._data_description
    def reset_data_description(self):
        """Invalidate the cached metadata copy; it is rebuilt on next access."""
        self._data_description = None
    def to_pickle(self, path, compression=None, profile_name=None):
        """Write entityset in the pickle format, location specified by `path`.
        Path could be a local path or a S3 path.
        If writing to S3 a tar archive of files will be written.

        Args:
            path (str): location on disk to write to (will be created as a directory)
            compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
            profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.

        Returns:
            EntitySet: self, to allow method chaining.
        """
        serialize.write_data_description(
            self,
            path,
            format="pickle",
            compression=compression,
            profile_name=profile_name,
        )
        return self
    def to_parquet(self, path, engine="auto", compression=None, profile_name=None):
        """Write entityset to disk in the parquet format, location specified by `path`.
        Path could be a local path or a S3 path.
        If writing to S3 a tar archive of files will be written.

        Args:
            path (str): location on disk to write to (will be created as a directory)
            engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow'}.
            compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
            profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.

        Returns:
            EntitySet: self, to allow method chaining.
        """
        serialize.write_data_description(
            self,
            path,
            format="parquet",
            engine=engine,
            compression=compression,
            profile_name=profile_name,
        )
        return self
    def to_csv(
        self,
        path,
        sep=",",
        encoding="utf-8",
        engine="python",
        compression=None,
        profile_name=None,
    ):
        """Write entityset to disk in the csv format, location specified by `path`.
        Path could be a local path or a S3 path.
        If writing to S3 a tar archive of files will be written.

        Args:
            path (str) : Location on disk to write to (will be created as a directory)
            sep (str) : String of length 1. Field delimiter for the output file.
            encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
            engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
            compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
            profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.

        Returns:
            EntitySet: self, to allow method chaining.
        """
        if self.dataframe_type == Library.SPARK:
            # NOTE(review): presumably the Spark CSV writer requires a string
            # compression value (including "None") — confirm against caller.
            compression = str(compression)
        serialize.write_data_description(
            self,
            path,
            format="csv",
            index=False,
            sep=sep,
            encoding=encoding,
            engine=engine,
            compression=compression,
            profile_name=profile_name,
        )
        return self
    def to_dictionary(self):
        """Serialize this entityset into its data-description dictionary."""
        return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
    def __repr__(self):
        """Summary string listing each dataframe's shape and all relationships."""
        repr_out = "Entityset: {}\n".format(self.id)
        repr_out += " DataFrames:"
        for df in self.dataframes:
            if df.shape:
                repr_out += "\n {} [Rows: {}, Columns: {}]".format(
                    df.ww.name,
                    df.shape[0],
                    df.shape[1],
                )
            else:
                # Shape may be unavailable (e.g. lazily evaluated dataframes).
                repr_out += "\n {} [Rows: None, Columns: None]".format(df.ww.name)
        repr_out += "\n Relationships:"

        if len(self.relationships) == 0:
            repr_out += "\n No relationships"

        for r in self.relationships:
            # Rendered child -> parent, matching the direction of the foreign key.
            repr_out += "\n %s.%s -> %s.%s" % (
                r._child_dataframe_name,
                r._child_column_name,
                r._parent_dataframe_name,
                r._parent_column_name,
            )

        return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[tuple(str, str, str, str)] or list[Relationship]) : List of
new relationships to add. Relationships are specified either as a :class:`.Relationship`
object or a four element tuple identifying the parent and child columns:
(parent_dataframe_name, parent_column_name, child_dataframe_name, child_column_name)
"""
for rel in relationships:
if isinstance(rel, Relationship):
self.add_relationship(relationship=rel)
else:
self.add_relationship(*rel)
return self
    def add_relationship(
        self,
        parent_dataframe_name=None,
        parent_column_name=None,
        child_dataframe_name=None,
        child_column_name=None,
        relationship=None,
    ):
        """Add a new relationship between dataframes in the entityset. Relationships can be specified
        by passing dataframe and columns names or by passing a :class:`.Relationship` object.

        Args:
            parent_dataframe_name (str): Name of the parent dataframe in the EntitySet. Must be specified
                if relationship is not.
            parent_column_name (str): Name of the parent column. Must be specified if relationship is not.
            child_dataframe_name (str): Name of the child dataframe in the EntitySet. Must be specified
                if relationship is not.
            child_column_name (str): Name of the child column. Must be specified if relationship is not.
            relationship (Relationship): Instance of new relationship to be added. Must be specified
                if dataframe and column names are not supplied.

        Returns:
            EntitySet: self, to allow method chaining.

        Raises:
            ValueError: If both a Relationship and name values are supplied, or if the
                child column is also the child dataframe's index.
        """
        # The two ways of specifying a relationship are mutually exclusive.
        if relationship and (
            parent_dataframe_name
            or parent_column_name
            or child_dataframe_name
            or child_column_name
        ):
            raise ValueError(
                "Cannot specify dataframe and column name values and also supply a Relationship",
            )

        if not relationship:
            relationship = Relationship(
                self,
                parent_dataframe_name,
                parent_column_name,
                child_dataframe_name,
                child_column_name,
            )
        if relationship in self.relationships:
            warnings.warn("Not adding duplicate relationship: " + str(relationship))
            return self

        # _operations?

        # this is a new pair of dataframes
        child_df = relationship.child_dataframe
        child_column = relationship._child_column_name
        # A foreign key cannot be the child's own index column.
        if child_df.ww.index == child_column:
            msg = "Unable to add relationship because child column '{}' in '{}' is also its index"
            raise ValueError(msg.format(child_column, child_df.ww.name))
        parent_df = relationship.parent_dataframe
        parent_column = relationship._parent_column_name

        # The parent side of a relationship must be that dataframe's index.
        if parent_df.ww.index != parent_column:
            parent_df.ww.set_index(parent_column)

        # Empty dataframes (as a result of accessing metadata)
        # default to object dtypes for categorical columns, but
        # indexes/foreign keys default to ints. In this case, we convert
        # the empty column's type to int
        if isinstance(child_df, pd.DataFrame) and (
            child_df.empty
            and child_df[child_column].dtype == object
            and parent_df.ww.columns[parent_column].is_numeric
        ):
            child_df.ww[child_column] = pd.Series(name=child_column, dtype=np.int64)

        # The two sides of the relationship must share a logical type; the
        # child is coerced to match the parent.
        parent_ltype = parent_df.ww.logical_types[parent_column]
        child_ltype = child_df.ww.logical_types[child_column]
        if parent_ltype != child_ltype:
            difference_msg = ""
            if str(parent_ltype) == str(child_ltype):
                difference_msg = "There is a conflict between the parameters. "

            warnings.warn(
                f"Logical type {child_ltype} for child column {child_column} does not match "
                f"parent column {parent_column} logical type {parent_ltype}. {difference_msg}"
                "Changing child logical type to match parent.",
            )
            child_df.ww.set_types(logical_types={child_column: parent_ltype})

        if "foreign_key" not in child_df.ww.semantic_tags[child_column]:
            child_df.ww.add_semantic_tags({child_column: "foreign_key"})

        self.relationships.append(relationship)
        # The cached metadata copy is now stale.
        self.reset_data_description()
        return self
    def set_secondary_time_index(self, dataframe_name, secondary_time_index):
        """
        Set the secondary time index for a dataframe in the EntitySet using its dataframe name.

        Args:
            dataframe_name (str) : name of the dataframe for which to set the secondary time index.
            secondary_time_index (dict[str-> list[str]]): Name of column containing time data to
                be used as a secondary time index mapped to a list of the columns in the dataframe
                associated with that secondary time index.
        """
        # Delegates to the Woodwork-level helper, which stores the mapping in
        # the dataframe's metadata.
        dataframe = self[dataframe_name]
        self._set_secondary_time_index(dataframe, secondary_time_index)
def _set_secondary_time_index(self, dataframe, secondary_time_index):
"""Sets the secondary time index for a Woodwork dataframe passed in"""
assert (
dataframe.ww.schema is not None
), "Cannot set secondary time index if Woodwork is not initialized"
self._check_secondary_time_index(dataframe, secondary_time_index)
if secondary_time_index is not None:
dataframe.ww.metadata["secondary_time_index"] = secondary_time_index
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
    def find_forward_paths(self, start_dataframe_name, goal_dataframe_name):
        """
        Generator which yields all forward paths between a start and goal
        dataframe. Does not include paths which contain cycles.

        Args:
            start_dataframe_name (str) : name of dataframe to start the search from
            goal_dataframe_name (str) : name of dataframe to find forward path to

        See Also:
            :func:`BaseEntitySet.find_backward_paths`
        """
        # Enumerate every reachable dataframe via DFS and keep only the paths
        # that end at the goal.
        for sub_dataframe_name, path in self._forward_dataframe_paths(
            start_dataframe_name,
        ):
            if sub_dataframe_name == goal_dataframe_name:
                yield path
def find_backward_paths(self, start_dataframe_name, goal_dataframe_name):
"""
Generator which yields all backward paths between a start and goal
dataframe. Does not include paths which contain cycles.
Args:
start_dataframe_name (str) : Name of dataframe to start the search from.
goal_dataframe_name (str) : Name of dataframe to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_dataframe_name, start_dataframe_name):
# Reverse path
yield path[::-1]
    def _forward_dataframe_paths(self, start_dataframe_name, seen_dataframes=None):
        """
        Generator which yields the names of all dataframes connected through forward
        relationships, and the path taken to each. A dataframe will be yielded
        multiple times if there are multiple paths to it.

        Implemented using depth first search.
        """
        if seen_dataframes is None:
            seen_dataframes = set()

        # Cut off any path that revisits a dataframe (cycle prevention).
        if start_dataframe_name in seen_dataframes:
            return

        seen_dataframes.add(start_dataframe_name)

        # The start dataframe itself is reachable via the empty path.
        yield start_dataframe_name, []

        for relationship in self.get_forward_relationships(start_dataframe_name):
            next_dataframe = relationship._parent_dataframe_name

            # Copy seen dataframes for each next node to allow multiple paths (but
            # not cycles).
            descendants = self._forward_dataframe_paths(
                next_dataframe,
                seen_dataframes.copy(),
            )
            for sub_dataframe_name, sub_path in descendants:
                yield sub_dataframe_name, [relationship] + sub_path
    def get_forward_dataframes(self, dataframe_name, deep=False):
        """
        Get dataframes that are in a forward relationship with dataframe

        Args:
            dataframe_name (str): Name of dataframe to search from.
            deep (bool): if True, recursively find forward dataframes.

        Yields a tuple of (descendent_name, path from dataframe_name to descendant).
        """
        for relationship in self.get_forward_relationships(dataframe_name):
            parent_dataframe_name = relationship._parent_dataframe_name
            # Single-step forward path to the immediate parent.
            direct_path = RelationshipPath([(True, relationship)])
            yield parent_dataframe_name, direct_path

            if deep:
                # Recurse from the parent, prefixing each result with the
                # direct path taken to reach it.
                sub_dataframes = self.get_forward_dataframes(
                    parent_dataframe_name,
                    deep=True,
                )
                for sub_dataframe_name, path in sub_dataframes:
                    yield sub_dataframe_name, direct_path + path
    def get_backward_dataframes(self, dataframe_name, deep=False):
        """
        Get dataframes that are in a backward relationship with dataframe

        Args:
            dataframe_name (str): Name of dataframe to search from.
            deep (bool): if True, recursively find backward dataframes.

        Yields a tuple of (descendent_name, path from dataframe_name to descendant).
        """
        for relationship in self.get_backward_relationships(dataframe_name):
            child_dataframe_name = relationship._child_dataframe_name
            # Single-step backward path to the immediate child.
            direct_path = RelationshipPath([(False, relationship)])
            yield child_dataframe_name, direct_path

            if deep:
                # Recurse from the child, prefixing each result with the
                # direct path taken to reach it.
                sub_dataframes = self.get_backward_dataframes(
                    child_dataframe_name,
                    deep=True,
                )
                for sub_dataframe_name, path in sub_dataframes:
                    yield sub_dataframe_name, direct_path + path
def get_forward_relationships(self, dataframe_name):
"""Get relationships where dataframe "dataframe_name" is the child
Args:
dataframe_name (str): Name of dataframe to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [
r for r in self.relationships if r._child_dataframe_name == dataframe_name
]
def get_backward_relationships(self, dataframe_name):
"""
get relationships where dataframe "dataframe_name" is the parent.
Args:
dataframe_name (str): Name of dataframe to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [
r for r in self.relationships if r._parent_dataframe_name == dataframe_name
]
def has_unique_forward_path(self, start_dataframe_name, end_dataframe_name):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_dataframe_name, end_dataframe_name)
next(paths)
second_path = next(paths, None)
return not second_path
###########################################################################
# DataFrame creation methods ##############################################
###########################################################################
def add_dataframe(
    self,
    dataframe,
    dataframe_name=None,
    index=None,
    logical_types=None,
    semantic_tags=None,
    make_index=False,
    time_index=None,
    secondary_time_index=None,
    already_sorted=False,
):
    """
    Add a DataFrame to the EntitySet with Woodwork typing information.

    Args:
        dataframe (pandas.DataFrame) : Dataframe containing the data.
        dataframe_name (str, optional) : Unique name to associate with this dataframe. Must be
            provided if Woodwork is not initialized on the input DataFrame.
        index (str, optional): Name of the column used to index the dataframe.
            Must be unique. If None, take the first column.
        logical_types (dict[str -> Woodwork.LogicalTypes/str, optional]):
            Keys are column names and values are logical types. Will be inferred if not specified.
        semantic_tags (dict[str -> str/set], optional):
            Keys are column names and values are semantic tags.
        make_index (bool, optional) : If True, assume index does not
            exist as a column in dataframe, and create a new column of that name
            using integers. Otherwise, assume index exists.
        time_index (str, optional): Name of the column containing
            time data. Type must be numeric or datetime in nature.
        secondary_time_index (dict[str -> list[str]]): Name of column containing time data to
            be used as a secondary time index mapped to a list of the columns in the dataframe
            associated with that secondary time index.
        already_sorted (bool, optional) : If True, assumes that input dataframe
            is already sorted by time. Defaults to False.

    Notes:
        Will infer logical types from the data.

    Example:
        .. ipython:: python

            import featuretools as ft
            import pandas as pd
            transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
                                            "session_id": [1, 2, 1, 3, 4, 5],
                                            "amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
                                            "transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
                                            "fraud": [True, False, True, False, True, True]})
            es = ft.EntitySet("example")
            es.add_dataframe(dataframe_name="transactions",
                             index="id",
                             time_index="transaction_time",
                             dataframe=transactions_df)

            es["transactions"]
    """
    # Normalize optional mappings so later lookups never hit None.
    logical_types = logical_types or {}
    semantic_tags = semantic_tags or {}

    # All dataframes in an EntitySet must share the same backend
    # (pandas vs. Dask vs. Spark); mixing them is not supported.
    if len(self.dataframes) > 0:
        if not isinstance(dataframe, type(self.dataframes[0])):
            raise ValueError(
                "All dataframes must be of the same type. "
                "Cannot add dataframe of type {} to an entityset with existing dataframes "
                "of type {}".format(type(dataframe), type(self.dataframes[0])),
            )

    # Only allow string column names
    non_string_names = [
        name for name in dataframe.columns if not isinstance(name, str)
    ]
    if non_string_names:
        raise ValueError(
            "All column names must be strings (Columns {} "
            "are not strings)".format(non_string_names),
        )

    if dataframe.ww.schema is None:
        # Woodwork not yet initialized: this method is responsible for
        # naming, indexing, and typing the dataframe.
        if dataframe_name is None:
            raise ValueError(
                "Cannot add dataframe to EntitySet without a name. "
                "Please provide a value for the dataframe_name parameter.",
            )
        # Warn when performing inference on Dask or Spark DataFrames
        if not set(dataframe.columns).issubset(set(logical_types.keys())) and (
            is_instance(dataframe, dd, "DataFrame")
            or is_instance(dataframe, ps, "DataFrame")
        ):
            warnings.warn(
                "Performing type inference on Dask or Spark DataFrames may be computationally intensive. "
                "Specify logical types for each column to speed up EntitySet initialization.",
            )

        # May create a new integer index column when make_index is True.
        index_was_created, index, dataframe = _get_or_create_index(
            index,
            make_index,
            dataframe,
        )

        dataframe.ww.init(
            name=dataframe_name,
            index=index,
            time_index=time_index,
            logical_types=logical_types,
            semantic_tags=semantic_tags,
            already_sorted=already_sorted,
        )
        if index_was_created:
            # Remember that featuretools made this index so it can be
            # recreated later (e.g. in replace_dataframe).
            dataframe.ww.metadata["created_index"] = index

    else:
        # Woodwork already initialized: trust its schema, but it must at
        # minimum carry a name and an index.
        if dataframe.ww.name is None:
            raise ValueError(
                "Cannot add a Woodwork DataFrame to EntitySet without a name",
            )
        if dataframe.ww.index is None:
            raise ValueError(
                "Cannot add Woodwork DataFrame to EntitySet without index",
            )

        # Typing-related parameters are ignored in this branch; collect any
        # that were passed so we can warn the caller explicitly.
        extra_params = []
        if index is not None:
            extra_params.append("index")
        if time_index is not None:
            extra_params.append("time_index")
        if logical_types:
            extra_params.append("logical_types")
        if make_index:
            extra_params.append("make_index")
        if semantic_tags:
            extra_params.append("semantic_tags")
        if already_sorted:
            extra_params.append("already_sorted")
        if dataframe_name is not None and dataframe_name != dataframe.ww.name:
            extra_params.append("dataframe_name")
        if extra_params:
            warnings.warn(
                "A Woodwork-initialized DataFrame was provided, so the following parameters were ignored: "
                + ", ".join(extra_params),
            )

    # Validate that this dataframe's time index type agrees with the rest
    # of the EntitySet before registering it.
    if dataframe.ww.time_index is not None:
        self._check_uniform_time_index(dataframe)
        self._check_secondary_time_index(dataframe)

    if secondary_time_index:
        self._set_secondary_time_index(
            dataframe,
            secondary_time_index=secondary_time_index,
        )
    dataframe = self._normalize_values(dataframe)
    self.dataframe_dict[dataframe.ww.name] = dataframe
    self.reset_data_description()
    self._add_references_to_metadata(dataframe)
    return self
def __setitem__(self, key, value):
    """Dict-style assignment: ``es[name] = df`` registers ``df`` under ``name``.

    Thin alias for :meth:`add_dataframe`.
    """
    self.add_dataframe(value, key)
def normalize_dataframe(
    self,
    base_dataframe_name,
    new_dataframe_name,
    index,
    additional_columns=None,
    copy_columns=None,
    make_time_index=None,
    make_secondary_time_index=None,
    new_dataframe_time_index=None,
    new_dataframe_secondary_time_index=None,
):
    """Create a new dataframe and relationship from unique values of an existing column.

    Args:
        base_dataframe_name (str) : Dataframe name from which to split.

        new_dataframe_name (str): Name of the new dataframe.

        index (str): Column in old dataframe
            that will become index of new dataframe. Relationship
            will be created across this column.

        additional_columns (list[str]):
            List of column names to remove from
            base_dataframe and move to new dataframe.

        copy_columns (list[str]): List of
            column names to copy from old dataframe
            and move to new dataframe.

        make_time_index (bool or str, optional): Create time index for new dataframe based
            on time index in base_dataframe, optionally specifying which column in base_dataframe
            to use for time_index. If specified as True without a specific column name,
            uses the primary time index. Defaults to True if base dataframe has a time index.

        make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
            from key. Values of dictionary are the columns to associate with a secondary time index.
            Only one secondary time index is allowed. If None, only associate the time index.

        new_dataframe_time_index (str, optional): Rename new dataframe time index.

        new_dataframe_secondary_time_index (str, optional): Rename new dataframe secondary time index.

    """
    base_dataframe = self.dataframe_dict[base_dataframe_name]
    additional_columns = additional_columns or []
    copy_columns = copy_columns or []

    # --- Input validation ------------------------------------------------
    # Both column lists must be lists, contain no duplicates, and must not
    # include the column chosen as the new dataframe's index.
    for list_name, col_list in {
        "copy_columns": copy_columns,
        "additional_columns": additional_columns,
    }.items():
        if not isinstance(col_list, list):
            raise TypeError(
                "'{}' must be a list, but received type {}".format(
                    list_name,
                    type(col_list),
                ),
            )
        if len(col_list) != len(set(col_list)):
            raise ValueError(
                f"'{list_name}' contains duplicate columns. All columns must be unique.",
            )
        for col_name in col_list:
            if col_name == index:
                raise ValueError(
                    "Not adding {} as both index and column in {}".format(
                        col_name,
                        list_name,
                    ),
                )

    # The base dataframe's time index must stay on the base dataframe;
    # moving it (via additional_columns) would break the base table.
    for col in additional_columns:
        if col == base_dataframe.ww.time_index:
            raise ValueError(
                "Not moving {} as it is the base time index column. Perhaps, move the column to the copy_columns.".format(
                    col,
                ),
            )

    if isinstance(make_time_index, str):
        if make_time_index not in base_dataframe.columns:
            raise ValueError(
                "'make_time_index' must be a column in the base dataframe",
            )
        elif make_time_index not in additional_columns + copy_columns:
            raise ValueError(
                "'make_time_index' must be specified in 'additional_columns' or 'copy_columns'",
            )
    if index == base_dataframe.ww.index:
        raise ValueError(
            "'index' must be different from the index column of the base dataframe",
        )

    # --- Capture typing information to transfer to the new dataframe -----
    transfer_types = {}
    # Types will be a tuple of (logical_type, semantic_tags, column_metadata, column_description)
    transfer_types[index] = (
        base_dataframe.ww.logical_types[index],
        base_dataframe.ww.semantic_tags[index],
        base_dataframe.ww.columns[index].metadata,
        base_dataframe.ww.columns[index].description,
    )
    for col_name in additional_columns + copy_columns:
        # Remove any existing time index tags
        transfer_types[col_name] = (
            base_dataframe.ww.logical_types[col_name],
            (base_dataframe.ww.semantic_tags[col_name] - {"time_index"}),
            base_dataframe.ww.columns[col_name].metadata,
            base_dataframe.ww.columns[col_name].description,
        )

    # create and add new dataframe
    new_dataframe = self[base_dataframe_name].copy()

    # Default: inherit a time index whenever the base dataframe has one.
    if make_time_index is None and base_dataframe.ww.time_index is not None:
        make_time_index = True

    if isinstance(make_time_index, str):
        # Set the new time index to make_time_index.
        base_time_index = make_time_index
        new_dataframe_time_index = make_time_index
        already_sorted = new_dataframe_time_index == base_dataframe.ww.time_index
    elif make_time_index:
        # Create a new time index based on the base dataframe time index.
        base_time_index = base_dataframe.ww.time_index
        if new_dataframe_time_index is None:
            new_dataframe_time_index = "first_%s_time" % (base_dataframe.ww.name)

        already_sorted = True

        assert (
            base_dataframe.ww.time_index is not None
        ), "Base dataframe doesn't have time_index defined"

        if base_time_index not in [col for col in copy_columns]:
            copy_columns.append(base_time_index)

            time_index_types = (
                base_dataframe.ww.logical_types[base_dataframe.ww.time_index],
                base_dataframe.ww.semantic_tags[base_dataframe.ww.time_index],
                base_dataframe.ww.columns[base_dataframe.ww.time_index].metadata,
                base_dataframe.ww.columns[base_dataframe.ww.time_index].description,
            )
        else:
            # If base_time_index is in copy_columns then we've already added the transfer types
            # but since we're changing the name, we have to remove it
            time_index_types = transfer_types[base_dataframe.ww.time_index]
            del transfer_types[base_dataframe.ww.time_index]

        transfer_types[new_dataframe_time_index] = time_index_types
    else:
        new_dataframe_time_index = None
        already_sorted = False

    if new_dataframe_time_index is not None and new_dataframe_time_index == index:
        raise ValueError(
            "time_index and index cannot be the same value, %s"
            % (new_dataframe_time_index),
        )

    # --- Build the new dataframe ------------------------------------------
    selected_columns = (
        [index]
        + [col for col in additional_columns]
        + [col for col in copy_columns]
    )

    # One row per unique index value; keep="first" pairs with the
    # "first_<name>_time" semantics of the derived time index.
    new_dataframe = new_dataframe.dropna(subset=[index])
    new_dataframe2 = new_dataframe.drop_duplicates(index, keep="first")[
        selected_columns
    ]

    if make_time_index:
        new_dataframe2 = new_dataframe2.rename(
            columns={base_time_index: new_dataframe_time_index},
        )
    if make_secondary_time_index:
        assert (
            len(make_secondary_time_index) == 1
        ), "Can only provide 1 secondary time index"
        secondary_time_index = list(make_secondary_time_index.keys())[0]

        # Secondary time index reflects the *last* observation per instance,
        # so dedupe with keep="last" and join back onto the primary rows.
        secondary_columns = [index, secondary_time_index] + list(
            make_secondary_time_index.values(),
        )[0]
        secondary_df = new_dataframe.drop_duplicates(index, keep="last")[
            secondary_columns
        ]
        if new_dataframe_secondary_time_index:
            secondary_df = secondary_df.rename(
                columns={secondary_time_index: new_dataframe_secondary_time_index},
            )
            secondary_time_index = new_dataframe_secondary_time_index
        else:
            new_dataframe_secondary_time_index = secondary_time_index
        secondary_df = secondary_df.set_index(index)
        new_dataframe = new_dataframe2.join(secondary_df, on=index)
    else:
        new_dataframe = new_dataframe2

    base_dataframe_index = index

    if make_secondary_time_index:
        # Rewrite the secondary-time-index mapping to use the (possibly
        # renamed) secondary time index column name.
        old_ti_name = list(make_secondary_time_index.keys())[0]
        ti_cols = list(make_secondary_time_index.values())[0]
        ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
        make_secondary_time_index = {secondary_time_index: ti_cols}

    if is_instance(new_dataframe, ps, "DataFrame"):
        already_sorted = False

    # will initialize Woodwork on this DataFrame
    logical_types = {}
    semantic_tags = {}
    column_metadata = {}
    column_descriptions = {}
    for col_name, (ltype, tags, metadata, description) in transfer_types.items():
        logical_types[col_name] = ltype
        semantic_tags[col_name] = tags - {"time_index"}
        column_metadata[col_name] = copy.deepcopy(metadata)
        column_descriptions[col_name] = description

    new_dataframe.ww.init(
        name=new_dataframe_name,
        index=index,
        already_sorted=already_sorted,
        time_index=new_dataframe_time_index,
        logical_types=logical_types,
        semantic_tags=semantic_tags,
        column_metadata=column_metadata,
        column_descriptions=column_descriptions,
    )

    self.add_dataframe(
        new_dataframe,
        secondary_time_index=make_secondary_time_index,
    )

    # Remove the moved columns from the base dataframe and mark its copy of
    # the index column as a foreign key into the new dataframe.
    self.dataframe_dict[base_dataframe_name] = self.dataframe_dict[
        base_dataframe_name
    ].ww.drop(additional_columns)

    self.dataframe_dict[base_dataframe_name].ww.add_semantic_tags(
        {base_dataframe_index: "foreign_key"},
    )

    self.add_relationship(
        new_dataframe_name,
        index,
        base_dataframe_name,
        base_dataframe_index,
    )
    self.reset_data_description()
    return self
# ###########################################################################
# # Data wrangling methods ###############################################
# ###########################################################################
def concat(self, other, inplace=False):
    """Combine entityset with another to create a new entityset with the
    combined data of both entitysets.

    Both entitysets must have identical dataframes, relationships, and
    column names (enforced via ``__eq__``). When ``inplace`` is True, this
    entityset is modified; otherwise a deep copy is combined and returned.
    """
    if not self.__eq__(other):
        raise ValueError(
            "Entitysets must have the same dataframes, relationships"
            ", and column names",
        )

    if inplace:
        combined_es = self
    else:
        combined_es = copy.deepcopy(self)

    # Pick the concat implementation matching the EntitySet backend.
    lib = pd
    if self.dataframe_type == Library.SPARK:
        lib = ps
    elif self.dataframe_type == Library.DASK:
        lib = dd

    has_last_time_index = []
    for df in self.dataframes:
        self_df = df
        other_df = other[df.ww.name]
        combined_df = lib.concat([self_df, other_df])
        # If both DataFrames have made indexes, there will likely
        # be overlap in the index column, so we use the other values
        if self_df.ww.metadata.get("created_index") or other_df.ww.metadata.get(
            "created_index",
        ):
            # NOTE(review): `col != index or col != time_index` is True for
            # every column unless index == time_index, so this effectively
            # dedupes on all columns. If the intent was "all columns except
            # the index and time index", the condition should use `and` —
            # TODO confirm against upstream behavior before changing.
            columns = [
                col
                for col in combined_df.columns
                if col != df.ww.index or col != df.ww.time_index
            ]
        else:
            columns = [df.ww.index]
        combined_df.drop_duplicates(columns, inplace=True)

        # Track which dataframes need their last time index recalculated
        # after the combined data is in place.
        self_lti_col = df.ww.metadata.get("last_time_index")
        other_lti_col = other[df.ww.name].ww.metadata.get("last_time_index")
        if self_lti_col is not None or other_lti_col is not None:
            has_last_time_index.append(df.ww.name)

        combined_es.replace_dataframe(
            dataframe_name=df.ww.name,
            df=combined_df,
            recalculate_last_time_indexes=False,
            already_sorted=False,
        )

    if has_last_time_index:
        combined_es.add_last_time_indexes(updated_dataframes=has_last_time_index)

    combined_es.reset_data_description()
    return combined_es
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_dataframes=None):
    """
    Calculates the last time index values for each dataframe (the last time
    an instance or children of that instance were observed). Used when
    calculating features using training windows. Adds the last time index as
    a series named _ft_last_time on the dataframe.

    Args:
        updated_dataframes (list[str]): List of dataframe names to update last_time_index for
            (will update all parents of those dataframes as well)
    """
    # Generate graph of dataframes to find leaf dataframes
    children = defaultdict(list)  # parent --> child mapping
    child_cols = defaultdict(dict)
    for r in self.relationships:
        children[r._parent_dataframe_name].append(r.child_dataframe)
        child_cols[r._parent_dataframe_name][
            r._child_dataframe_name
        ] = r.child_column

    updated_dataframes = updated_dataframes or []
    if updated_dataframes:
        # find parents of updated_dataframes
        # BFS over forward (child -> parent) edges: a parent's last time
        # index depends on its children, so all ancestors must be redone.
        parent_queue = updated_dataframes[:]
        parents = set()
        while len(parent_queue):
            df_name = parent_queue.pop(0)
            if df_name in parents:
                continue
            parents.add(df_name)

            for parent_name, _ in self.get_forward_dataframes(df_name):
                parent_queue.append(parent_name)

        queue = [self[p] for p in parents]
        to_explore = parents
    else:
        to_explore = set(self.dataframe_dict.keys())
        queue = self.dataframes[:]

    explored = set()
    # Store the last time indexes for the entire entityset in a dictionary to update
    es_lti_dict = {}
    for df in self.dataframes:
        lti_col = df.ww.metadata.get("last_time_index")
        if lti_col is not None:
            lti_col = df[lti_col]
        es_lti_dict[df.ww.name] = lti_col

    # Reset the LTI of every dataframe that will be recomputed.
    for df in queue:
        es_lti_dict[df.ww.name] = None

    # We will explore children of dataframes on the queue,
    # which may not be in the to_explore set. Therefore,
    # we check whether all elements of to_explore are in
    # explored, rather than just comparing length
    while not to_explore.issubset(explored):
        dataframe = queue.pop(0)

        if es_lti_dict[dataframe.ww.name] is None:
            # Seed the LTI: use the dataframe's own time index, or an
            # all-null series (indexed by its index column) if it has none.
            if dataframe.ww.time_index is not None:
                lti = dataframe[dataframe.ww.time_index].copy()
                if is_instance(dataframe, dd, "DataFrame"):
                    # The current Dask implementation doesn't set the index of the dataframe
                    # to the dataframe's index, so we have to do it manually here
                    lti.index = dataframe[dataframe.ww.index].copy()
            else:
                lti = dataframe.ww[dataframe.ww.index].copy()
                if is_instance(dataframe, dd, "DataFrame"):
                    lti.index = dataframe[dataframe.ww.index].copy()
                    lti = lti.apply(lambda x: None)
                elif is_instance(dataframe, ps, "DataFrame"):
                    lti = ps.Series(pd.Series(index=lti.to_list(), name=lti.name))
                else:
                    # Cannot have a category dtype with nans when calculating last time index
                    lti = lti.astype("object")
                    lti[:] = None

            es_lti_dict[dataframe.ww.name] = lti

        if dataframe.ww.name in children:
            child_dataframes = children[dataframe.ww.name]

            # if all children not explored, skip for now
            if not set([df.ww.name for df in child_dataframes]).issubset(explored):
                # Now there is a possibility that a child dataframe
                # was not explicitly provided in updated_dataframes,
                # and never made it onto the queue. If updated_dataframes
                # is None then we just load all dataframes onto the queue
                # so we didn't need this logic
                for df in child_dataframes:
                    if df.ww.name not in explored and df.ww.name not in [
                        q.ww.name for q in queue
                    ]:
                        # must also reset last time index here
                        es_lti_dict[df.ww.name] = None
                        queue.append(df)
                # Requeue this dataframe after its children.
                queue.append(dataframe)
                continue

            # updated last time from all children
            for child_df in child_dataframes:
                # TODO: Figure out if Dask code related to indexes is important for Spark
                if es_lti_dict[child_df.ww.name] is None:
                    continue
                link_col = child_cols[dataframe.ww.name][child_df.ww.name].name

                lti_is_dask = is_instance(
                    es_lti_dict[child_df.ww.name],
                    dd,
                    "Series",
                )
                lti_is_spark = is_instance(
                    es_lti_dict[child_df.ww.name],
                    ps,
                    "Series",
                )

                if lti_is_dask or lti_is_spark:
                    # Distributed path: join child LTIs to the parent key,
                    # take the max per parent instance, then join with the
                    # parent's current LTI.
                    to_join = child_df[link_col]
                    if lti_is_dask:
                        to_join.index = child_df[child_df.ww.index]

                    lti_df = (
                        es_lti_dict[child_df.ww.name]
                        .to_frame(name="last_time")
                        .join(to_join.to_frame(name=dataframe.ww.index))
                    )

                    if lti_is_dask:
                        new_index = lti_df.index.copy()
                        new_index.name = None
                        lti_df.index = new_index
                    lti_df = lti_df.groupby(lti_df[dataframe.ww.index]).agg("max")

                    lti_df = (
                        es_lti_dict[dataframe.ww.name]
                        .to_frame(name="last_time_old")
                        .join(lti_df)
                    )

                else:
                    # Pandas path: sort so the latest child time per parent
                    # instance survives drop_duplicates(keep="last").
                    lti_df = pd.DataFrame(
                        {
                            "last_time": es_lti_dict[child_df.ww.name],
                            dataframe.ww.index: child_df[link_col],
                        },
                    )

                    # sort by time and keep only the most recent
                    lti_df.sort_values(
                        ["last_time", dataframe.ww.index],
                        kind="mergesort",
                        inplace=True,
                    )

                    lti_df.drop_duplicates(
                        dataframe.ww.index,
                        keep="last",
                        inplace=True,
                    )

                    lti_df.set_index(dataframe.ww.index, inplace=True)
                    lti_df = lti_df.reindex(es_lti_dict[dataframe.ww.name].index)
                    lti_df["last_time_old"] = es_lti_dict[dataframe.ww.name]
                if not (lti_is_dask or lti_is_spark) and lti_df.empty:
                    # Pandas errors out if it tries to do fillna and then max on an empty dataframe
                    lti_df = pd.Series([], dtype="object")
                else:
                    if lti_is_spark:
                        lti_df["last_time"] = ps.to_datetime(lti_df["last_time"])
                        lti_df["last_time_old"] = ps.to_datetime(
                            lti_df["last_time_old"],
                        )
                        # TODO: Figure out a workaround for fillna and replace
                        lti_df = lti_df.max(axis=1)
                    else:
                        # Use a sentinel date so row-wise max ignores NaT,
                        # then convert the sentinel back to NaT.
                        lti_df["last_time"] = lti_df["last_time"].astype(
                            "datetime64[ns]",
                        )
                        lti_df["last_time_old"] = lti_df["last_time_old"].astype(
                            "datetime64[ns]",
                        )
                        lti_df = lti_df.fillna(
                            pd.to_datetime("1800-01-01 00:00"),
                        ).max(axis=1)
                        lti_df = lti_df.replace(
                            pd.to_datetime("1800-01-01 00:00"),
                            pd.NaT,
                        )

                es_lti_dict[dataframe.ww.name] = lti_df
                es_lti_dict[dataframe.ww.name].name = "last_time"

        explored.add(dataframe.ww.name)

    # Store the last time index on the DataFrames
    dfs_to_update = {}
    for df in self.dataframes:
        lti = es_lti_dict[df.ww.name]
        if lti is not None:
            lti_ltype = None
            # Keep the LTI's logical type consistent with the entityset's
            # time type (numeric entitysets get Double, otherwise Datetime).
            if self.time_type == "numeric":
                if lti.dtype == "datetime64[ns]":
                    # Woodwork cannot convert from datetime to numeric
                    lti = lti.apply(lambda x: x.value)
                lti = init_series(lti, logical_type="Double")
                lti_ltype = "Double"
            else:
                lti = init_series(lti, logical_type="Datetime")
                lti_ltype = "Datetime"

            lti.name = LTI_COLUMN_NAME

            if LTI_COLUMN_NAME in df.columns:
                if "last_time_index" in df.ww.semantic_tags[LTI_COLUMN_NAME]:
                    # Remove any previous last time index placed by featuretools
                    df.ww.pop(LTI_COLUMN_NAME)
                else:
                    raise ValueError(
                        "Cannot add a last time index on DataFrame with an existing "
                        f"'{LTI_COLUMN_NAME}' column. Please rename '{LTI_COLUMN_NAME}'.",
                    )

            # Add the new column to the DataFrame
            if is_instance(df, dd, "DataFrame"):
                new_df = df.merge(lti.reset_index(), on=df.ww.index)
                new_df.ww.init_with_partial_schema(
                    schema=df.ww.schema,
                    logical_types={LTI_COLUMN_NAME: lti_ltype},
                )
                new_idx = new_df[new_df.ww.index]
                new_idx.name = None
                new_df.index = new_idx
                dfs_to_update[df.ww.name] = new_df
            elif is_instance(df, ps, "DataFrame"):
                new_df = df.merge(lti, left_on=df.ww.index, right_index=True)
                new_df.ww.init_with_partial_schema(
                    schema=df.ww.schema,
                    logical_types={LTI_COLUMN_NAME: lti_ltype},
                )
                dfs_to_update[df.ww.name] = new_df
            else:
                # pandas: assign the column in place via Woodwork.
                df.ww[LTI_COLUMN_NAME] = lti
                if "last_time_index" not in df.ww.semantic_tags[LTI_COLUMN_NAME]:
                    df.ww.add_semantic_tags({LTI_COLUMN_NAME: "last_time_index"})
                df.ww.metadata["last_time_index"] = LTI_COLUMN_NAME

    # Dask/Spark produced replacement frames; tag them and swap them in.
    for df in dfs_to_update.values():
        df.ww.add_semantic_tags({LTI_COLUMN_NAME: "last_time_index"})
        df.ww.metadata["last_time_index"] = LTI_COLUMN_NAME
        self.dataframe_dict[df.ww.name] = df
    self.reset_data_description()
    for df in self.dataframes:
        self._add_references_to_metadata(df)
# ###########################################################################
# # Pickling ###############################################
# ###########################################################################
def __getstate__(self):
    """Pickle support: snapshot ``__dict__`` plus each dataframe's Woodwork schema.

    Schemas are stored separately under ``WW_SCHEMA_KEY`` because Woodwork
    typing does not survive pickling of the raw dataframes.
    """
    schemas = {}
    for name, frame in self.dataframe_dict.items():
        schemas[name] = frame.ww.schema
    state = dict(self.__dict__)
    state[WW_SCHEMA_KEY] = schemas
    return state
def __setstate__(self, state):
    """Pickle support: restore state and re-initialize Woodwork typing.

    Re-applies each saved schema (skipping validation for speed) before
    updating ``__dict__`` with the remaining state.
    """
    schemas = state.pop(WW_SCHEMA_KEY)
    for name, frame in state.get("dataframe_dict", {}).items():
        schema = schemas[name]
        if schema is not None:
            frame.ww.init(schema=schema, validate=False)
    self.__dict__.update(state)
# ###########################################################################
# # Other ###############################################
# ###########################################################################
def add_interesting_values(
    self,
    max_values=5,
    verbose=False,
    dataframe_name=None,
    values=None,
):
    """Find or set interesting values for categorical columns, to be used to generate "where" clauses

    Args:
        max_values (int) : Maximum number of values per column to add.
        verbose (bool) : If True, print summary of interesting values found.
        dataframe_name (str) : The dataframe in the EntitySet for which to add interesting values.
            If not specified interesting values will be added for all dataframes.
        values (dict): A dictionary mapping column names to the interesting values to set
            for the column. If specified, a corresponding dataframe_name must also be provided.
            If not specified, interesting values will be set for all eligible columns. If values
            are specified, max_values and verbose parameters will be ignored.

    Notes:
        Finding interesting values is not supported with Dask or Spark EntitySets.
        To set interesting values for Dask or Spark EntitySets, values must be
        specified with the ``values`` parameter.

    Returns:
        None
    """
    if dataframe_name is None and values is not None:
        raise ValueError("dataframe_name must be specified if values are provided")

    # Explicit values: write them straight into column metadata, skipping
    # discovery entirely.
    if dataframe_name is not None and values is not None:
        for column, vals in values.items():
            self[dataframe_name].ww.columns[column].metadata[
                "interesting_values"
            ] = vals
        return

    if dataframe_name:
        dataframes = [self[dataframe_name]]
    else:
        dataframes = self.dataframes

    def add_value(df, col, val, verbose):
        # Append one discovered value to the column's metadata list.
        if verbose:
            msg = "Column {}: Marking {} as an interesting value"
            logger.info(msg.format(col, val))
        interesting_vals = df.ww.columns[col].metadata.get("interesting_values", [])
        interesting_vals.append(val)
        df.ww.columns[col].metadata["interesting_values"] = interesting_vals

    for df in dataframes:
        # Pull more values than needed (at least 25) so the frequency
        # heuristic below has enough context.
        value_counts = df.ww.value_counts(top_n=max(25, max_values), dropna=True)
        total_count = len(df)

        for col, counts in value_counts.items():
            # Index and foreign-key columns are never interesting.
            if {"index", "foreign_key"}.intersection(df.ww.semantic_tags[col]):
                continue

            for i in range(min(max_values, len(counts))):
                # Categorical columns will include counts of 0 for all values
                # in categories. Stop when we encounter a 0 count.
                if counts[i]["count"] == 0:
                    break
                if len(counts) < 25:
                    # Low-cardinality column: take every value directly.
                    value = counts[i]["value"]
                    add_value(df, col, value, verbose)
                else:
                    # High-cardinality column: only keep values that are
                    # common but not dominant (5%-95% of rows).
                    fraction = counts[i]["count"] / total_count
                    if fraction > 0.05 and fraction < 0.95:
                        value = counts[i]["value"]
                        add_value(df, col, value, verbose)
                    else:
                        break

    self.reset_data_description()
def plot(self, to_file=None):
    """
    Create a UML diagram-ish graph of the EntitySet.

    Args:
        to_file (str, optional) : Path to where the plot should be saved.
            If set to None (as by default), the plot will not be saved.

    Returns:
        graphviz.Digraph : Graph object that can directly be displayed in
            Jupyter notebooks. Nodes of the graph correspond to the DataFrames
            in the EntitySet, showing the typing information for each column.

    Note:
        The typing information displayed for each column is based off of the Woodwork
        ColumnSchema for that column and is represented as ``LogicalType; semantic_tags``,
        but the standard semantic tags have been removed for brevity.
    """
    # Import lazily and validate that an output format can be derived from
    # the requested file extension.
    graphviz = check_graphviz()
    format_ = get_graphviz_format(graphviz=graphviz, to_file=to_file)

    # Initialize a new directed graph
    graph = graphviz.Digraph(
        self.id,
        format=format_,
        graph_attr={"splines": "ortho"},
    )

    # Draw dataframes
    for df in self.dataframes:
        column_typing_info = []
        for col_name, col_schema in df.ww.columns.items():
            col_string = col_name + " : " + str(col_schema.logical_type)

            # Hide the logical type's standard tags; show only extras.
            tags = col_schema.semantic_tags - col_schema.logical_type.standard_tags
            if tags:
                col_string += "; "
                col_string += ", ".join(tags)
            column_typing_info.append(col_string)

        # "\l" is graphviz's left-justified line break for record labels.
        columns_string = "\l".join(column_typing_info)  # noqa: W605
        if is_instance(df, dd, "DataFrame"):  # dataframe is a dask dataframe
            # Dask row counts would require computation, so omit them.
            label = "{%s |%s\l}" % (df.ww.name, columns_string)  # noqa: W605
        else:
            nrows = df.shape[0]
            label = "{%s (%d row%s)|%s\l}" % (  # noqa: W605
                df.ww.name,
                nrows,
                "s" * (nrows > 1),
                columns_string,
            )
        graph.node(df.ww.name, shape="record", label=label)

    # Draw relationships
    for rel in self.relationships:
        # Display the key only once if is the same for both related dataframes
        if rel._parent_column_name == rel._child_column_name:
            label = rel._parent_column_name
        else:
            label = "%s -> %s" % (rel._parent_column_name, rel._child_column_name)

        graph.edge(
            rel._child_dataframe_name,
            rel._parent_dataframe_name,
            xlabel=label,
        )

    if to_file:
        save_graph(graph, to_file, format_)
    return graph
def _handle_time(
    self,
    dataframe_name,
    df,
    time_last=None,
    training_window=None,
    include_cutoff_time=True,
):
    """
    Filter a dataframe for all instances before time_last.
    If the dataframe does not have a time index, return the original
    dataframe.

    Args:
        dataframe_name (str): Name of the dataframe whose schema governs
            the time index and secondary time index handling.
        df: The (possibly already filtered) data to restrict by time.
        time_last: Cutoff time; rows after this are dropped.
        training_window (Timedelta, optional): If given, also drop rows
            older than ``time_last - training_window``.
        include_cutoff_time (bool): Whether rows exactly at the cutoff
            are kept (``<=``/``>``) or excluded (``<``/``>=``).
    """
    schema = self[dataframe_name].ww.schema

    # Spark comparisons fail on numpy datetime64 cutoffs; normalize first.
    if is_instance(df, ps, "DataFrame") and isinstance(time_last, np.datetime64):
        time_last = pd.to_datetime(time_last)

    if schema.time_index:
        # Dask/Spark frames don't expose .empty cheaply; treat as non-empty.
        df_empty = df.empty if isinstance(df, pd.DataFrame) else False
        if time_last is not None and not df_empty:
            if include_cutoff_time:
                df = df[df[schema.time_index] <= time_last]
            else:
                df = df[df[schema.time_index] < time_last]
            if training_window is not None:
                training_window = _check_timedelta(training_window)
                if include_cutoff_time:
                    mask = df[schema.time_index] > time_last - training_window
                else:
                    mask = df[schema.time_index] >= time_last - training_window

                # A row also stays in the window if its last time index
                # (last observation of it or its children) falls inside.
                lti_col = schema.metadata.get("last_time_index")
                if lti_col is not None:
                    if include_cutoff_time:
                        lti_mask = df[lti_col] > time_last - training_window
                    else:
                        lti_mask = df[lti_col] >= time_last - training_window
                    mask = mask | lti_mask
                else:
                    warnings.warn(
                        "Using training_window but last_time_index is "
                        "not set for dataframe %s" % (dataframe_name),
                    )

                df = df[mask]

    # Secondary time indexes: null out their associated columns for rows
    # whose secondary time is at/after the cutoff (data not yet known).
    secondary_time_indexes = schema.metadata.get("secondary_time_index") or {}
    for secondary_time_index, columns in secondary_time_indexes.items():
        # should we use ignore time last here?
        df_empty = df.empty if isinstance(df, pd.DataFrame) else False
        if time_last is not None and not df_empty:
            mask = df[secondary_time_index] >= time_last
            if is_instance(df, dd, "DataFrame"):
                for col in columns:
                    df[col] = df[col].mask(mask, np.nan)
            elif is_instance(df, ps, "DataFrame"):
                df.loc[mask, columns] = None
            else:
                df.loc[mask, columns] = np.nan

    return df
def query_by_values(
    self,
    dataframe_name,
    instance_vals,
    column_name=None,
    columns=None,
    time_last=None,
    training_window=None,
    include_cutoff_time=True,
):
    """Query instances that have column with given value

    Args:
        dataframe_name (str): The id of the dataframe to query
        instance_vals (pd.Dataframe, pd.Series, list[str] or str) :
            Instance(s) to match.
        column_name (str) : Column to query on. If None, query on index.
        columns (list[str]) : Columns to return. Return all columns if None.
        time_last (pd.TimeStamp) : Query data up to and including this
            time. Only applies if dataframe has a time index.
        training_window (Timedelta, optional):
            Window defining how much time before the cutoff time data
            can be used when calculating features. If None, all data before cutoff time is used.
        include_cutoff_time (bool):
            If True, data at cutoff time are included in calculating features

    Returns:
        pd.DataFrame : instances that match constraints with ids in order of underlying dataframe
    """
    dataframe = self[dataframe_name]
    if not column_name:
        column_name = dataframe.ww.index

    # Normalize scalar/list/Series inputs into a uniform Series (or None).
    instance_vals = _vals_to_series(instance_vals, column_name)

    training_window = _check_timedelta(training_window)

    if training_window is not None:
        assert (
            training_window.has_no_observations()
        ), "Training window cannot be in observations"

    if instance_vals is None:
        # No filter requested: operate on a copy of the full dataframe.
        df = dataframe.copy()

    elif isinstance(instance_vals, pd.Series) and instance_vals.empty:
        # Empty filter: preserve schema but return zero rows.
        df = dataframe.head(0)

    else:
        # Dask/Spark cannot use isin with arbitrary values efficiently;
        # express the filter as an inner merge instead.
        if is_instance(instance_vals, (dd, ps), "Series"):
            df = dataframe.merge(
                instance_vals.to_frame(),
                how="inner",
                on=column_name,
            )
        elif isinstance(instance_vals, pd.Series) and is_instance(
            dataframe,
            ps,
            "DataFrame",
        ):
            df = dataframe.merge(
                ps.DataFrame({column_name: instance_vals}),
                how="inner",
                on=column_name,
            )
        else:
            df = dataframe[dataframe[column_name].isin(instance_vals)]

        if isinstance(dataframe, pd.DataFrame):
            df = df.set_index(dataframe.ww.index, drop=False)

        # ensure filtered df has same categories as original
        # workaround for issue below
        # github.com/pandas-dev/pandas/issues/22501#issuecomment-415982538
        #
        # Pandas claims that bug is fixed but it still shows up in some
        # cases. More investigation needed.
        #
        # Note: Woodwork stores categorical columns with a `string` dtype for Spark
        if dataframe.ww.columns[column_name].is_categorical and not is_instance(
            df,
            ps,
            "DataFrame",
        ):
            categories = pd.api.types.CategoricalDtype(
                categories=dataframe[column_name].cat.categories,
            )
            df[column_name] = df[column_name].astype(categories)

    # Apply cutoff time / training window filtering on the matched rows.
    df = self._handle_time(
        dataframe_name=dataframe_name,
        df=df,
        time_last=time_last,
        training_window=training_window,
        include_cutoff_time=include_cutoff_time,
    )

    if columns is not None:
        df = df[columns]

    return df
def replace_dataframe(
    self,
    dataframe_name,
    df,
    already_sorted=False,
    recalculate_last_time_indexes=True,
):
    """Replace the internal dataframe of an EntitySet table, keeping Woodwork typing information the same.

    Optionally makes sure that data is sorted, that reference indexes to other dataframes are consistent,
    and that last_time_indexes are updated to reflect the new data. If an index was created for the original
    dataframe and is not present on the new dataframe, an index column of the same name will be added to the
    new dataframe.
    """
    # The replacement must use the same backend (pandas/Dask/Spark).
    if not isinstance(df, type(self[dataframe_name])):
        raise TypeError("Incorrect DataFrame type used")

    # If the original DataFrame has a last time index column and the new one doesnt
    # remove the column and the reference to last time index from that dataframe
    last_time_index_column = self[dataframe_name].ww.metadata.get("last_time_index")
    if (
        last_time_index_column is not None
        and last_time_index_column not in df.columns
    ):
        self[dataframe_name].ww.pop(last_time_index_column)
        del self[dataframe_name].ww.metadata["last_time_index"]

    # If the original DataFrame had an index created via make_index,
    # we may need to remake the index if it's not in the new DataFrame
    created_index = self[dataframe_name].ww.metadata.get("created_index")
    if created_index is not None and created_index not in df.columns:
        df = _create_index(df, created_index)

    # The new dataframe must have exactly the same columns (any order).
    old_column_names = list(self[dataframe_name].columns)
    if len(df.columns) != len(old_column_names):
        raise ValueError(
            "New dataframe contains {} columns, expecting {}".format(
                len(df.columns),
                len(old_column_names),
            ),
        )
    for col_name in old_column_names:
        if col_name not in df.columns:
            raise ValueError(
                "New dataframe is missing new {} column".format(col_name),
            )

    if df.ww.schema is not None:
        warnings.warn(
            "Woodwork typing information on new dataframe will be replaced "
            f"with existing typing information from {dataframe_name}",
        )

    # Reapply the existing schema so typing stays identical.
    df.ww.init(
        schema=self[dataframe_name].ww._schema,
        already_sorted=already_sorted,
    )

    # Make sure column ordering matches original ordering
    df = df.ww[old_column_names]
    df = self._normalize_values(df)
    self.dataframe_dict[dataframe_name] = df

    # Revalidate time index types and refresh the secondary time index,
    # since the underlying data has changed.
    if self[dataframe_name].ww.time_index is not None:
        self._check_uniform_time_index(self[dataframe_name])

        df_metadata = self[dataframe_name].ww.metadata
        self.set_secondary_time_index(
            dataframe_name,
            df_metadata.get("secondary_time_index"),
        )
    if recalculate_last_time_indexes and last_time_index_column is not None:
        self.add_last_time_indexes(updated_dataframes=[dataframe_name])
    self.reset_data_description()
    self._add_references_to_metadata(df)
def _check_time_indexes(self):
    """Validate the primary and secondary time indexes of every dataframe in the entityset."""
    for df in self.dataframe_dict.values():
        self._check_uniform_time_index(df)
        self._check_secondary_time_index(df)
def _check_secondary_time_index(self, dataframe, secondary_time_index=None):
    """Validate a secondary time index mapping for ``dataframe``.

    Falls back to the mapping stored in the dataframe's Woodwork metadata when
    none is passed. Raises ValueError if a secondary time index exists without
    a primary one.
    """
    if not secondary_time_index:
        secondary_time_index = dataframe.ww.metadata.get("secondary_time_index", {})
    if secondary_time_index and dataframe.ww.time_index is None:
        raise ValueError(
            "Cannot set secondary time index on a DataFrame that has no primary time index.",
        )
    for time_index, columns in secondary_time_index.items():
        self._check_uniform_time_index(dataframe, column_name=time_index)
        # Make sure each secondary time index column is included in its own group.
        if time_index not in columns:
            columns.append(time_index)
def _check_uniform_time_index(self, dataframe, column_name=None):
    """Ensure a dataframe's time index column matches the entityset-wide time type."""
    target = column_name or dataframe.ww.time_index
    if target is None:
        # No time index to validate.
        return
    time_type = self._get_time_type(dataframe, target)
    if self.time_type is None:
        # The first time index seen determines the expected type for the entityset.
        self.time_type = time_type
        return
    if self.time_type != time_type:
        info = "%s time index is %s type which differs from other entityset time indexes"
        raise TypeError(info % (dataframe.ww.name, time_type))
def _get_time_type(self, dataframe, column_name=None):
    """Return the time type of a column: the string "numeric" or the Datetime logical type.

    Raises:
        TypeError: If the column is neither numeric nor datetime.
    """
    schema = dataframe.ww.columns[column_name or dataframe.ww.time_index]
    if schema.is_numeric:
        return "numeric"
    if schema.is_datetime:
        return Datetime
    info = "%s time index not recognized as numeric or datetime"
    raise TypeError(info % dataframe.ww.name)
def _add_references_to_metadata(self, dataframe):
    """Stamp the entityset id (and dataframe name) into dataframe and column metadata."""
    dataframe.ww.metadata.update(entityset_id=self.id)
    df_name = dataframe.ww.name
    for col in dataframe.columns:
        col_metadata = dataframe.ww._schema.columns[col].metadata
        col_metadata.update(dataframe_name=df_name, entityset_id=self.id)
    # Register this entityset in the module-level reference map under its id.
    _ES_REF[self.id] = self
def _normalize_values(self, dataframe):
    """Replace scalar missing entries in LatLong columns with a (nan, nan) pair.

    Spark columns receive a two-element list instead of a tuple. Existing
    list/tuple/array coordinate values pass through untouched.
    """

    def _fill_missing(value, use_list=False):
        is_scalar = not isinstance(value, (list, tuple, np.ndarray))
        if is_scalar and pd.isna(value):
            return [np.nan, np.nan] if use_list else (np.nan, np.nan)
        return value

    for column, logical_type in dataframe.ww.logical_types.items():
        if not isinstance(logical_type, LatLong):
            continue
        series = dataframe[column]
        if ps and isinstance(series, ps.Series):
            # Spark: only apply to non-empty series.
            if len(series):
                dataframe[column] = series.apply(_fill_missing, args=(True,))
        elif is_instance(dataframe, dd, "DataFrame"):
            # Dask needs an explicit meta for the result column.
            dataframe[column] = series.apply(
                _fill_missing,
                meta=(column, logical_type.primary_dtype),
            )
        else:
            dataframe[column] = series.apply(_fill_missing)
    return dataframe
def _vals_to_series(instance_vals, column_id):
"""
instance_vals may be a pd.Dataframe, a pd.Series, a list, a single
value, or None. This function always returns a Series or None.
"""
if instance_vals is None:
return None
# If this is a single value, make it a list
if not hasattr(instance_vals, "__iter__"):
instance_vals = [instance_vals]
# convert iterable to pd.Series
if isinstance(instance_vals, pd.DataFrame):
out_vals = instance_vals[column_id]
elif is_instance(instance_vals, (pd, dd, ps), "Series"):
out_vals = instance_vals.rename(column_id)
else:
out_vals = pd.Series(instance_vals)
# no duplicates or NaN values
out_vals = out_vals.drop_duplicates().dropna()
# want index to have no name for the merge in query_by_values
out_vals.index.name = None
return out_vals
def _get_or_create_index(index, make_index, df):
"""Handles index creation logic base on user input"""
index_was_created = False
if index is None:
# Case 1: user wanted to make index but did not specify column name
assert not make_index, "Must specify an index name if make_index is True"
# Case 2: make_index not specified but no index supplied, use first column
warnings.warn(
(
"Using first column as index. "
"To change this, specify the index parameter"
),
)
index = df.columns[0]
elif make_index and index in df.columns:
# Case 3: user wanted to make index but column already exists
raise RuntimeError(
f"Cannot make index: column with name {index} already present",
)
elif index not in df.columns:
if not make_index:
# Case 4: user names index, it is not in df. does not specify
# make_index. Make new index column and warn
warnings.warn(
"index {} not found in dataframe, creating new "
"integer column".format(index),
)
# Case 5: make_index with no errors or warnings
# (Case 4 also uses this code path)
df = _create_index(df, index)
index_was_created = True
# Case 6: user specified index, which is already in df. No action needed.
return index_was_created, index, df
def _create_index(df, index):
    """Add a zero-based integer column named ``index`` to ``df`` and return ``df``."""
    is_distributed = is_instance(df, dd, "DataFrame") or is_instance(df, ps, "DataFrame")
    if not is_distributed:
        # pandas: insert the counter as the first column.
        df.insert(0, index, range(len(df)))
    else:
        # Dask/Spark frames lack positional insert; derive 0..n-1 via a cumsum.
        df[index] = 1
        df[index] = df[index].cumsum() - 1
    return df
| 79,133 | 40.023328 | 137 | py |
featuretools | featuretools-main/featuretools/entityset/__init__.py | # flake8: noqa
from featuretools.entityset.api import *
| 56 | 18 | 40 | py |
featuretools | featuretools-main/featuretools/entityset/serialize.py | import datetime
import json
import os
import tarfile
import tempfile
from woodwork.serializers.serializer_base import typing_info_to_dict
from featuretools.utils.gen_utils import import_or_none
from featuretools.utils.s3_utils import get_transport_params, use_smartopen_es
from featuretools.utils.wrangle import _is_s3, _is_url
from featuretools.version import ENTITYSET_SCHEMA_VERSION
ps = import_or_none("pyspark.pandas")
FORMATS = ["csv", "pickle", "parquet"]
def entityset_to_description(entityset, format=None):
    """Serialize entityset to data description.

    Args:
        entityset (EntitySet) : Instance of :class:`.EntitySet`.
        format (str, optional) : Storage format recorded in the description.

    Returns:
        description (dict) : Description of :class:`.EntitySet`.
    """
    dataframe_typing_info = {}
    for dataframe in entityset.dataframes:
        dataframe_typing_info[dataframe.ww.name] = typing_info_to_dict(dataframe)
    relationship_dicts = [rel.to_dictionary() for rel in entityset.relationships]
    return {
        "schema_version": ENTITYSET_SCHEMA_VERSION,
        "id": entityset.id,
        "dataframes": dataframe_typing_info,
        "relationships": relationship_dicts,
        "format": format,
        "data_type": entityset.dataframe_type,
    }
def write_data_description(entityset, path, profile_name=None, **kwargs):
    """Serialize entityset to data description and write to disk or S3 path.

    Args:
        entityset (EntitySet) : Instance of :class:`.EntitySet`.
        path (str) : Location on disk or S3 path to write `data_description.json` and dataframe data.
        profile_name (str, bool): The AWS profile specified to write to S3. Will default to None and search for AWS credentials.
            Set to False to use an anonymous profile.
        kwargs (keywords) : Additional keyword arguments to pass as keywords arguments to the underlying serialization method or to specify AWS profile.
    """
    if _is_s3(path):
        # S3: serialize everything to a temp directory, then upload one tar archive.
        with tempfile.TemporaryDirectory() as tmpdir:
            os.makedirs(os.path.join(tmpdir, "data"))
            dump_data_description(entityset, tmpdir, **kwargs)
            file_path = create_archive(tmpdir)
            transport_params = get_transport_params(profile_name)
            use_smartopen_es(
                file_path,
                path,
                read=False,
                transport_params=transport_params,
            )
    elif _is_url(path):
        raise ValueError("Writing to URLs is not supported")
    else:
        # Local disk: write the description file and data directory directly.
        path = os.path.abspath(path)
        os.makedirs(os.path.join(path, "data"), exist_ok=True)
        dump_data_description(entityset, path, **kwargs)
def dump_data_description(entityset, path, **kwargs):
    """Write each dataframe's data plus a ``data_description.json`` file under ``path``.

    Args:
        entityset (EntitySet) : EntitySet to serialize.
        path (str) : Target directory; receives the description file and a
            ``data`` subdirectory with one folder per dataframe.
        kwargs (keywords) : Passed through to ``woodwork`` ``to_disk`` and used
            to look up the serialization ``format``.
    """
    format = kwargs.get("format")
    description = entityset_to_description(entityset, format)
    for df in entityset.dataframes:
        data_path = os.path.join(path, "data", df.ww.name)
        os.makedirs(os.path.join(data_path, "data"), exist_ok=True)
        df.ww.to_disk(data_path, **kwargs)
    # Fix: the original bound both the path string and the open file handle to
    # the single name `file`, shadowing the path inside the `with` block.
    description_path = os.path.join(path, "data_description.json")
    with open(description_path, "w") as description_file:
        json.dump(description, description_file)
def create_archive(tmpdir):
    """Bundle ``tmpdir``'s description file and data directory into a tar archive.

    Args:
        tmpdir (str) : Directory containing ``data_description.json`` and a
            ``data`` subdirectory.

    Returns:
        str : Path of the new timestamp-named ``.tar`` file inside ``tmpdir``.
    """
    file_name = "es-{date:%Y-%m-%d_%H%M%S}.tar".format(date=datetime.datetime.now())
    file_path = os.path.join(tmpdir, file_name)
    # Fix: use a context manager so the archive is closed (and flushed) even if
    # adding a member raises; the original left the handle open on error.
    with tarfile.open(str(file_path), "w") as tar:
        tar.add(str(tmpdir) + "/data_description.json", arcname="/data_description.json")
        tar.add(str(tmpdir) + "/data", arcname="/data")
    return file_path
| 3,558 | 34.237624 | 152 | py |
featuretools | featuretools-main/featuretools/demo/flight.py | import math
import re
import pandas as pd
from tqdm import tqdm
from woodwork.logical_types import Boolean, Categorical, Ordinal
import featuretools as ft
def load_flight(
    month_filter=None,
    categorical_filter=None,
    nrows=None,
    demo=True,
    return_single_table=False,
    verbose=False,
):
    """
    Download, clean, and filter flight data from 2017.
    The original dataset can be found `here <https://www.transtats.bts.gov/ot_delay/ot_delaycause1.asp>`_.

    Args:
        month_filter (list[int]): Only use data from these months (example is ``[1, 2]``).
            To skip, set to None.
        categorical_filter (dict[str->str]): Use only specified categorical values.
            Example is ``{'dest_city': ['Boston, MA'], 'origin_city': ['Boston, MA']}``
            which returns all flights in OR out of Boston. To skip, set to None.
        nrows (int): Passed to nrows in ``pd.read_csv``. Used before filtering.
        demo (bool): Use only two months of data. If False, use the whole year.
        return_single_table (bool): Exit the function early and return a dataframe.
        verbose (bool): Show a progress bar while loading the data.

    Examples:
        .. ipython::
            :verbatim:

            In [1]: import featuretools as ft

            In [2]: es = ft.demo.load_flight(verbose=True,
               ...:                          month_filter=[1],
               ...:                          categorical_filter={'origin_city':['Boston, MA']})
            100%|xxxxxxxxxxxxxxxxxxxxxxxxx| 100/100 [01:16<00:00,  1.31it/s]

            In [3]: es
            Out[3]:
            Entityset: Flight Data
              DataFrames:
                airports [Rows: 55, Columns: 3]
                flights [Rows: 613, Columns: 9]
                trip_logs [Rows: 9456, Columns: 22]
                airlines [Rows: 10, Columns: 1]
              Relationships:
                trip_logs.flight_id -> flights.flight_id
                flights.carrier -> airlines.carrier
                flights.dest -> airports.dest
    """
    filename, csv_length = get_flight_filename(demo=demo)
    print("Downloading data ...")
    url = "https://oss.alteryx.com/datasets/{}?library=featuretools&version={}".format(
        filename,
        ft.__version__,
    )
    # ~99 chunks, so the tqdm progress bar (total=100) tracks download progress.
    chunksize = math.ceil(csv_length / 99)
    pd.options.display.max_columns = 200
    iter_csv = pd.read_csv(
        url,
        compression="zip",
        iterator=True,
        nrows=nrows,
        chunksize=chunksize,
    )
    if verbose:
        iter_csv = tqdm(iter_csv, total=100)
    # Clean and filter each chunk as it streams in, then concatenate.
    partial_df_list = []
    for chunk in iter_csv:
        df = filter_data(
            _clean_data(chunk),
            month_filter=month_filter,
            categorical_filter=categorical_filter,
        )
        partial_df_list.append(df)
    data = pd.concat(partial_df_list)
    if return_single_table:
        return data
    es = make_es(data)
    return es
def make_es(data):
    """Structure the cleaned flight data into a four-dataframe EntitySet.

    Produces trip_logs (base table) plus flights, airlines, and airports,
    linked via flight_id, carrier, and dest.
    """
    es = ft.EntitySet("Flight Data")
    # Columns whose values are only known once the flight has arrived; grouped
    # under the arr_time secondary time index.
    arr_time_columns = [
        "arr_delay",
        "dep_delay",
        "carrier_delay",
        "weather_delay",
        "national_airspace_delay",
        "security_delay",
        "late_aircraft_delay",
        "canceled",
        "diverted",
        "taxi_in",
        "taxi_out",
        "air_time",
        "dep_time",
    ]
    logical_types = {
        "flight_num": Categorical,
        "distance_group": Ordinal(order=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]),
        "canceled": Boolean,
        "diverted": Boolean,
    }
    es.add_dataframe(
        data,
        dataframe_name="trip_logs",
        index="trip_log_id",
        make_index=True,
        time_index="date_scheduled",
        secondary_time_index={"arr_time": arr_time_columns},
        logical_types=logical_types,
    )
    # Split per-flight attributes out of the trip log.
    es.normalize_dataframe(
        "trip_logs",
        "flights",
        "flight_id",
        additional_columns=[
            "origin",
            "origin_city",
            "origin_state",
            "dest",
            "dest_city",
            "dest_state",
            "distance_group",
            "carrier",
            "flight_num",
        ],
    )
    es.normalize_dataframe("flights", "airlines", "carrier", make_time_index=False)
    es.normalize_dataframe(
        "flights",
        "airports",
        "dest",
        additional_columns=["dest_city", "dest_state"],
        make_time_index=False,
    )
    return es
def _clean_data(data):
    """Tidy one raw CSV chunk: snake-case and rename columns, rebuild
    timestamps, fill missing label values, and assemble a composite flight id.
    """
    # Make column names snake case
    clean_data = data.rename(columns={col: convert(col) for col in data})
    # Change crs -> "scheduled" and other minor clarifications
    clean_data = clean_data.rename(
        columns={
            "crs_arr_time": "scheduled_arr_time",
            "crs_dep_time": "scheduled_dep_time",
            "crs_elapsed_time": "scheduled_elapsed_time",
            "nas_delay": "national_airspace_delay",
            "origin_city_name": "origin_city",
            "dest_city_name": "dest_city",
            "cancelled": "canceled",
        },
    )
    # Combine strings like 0130 (1:30 AM) with dates (2017-01-01)
    clean_data["scheduled_dep_time"] = clean_data["scheduled_dep_time"].apply(
        lambda x: str(x),
    ) + clean_data["flight_date"].astype("str")
    # Parse combined string as a date; unparseable values become NaT
    clean_data.loc[:, "scheduled_dep_time"] = pd.to_datetime(
        clean_data["scheduled_dep_time"],
        format="%H%M%Y-%m-%d",
        errors="coerce",
    )
    clean_data["scheduled_elapsed_time"] = pd.to_timedelta(
        clean_data["scheduled_elapsed_time"],
        unit="m",
    )
    clean_data = _reconstruct_times(clean_data)
    # Create a time index 120 days before scheduled_dep
    clean_data.loc[:, "date_scheduled"] = pd.to_datetime(
        clean_data["scheduled_dep_time"],
    ).dt.date - pd.Timedelta("120d")
    # A null entry for a delay means no delay
    clean_data = _fill_labels(clean_data)
    # Nulls for scheduled values are too problematic. Remove them.
    clean_data = clean_data.dropna(
        axis="rows",
        subset=["scheduled_dep_time", "scheduled_arr_time"],
    )
    # Make a flight id. Define a flight as a combination of:
    # 1. carrier 2. flight number 3. origin airport 4. dest airport
    clean_data.loc[:, "flight_id"] = (
        clean_data["carrier"]
        + "-"
        + clean_data["flight_num"].apply(lambda x: str(x))
        + ":"
        + clean_data["origin"]
        + "->"
        + clean_data["dest"]
    )
    column_order = [
        "flight_id",
        "flight_num",
        "date_scheduled",
        "scheduled_dep_time",
        "scheduled_arr_time",
        "carrier",
        "origin",
        "origin_city",
        "origin_state",
        "dest",
        "dest_city",
        "dest_state",
        "distance_group",
        "dep_time",
        "arr_time",
        "dep_delay",
        "taxi_out",
        "taxi_in",
        "arr_delay",
        "diverted",
        "scheduled_elapsed_time",
        "air_time",
        "distance",
        "carrier_delay",
        "weather_delay",
        "national_airspace_delay",
        "security_delay",
        "late_aircraft_delay",
        "canceled",
    ]
    clean_data = clean_data[column_order]
    return clean_data
def _fill_labels(clean_data):
labely_columns = [
"arr_delay",
"dep_delay",
"carrier_delay",
"weather_delay",
"national_airspace_delay",
"security_delay",
"late_aircraft_delay",
"canceled",
"diverted",
"taxi_in",
"taxi_out",
"air_time",
]
for col in labely_columns:
clean_data.loc[:, col] = clean_data[col].fillna(0)
return clean_data
def _reconstruct_times(clean_data):
"""Reconstruct departure_time, scheduled_dep_time,
arrival_time and scheduled_arr_time by adding known delays
to known times. We do:
- dep_time is scheduled_dep + dep_delay
- arr_time is dep_time + taxiing and air_time
- scheduled arrival is scheduled_dep + scheduled_elapsed
"""
clean_data.loc[:, "dep_time"] = clean_data["scheduled_dep_time"] + pd.to_timedelta(
clean_data["dep_delay"],
unit="m",
)
clean_data.loc[:, "arr_time"] = clean_data["dep_time"] + pd.to_timedelta(
clean_data["taxi_out"] + clean_data["air_time"] + clean_data["taxi_in"],
unit="m",
)
clean_data.loc[:, "scheduled_arr_time"] = (
clean_data["scheduled_dep_time"] + clean_data["scheduled_elapsed_time"]
)
return clean_data
def filter_data(clean_data, month_filter=None, categorical_filter=None):
    """Subset rows by scheduled-departure month and/or categorical values.

    ``categorical_filter`` maps column names to allowed value lists; a row is
    kept when it matches ANY of the listed columns (filters are OR-ed).
    """
    if month_filter is not None:
        dep_months = pd.to_datetime(clean_data["scheduled_dep_time"]).dt.month
        clean_data = clean_data[dep_months.isin(month_filter)]
    if categorical_filter is not None:
        keep = False
        for column, allowed in categorical_filter.items():
            keep = keep | clean_data[column].isin(allowed)
        clean_data = clean_data[keep]
    return clean_data
def convert(name):
    """Convert a CamelCase column name to snake_case.

    Regex approach adapted from
    https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
    """
    partially_split = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    fully_split = re.sub("([a-z0-9])([A-Z])", r"\1_\2", partially_split)
    return fully_split.lower()
def get_flight_filename(demo=True):
    """Return the (filename, total CSV row count) pair for the demo or full dataset."""
    if demo:
        return SMALL_FLIGHT_CSV, 860457
    return BIG_FLIGHT_CSV, 5162742
# Hosted flight CSV archives: a two-month sample (demo) and the full 2017 year.
SMALL_FLIGHT_CSV = "data_2017_jan_feb.csv.zip"
BIG_FLIGHT_CSV = "data_all_2017.csv.zip"
| 9,667 | 27.186589 | 120 | py |
featuretools | featuretools-main/featuretools/demo/weather.py | import pandas as pd
import featuretools as ft
def load_weather(nrows=None, return_single_table=False):
    """
    Load the Australian daily-min-temperatures weather dataset.

    Args:
        nrows (int): Passed to nrows in ``pd.read_csv``.
        return_single_table (bool): Exit the function early and return a dataframe.
    """
    filename = "daily-min-temperatures.csv"
    print("Downloading data ...")
    # Query string carries the requesting library name and version.
    url = "https://oss.alteryx.com/datasets/{}?library=featuretools&version={}".format(
        filename,
        ft.__version__,
    )
    data = pd.read_csv(url, index_col=None, nrows=nrows)
    if return_single_table:
        return data
    es = make_es(data)
    return es
def make_es(data):
    """Wrap the raw temperature dataframe in a single-dataframe EntitySet.

    A surrogate ``id`` index is generated and ``Date`` is used as time index.
    """
    es = ft.EntitySet("Weather Data")
    es.add_dataframe(
        data,
        dataframe_name="temperatures",
        index="id",
        make_index=True,
        time_index="Date",
    )
    return es
| 923 | 22.1 | 87 | py |
featuretools | featuretools-main/featuretools/demo/mock_customer.py | import pandas as pd
from numpy import random
from numpy.random import choice
from woodwork.logical_types import Categorical, PostalCode
import featuretools as ft
def load_mock_customer(
    n_customers=5,
    n_products=5,
    n_sessions=35,
    n_transactions=500,
    random_seed=0,
    return_single_table=False,
    return_entityset=False,
):
    """Return dataframes of mock customer data

    Randomly generates linked customers, products, sessions, and transactions
    tables. Returns a dict of the four dataframes by default; set
    ``return_single_table`` for one merged dataframe or ``return_entityset``
    for a fully related EntitySet.
    """
    random.seed(random_seed)
    # Join dates fall in [1/1/2008, 12/31/2013]; birthdays in [1/1/1970, 1/1/2008).
    last_date = pd.to_datetime("12/31/2013")
    first_date = pd.to_datetime("1/1/2008")
    first_bday = pd.to_datetime("1/1/1970")
    join_dates = [
        random.uniform(0, 1) * (last_date - first_date) + first_date
        for _ in range(n_customers)
    ]
    birth_dates = [
        random.uniform(0, 1) * (first_date - first_bday) + first_bday
        for _ in range(n_customers)
    ]
    customers_df = pd.DataFrame({"customer_id": range(1, n_customers + 1)})
    customers_df["zip_code"] = choice(
        ["60091", "13244"],
        n_customers,
    )
    customers_df["join_date"] = pd.Series(join_dates).dt.round("1s")
    customers_df["birthday"] = pd.Series(birth_dates).dt.round("1d")
    products_df = pd.DataFrame({"product_id": pd.Categorical(range(1, n_products + 1))})
    products_df["brand"] = choice(["A", "B", "C"], n_products)
    sessions_df = pd.DataFrame({"session_id": range(1, n_sessions + 1)})
    sessions_df["customer_id"] = choice(customers_df["customer_id"], n_sessions)
    sessions_df["device"] = choice(["desktop", "mobile", "tablet"], n_sessions)
    transactions_df = pd.DataFrame({"transaction_id": range(1, n_transactions + 1)})
    transactions_df["session_id"] = choice(sessions_df["session_id"], n_transactions)
    transactions_df = transactions_df.sort_values("session_id").reset_index(drop=True)
    transactions_df["transaction_time"] = pd.date_range(
        "1/1/2014",
        periods=n_transactions,
        freq="65s",
    )  # todo make these less regular
    transactions_df["product_id"] = pd.Categorical(
        choice(products_df["product_id"], n_transactions),
    )
    transactions_df["amount"] = random.randint(500, 15000, n_transactions) / 100
    # calculate and merge in session start
    # based on the times we came up with for transactions
    session_starts = transactions_df.drop_duplicates("session_id")[
        ["session_id", "transaction_time"]
    ].rename(columns={"transaction_time": "session_start"})
    sessions_df = sessions_df.merge(session_starts)
    if return_single_table:
        return (
            transactions_df.merge(sessions_df)
            .merge(customers_df)
            .merge(products_df)
            .reset_index(drop=True)
        )
    elif return_entityset:
        es = ft.EntitySet(id="transactions")
        es = es.add_dataframe(
            dataframe_name="transactions",
            dataframe=transactions_df,
            index="transaction_id",
            time_index="transaction_time",
            logical_types={"product_id": Categorical},
        )
        es = es.add_dataframe(
            dataframe_name="products",
            dataframe=products_df,
            index="product_id",
        )
        es = es.add_dataframe(
            dataframe_name="sessions",
            dataframe=sessions_df,
            index="session_id",
            time_index="session_start",
        )
        es = es.add_dataframe(
            dataframe_name="customers",
            dataframe=customers_df,
            index="customer_id",
            time_index="join_date",
            logical_types={"zip_code": PostalCode},
        )
        # (parent_dataframe, parent_column, child_dataframe, child_column)
        rels = [
            ("products", "product_id", "transactions", "product_id"),
            ("sessions", "session_id", "transactions", "session_id"),
            ("customers", "customer_id", "sessions", "customer_id"),
        ]
        es = es.add_relationships(rels)
        es.add_last_time_indexes()
        return es
    return {
        "customers": customers_df,
        "sessions": sessions_df,
        "transactions": transactions_df,
        "products": products_df,
    }
| 4,072 | 32.385246 | 88 | py |
featuretools | featuretools-main/featuretools/demo/api.py | # flake8: noqa
from featuretools.demo.flight import load_flight
from featuretools.demo.mock_customer import load_mock_customer
from featuretools.demo.retail import load_retail
from featuretools.demo.weather import load_weather
| 227 | 37 | 62 | py |
featuretools | featuretools-main/featuretools/demo/retail.py | import pandas as pd
from woodwork.logical_types import NaturalLanguage
import featuretools as ft
def load_retail(id="demo_retail_data", nrows=None, return_single_table=False):
    """Returns the retail entityset example.

    The original dataset can be found `here <https://archive.ics.uci.edu/ml/datasets/online+retail>`_.

    We have also made some modifications to the data. We
    changed the column names, converted the ``customer_id``
    to a unique fake ``customer_name``, dropped duplicates,
    added columns for ``total`` and ``cancelled`` and
    converted amounts from GBP to USD. You can download the modified CSV in gz `compressed (7 MB)
    <https://oss.alteryx.com/datasets/online-retail-logs-2018-08-28.csv.gz>`_
    or `uncompressed (43 MB)
    <https://oss.alteryx.com/datasets/online-retail-logs-2018-08-28.csv>`_ formats.

    Args:
        id (str): Id to assign to EntitySet.
        nrows (int): Number of rows to load of the underlying CSV.
            If None, load all.
        return_single_table (bool): If True, return a CSV rather than an EntitySet. Default is False.

    Examples:
        .. ipython::
            :verbatim:

            In [1]: import featuretools as ft

            In [2]: es = ft.demo.load_retail()

            In [3]: es
            Out[3]:
            Entityset: demo_retail_data
              DataFrames:
                orders (shape = [22190, 3])
                products (shape = [3684, 3])
                customers (shape = [4372, 2])
                order_products (shape = [401704, 7])

        Load in subset of data

        .. ipython::
            :verbatim:

            In [4]: es = ft.demo.load_retail(nrows=1000)

            In [5]: es
            Out[5]:
            Entityset: demo_retail_data
              DataFrames:
                orders (shape = [67, 5])
                products (shape = [606, 3])
                customers (shape = [50, 2])
                order_products (shape = [1000, 7])
    """
    es = ft.EntitySet(id)
    csv_s3_gz = (
        "https://oss.alteryx.com/datasets/online-retail-logs-2018-08-28.csv.gz?library=featuretools&version="
        + ft.__version__
    )
    csv_s3 = (
        "https://oss.alteryx.com/datasets/online-retail-logs-2018-08-28.csv?library=featuretools&version="
        + ft.__version__
    )
    # Try to read in gz compressed file
    try:
        df = pd.read_csv(csv_s3_gz, nrows=nrows, parse_dates=["order_date"])
    # Fall back to uncompressed
    except Exception:
        df = pd.read_csv(csv_s3, nrows=nrows, parse_dates=["order_date"])
    if return_single_table:
        return df
    # Base table: one row per ordered product, with normalized child tables
    # split out for products, orders, and customers below.
    es.add_dataframe(
        dataframe_name="order_products",
        dataframe=df,
        index="order_product_id",
        make_index=True,
        time_index="order_date",
        logical_types={"description": NaturalLanguage},
    )
    es.normalize_dataframe(
        new_dataframe_name="products",
        base_dataframe_name="order_products",
        index="product_id",
        additional_columns=["description"],
    )
    es.normalize_dataframe(
        new_dataframe_name="orders",
        base_dataframe_name="order_products",
        index="order_id",
        additional_columns=["customer_name", "country", "cancelled"],
    )
    es.normalize_dataframe(
        new_dataframe_name="customers",
        base_dataframe_name="orders",
        index="customer_name",
    )
    es.add_last_time_indexes()
    return es
| 3,466 | 30.807339 | 109 | py |
featuretools | featuretools-main/featuretools/demo/__init__.py | # flake8: noqa
from featuretools.demo.api import *
| 51 | 16.333333 | 35 | py |
featuretools | featuretools-main/featuretools/feature_base/features_serializer.py | import json
from featuretools.primitives.utils import serialize_primitive
from featuretools.utils.s3_utils import get_transport_params, use_smartopen_features
from featuretools.utils.wrangle import _is_s3, _is_url
from featuretools.version import FEATURES_SCHEMA_VERSION
from featuretools.version import __version__ as ft_version
def save_features(features, location=None, profile_name=None):
    """Saves the features list as JSON to a specified filepath/S3 path, writes to an open file, or
    returns the serialized features as a JSON string. If no file provided, returns a string.

    Args:
        features (list[:class:`.FeatureBase`]): List of Feature definitions.

        location (str or :class:`.FileObject`, optional): The location of where to save
            the features list which must include the name of the file,
            or a writeable file handle to write to. If location is None, will return a JSON string
            of the serialized features.
            Default: None

        profile_name (str, bool): The AWS profile specified to write to S3. Will default to None and search for AWS credentials.
            Set to False to use an anonymous profile.

    Note:
        Features saved in one version of Featuretools are not guaranteed to work in another.
        After upgrading Featuretools, features may need to be generated again.

    Example:
        .. ipython:: python
            :suppress:

            from featuretools.tests.testing_utils import (
                make_ecommerce_entityset)
            import featuretools as ft
            es = make_ecommerce_entityset()
            import os

        .. code-block:: python

            f1 = ft.Feature(es["log"].ww["product_id"])
            f2 = ft.Feature(es["log"].ww["purchased"])
            f3 = ft.Feature(es["log"].ww["value"])

            features = [f1, f2, f3]

            # Option 1
            filepath = os.path.join('/Home/features/', 'list.json')
            ft.save_features(features, filepath)

            # Option 2
            filepath = os.path.join('/Home/features/', 'list.json')
            with open(filepath, 'w') as f:
                ft.save_features(features, f)

            # Option 3
            features_string = ft.save_features(features)

    .. seealso::
        :func:`.load_features`
    """
    # All serialization logic lives in FeaturesSerializer.
    return FeaturesSerializer(features).save(location, profile_name=profile_name)
class FeaturesSerializer(object):
    """Serializes a list of feature definitions (plus their entityset and
    primitives) into a JSON-compatible dictionary or file."""

    def __init__(self, feature_list):
        self.feature_list = feature_list
        # Lazily-built cache of unique feature name -> feature dictionary.
        self._features_dict = None

    def to_dict(self):
        """Return the full serialized representation as a dictionary."""
        names_list = [feat.unique_name() for feat in self.feature_list]
        es = self.feature_list[0].entityset
        feature_defs, primitive_defs = self._feature_definitions()
        return {
            "schema_version": FEATURES_SCHEMA_VERSION,
            "ft_version": ft_version,
            "entityset": es.to_dictionary(),
            "feature_list": names_list,
            "feature_definitions": feature_defs,
            "primitive_definitions": primitive_defs,
        }

    def save(self, location, profile_name):
        """Write the serialized features to ``location`` (filepath, S3 path, or
        open file handle), or return a JSON string when ``location`` is None."""
        features_dict = self.to_dict()
        if location is None:
            return json.dumps(features_dict)
        if isinstance(location, str):
            if _is_url(location):
                raise ValueError("Writing to URLs is not supported")
            if _is_s3(location):
                transport_params = get_transport_params(profile_name)
                use_smartopen_features(
                    location,
                    features_dict,
                    transport_params,
                    read=False,
                )
            else:
                with open(location, "w") as f:
                    json.dump(features_dict, f)
        else:
            json.dump(features_dict, location)

    def _feature_definitions(self):
        """Build (and cache) the feature and primitive definition dictionaries,
        replacing each feature's primitive object with a shared string key."""
        if not self._features_dict:
            self._features_dict = {}
            self._primitives_dict = {}
            for feature in self.feature_list:
                self._serialize_feature(feature)
            primitive_number = 0
            primitive_id_to_key = {}
            for name, feature in self._features_dict.items():
                primitive = feature["arguments"].get("primitive")
                if primitive:
                    primitive_id = id(primitive)
                    if primitive_id not in primitive_id_to_key.keys():
                        # Primitive we haven't seen before, add to dict and increment primitive_id counter
                        # Always use string for keys because json conversion results in integer dict keys
                        # being converted to strings, but integer dict values are not.
                        primitives_dict_key = str(primitive_number)
                        primitive_id_to_key[primitive_id] = primitives_dict_key
                        self._primitives_dict[
                            primitives_dict_key
                        ] = serialize_primitive(primitive)
                        self._features_dict[name]["arguments"][
                            "primitive"
                        ] = primitives_dict_key
                        primitive_number += 1
                    else:
                        # Primitive we have seen already - use existing primitive_id key
                        key = primitive_id_to_key[primitive_id]
                        self._features_dict[name]["arguments"]["primitive"] = key
        return self._features_dict, self._primitives_dict

    def _serialize_feature(self, feature):
        """Record a feature and all of its (deep) dependencies in the cache."""
        name = feature.unique_name()
        if name not in self._features_dict:
            self._features_dict[feature.unique_name()] = feature.to_dictionary()
        for dependency in feature.get_dependencies(deep=True):
            name = dependency.unique_name()
            if name not in self._features_dict:
                self._features_dict[name] = dependency.to_dictionary()
| 6,035 | 39.24 | 128 | py |
featuretools | featuretools-main/featuretools/feature_base/utils.py | def is_valid_input(candidate, template):
"""Checks if a candidate schema should be considered a match for a template schema"""
if template.logical_type is not None and not isinstance(
candidate.logical_type,
type(template.logical_type),
):
return False
if len(template.semantic_tags - candidate.semantic_tags):
return False
return True
| 388 | 34.363636 | 89 | py |
featuretools | featuretools-main/featuretools/feature_base/features_deserializer.py | import json
from featuretools.entityset.deserialize import (
description_to_entityset as deserialize_es,
)
from featuretools.feature_base.feature_base import (
AggregationFeature,
DirectFeature,
Feature,
FeatureBase,
FeatureOutputSlice,
GroupByTransformFeature,
IdentityFeature,
TransformFeature,
)
from featuretools.primitives.utils import PrimitivesDeserializer
from featuretools.utils.s3_utils import get_transport_params, use_smartopen_features
from featuretools.utils.schema_utils import check_schema_version
from featuretools.utils.wrangle import _is_s3, _is_url
def load_features(features, profile_name=None):
    """Loads the features from a filepath, S3 path, URL, an open file, or a JSON formatted string.

    Args:
        features (str or :class:`.FileObject`): The file location of saved features.
            This must either be the name of the file, a JSON formatted string, or a readable file handle.

        profile_name (str, bool): The AWS profile specified to write to S3. Will default to None and search for AWS credentials.
            Set to False to use an anonymous profile.

    Returns:
        features (list[:class:`.FeatureBase`]): Feature definitions list.

    Note:
        Features saved in one version of Featuretools or Python are not guaranteed to work in another.
        After upgrading Featuretools or Python, features may need to be generated again.

    Example:
        .. ipython:: python
            :suppress:

            import featuretools as ft
            import os

        .. code-block:: python

            # Option 1
            filepath = os.path.join('/Home/features/', 'list.json')
            features = ft.load_features(filepath)

            # Option 2
            filepath = os.path.join('/Home/features/', 'list.json')
            with open(filepath, 'r') as f:
                features = ft.load_features(f)

            # Option 3
            filepath = os.path.join('/Home/features/', 'list.json')
            with open(filepath, 'r') as f:
                feature_str = f.read()
            features = ft.load_features(feature_str)

    .. seealso::
        :func:`.save_features`
    """
    # All deserialization logic lives in FeaturesDeserializer.
    return FeaturesDeserializer.load(features, profile_name).to_list()
class FeaturesDeserializer(object):
    """Reconstructs feature definitions (and their entityset and primitives)
    from the dictionary produced by FeaturesSerializer."""

    # Maps serialized "type" strings back to the concrete feature classes.
    FEATURE_CLASSES = {
        "AggregationFeature": AggregationFeature,
        "DirectFeature": DirectFeature,
        "Feature": Feature,
        "FeatureBase": FeatureBase,
        "GroupByTransformFeature": GroupByTransformFeature,
        "IdentityFeature": IdentityFeature,
        "TransformFeature": TransformFeature,
        "FeatureOutputSlice": FeatureOutputSlice,
    }

    def __init__(self, features_dict):
        self.features_dict = features_dict
        self._check_schema_version()
        self.entityset = deserialize_es(features_dict["entityset"])
        self._deserialized_features = {}  # name -> feature
        # Primitives are deserialized once up front and shared by key.
        primitive_deserializer = PrimitivesDeserializer()
        primitive_definitions = features_dict["primitive_definitions"]
        self._deserialized_primitives = {
            k: primitive_deserializer.deserialize_primitive(v)
            for k, v in primitive_definitions.items()
        }

    @classmethod
    def load(cls, features, profile_name):
        """Build a deserializer from a JSON string, filepath, S3 path, URL, or
        open file handle."""
        if isinstance(features, str):
            try:
                # First assume the string itself is the JSON payload.
                features_dict = json.loads(features)
            except ValueError:
                # Not JSON -- treat the string as a location to read from.
                if _is_url(features) or _is_s3(features):
                    transport_params = None
                    if _is_s3(features):
                        transport_params = get_transport_params(profile_name)
                    features_dict = use_smartopen_features(
                        features,
                        transport_params=transport_params,
                    )
                else:
                    with open(features, "r") as f:
                        features_dict = json.load(f)
            return cls(features_dict)
        return cls(json.load(features))

    def to_list(self):
        """Return the deserialized features in their original list order."""
        feature_names = self.features_dict["feature_list"]
        return [self._deserialize_feature(name) for name in feature_names]

    def _deserialize_feature(self, feature_name):
        """Recursively rebuild one feature (memoized by unique name)."""
        if feature_name in self._deserialized_features:
            return self._deserialized_features[feature_name]
        feature_dict = self.features_dict["feature_definitions"][feature_name]
        dependencies_list = feature_dict["dependencies"]
        primitive = None
        primitive_id = feature_dict["arguments"].get("primitive")
        if primitive_id is not None:
            primitive = self._deserialized_primitives[primitive_id]
        # Collect dependencies into a dictionary of name -> feature.
        dependencies = {
            dependency: self._deserialize_feature(dependency)
            for dependency in dependencies_list
        }
        type = feature_dict["type"]
        cls = self.FEATURE_CLASSES.get(type)
        if not cls:
            raise RuntimeError('Unrecognized feature type "%s"' % type)
        args = feature_dict["arguments"]
        feature = cls.from_dictionary(args, self.entityset, dependencies, primitive)
        self._deserialized_features[feature_name] = feature
        return feature

    def _check_schema_version(self):
        check_schema_version(self, "features")
| 5,377 | 35.337838 | 128 | py |
featuretools | featuretools-main/featuretools/feature_base/api.py | # flake8: noqa
from featuretools.feature_base.feature_base import (
AggregationFeature,
DirectFeature,
Feature,
FeatureBase,
FeatureOutputSlice,
GroupByTransformFeature,
IdentityFeature,
TransformFeature,
)
from featuretools.feature_base.feature_descriptions import describe_feature
from featuretools.feature_base.feature_visualizer import graph_feature
from featuretools.feature_base.features_deserializer import load_features
from featuretools.feature_base.features_serializer import save_features
| 532 | 32.3125 | 75 | py |
featuretools | featuretools-main/featuretools/feature_base/feature_visualizer.py | import html
from featuretools.feature_base.feature_base import (
AggregationFeature,
DirectFeature,
FeatureOutputSlice,
IdentityFeature,
TransformFeature,
)
from featuretools.feature_base.feature_descriptions import describe_feature
from featuretools.utils.plot_utils import (
check_graphviz,
get_graphviz_format,
save_graph,
)
TARGET_COLOR = "#D9EAD3"
TABLE_TEMPLATE = """<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="10">
<TR>
<TD colspan="1" bgcolor="#A9A9A9"><B>{dataframe_name}</B></TD>
</TR>{table_cols}
</TABLE>>"""
COL_TEMPLATE = """<TR><TD ALIGN="LEFT" port="{}">{}</TD></TR>"""
TARGET_TEMPLATE = """
<TR>
<TD ALIGN="LEFT" port="{}" BGCOLOR="{target_color}">{}</TD>
</TR>""".format(
"{}",
"{}",
target_color=TARGET_COLOR,
)
def graph_feature(feature, to_file=None, description=False, **kwargs):
    """Generates a feature lineage graph for the given feature

    Args:
        feature (FeatureBase) : Feature to generate lineage graph for
        to_file (str, optional) : Path to where the plot should be saved.
            If set to None (as by default), the plot will not be saved.
        description (bool or str, optional): The feature description to use as a caption
            for the graph. If False, no description is added. Set to True
            to use an auto-generated description. Defaults to False.
        kwargs (keywords): Additional keyword arguments to pass as keyword arguments
            to the ft.describe_feature function.

    Returns:
        graphviz.Digraph : Graph object that can directly be displayed in Jupyter notebooks.
    """
    graphviz = check_graphviz()
    format_ = get_graphviz_format(graphviz=graphviz, to_file=to_file)
    # Initialize a new directed graph
    graph = graphviz.Digraph(
        feature.get_name(),
        format=format_,
        graph_attr={"rankdir": "LR"},
    )
    dataframes = {}
    # edges[0] holds dotted grouping/join edges, edges[1] solid data-flow edges
    # (see the style attributes set before each edge loop below).
    edges = ([], [])
    primitives = []
    groupbys = []
    _, max_depth = get_feature_data(
        feature,
        dataframes,
        groupbys,
        edges,
        primitives,
        layer=0,
    )
    # Mark the requested feature as the target so its row gets highlighted.
    dataframes[feature.dataframe_name]["targets"].add(feature.get_name())
    for df_name in dataframes:
        dataframe_name = (
            "\u2605 {} (target)".format(df_name)
            if df_name == feature.dataframe_name
            else df_name
        )
        dataframe_table = get_dataframe_table(dataframe_name, dataframes[df_name])
        graph.attr("node", shape="plaintext")
        graph.node(df_name, dataframe_table)
    graph.attr("node", shape="diamond")
    num_primitives = len(primitives)
    for prim_name, prim_label, layer, prim_type in primitives:
        # Layers count outward from the target, so steps read in calculation order.
        step_num = max_depth - layer
        if num_primitives == 1:
            type_str = (
                '<FONT POINT-SIZE="12"><B>{}</B><BR></BR></FONT>'.format(prim_type)
                if prim_type
                else ""
            )
            prim_label = "<{}{}>".format(type_str, prim_label)
        else:
            step = "Step {}".format(step_num)
            type_str = " " + prim_type if prim_type else ""
            prim_label = (
                '<<FONT POINT-SIZE="12"><B>{}:</B>{}<BR></BR></FONT>{}>'.format(
                    step,
                    type_str,
                    prim_label,
                )
            )
        # sink first layer transform primitive if multiple primitives
        if step_num == 1 and prim_type == "Transform" and num_primitives > 1:
            with graph.subgraph() as init_transform:
                init_transform.attr(rank="min")
                init_transform.node(name=prim_name, label=prim_label)
        else:
            graph.node(name=prim_name, label=prim_label)
    graph.attr("node", shape="box")
    for groupby_name, groupby_label in groupbys:
        graph.node(name=groupby_name, label=groupby_label)
    graph.attr("edge", style="solid", dir="forward")
    for edge in edges[1]:
        graph.edge(*edge)
    graph.attr("edge", style="dotted", arrowhead="none", dir="forward")
    for edge in edges[0]:
        graph.edge(*edge)
    if description is True:
        graph.attr(label=describe_feature(feature, **kwargs))
    elif description is not False:
        # Any other truthy value is used verbatim as the caption.
        graph.attr(label=description)
    if to_file:
        save_graph(graph, to_file, format_)
    return graph
def get_feature_data(feat, dataframes, groupbys, edges, primitives, layer=0):
    """Recursively walk ``feat``'s lineage and accumulate graph elements.

    Mutates ``dataframes``, ``groupbys``, ``edges`` and ``primitives`` in
    place. Returns ``(feat_node, max_depth)`` where ``feat_node`` is the
    graphviz port id ("dataframe:feature") for this feature and ``max_depth``
    is the deepest layer reached below it.
    """
    # 1) add feature to dataframes tables:
    feat_name = feat.get_name()
    if feat.dataframe_name not in dataframes:
        add_dataframe(feat.dataframe, dataframes)
    dataframe_dict = dataframes[feat.dataframe_name]
    # if we've already explored this feat, continue
    feat_node = "{}:{}".format(feat.dataframe_name, feat_name)
    if feat_name in dataframe_dict["columns"] or feat_name in dataframe_dict["feats"]:
        return feat_node, layer
    if isinstance(feat, IdentityFeature):
        dataframe_dict["columns"].add(feat_name)
    else:
        dataframe_dict["feats"].add(feat_name)
    base_node = feat_node
    # 2) if multi-output, convert feature to generic base
    if isinstance(feat, FeatureOutputSlice):
        feat = feat.base_feature
        feat_name = feat.get_name()
    # 3) add primitive node
    if feat.primitive.name or isinstance(feat, DirectFeature):
        # Direct features have no primitive name; they render as a "join".
        prim_name = feat.primitive.name if feat.primitive.name else "join"
        prim_type = ""
        if isinstance(feat, AggregationFeature):
            prim_type = "Aggregation"
        elif isinstance(feat, TransformFeature):
            prim_type = "Transform"
        primitive_node = "{}_{}_{}".format(layer, feat_name, prim_name)
        primitives.append((primitive_node, prim_name.upper(), layer, prim_type))
        edges[1].append([primitive_node, base_node])
        base_node = primitive_node
    # 4) add groupby/join edges and nodes
    dependencies = [(dep.hash(), dep) for dep in feat.get_dependencies()]
    for is_forward, r in feat.relationship_path:
        if is_forward:
            # Forward (join) relationships only need a dotted edge to the key.
            if r.child_dataframe.ww.name not in dataframes:
                add_dataframe(r.child_dataframe, dataframes)
            dataframes[r.child_dataframe.ww.name]["columns"].add(r._child_column_name)
            child_node = "{}:{}".format(r.child_dataframe.ww.name, r._child_column_name)
            edges[0].append([base_node, child_node])
        else:
            # Backward relationships render an intermediate "group by" node.
            if r.child_dataframe.ww.name not in dataframes:
                add_dataframe(r.child_dataframe, dataframes)
            dataframes[r.child_dataframe.ww.name]["columns"].add(r._child_column_name)
            child_node = "{}:{}".format(r.child_dataframe.ww.name, r._child_column_name)
            child_name = child_node.replace(":", "--")
            groupby_node = "{}_groupby_{}".format(feat_name, child_name)
            groupby_name = "group by\n{}".format(r._child_column_name)
            groupbys.append((groupby_node, groupby_name))
            edges[0].append([child_node, groupby_node])
            edges[1].append([groupby_node, base_node])
            base_node = groupby_node
    if hasattr(feat, "groupby"):
        # GroupBy transform features group by another feature rather than a
        # relationship column; explore it and draw the same group-by shape.
        groupby = feat.groupby
        _ = get_feature_data(
            groupby,
            dataframes,
            groupbys,
            edges,
            primitives,
            layer + 1,
        )
        # The groupby feature is drawn via the group-by node, so drop it
        # from the plain dependency recursion below.
        dependencies.remove((groupby.hash(), groupby))
        groupby_name = groupby.get_name()
        if isinstance(groupby, IdentityFeature):
            dataframes[groupby.dataframe_name]["columns"].add(groupby_name)
        else:
            dataframes[groupby.dataframe_name]["feats"].add(groupby_name)
        child_node = "{}:{}".format(groupby.dataframe_name, groupby_name)
        child_name = child_node.replace(":", "--")
        groupby_node = "{}_groupby_{}".format(feat_name, child_name)
        groupby_name = "group by\n{}".format(groupby_name)
        groupbys.append((groupby_node, groupby_name))
        edges[0].append([child_node, groupby_node])
        edges[1].append([groupby_node, base_node])
        base_node = groupby_node
    # 5) recurse over dependents
    max_depth = layer
    for _, f in dependencies:
        dependent_node, depth = get_feature_data(
            f,
            dataframes,
            groupbys,
            edges,
            primitives,
            layer + 1,
        )
        edges[1].append([dependent_node, base_node])
        max_depth = max(depth, max_depth)
    return feat_node, max_depth
def add_dataframe(dataframe, dataframe_dict):
    """Register *dataframe* in *dataframe_dict* with an empty graph record.

    The record tracks the dataframe's index plus the target, column, and
    feature names discovered while walking the lineage.
    """
    ww_accessor = dataframe.ww
    entry = dict(
        index=ww_accessor.index,
        targets=set(),
        columns=set(),
        feats=set(),
    )
    dataframe_dict[ww_accessor.name] = entry
def get_dataframe_table(dataframe_name, dataframe_dict):
    """Construct the graphviz HTML table for one dataframe.

    Args:
        dataframe_name (str): Display name used in the table header.
        dataframe_dict (dict): Record with "index", "targets", "columns",
            and "feats" entries, as built by ``add_dataframe``.

    Returns:
        str: HTML-like label string understood by graphviz.
    """
    index = dataframe_dict["index"]
    # Work on copies so the caller's sets are never mutated. The previous
    # implementation called ``discard`` on dataframe_dict["targets"]
    # directly, silently removing the index from the caller's record.
    targets = set(dataframe_dict["targets"])
    columns = dataframe_dict["columns"].difference(targets)
    feats = dataframe_dict["feats"].difference(targets)
    # If the index is used, make sure it's the first element in the table
    clean_index = html.escape(index)
    if index in columns:
        rows = [COL_TEMPLATE.format(clean_index, clean_index + " (index)")]
        columns.discard(index)
    elif index in targets:
        rows = [TARGET_TEMPLATE.format(clean_index, clean_index + " (index)")]
        targets.discard(index)
    else:
        rows = []
    for col in list(columns) + list(feats) + list(targets):
        # Target features use the highlighted row template.
        template = TARGET_TEMPLATE if col in targets else COL_TEMPLATE
        col = html.escape(col)
        rows.append(template.format(col, col))
    return TABLE_TEMPLATE.format(
        dataframe_name=dataframe_name,
        table_cols="\n".join(rows),
    )
| 9,889 | 33.701754 | 92 | py |
featuretools | featuretools-main/featuretools/feature_base/cache.py | """
cache.py
Custom caching class, currently used for FeatureBase
"""
# needed for defaultdict annotation if < python 3.9
from __future__ import annotations
from collections import defaultdict
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, List, Optional, Union
class CacheType(Enum):
    """Enumerates the supported cache types"""

    # Each member keys an independent namespace in FeatureCache.cache.
    DEPENDENCY = 1
    DEPTH = 2
@dataclass()
class FeatureCache:
    """Provides caching for the defined types"""

    enabled: bool = False
    cache: defaultdict[dict] = field(default_factory=lambda: defaultdict(dict))

    def get(
        self,
        cache_type: CacheType,
        hashkey: int,
    ) -> Optional[Union[List[Any], Any]]:
        """Look up the payload stored under ``hashkey`` for ``cache_type``.

        Args:
            cache_type (CacheType): type of cache
            hashkey (int): hash key

        Returns:
            Optional[Union[List[Any], Any]]: the stored payload, or ``None``
            when caching is disabled, the cache type has no entries yet, or
            the key is missing.
        """
        if self.enabled and cache_type in self.cache:
            return self.cache[cache_type].get(hashkey, None)
        return None

    def add(self, cache_type: CacheType, hashkey: int, payload: Any):
        """Store ``payload`` under ``hashkey`` for ``cache_type``.

        A no-op when caching is disabled.

        Args:
            cache_type (CacheType): type of cache
            hashkey (int): hash key
            payload (Any): payload to assign
        """
        if not self.enabled:
            return
        self.cache[cache_type][hashkey] = payload

    def clear_all(self):
        """Clears the cache collections"""
        self.cache.clear()
# Module-level singleton cache instance shared across feature calculations.
feature_cache = FeatureCache()
| 1,617 | 24.28125 | 79 | py |
featuretools | featuretools-main/featuretools/feature_base/feature_base.py | from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable
from featuretools import primitives
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.entityset.timedelta import Timedelta
from featuretools.feature_base.utils import is_valid_input
from featuretools.primitives.base import (
AggregationPrimitive,
PrimitiveBase,
TransformPrimitive,
)
from featuretools.utils.gen_utils import Library, import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_against_column, _check_timedelta
# Optional heavy dependencies; these resolve to None when not installed.
dd = import_or_none("dask.dataframe")
ps = import_or_none("pyspark.pandas")

# Registry mapping the entityset id stored in a dataframe's Woodwork
# metadata to the live EntitySet object (looked up in FeatureBase.__init__).
_ES_REF = {}
class FeatureBase(object):
    def __init__(
        self,
        dataframe,
        base_features,
        relationship_path,
        primitive,
        name=None,
        names=None,
    ):
        """Base class for all features

        Args:
            dataframe (DataFrame): dataframe for calculating this feature
            base_features (list[FeatureBase]): list of base features for primitive
            relationship_path (RelationshipPath): path from this dataframe to the
                dataframe of the base features.
            primitive (:class:`.PrimitiveBase`): primitive to calculate. if not initialized when passed, gets initialized with no arguments
            name (str, optional): name to use for this feature
            names (list[str], optional): names to use for the output columns of a
                multi-output feature
        """
        assert all(
            isinstance(f, FeatureBase) for f in base_features
        ), "All base features must be features"
        self.dataframe_name = dataframe.ww.name
        # Resolve the owning EntitySet through the module-level registry,
        # keyed by the id stored in the dataframe's Woodwork metadata.
        self.entityset = _ES_REF[dataframe.ww.metadata["entityset_id"]]
        self.base_features = base_features
        # initialize if not already initialized
        if not isinstance(primitive, PrimitiveBase):
            primitive = primitive()
        # default library is PANDAS
        if is_instance(dataframe, dd, "DataFrame"):
            primitive.series_library = Library.DASK
        elif is_instance(dataframe, ps, "DataFrame"):
            primitive.series_library = Library.SPARK
        self.primitive = primitive
        self.relationship_path = relationship_path
        self._name = name
        self._names = names
        assert self._check_input_types(), (
            "Provided inputs don't match input " "type requirements"
        )

    def __getitem__(self, key):
        """Return a single output column (slice) of a multi-output feature."""
        assert (
            self.number_output_features > 1
        ), "can only access slice of multi-output feature"
        assert (
            self.number_output_features > key
        ), "index is higher than the number of outputs"
        return FeatureOutputSlice(self, key)

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitive):
        raise NotImplementedError("Must define from_dictionary on FeatureBase subclass")

    def rename(self, name):
        """Rename Feature, returns copy. Will reset any custom feature column names
        to their default value."""
        feature_copy = self.copy()
        feature_copy._name = name
        feature_copy._names = None
        return feature_copy

    def copy(self):
        raise NotImplementedError("Must define copy on FeatureBase subclass")

    def get_name(self):
        # Generated lazily and cached on first access.
        if not self._name:
            self._name = self.generate_name()
        return self._name

    def get_feature_names(self):
        """Return the list of output column names for this feature."""
        if not self._names:
            if self.number_output_features == 1:
                self._names = [self.get_name()]
            else:
                self._names = self.generate_names()
                if self.get_name() != self.generate_name():
                    # Feature was renamed: use the custom name with an index
                    # suffix for each output column.
                    self._names = [
                        self.get_name() + "[{}]".format(i)
                        for i in range(len(self._names))
                    ]
        return self._names

    def set_feature_names(self, names):
        """Set new values for the feature column names, overriding the default values.

        Number of names provided must match the number of output columns defined for
        the feature, and all provided names should be unique. Only works for features
        that have more than one output column. Use ``Feature.rename`` to change the column
        name for single output features.

        Args:
            names (list[str]): List of names to use for the output feature columns. Provided
                names must be unique.
        """
        if self.number_output_features == 1:
            raise ValueError(
                "The set_feature_names can only be used on features that have more than one output column.",
            )
        num_new_names = len(names)
        if self.number_output_features != num_new_names:
            raise ValueError(
                "Number of names provided must match the number of output features:"
                f" {num_new_names} name(s) provided, {self.number_output_features} expected.",
            )
        if len(set(names)) != num_new_names:
            raise ValueError("Provided output feature names must be unique.")
        self._names = names

    def get_function(self, **kwargs):
        # Delegates to the primitive's implementation.
        return self.primitive.get_function(**kwargs)

    def get_dependencies(self, deep=False, ignored=None, copy=True):
        """Returns features that are used to calculate this feature

        ..note::
            If you only want the features that make up the input to the feature
            function use the base_features attribute instead.
        """
        # NOTE: ``copy`` is accepted for backward compatibility but is unused.
        deps = []
        for d in self.base_features[:]:
            deps += [d]
        if hasattr(self, "where") and self.where:
            deps += [self.where]
        if ignored is None:
            ignored = set([])
        deps = [d for d in deps if d.unique_name() not in ignored]
        if deep:
            for dep in deps[:]:  # copy so we don't modify list we iterate over
                deep_deps = dep.get_dependencies(deep, ignored)
                deps += deep_deps
        return deps

    def get_depth(self, stop_at=None):
        """Returns depth of feature"""
        max_depth = 0
        stop_at_set = set()
        if stop_at is not None:
            stop_at_set = set([i.unique_name() for i in stop_at])
            if self.unique_name() in stop_at_set:
                return 0
        for dep in self.get_dependencies(deep=True, ignored=stop_at_set):
            max_depth = max(dep.get_depth(stop_at=stop_at), max_depth)
        return max_depth + 1

    def _check_input_types(self):
        # Verify the base feature schemas satisfy at least one of the
        # primitive's accepted input type signatures.
        if len(self.base_features) == 0:
            return True
        input_types = self.primitive.input_types
        if input_types is not None:
            if type(input_types[0]) != list:
                # Normalize a single signature to a list of signatures.
                input_types = [input_types]
            for t in input_types:
                zipped = list(zip(t, self.base_features))
                if all([is_valid_input(f.column_schema, t) for t, f in zipped]):
                    return True
        else:
            # Primitives with no declared input types accept anything.
            return True
        return False

    @property
    def dataframe(self):
        """Dataframe this feature belongs too"""
        return self.entityset[self.dataframe_name]

    @property
    def number_output_features(self):
        return self.primitive.number_output_features

    def __repr__(self):
        return "<Feature: %s>" % (self.get_name())

    def hash(self):
        # Hash on name + dataframe name, matching unique_name() semantics.
        return hash(self.get_name() + self.dataframe_name)

    def __hash__(self):
        return self.hash()

    @property
    def column_schema(self):
        """Woodwork column schema describing the values this feature produces.

        Walks down base features until a primitive declares a return type,
        stripping index/time_index (and, for non-direct features, foreign_key)
        semantic tags along the way.
        """
        feature = self
        column_schema = self.primitive.return_type
        while column_schema is None:
            # get column_schema of first base feature
            base_feature = feature.base_features[0]
            column_schema = base_feature.column_schema
            # only the original time index should exist
            # so make this feature's return type just a Datetime
            if "time_index" in column_schema.semantic_tags:
                column_schema = ColumnSchema(
                    logical_type=column_schema.logical_type,
                    semantic_tags=column_schema.semantic_tags - {"time_index"},
                )
            elif "index" in column_schema.semantic_tags:
                column_schema = ColumnSchema(
                    logical_type=column_schema.logical_type,
                    semantic_tags=column_schema.semantic_tags - {"index"},
                )
                # Need to add back in the numeric standard tag so the schema can get recognized
                # as a valid return type
                if column_schema.is_numeric:
                    column_schema.semantic_tags.add("numeric")
                if column_schema.is_categorical:
                    column_schema.semantic_tags.add("category")
            # direct features should keep the foreign key tag, but all other features should get converted
            if (
                not isinstance(feature, DirectFeature)
                and "foreign_key" in column_schema.semantic_tags
            ):
                column_schema = ColumnSchema(
                    logical_type=column_schema.logical_type,
                    semantic_tags=column_schema.semantic_tags - {"foreign_key"},
                )
            feature = base_feature
        return column_schema

    @property
    def default_value(self):
        return self.primitive.default_value

    def get_arguments(self):
        raise NotImplementedError("Must define get_arguments on FeatureBase subclass")

    def to_dictionary(self):
        """Serialize this feature to a JSON-compatible dictionary."""
        return {
            "type": type(self).__name__,
            "dependencies": [dep.unique_name() for dep in self.get_dependencies()],
            "arguments": self.get_arguments(),
        }

    def _handle_binary_comparison(self, other, Primitive, PrimitiveScalar):
        # Use the feature/feature primitive when both operands are features,
        # otherwise fall back to the scalar variant.
        if isinstance(other, FeatureBase):
            return Feature([self, other], primitive=Primitive)
        return Feature([self], primitive=PrimitiveScalar(other))

    def __eq__(self, other):
        """Compares to other by equality"""
        return self._handle_binary_comparison(
            other,
            primitives.Equal,
            primitives.EqualScalar,
        )

    def __ne__(self, other):
        """Compares to other by non-equality"""
        return self._handle_binary_comparison(
            other,
            primitives.NotEqual,
            primitives.NotEqualScalar,
        )

    def __gt__(self, other):
        """Compares if greater than other"""
        return self._handle_binary_comparison(
            other,
            primitives.GreaterThan,
            primitives.GreaterThanScalar,
        )

    def __ge__(self, other):
        """Compares if greater than or equal to other"""
        return self._handle_binary_comparison(
            other,
            primitives.GreaterThanEqualTo,
            primitives.GreaterThanEqualToScalar,
        )

    def __lt__(self, other):
        """Compares if less than other"""
        return self._handle_binary_comparison(
            other,
            primitives.LessThan,
            primitives.LessThanScalar,
        )

    def __le__(self, other):
        """Compares if less than or equal to other"""
        return self._handle_binary_comparison(
            other,
            primitives.LessThanEqualTo,
            primitives.LessThanEqualToScalar,
        )

    def __add__(self, other):
        """Add other"""
        return self._handle_binary_comparison(
            other,
            primitives.AddNumeric,
            primitives.AddNumericScalar,
        )

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        """Subtract other"""
        return self._handle_binary_comparison(
            other,
            primitives.SubtractNumeric,
            primitives.SubtractNumericScalar,
        )

    def __rsub__(self, other):
        # scalar - feature needs a dedicated primitive to preserve order.
        return Feature([self], primitive=primitives.ScalarSubtractNumericFeature(other))

    def __div__(self, other):
        """Divide by other"""
        return self._handle_binary_comparison(
            other,
            primitives.DivideNumeric,
            primitives.DivideNumericScalar,
        )

    def __truediv__(self, other):
        return self.__div__(other)

    def __rtruediv__(self, other):
        return self.__rdiv__(other)

    def __rdiv__(self, other):
        # scalar / feature needs a dedicated primitive to preserve order.
        return Feature([self], primitive=primitives.DivideByFeature(other))

    def __mul__(self, other):
        """Multiply by other"""
        if isinstance(other, FeatureBase):
            # boolean * boolean uses the boolean multiply primitive
            if all(
                [
                    isinstance(f.column_schema.logical_type, (Boolean, BooleanNullable))
                    for f in (self, other)
                ],
            ):
                return Feature([self, other], primitive=primitives.MultiplyBoolean)
            # numeric * boolean (in either order) uses the mixed primitive
            if (
                "numeric" in self.column_schema.semantic_tags
                and isinstance(
                    other.column_schema.logical_type,
                    (Boolean, BooleanNullable),
                )
                or "numeric" in other.column_schema.semantic_tags
                and isinstance(
                    self.column_schema.logical_type,
                    (Boolean, BooleanNullable),
                )
            ):
                return Feature(
                    [self, other],
                    primitive=primitives.MultiplyNumericBoolean,
                )
        return self._handle_binary_comparison(
            other,
            primitives.MultiplyNumeric,
            primitives.MultiplyNumericScalar,
        )

    def __rmul__(self, other):
        return self.__mul__(other)

    def __mod__(self, other):
        """Take modulus of other"""
        return self._handle_binary_comparison(
            other,
            primitives.ModuloNumeric,
            primitives.ModuloNumericScalar,
        )

    def __rmod__(self, other):
        # scalar % feature needs a dedicated primitive to preserve order.
        return Feature([self], primitive=primitives.ModuloByFeature(other))

    def __and__(self, other):
        return self.AND(other)

    def __rand__(self, other):
        return Feature([other, self], primitive=primitives.And)

    def __or__(self, other):
        return self.OR(other)

    def __ror__(self, other):
        return Feature([other, self], primitive=primitives.Or)

    def __not__(self, other):
        return self.NOT(other)

    def __abs__(self):
        return Feature([self], primitive=primitives.Absolute)

    def __neg__(self):
        return Feature([self], primitive=primitives.Negate)

    def AND(self, other_feature):
        """Logical AND with other_feature"""
        return Feature([self, other_feature], primitive=primitives.And)

    def OR(self, other_feature):
        """Logical OR with other_feature"""
        return Feature([self, other_feature], primitive=primitives.Or)

    def NOT(self):
        """Creates inverse of feature"""
        return Feature([self], primitive=primitives.Not)

    def isin(self, list_of_output):
        """Check whether feature values are contained in the given list."""
        return Feature(
            [self],
            primitive=primitives.IsIn(list_of_outputs=list_of_output),
        )

    def is_null(self):
        """Compares feature to null by equality"""
        return Feature([self], primitive=primitives.IsNull)

    def __invert__(self):
        return self.NOT()

    def unique_name(self):
        # Name qualified by dataframe so features from different dataframes
        # with the same name don't collide.
        return "%s: %s" % (self.dataframe_name, self.get_name())

    def relationship_path_name(self):
        return self.relationship_path.name
class IdentityFeature(FeatureBase):
    """Feature for dataframe that is equivalent to underlying column"""

    def __init__(self, column, name=None):
        ww_schema = column.ww.schema
        self.column_name = column.ww.name
        self.return_type = ww_schema
        schema_metadata = ww_schema._metadata
        owning_entityset = _ES_REF[schema_metadata["entityset_id"]]
        super(IdentityFeature, self).__init__(
            dataframe=owning_entityset[schema_metadata["dataframe_name"]],
            base_features=[],
            relationship_path=RelationshipPath([]),
            primitive=PrimitiveBase,
            name=name,
        )

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitive):
        """Rebuild an identity feature from its serialized arguments."""
        source_column = entityset[arguments["dataframe_name"]].ww[
            arguments["column_name"]
        ]
        return cls(column=source_column, name=arguments["name"])

    def copy(self):
        """Return copy of feature"""
        owning_dataframe = self.entityset[self.dataframe_name]
        return IdentityFeature(owning_dataframe.ww[self.column_name])

    def generate_name(self):
        # An identity feature is simply named after its column.
        return self.column_name

    def get_depth(self, stop_at=None):
        # Identity features sit at the bottom of every feature lineage.
        return 0

    def get_arguments(self):
        """Serialize the arguments needed by ``from_dictionary``."""
        return dict(
            name=self.get_name(),
            column_name=self.column_name,
            dataframe_name=self.dataframe_name,
        )

    @property
    def column_schema(self):
        return self.return_type
class DirectFeature(FeatureBase):
    """Feature for child dataframe that inherits
    a feature value from a parent dataframe"""

    input_types = [ColumnSchema()]
    return_type = None

    def __init__(
        self,
        base_feature,
        child_dataframe_name,
        relationship=None,
        name=None,
    ):
        """
        Args:
            base_feature (FeatureBase): feature defined on the parent dataframe
            child_dataframe_name (str): name of the child dataframe receiving the value
            relationship (Relationship, optional): forward relationship to use;
                inferred when there is exactly one path to the parent dataframe
            name (str, optional): name to use for this feature
        """
        base_feature = _validate_base_features(base_feature)[0]
        self.parent_dataframe_name = base_feature.dataframe_name
        relationship = self._handle_relationship(
            base_feature.entityset,
            child_dataframe_name,
            relationship,
        )
        child_dataframe = base_feature.entityset[child_dataframe_name]
        super(DirectFeature, self).__init__(
            dataframe=child_dataframe,
            base_features=[base_feature],
            relationship_path=RelationshipPath([(True, relationship)]),
            primitive=PrimitiveBase,
            name=name,
        )

    def _handle_relationship(self, entityset, child_dataframe_name, relationship):
        # Validate the supplied relationship, or find the single forward
        # relationship from the child dataframe to the parent dataframe.
        child_dataframe = entityset[child_dataframe_name]
        if relationship:
            relationship_child = relationship.child_dataframe
            assert (
                child_dataframe.ww.name == relationship_child.ww.name
            ), "child_dataframe must be the relationship child dataframe"
            assert (
                self.parent_dataframe_name == relationship.parent_dataframe.ww.name
            ), "Base feature must be defined on the relationship parent dataframe"
        else:
            child_relationships = entityset.get_forward_relationships(
                child_dataframe.ww.name,
            )
            possible_relationships = (
                r
                for r in child_relationships
                if r.parent_dataframe.ww.name == self.parent_dataframe_name
            )
            relationship = next(possible_relationships, None)
            if not relationship:
                raise RuntimeError(
                    'No relationship from "%s" to "%s" found.'
                    % (child_dataframe.ww.name, self.parent_dataframe_name),
                )
            # Check for another path.
            elif next(possible_relationships, None):
                message = (
                    "There are multiple relationships to the base dataframe. "
                    "You must specify a relationship."
                )
                raise RuntimeError(message)
        return relationship

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitive):
        """Rebuild a direct feature from its serialized arguments."""
        base_feature = dependencies[arguments["base_feature"]]
        relationship = Relationship.from_dictionary(
            arguments["relationship"],
            entityset,
        )
        child_dataframe_name = relationship.child_dataframe.ww.name
        return cls(
            base_feature=base_feature,
            child_dataframe_name=child_dataframe_name,
            relationship=relationship,
            name=arguments["name"],
        )

    @property
    def number_output_features(self):
        # Direct features mirror the output shape of their base feature.
        return self.base_features[0].number_output_features

    @property
    def default_value(self):
        return self.base_features[0].default_value

    def copy(self):
        """Return copy of feature"""
        _is_forward, relationship = self.relationship_path[0]
        return DirectFeature(
            self.base_features[0],
            self.dataframe_name,
            relationship=relationship,
        )

    @property
    def column_schema(self):
        return self.base_features[0].column_schema

    def generate_name(self):
        return self._name_from_base(self.base_features[0].get_name())

    def generate_names(self):
        return [
            self._name_from_base(base_name)
            for base_name in self.base_features[0].get_feature_names()
        ]

    def get_arguments(self):
        """Serialize the arguments needed by ``from_dictionary``."""
        _is_forward, relationship = self.relationship_path[0]
        return {
            "name": self.get_name(),
            "base_feature": self.base_features[0].unique_name(),
            "relationship": relationship.to_dictionary(),
        }

    def _name_from_base(self, base_name):
        # Prefix the base feature's name with the relationship path.
        return "%s.%s" % (self.relationship_path_name(), base_name)
class AggregationFeature(FeatureBase):
# Feature to condition this feature by in
# computation (e.g. take the Count of products where the product_id is
# "basketball".)
where = None
#: (str or :class:`.Timedelta`): Use only some amount of previous data from
# each time point during calculation
use_previous = None
def __init__(
self,
base_features,
parent_dataframe_name,
primitive,
relationship_path=None,
use_previous=None,
where=None,
name=None,
):
base_features = _validate_base_features(base_features)
for bf in base_features:
if bf.number_output_features > 1:
raise ValueError("Cannot stack on whole multi-output feature.")
self.child_dataframe_name = base_features[0].dataframe_name
entityset = base_features[0].entityset
relationship_path, self._path_is_unique = self._handle_relationship_path(
entityset,
parent_dataframe_name,
relationship_path,
)
self.parent_dataframe_name = parent_dataframe_name
if where is not None:
self.where = _validate_base_features(where)[0]
msg = "Where feature must be defined on child dataframe {}".format(
self.child_dataframe_name,
)
assert self.where.dataframe_name == self.child_dataframe_name, msg
if use_previous:
assert entityset[self.child_dataframe_name].ww.time_index is not None, (
"Applying function that requires time index to dataframe that "
"doesn't have one"
)
self.use_previous = _check_timedelta(use_previous)
assert len(base_features) > 0
time_index = base_features[0].dataframe.ww.time_index
time_col = base_features[0].dataframe.ww[time_index]
assert time_index is not None, (
"Use previous can only be defined " "on dataframes with a time index"
)
assert _check_time_against_column(self.use_previous, time_col)
super(AggregationFeature, self).__init__(
dataframe=entityset[parent_dataframe_name],
base_features=base_features,
relationship_path=relationship_path,
primitive=primitive,
name=name,
)
def _handle_relationship_path(
self,
entityset,
parent_dataframe_name,
relationship_path,
):
parent_dataframe = entityset[parent_dataframe_name]
child_dataframe = entityset[self.child_dataframe_name]
if relationship_path:
assert all(
not is_forward for is_forward, _r in relationship_path
), "All relationships in path must be backward"
_is_forward, first_relationship = relationship_path[0]
first_parent = first_relationship.parent_dataframe
assert (
parent_dataframe.ww.name == first_parent.ww.name
), "parent_dataframe must match first relationship in path."
_is_forward, last_relationship = relationship_path[-1]
assert (
child_dataframe.ww.name == last_relationship.child_dataframe.ww.name
), "Base feature must be defined on the dataframe at the end of relationship_path"
path_is_unique = entityset.has_unique_forward_path(
child_dataframe.ww.name,
parent_dataframe.ww.name,
)
else:
paths = entityset.find_backward_paths(
parent_dataframe.ww.name,
child_dataframe.ww.name,
)
first_path = next(paths, None)
if not first_path:
raise RuntimeError(
'No backward path from "%s" to "%s" found.'
% (parent_dataframe.ww.name, child_dataframe.ww.name),
)
# Check for another path.
elif next(paths, None):
message = (
"There are multiple possible paths to the base dataframe. "
"You must specify a relationship path."
)
raise RuntimeError(message)
relationship_path = RelationshipPath([(False, r) for r in first_path])
path_is_unique = True
return relationship_path, path_is_unique
@classmethod
def from_dictionary(cls, arguments, entityset, dependencies, primitive):
base_features = [dependencies[name] for name in arguments["base_features"]]
relationship_path = [
Relationship.from_dictionary(r, entityset)
for r in arguments["relationship_path"]
]
parent_dataframe_name = relationship_path[0].parent_dataframe.ww.name
relationship_path = RelationshipPath([(False, r) for r in relationship_path])
use_previous_data = arguments["use_previous"]
use_previous = use_previous_data and Timedelta.from_dictionary(
use_previous_data,
)
where_name = arguments["where"]
where = where_name and dependencies[where_name]
feat = cls(
base_features=base_features,
parent_dataframe_name=parent_dataframe_name,
primitive=primitive,
relationship_path=relationship_path,
use_previous=use_previous,
where=where,
name=arguments["name"],
)
feat._names = arguments.get("feature_names")
return feat
def copy(self):
    """Return a new AggregationFeature configured identically to this one."""
    config = dict(
        parent_dataframe_name=self.parent_dataframe_name,
        relationship_path=self.relationship_path,
        primitive=self.primitive,
        use_previous=self.use_previous,
        where=self.where,
    )
    return AggregationFeature(self.base_features, **config)
def _where_str(self):
if self.where is not None:
where_str = " WHERE " + self.where.get_name()
else:
where_str = ""
return where_str
def _use_prev_str(self):
if self.use_previous is not None and hasattr(self.use_previous, "get_name"):
use_prev_str = ", Last {}".format(self.use_previous.get_name())
else:
use_prev_str = ""
return use_prev_str
def generate_name(self):
    """Delegate single-output name generation to the primitive with this feature's context."""
    name_kwargs = {
        "base_feature_names": [bf.get_name() for bf in self.base_features],
        "relationship_path_name": self.relationship_path_name(),
        "parent_dataframe_name": self.parent_dataframe_name,
        "where_str": self._where_str(),
        "use_prev_str": self._use_prev_str(),
    }
    return self.primitive.generate_name(**name_kwargs)
def generate_names(self):
    """Delegate multi-output name generation to the primitive with this feature's context."""
    name_kwargs = {
        "base_feature_names": [bf.get_name() for bf in self.base_features],
        "relationship_path_name": self.relationship_path_name(),
        "parent_dataframe_name": self.parent_dataframe_name,
        "where_str": self._where_str(),
        "use_prev_str": self._use_prev_str(),
    }
    return self.primitive.generate_names(**name_kwargs)
def get_arguments(self):
    """Serialize this feature's defining arguments to a JSON-friendly dict."""
    arguments = {
        "name": self.get_name(),
        "base_features": [bf.unique_name() for bf in self.base_features],
        "relationship_path": [
            rel.to_dictionary() for _, rel in self.relationship_path
        ],
        "primitive": self.primitive,
        # where/use_previous may be None; short-circuiting keeps None as-is.
        "where": self.where and self.where.unique_name(),
        "use_previous": self.use_previous and self.use_previous.get_arguments(),
    }
    if self.number_output_features > 1:
        arguments["feature_names"] = self.get_feature_names()
    return arguments
def relationship_path_name(self):
    """Name for the path: the child dataframe name when the path is unambiguous,
    otherwise the explicit relationship path name."""
    if self._path_is_unique:
        return self.child_dataframe_name
    return self.relationship_path.name
class TransformFeature(FeatureBase):
    """Feature produced by applying a transform primitive to one or more
    features from the same dataframe."""

    def __init__(self, base_features, primitive, name=None):
        base_features = _validate_base_features(base_features)
        # Transform primitives consume single columns; whole multi-output
        # features cannot be stacked on directly.
        if any(bf.number_output_features > 1 for bf in base_features):
            raise ValueError("Cannot stack on whole multi-output feature.")
        first = base_features[0]
        dataframe = first.entityset[first.dataframe_name]
        super().__init__(
            dataframe=dataframe,
            base_features=base_features,
            relationship_path=RelationshipPath([]),
            primitive=primitive,
            name=name,
        )

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitive):
        """Recreate a TransformFeature from serialized arguments."""
        feature = cls(
            base_features=[dependencies[name] for name in arguments["base_features"]],
            primitive=primitive,
            name=arguments["name"],
        )
        feature._names = arguments.get("feature_names")
        return feature

    def copy(self):
        """Return a new TransformFeature with the same inputs and primitive."""
        return TransformFeature(self.base_features, self.primitive)

    def generate_name(self):
        """Delegate single-output name generation to the primitive."""
        return self.primitive.generate_name(
            base_feature_names=[bf.get_name() for bf in self.base_features],
        )

    def generate_names(self):
        """Delegate multi-output name generation to the primitive."""
        return self.primitive.generate_names(
            base_feature_names=[bf.get_name() for bf in self.base_features],
        )

    def get_arguments(self):
        """Serialize this feature's defining arguments to a JSON-friendly dict."""
        arguments = {
            "name": self.get_name(),
            "base_features": [bf.unique_name() for bf in self.base_features],
            "primitive": self.primitive,
        }
        if self.number_output_features > 1:
            arguments["feature_names"] = self.get_feature_names()
        return arguments
class GroupByTransformFeature(TransformFeature):
    """Transform feature whose primitive is applied separately within each
    group defined by a categorical/foreign-key groupby feature."""

    def __init__(self, base_features, primitive, groupby, name=None):
        if not isinstance(groupby, FeatureBase):
            groupby = IdentityFeature(groupby)
        # The groupby column must carry at least one of the 'category' or
        # 'foreign_key' semantic tags. The bare assert previously failed
        # without any explanation; give it a message.
        assert (
            len({"category", "foreign_key"} - groupby.column_schema.semantic_tags) < 2
        ), "groupby feature must have a 'category' or 'foreign_key' semantic tag"
        self.groupby = groupby

        base_features = _validate_base_features(base_features)
        # The groupby feature is carried as the last base feature so it is
        # included in dependency resolution; name/serialization code below
        # strips it back out.
        base_features.append(groupby)

        super(GroupByTransformFeature, self).__init__(
            base_features=base_features,
            primitive=primitive,
            name=name,
        )

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitive):
        """Recreate a GroupByTransformFeature from serialized arguments."""
        base_features = [dependencies[name] for name in arguments["base_features"]]
        groupby = dependencies[arguments["groupby"]]
        feat = cls(
            base_features=base_features,
            primitive=primitive,
            groupby=groupby,
            name=arguments["name"],
        )
        feat._names = arguments.get("feature_names")
        return feat

    def copy(self):
        # the groupby feature is appended to base_features in the __init__
        # so here we separate them again
        return GroupByTransformFeature(
            self.base_features[:-1],
            self.primitive,
            self.groupby,
        )

    def generate_name(self):
        # exclude the groupby feature from base_names since it has a special
        # place in the feature name
        base_names = [bf.get_name() for bf in self.base_features[:-1]]
        _name = self.primitive.generate_name(base_names)
        return "{} by {}".format(_name, self.groupby.get_name())

    def generate_names(self):
        """Multi-output names, each suffixed with the groupby clause."""
        base_names = [bf.get_name() for bf in self.base_features[:-1]]
        _names = self.primitive.generate_names(base_names)
        names = [name + " by {}".format(self.groupby.get_name()) for name in _names]
        return names

    def get_arguments(self):
        """Serialize defining arguments; the groupby is stored under its own
        key rather than inside base_features."""
        # Do not include groupby in base_features.
        feature_names = [
            feat.unique_name()
            for feat in self.base_features
            if feat.unique_name() != self.groupby.unique_name()
        ]
        arg_dict = {
            "name": self.get_name(),
            "base_features": feature_names,
            "primitive": self.primitive,
            "groupby": self.groupby.unique_name(),
        }
        if self.number_output_features > 1:
            arg_dict["feature_names"] = self.get_feature_names()
        return arg_dict
class Feature(object):
    """
    Alias to create feature. Infers the feature type based on init parameters.
    """

    def __new__(
        cls,  # __new__ receives the class; it was previously misnamed ``self``
        base,
        dataframe_name=None,
        groupby=None,
        parent_dataframe_name=None,
        primitive=None,
        use_previous=None,
        where=None,
    ):
        """Dispatch to the appropriate FeatureBase subclass.

        * no primitive, no dataframe_name -> IdentityFeature
        * no primitive, dataframe_name given -> DirectFeature
        * aggregation primitive + parent_dataframe_name -> AggregationFeature
        * transform primitive (+ optional groupby) -> (GroupBy)TransformFeature
        """
        # either direct or identity
        if primitive is None and dataframe_name is None:
            return IdentityFeature(base)
        elif primitive is None and dataframe_name is not None:
            return DirectFeature(base, dataframe_name)
        elif primitive is not None and parent_dataframe_name is not None:
            assert isinstance(primitive, AggregationPrimitive) or issubclass(
                primitive,
                AggregationPrimitive,
            ), "an AggregationPrimitive is required when parent_dataframe_name is given"
            return AggregationFeature(
                base,
                parent_dataframe_name=parent_dataframe_name,
                use_previous=use_previous,
                where=where,
                primitive=primitive,
            )
        elif primitive is not None:
            assert isinstance(primitive, TransformPrimitive) or issubclass(
                primitive,
                TransformPrimitive,
            ), "a TransformPrimitive is required when no parent_dataframe_name is given"
            if groupby is not None:
                return GroupByTransformFeature(
                    base,
                    primitive=primitive,
                    groupby=groupby,
                )
            return TransformFeature(base, primitive=primitive)

        raise Exception("Unrecognized feature initialization")
class FeatureOutputSlice(FeatureBase):
    """
    Class to access specific multi output feature column
    """

    def __init__(self, base_feature, n, name=None):
        base_features = [base_feature]
        self.num_output_parent = base_feature.number_output_features

        msg = "cannot access slice from single output feature"
        assert self.num_output_parent > 1, msg
        msg = "cannot access column that is not between 0 and " + str(
            self.num_output_parent - 1,
        )
        # Bug fix: the upper bound alone allowed negative n, which would
        # silently index from the end of the parent's outputs; enforce the
        # lower bound the message already promises.
        assert 0 <= n < self.num_output_parent, msg

        self.n = n
        self._name = name
        self._names = [name] if name else None
        self.base_features = base_features
        self.base_feature = base_features[0]

        self.dataframe_name = base_feature.dataframe_name
        self.entityset = base_feature.entityset
        self.primitive = base_feature.primitive

        self.relationship_path = base_feature.relationship_path

    def __getitem__(self, key):
        # A slice is already a single column; it cannot be sliced further.
        raise ValueError("Cannot get item from slice of multi output feature")

    def generate_name(self):
        """Name of this slice: the n-th name of the parent feature."""
        return self.base_feature.get_feature_names()[self.n]

    @property
    def number_output_features(self):
        # A slice always exposes exactly one column.
        return 1

    def get_arguments(self):
        """Serialize the slice's defining arguments to a JSON-friendly dict."""
        return {
            "name": self.get_name(),
            "base_feature": self.base_feature.unique_name(),
            "n": self.n,
        }

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitive):
        """Recreate a FeatureOutputSlice from serialized arguments."""
        base_feature_name = arguments["base_feature"]
        base_feature = dependencies[base_feature_name]
        n = arguments["n"]
        name = arguments["name"]
        return cls(base_feature=base_feature, n=n, name=name)

    def copy(self):
        # NOTE(review): the custom name is not carried over by copy() --
        # confirm this matches the intended behavior of the other classes.
        return FeatureOutputSlice(self.base_feature, self.n)
def _validate_base_features(feature):
    """Normalize ``feature`` into a list of FeatureBase instances.

    Accepts a Woodwork-initialized Series (wrapped in an IdentityFeature), an
    iterable of features, or a single FeatureBase. All resulting features must
    come from the same dataframe.
    """
    if type(feature).__name__ == "Series":
        return [IdentityFeature(feature)]
    if hasattr(feature, "__iter__"):
        features = [_validate_base_features(item)[0] for item in feature]
        dataframes = set(bf.dataframe_name for bf in features)
        assert len(dataframes) == 1, "all base features must share the same dataframe"
        return features
    if isinstance(feature, FeatureBase):
        return [feature]
    raise Exception("Not a feature")
| 37,541 | 33.410632 | 139 | py |
featuretools | featuretools-main/featuretools/feature_base/__init__.py | # flake8: noqa
from featuretools.feature_base.api import *
| 59 | 19 | 43 | py |
featuretools | featuretools-main/featuretools/feature_base/feature_descriptions.py | import json
import featuretools as ft
def describe_feature(
    feature,
    feature_descriptions=None,
    primitive_templates=None,
    metadata_file=None,
):
    """Generates an English language description of a feature.

    Args:
        feature (FeatureBase) : Feature to describe
        feature_descriptions (dict, optional) : dictionary mapping features or unique
            feature names to custom descriptions
        primitive_templates (dict, optional) : dictionary mapping primitives or
            primitive names to description templates
        metadata_file (str, optional) : path to json metadata file

    Returns:
        str : English description of the feature
    """
    feature_descriptions = feature_descriptions or {}
    primitive_templates = primitive_templates or {}

    if metadata_file:
        file_descriptions, file_templates = parse_json_metadata(metadata_file)
        # Entries passed in directly take precedence over file-provided ones.
        feature_descriptions = {**file_descriptions, **feature_descriptions}
        primitive_templates = {**file_templates, **primitive_templates}

    description = generate_description(
        feature,
        feature_descriptions,
        primitive_templates,
    )
    # Capitalize the first character and terminate with a period.
    return description[:1].upper() + description[1:] + "."
def generate_description(feature, feature_descriptions, primitive_templates):
    """Recursively build the (uncapitalized) English description for ``feature``.

    Args:
        feature (FeatureBase): feature to describe
        feature_descriptions (dict): custom descriptions keyed by feature
            object or unique feature name
        primitive_templates (dict): description template overrides keyed by
            primitive object or primitive name

    Returns:
        str: description fragment; ``describe_feature`` adds the leading
        capital and trailing period.
    """
    # Check if feature has custom description
    if feature in feature_descriptions or feature.unique_name() in feature_descriptions:
        description = feature_descriptions.get(feature) or feature_descriptions.get(
            feature.unique_name(),
        )
        return description

    # Check if identity feature:
    if isinstance(feature, ft.IdentityFeature):
        # Prefer the column's own Woodwork description when one was set.
        description = feature.column_schema.description
        if description is None:
            description = 'the "{}"'.format(feature.column_name)
        return description

    # Handle direct features
    if isinstance(feature, ft.DirectFeature):
        base_feature, direct_description = get_direct_description(feature)
        direct_base = generate_description(
            base_feature,
            feature_descriptions,
            primitive_templates,
        )
        return direct_base + direct_description

    # Get input descriptions
    input_descriptions = []
    input_columns = feature.base_features
    # A slice describes its parent's inputs, not the parent itself.
    if isinstance(feature, ft.feature_base.FeatureOutputSlice):
        input_columns = feature.base_feature.base_features

    for input_col in input_columns:
        col_description = generate_description(
            input_col,
            feature_descriptions,
            primitive_templates,
        )
        input_descriptions.append(col_description)

    # Remove groupby description from input columns
    # (the groupby feature is always the last base feature)
    groupby_description = None
    if isinstance(feature, ft.GroupByTransformFeature):
        groupby_description = input_descriptions.pop()

    # Generate primitive description
    template_override = None
    if (
        feature.primitive in primitive_templates
        or feature.primitive.name in primitive_templates
    ):
        template_override = primitive_templates.get(
            feature.primitive,
        ) or primitive_templates.get(feature.primitive.name)
    slice_num = feature.n if hasattr(feature, "n") else None
    primitive_description = feature.primitive.get_description(
        input_descriptions,
        slice_num=slice_num,
        template_override=template_override,
    )
    # From here on describe the parent feature that owns the slice.
    if isinstance(feature, ft.feature_base.FeatureOutputSlice):
        feature = feature.base_feature

    # Generate groupby phrase if applicable
    groupby = ""
    if isinstance(feature, ft.AggregationFeature):
        groupby_description = get_aggregation_groupby(feature, feature_descriptions)
    if groupby_description is not None:
        # Remove a leading article so the phrase reads "for each <thing>".
        if groupby_description.startswith("the "):
            groupby_description = groupby_description[4:]
        groupby = "for each {}".format(groupby_description)

    # Generate aggregation dataframe phrase with use_previous
    dataframe_description = ""
    if isinstance(feature, ft.AggregationFeature):
        if feature.use_previous:
            dataframe_description = "of the previous {} of ".format(
                feature.use_previous.get_name().lower(),
            )
        else:
            dataframe_description = "of all instances of "
        dataframe_description += '"{}"'.format(
            feature.relationship_path[-1][1].child_dataframe.ww.name,
        )

    # Generate where phrase
    where = ""
    if hasattr(feature, "where") and feature.where:
        where_col = generate_description(
            feature.where.base_features[0],
            feature_descriptions,
            primitive_templates,
        )
        where = "where {} is {}".format(where_col, feature.where.primitive.value)

    # Join all parts of template, skipping any empty phrase.
    description_template = [
        primitive_description,
        dataframe_description,
        where,
        groupby,
    ]
    description = " ".join([phrase for phrase in description_template if phrase != ""])

    return description
def get_direct_description(feature):
    """Build the trailing description for a chain of DirectFeatures.

    Walks down stacked DirectFeatures and returns the innermost non-direct
    base feature together with a phrase describing the chain of
    parent-dataframe lookups.
    """
    direct_description = (
        ' the instance of "{}" associated with this instance of "{}"'.format(
            feature.relationship_path[-1][1].parent_dataframe.ww.name,
            feature.dataframe_name,
        )
    )
    base_features = feature.base_features
    # Flatten stacked direct features so the description reads as one chain
    # of lookups rather than nested clauses.
    while isinstance(base_features[0], ft.DirectFeature):
        base_feat = base_features[0]
        base_feat_description = ' the instance of "{}" associated with'.format(
            base_feat.relationship_path[-1][1].parent_dataframe.ww.name,
        )
        direct_description = base_feat_description + direct_description
        base_features = base_feat.base_features

    return base_features[0], " for" + direct_description
def get_aggregation_groupby(feature, feature_descriptions=None):
    """Describe the index column an aggregation groups over, honoring any
    custom description registered for that identity feature."""
    feature_descriptions = feature_descriptions or {}
    groupby_name = feature.dataframe.ww.index
    groupby = ft.IdentityFeature(
        feature.entityset[feature.dataframe_name].ww[groupby_name],
    )
    if groupby in feature_descriptions or groupby.unique_name() in feature_descriptions:
        return feature_descriptions.get(groupby) or feature_descriptions.get(
            groupby.unique_name(),
        )
    return '"{}" in "{}"'.format(groupby_name, feature.dataframe_name)
def parse_json_metadata(file):
    """Read feature descriptions and primitive templates from a JSON file.

    Returns:
        tuple(dict, dict): (feature_descriptions, primitive_templates);
        either dict is empty when the corresponding key is absent.
    """
    with open(file) as f:
        metadata = json.load(f)
    feature_descriptions = metadata.get("feature_descriptions", {})
    primitive_templates = metadata.get("primitive_templates", {})
    return feature_descriptions, primitive_templates
| 6,827 | 34.195876 | 88 | py |
featuretools | featuretools-main/featuretools/utils/plot_utils.py | from featuretools.utils.gen_utils import import_or_raise
def check_graphviz():
    """Return the graphviz module after verifying both the Python package and
    a working system rendering backend are installed; raise otherwise."""
    GRAPHVIZ_ERR_MSG = (
        "Please install graphviz to plot."
        + " (See https://featuretools.alteryx.com/en/stable/install.html#installing-graphviz for"
        + " details)"
    )
    graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
    # Try rendering a dummy graph to see if a working backend is installed
    try:
        graphviz.Digraph().pipe(format="svg")
    except graphviz.backend.ExecutableNotFound:
        # The Python package is present but the native 'dot' executable is not.
        raise RuntimeError(
            "To plot entity sets, a graphviz backend is required.\n"
            + "Install the backend using one of the following commands:\n"
            + " Mac OS: brew install graphviz\n"
            + " Linux (Ubuntu): $ sudo apt install graphviz\n"
            + " Windows (conda): conda install -c conda-forge python-graphviz\n"
            + " Windows (pip): pip install graphviz\n"
            + " Windows (EXE required if graphviz was installed via pip): https://graphviz.org/download/#windows"
            + " For more details visit: https://featuretools.alteryx.com/en/stable/install.html#installing-graphviz",
        )
    return graphviz
def get_graphviz_format(graphviz, to_file):
    """Infer the graphviz output format from ``to_file``'s extension.

    Returns None when no output file is requested. Raises ValueError when the
    path has no extension or the extension is not a supported graphviz format.
    """
    if not to_file:
        return None
    # Explicitly cast to str in case a Path object was passed in
    parts = str(to_file).split(".")
    if len(parts) < 2:
        raise ValueError(
            "Please use a file extension like '.pdf'"
            + " so that the format can be inferred",
        )
    format_ = parts[-1]
    valid_formats = graphviz.FORMATS
    if format_ not in valid_formats:
        raise ValueError(
            "Unknown format. Make sure your format is"
            + " amongst the following: %s" % valid_formats,
        )
    return format_
def save_graph(graph, to_file, format_):
    """Render ``graph`` to ``to_file`` and clean up intermediate files."""
    # Graphviz always appends the format to the file name, so strip the
    # ".<format>" suffix to avoid output names like 'file_name.pdf.pdf'.
    suffix_length = len(format_) + 1  # +1 for the dot
    graph.render(to_file[:-suffix_length], cleanup=True)
| 2,248 | 37.775862 | 118 | py |
featuretools | featuretools-main/featuretools/utils/wrangle.py | import re
import tarfile
from datetime import datetime
import numpy as np
import pandas as pd
from woodwork.logical_types import Datetime, Ordinal
from featuretools.entityset.timedelta import Timedelta
def _check_timedelta(td):
"""
Convert strings to Timedelta objects
Allows for both shortform and longform units, as well as any form of capitalization
'2 Minutes'
'2 minutes'
'2 m'
'1 Minute'
'1 minute'
'1 m'
'1 units'
'1 Units'
'1 u'
Shortform is fine if space is dropped
'2m'
'1u"
If a pd.Timedelta object is passed, units will be converted to seconds due to the underlying representation
of pd.Timedelta.
If a pd.DateOffset object is passed, it will be converted to a Featuretools Timedelta if it has one
temporal parameter. Otherwise, it will remain a pd.DateOffset.
"""
if td is None:
return td
if isinstance(td, Timedelta):
return td
elif not isinstance(td, (int, float, str, pd.DateOffset, pd.Timedelta)):
raise ValueError("Unable to parse timedelta: {}".format(td))
if isinstance(td, pd.Timedelta):
unit = "s"
value = td.total_seconds()
times = {unit: value}
return Timedelta(times, delta_obj=td)
elif isinstance(td, pd.DateOffset):
# DateOffsets
if td.__class__.__name__ != "DateOffset":
if hasattr(td, "__dict__"):
# Special offsets (such as BDay) - prior to pandas 1.0.0
value = td.__dict__["n"]
else:
# Special offsets (such as BDay) - after pandas 1.0.0
value = td.n
unit = td.__class__.__name__
times = dict([(unit, value)])
else:
times = dict()
for td_unit, td_value in td.kwds.items():
times[td_unit] = td_value
return Timedelta(times, delta_obj=td)
else:
pattern = "([0-9]+) *([a-zA-Z]+)$"
match = re.match(pattern, td)
value, unit = match.groups()
try:
value = int(value)
except Exception:
try:
value = float(value)
except Exception:
raise ValueError(
"Unable to parse value {} from ".format(value)
+ "timedelta string: {}".format(td),
)
times = {unit: value}
return Timedelta(times)
def _check_time_against_column(time, time_column):
    """
    Check to make sure that time is compatible with time_column,
    where time could be a timestamp, or a Timedelta, number, or None,
    and time_column is a Woodwork initialized column. Compatibility means that
    arithmetic can be performed between time and elements of time_column

    If time is None, then we don't care if arithmetic can be performed
    (presumably it won't ever be performed)
    """
    if time is None:
        return True
    elif isinstance(time, (int, float)):
        # Numeric cutoffs require a numeric time column.
        return time_column.ww.schema.is_numeric
    elif isinstance(time, (pd.Timestamp, datetime, pd.DateOffset)):
        # Datetime-like cutoffs require a datetime time column.
        return time_column.ww.schema.is_datetime
    elif isinstance(time, Timedelta):
        # Time-unit deltas always work against datetime columns.
        if time_column.ww.schema.is_datetime:
            return True
        elif time.unit not in Timedelta._time_units:
            # Non-time-unit deltas (e.g. observation counts) are compatible
            # with ordinal, numeric, or time-index columns.
            if (
                isinstance(time_column.ww.logical_type, Ordinal)
                or "numeric" in time_column.ww.semantic_tags
                or "time_index" in time_column.ww.semantic_tags
            ):
                return True
    # Any other combination cannot be used in arithmetic with the column.
    return False
def _check_time_type(time):
"""
Checks if `time` is an instance of common int, float, or datetime types.
Returns "numeric" or Datetime based on results
"""
time_type = None
if isinstance(time, (datetime, np.datetime64)):
time_type = Datetime
elif (
isinstance(time, (int, float))
or np.issubdtype(time, np.integer)
or np.issubdtype(time, np.floating)
):
time_type = "numeric"
return time_type
def _is_s3(string):
"""
Checks if the given string is a s3 path.
Returns a boolean.
"""
return string.startswith("s3://")
def _is_url(string):
"""
Checks if the given string is an url path.
Returns a boolean.
"""
return string.startswith("http")
def _is_local_tar(string):
"""
Checks if the given string is a local tarfile path.
Returns a boolean.
"""
return string.endswith(".tar") and tarfile.is_tarfile(string)
| 4,538 | 30.089041 | 111 | py |
featuretools | featuretools-main/featuretools/utils/gen_utils.py | import importlib
import logging
import re
import sys
from enum import Enum
from tqdm import tqdm
logger = logging.getLogger("featuretools.utils")
def make_tqdm_iterator(**kwargs):
    """Return a tqdm progress bar that writes to stdout and persists when
    finished; any keyword arguments override these defaults."""
    defaults = {"file": sys.stdout, "leave": True}
    return tqdm(**{**defaults, **kwargs})
def get_relationship_column_id(path):
    """Return the linking column name for a relationship path.

    Each hop after the first prefixes the accumulated name with that hop's
    parent dataframe name, producing names like "sessions.customer_id".
    """
    _, first_relationship = path[0]
    link_name = first_relationship._child_column_name
    for _, relationship in path[1:]:
        link_name = "%s.%s" % (relationship.parent_name, link_name)
    return link_name
def find_descendents(cls):
    """
    A generator which yields all descendent classes of the given class
    (including the given class)

    Args:
        cls (Class): the class to find descendents of
    """
    # Depth-first pre-order traversal of the subclass tree.
    yield cls
    for subclass in cls.__subclasses__():
        yield from find_descendents(subclass)
def import_or_raise(library, error_msg):
    """
    Attempts to import the requested library. If the import fails, raises an
    ImportError with the supplied error message.

    Args:
        library (str): the name of the library
        error_msg (str): error message to raise if the import fails
    """
    try:
        return importlib.import_module(library)
    except ImportError:
        # Re-raise with the caller-supplied, user-facing message.
        raise ImportError(error_msg)
def import_or_none(library):
    """
    Attempts to import the requested library.

    Args:
        library (str): the name of the library
    Returns: the library if it is installed, else None
    """
    try:
        module = importlib.import_module(library)
    except ImportError:
        module = None
    return module
def is_instance(obj, modules, classnames):
    """
    Check if the given object is an instance of classname in module(s). Module
    can be None (i.e. not installed)

    Args:
        obj (obj): object to test
        modules (module or tuple[module]): module to check, can be also be None (will be ignored)
        classnames (str or tuple[str]): classname from module to check. If multiple values are
            provided, they should match with a single module in order.
            If a single value is provided, will be used for all modules.

    Returns:
        bool: True if object is an instance of classname from corresponding module, otherwise False.
            Also returns False if the module is None (i.e. module is not installed)
    """
    # Exact ``tuple`` type checks (not isinstance) preserve handling of
    # tuple subclasses passed as a single module/classname.
    if type(modules) is not tuple:
        modules = (modules,)
    if type(classnames) is not tuple:
        classnames = (classnames,) * len(modules)
    if len(modules) != len(classnames):
        raise ValueError("Number of modules does not match number of classnames")
    # Missing (None) modules are skipped; a missing attribute falls back to
    # the module itself, mirroring the original getattr default.
    candidate_types = tuple(
        getattr(module, classname, module)
        for module, classname in zip(modules, classnames)
        if module
    )
    return isinstance(obj, candidate_types)
def camel_and_title_to_snake(name):
    """Convert CamelCase/TitleCase names (optionally with digit runs) to snake_case."""
    # Separate a digit run from the word before it: "Lag3" -> "Lag_3".
    separated = re.sub(r"([^_\d]+)(\d+)", r"\1_\2", name)
    # Insert "_" before an uppercase letter that starts a new word.
    separated = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", separated)
    # Break lower/digit followed by uppercase, then lowercase everything.
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", separated).lower()
class Library(str, Enum):
    """Supported dataframe backends; the ``str`` mixin lets members compare
    equal to (and serialize as) their string values."""

    PANDAS = "pandas"
    DASK = "Dask"
    SPARK = "Spark"
| 3,159 | 27.990826 | 100 | py |
featuretools | featuretools-main/featuretools/utils/description_utils.py | def convert_to_nth(integer):
    # Return the ordinal string for an integer, e.g. 1 -> "1st", 22 -> "22nd".
    string_nth = str(integer)
    end_int = integer % 10
    # 11/12/13 take "th" despite ending in 1/2/3, hence the % 100 guards.
    if end_int == 1 and integer % 100 != 11:
        return str(integer) + "st"
    elif end_int == 2 and integer % 100 != 12:
        return str(string_nth) + "nd"
    elif end_int == 3 and integer % 100 != 13:
        return str(string_nth) + "rd"
    else:
        return str(string_nth) + "th"
| 384 | 31.083333 | 46 | py |
featuretools | featuretools-main/featuretools/utils/schema_utils.py | import logging
import warnings
from packaging.version import parse
from featuretools.version import ENTITYSET_SCHEMA_VERSION, FEATURES_SCHEMA_VERSION
logger = logging.getLogger("featuretools.utils")
def check_schema_version(cls, cls_type):
    """
    If the saved schema version is newer than the current featuretools
    schema version, this function will output a warning saying so.

    If the saved schema version is a major release or more behind
    the current featuretools schema version, this function will log
    a message saying so.

    Args:
        cls: deserialized entityset dict (for "entityset") or an object
            exposing ``features_dict`` (for "features")
        cls_type (str): either "entityset" or "features"
    """
    if not isinstance(cls_type, str):
        return

    current = None
    saved = None
    if cls_type == "entityset":
        current = ENTITYSET_SCHEMA_VERSION
        saved = cls.get("schema_version")
    elif cls_type == "features":
        current = FEATURES_SCHEMA_VERSION
        saved = cls.features_dict["schema_version"]

    # Bug fix: an unrecognized cls_type (or a missing saved version) used to
    # reach parse(None) and crash with a TypeError; nothing to compare here.
    if current is None or saved is None:
        return

    if parse(current) < parse(saved):
        warning_text_upgrade = (
            "The schema version of the saved %s"
            "(%s) is greater than the latest supported (%s). "
            "You may need to upgrade featuretools. Attempting to load %s ..."
            % (cls_type, saved, current, cls_type)
        )
        warnings.warn(warning_text_upgrade)

    if parse(current).major > parse(saved).major:
        warning_text_outdated = (
            "The schema version of the saved %s"
            "(%s) is no longer supported by this version "
            "of featuretools. Attempting to load %s ..."
            % (cls_type, saved, cls_type)
        )
        logger.warning(warning_text_outdated)
| 1,684 | 34.851064 | 82 | py |
featuretools | featuretools-main/featuretools/utils/s3_utils.py | import json
import shutil
from featuretools.utils.gen_utils import import_or_raise
def use_smartopen_es(file_path, path, transport_params=None, read=True):
    """Copy an entityset archive between a local file and a (possibly remote) path.

    Args:
        file_path (str): local file path
        path (str): remote (e.g. S3/URL) or local path
        transport_params (dict, optional): smart_open transport parameters
        read (bool): when True, download ``path`` into ``file_path``;
            otherwise upload ``file_path`` to ``path``
    """
    # Bind smart_open's opener to a distinct name instead of shadowing the
    # builtin ``open`` (previous code rebound ``open`` locally).
    open_fn = import_or_raise("smart_open", SMART_OPEN_ERR_MSG).open
    if read:
        with open_fn(path, "rb", transport_params=transport_params) as fin:
            with open_fn(file_path, "wb") as fout:
                shutil.copyfileobj(fin, fout)
    else:
        with open_fn(file_path, "rb") as fin:
            with open_fn(path, "wb", transport_params=transport_params) as fout:
                shutil.copyfileobj(fin, fout)
def use_smartopen_features(path, features_dict=None, transport_params=None, read=True):
    """Read or write a serialized features dictionary at ``path``.

    Args:
        path (str): local or remote (e.g. S3/URL) location
        features_dict (dict, optional): dictionary to write when ``read`` is False
        transport_params (dict, optional): smart_open transport parameters
        read (bool): when True, load and return the JSON at ``path``;
            otherwise dump ``features_dict`` to ``path`` as JSON

    Returns:
        dict or None: the loaded features dict when reading, else None
    """
    # Bind smart_open's opener to a distinct name instead of shadowing the
    # builtin ``open`` (previous code rebound ``open`` locally).
    open_fn = import_or_raise("smart_open", SMART_OPEN_ERR_MSG).open
    if read:
        with open_fn(path, "r", encoding="utf-8", transport_params=transport_params) as f:
            features_dict = json.load(f)
            return features_dict
    else:
        with open_fn(path, "w", transport_params=transport_params) as f:
            json.dump(features_dict, f)
def get_transport_params(profile_name):
    """Build smart_open transport params for S3 access.

    ``profile_name`` semantics:
      * str   -- use that AWS profile's credentials
      * False -- force anonymous (unsigned) access
      * other -- default credential chain; anonymous only when no
        credentials can be found
    """
    boto3 = import_or_raise("boto3", BOTO3_ERR_MSG)
    UNSIGNED = import_or_raise("botocore", BOTOCORE_ERR_MSG).UNSIGNED
    Config = import_or_raise("botocore.config", BOTOCORE_ERR_MSG).Config

    if isinstance(profile_name, str):
        # Use credentials from the named AWS profile.
        named_session = boto3.Session(profile_name=profile_name)
        return {"client": named_session.client("s3")}
    if profile_name is False or boto3.Session().get_credentials() is None:
        # Anonymous access: sign no requests.
        session = boto3.Session()
        client = session.client("s3", config=Config(signature_version=UNSIGNED))
        return {"client": client}
    return None
# User-facing installation hints raised by import_or_raise when an optional
# dependency needed for URL/S3 (de)serialization is missing.
BOTO3_ERR_MSG = (
    "The boto3 library is required to read and write from URLs and S3.\n"
    "Install via pip:\n"
    " pip install boto3\n"
    "Install via conda:\n"
    " conda install -c conda-forge boto3"
)
BOTOCORE_ERR_MSG = (
    "The botocore library is required to read and write from URLs and S3.\n"
    "Install via pip:\n"
    " pip install botocore\n"
    "Install via conda:\n"
    " conda install -c conda-forge botocore"
)
SMART_OPEN_ERR_MSG = (
    "The smart_open library is required to read and write from URLs and S3.\n"
    "Install via pip:\n"
    " pip install 'smart-open>=5.0.0'\n"
    "Install via conda:\n"
    " conda install -c conda-forge 'smart_open>=5.0.0'"
)
| 2,451 | 35.058824 | 87 | py |
featuretools | featuretools-main/featuretools/utils/utils_info.py | import locale
import os
import platform
import struct
import sys
import pkg_resources
import featuretools
# Third-party packages whose installed versions show_info() reports.
deps = [
    "numpy",
    "pandas",
    "tqdm",
    "cloudpickle",
    "dask",
    "distributed",
    "psutil",
    "pip",
    "setuptools",
]
def show_info():
    """Print the featuretools version, install location, system information,
    and installed dependency versions."""
    print(f"Featuretools version: {featuretools.__version__}")
    print(f"Featuretools installation directory: {get_featuretools_root()}")
    print_sys_info()
    print_deps(deps)
def print_sys_info():
    """Print the SYSTEM INFO section gathered by get_sys_info()."""
    print("\nSYSTEM INFO")
    print("-----------")
    for label, value in get_sys_info():
        print(f"{label}: {value}")
def print_deps(dependencies):
    """Print the INSTALLED VERSIONS section for the given dependency names."""
    print("\nINSTALLED VERSIONS")
    print("------------------")
    installed_packages = get_installed_packages()
    # Only report dependencies that are actually installed.
    for package in dependencies:
        if package in installed_packages:
            print(f"{package}: {installed_packages[package]}")
# Modified from here
# https://github.com/pandas-dev/pandas/blob/d9a037ec4ad0aab0f5bf2ad18a30554c38299e57/pandas/util/_print_versions.py#L11
def get_sys_info():
    """Return system information as a list of (label, value) pairs."""
    info = []
    try:
        uname = platform.uname()
        info.extend(
            [
                ("python", ".".join(map(str, sys.version_info))),
                ("python-bits", struct.calcsize("P") * 8),
                ("OS", uname.system),
                ("OS-release", uname.release),
                ("machine", uname.machine),
                ("processor", uname.processor),
                ("byteorder", sys.byteorder),
                ("LC_ALL", os.environ.get("LC_ALL", "None")),
                ("LANG", os.environ.get("LANG", "None")),
                ("LOCALE", ".".join(map(str, locale.getlocale()))),
            ],
        )
    except (KeyError, ValueError):
        # Best effort: return whatever was collected before the failure.
        pass
    return info
def get_installed_packages():
    """Return a mapping of installed package name -> version string."""
    return {
        distribution.project_name: distribution.version
        for distribution in pkg_resources.working_set
    }
def get_featuretools_root():
    """Return the directory the featuretools package is installed in."""
    return os.path.dirname(featuretools.__file__)
| 2,483 | 26.296703 | 119 | py |
featuretools | featuretools-main/featuretools/utils/recommend_primitives.py | import logging
from typing import List
from featuretools.computational_backends import calculate_feature_matrix
from featuretools.entityset import EntitySet
from featuretools.primitives.utils import get_transform_primitives
from featuretools.synthesis import dfs, get_valid_primitives
# Groups of transform primitives excluded from recommendations by default,
# organized by the reason for their exclusion.
ORDERED_PRIMITIVES = (
    [  # non-numeric primitives that require specific ordering or a time index to be set
        "cum_count",
        "cumulative_time_since_last_false",
        "cumulative_time_since_last_true",
        "diff",
        "diff_datetime",
        "is_first_occurrence",
        "is_last_occurrence",
        "time_since_previous",
    ]
)

DEPRECATED_PRIMITIVES = [
    "multiply_boolean",  # functionality duplicated by 'and' primitive
    "numeric_lag",  # deprecated and replaced with `lag`
]

REQUIRED_INPUT_PRIMITIVES = [  # non-numeric primitives that require input
    "count_string",
    "distance_to_holiday",
    "is_in_geobox",
    "not_equal_scalar",
    "equal_scalar",
    "time_since",
    "isin",
]

OTHER_PRIMITIVES_TO_EXCLUDE = [  # Excluding some primitives that can produce too many features or aren't useful in extracting information
    "not",
    "and",
    "or",
    "equal",
    "not_equal",
]

# Union of all the exclusion groups above.
DEFAULT_EXCLUDED_PRIMITIVES = (
    REQUIRED_INPUT_PRIMITIVES
    + DEPRECATED_PRIMITIVES
    + ORDERED_PRIMITIVES
    + OTHER_PRIMITIVES_TO_EXCLUDE
)

# TODO: Make this list more dynamic
TIME_SERIES_PRIMITIVES = [
    "expanding_count",
    "expanding_max",
    "expanding_mean",
    "expanding_min",
    "expanding_std",
    "expanding_trend",
    "lag",
    "rolling_count",
    "rolling_outlier_count",
    "rolling_max",
    "rolling_mean",
    "rolling_min",
    "rolling_std",
    "rolling_trend",
]
# TODO: Support multi-table
def get_recommended_primitives(
    entityset: EntitySet,
    include_time_series_primitives: bool = False,
    excluded_primitives: List[str] = DEFAULT_EXCLUDED_PRIMITIVES,
) -> List[str]:
    """Get a list of recommended primitives given an entity set.
    Description:
        This function works by first getting a list of valid primitives withholding any primitives specified in `excluded_primitives` that could be applied to a single-table EntitySet.
        Secondly, engineered features are created for non-numeric fields and are checked for non-uniqueness. If the feature is non-unique, it is added to the recommendation list.
        Then, numeric fields are checked for skewness. Depending on how skew a column is `square_root` or `natural_logarithm` will be recommended.
        Lastly if `include_time_series_primitives` is specified as `True`, `Lag` will always be recommended,
        as well as all Rolling and Expanding primitives if numeric columns are present.
    Args:
        entityset (EntitySet): EntitySet that only contains one dataframe.
        include_time_series_primitives (bool): Whether or not time-series primitives should be considered. Defaults to False.
        excluded_primitives (List[str]): List of transform primitives to exclude from recommendations. Defaults to DEFAULT_EXCLUDED_PRIMITIVES.
    Returns:
        List[str]: Names of recommended transform primitives.
    Raises:
        IndexError: If the entityset is empty or contains more than one dataframe.
    Note:
        The main objective of this function is to recommend primitives that could potentially provide important features to the modeling process.
        Non-numeric primitives do a great job in mainly serving as a way to extract information from origin features that may essentially be meaningless by themselves (e.g., NaturalLanguage, Datetime, LatLong).
        That is why they are the main focus of this function. Numeric transform primitives are very case-by-case dependent and therefore it is hard to mathematically quantify which should be recommended.
        Therefore, only transform primitives that address skewed numeric columns are included, as this is a standard and quantifiable transformation step. The only exception to this rule being
        for time series problems. Because there are so few primitives that are only applicable for time series, all of them are included in the recommended primitives list.
    Note:
        This function currently only works for single table and will only recommend transform primitives.
    """
    es_dataframe_list = entityset.dataframes
    if len(es_dataframe_list) == 0:
        raise IndexError("No DataFrame in EntitySet found. Please add a DataFrame.")
    if len(es_dataframe_list) > 1:
        raise IndexError(
            "Multi-table EntitySets are currently not supported. Please only use a single table EntitySet.",
        )
    target_dataframe_name = es_dataframe_list[0].ww.name
    recommended_primitives = set()
    # Copy before extending: `excluded_primitives` defaults to the shared
    # module-level DEFAULT_EXCLUDED_PRIMITIVES list, and extending it in place
    # (`+=`) would permanently grow the default for every subsequent call.
    all_excluded_primitives = list(excluded_primitives)
    if not include_time_series_primitives:
        all_excluded_primitives += TIME_SERIES_PRIMITIVES
    all_trans_primitives = get_transform_primitives()
    selected_trans_primitives = [
        p
        for name, p in all_trans_primitives.items()
        if name not in all_excluded_primitives
    ]
    # get_valid_primitives returns (agg_primitives, trans_primitives); we only
    # need the transform primitives that apply at depth 1.
    valid_primitive_names = [
        prim.name
        for prim in get_valid_primitives(
            entityset,
            target_dataframe_name,
            1,
            selected_trans_primitives,
        )[1]
    ]
    recommended_primitives.update(
        _recommend_non_numeric_primitives(
            entityset,
            target_dataframe_name,
            valid_primitive_names,
        ),
    )
    recommended_primitives.update(
        _recommend_skew_numeric_primitives(
            entityset,
            target_dataframe_name,
            valid_primitive_names,
        ),
    )
    # Any time-series primitive that survived the exclusion/validity filters
    # is always recommended.
    recommended_primitives.update(
        set(TIME_SERIES_PRIMITIVES).intersection(
            valid_primitive_names,
        ),
    )
    return list(recommended_primitives)
def _recommend_non_numeric_primitives(
    entityset: EntitySet,
    target_dataframe_name: str,
    valid_primitives: List[str],
) -> set:
    """Get a set of non-numeric primitives for a given dataset and a list of primitives.
    Description:
        Given a single table entity set with a `target_dataframe_name` and an applicable list of `valid_primitives`,
        get a set of primitives which produce non-unique features.
    Args:
        entityset (EntitySet): EntitySet that only contains one dataframe.
        target_dataframe_name (str): Name of target dataframe to access in `entityset`.
        valid_primitives (List[str]): List of primitives to calculate and check output features.
    Returns:
        set: Names of primitives whose computed features had more than one unique value.
    """
    recommended_non_numeric_primitives: set[str] = set()
    # Only want to run feature generation on non numeric primitives
    numeric_columns_to_ignore = list(
        entityset[target_dataframe_name]
        .ww.select(include="numeric", return_schema=True)
        .columns,
    )
    # features_only=True skips computation here; each feature is evaluated
    # individually below so one failure doesn't abort the rest.
    features = dfs(
        entityset=entityset,
        target_dataframe_name=target_dataframe_name,
        trans_primitives=valid_primitives,
        max_depth=1,
        features_only=True,
        ignore_columns={target_dataframe_name: numeric_columns_to_ignore},
    )
    for f in features:
        # Skip features without a primitive name and primitives already
        # recommended, to avoid recomputing their feature matrices.
        if (
            f.primitive.name is not None
            and f.primitive.name not in recommended_non_numeric_primitives
        ):
            try:
                matrix = calculate_feature_matrix([f], entityset)
                # A feature with more than one unique value carries signal;
                # recommend the primitive that produced it.
                for f_name in f.get_feature_names():
                    if len(matrix[f_name].unique()) > 1:
                        recommended_non_numeric_primitives.add(f.primitive.name)
            except (
                Exception
            ) as e:  # If error in calculating feature matrix pass on the recommendation
                logger = logging.getLogger("featuretools")
                logger.error(
                    f"Exception with feature {f.get_name()} with primitive {f.primitive.name}: {str(e)}",
                )
    return recommended_non_numeric_primitives
def _recommend_skew_numeric_primitives(
    entityset: EntitySet,
    target_dataframe_name: str,
    valid_primitives: List[str],
) -> set:
    """Get a set of recommended skew numeric primitives given an entity set.
    Description:
        Given woodwork initialized dataframe of origin features with only `numeric` semantic tags and an applicable list of `valid_skew_primitives`,
        get a set of primitives which could be applied to address right skewness.
    Args:
        entityset (EntitySet): EntitySet that only contains one dataframe.
        target_dataframe_name (str): Name of target dataframe to access in `entityset`.
        valid_primitives (List[str]): List of primitives to compare.
    Returns:
        set: Subset of {"square_root", "natural_logarithm"} applicable to the data.
    Note:
        We currently only have primitives to address right skewness.
    """
    # Single accumulator for the whole function (previously re-initialized
    # inside the `if`, which shadowed this one to no effect).
    recommended_skew_primitives: set[str] = set()
    skew_numeric_primitives = {"square_root", "natural_logarithm"}
    valid_skew_primitives = skew_numeric_primitives.intersection(valid_primitives)
    if valid_skew_primitives:
        numerics_only_df = entityset[target_dataframe_name].ww.select("numeric")
        for col in numerics_only_df:
            # Shouldn't recommend log, sqrt if nans, zeros and negative numbers are present
            contains_nan = numerics_only_df[col].isnull().any()
            all_above_zero = (numerics_only_df[col] > 0).all()
            if all_above_zero and not contains_nan:
                skew = numerics_only_df[col].skew()
                # We currently don't have anything in featuretools to automatically handle left skewed data as well as skewed data with negative values
                if 0.5 < skew < 1 and "square_root" in valid_skew_primitives:
                    recommended_skew_primitives.add("square_root")
                    # TODO: Add Box Cox here when available
                if skew > 1 and "natural_logarithm" in valid_skew_primitives:
                    recommended_skew_primitives.add("natural_logarithm")
                    # TODO: Add log base 10 transform primitive when available
    return recommended_skew_primitives
| 9,941 | 38.927711 | 210 | py |
featuretools | featuretools-main/featuretools/utils/api.py | # flake8: noqa
from featuretools.utils.entry_point import entry_point
from featuretools.utils.gen_utils import make_tqdm_iterator
from featuretools.utils.time_utils import (
calculate_trend,
convert_time_units,
make_temporal_cutoffs,
)
from featuretools.utils.trie import Trie
from featuretools.utils.utils_info import (
get_featuretools_root,
get_installed_packages,
get_sys_info,
show_info,
)
| 423 | 25.5 | 59 | py |
featuretools | featuretools-main/featuretools/utils/spark_utils.py | import pandas as pd
def replace_tuple_columns(pdf):
    """Return a copy of ``pdf`` with tuple cell values converted to lists.

    Spark cannot ingest pandas columns holding Python tuples, so any column
    containing tuples has each tuple replaced by an equivalent list; other
    columns are copied through unchanged.

    Args:
        pdf (pd.DataFrame): Input dataframe.

    Returns:
        pd.DataFrame: New dataframe with the same columns.
    """
    new_df = pd.DataFrame()
    for c in pdf.columns:
        col = pdf[c]
        # Checking every row (instead of only the first, which also raised
        # IndexError on an empty frame) handles empty and mixed columns safely.
        if col.map(lambda x: isinstance(x, tuple)).any():
            new_df[c] = col.map(lambda x: list(x) if isinstance(x, tuple) else x)
        else:
            new_df[c] = col
    return new_df
def replace_nan_with_None(df):
    """Return a copy of ``df`` with NaN values replaced by None.

    NaNs inside tuple/list cells are also replaced (and the container becomes
    a list). Each column is cast back to its original dtype; note this means
    numeric dtypes may reintroduce NaN.
    """

    def _clean(value):
        if isinstance(value, (tuple, list)):
            return [None if pd.isna(item) else item for item in value]
        if pd.isna(value):
            return None
        return value

    result = pd.DataFrame()
    for column in df.columns:
        converted = df[column].apply(_clean)
        result[column] = converted.astype(df[column].dtype)
    return result
def replace_categorical_columns(pdf):
    """Return a copy of ``pdf`` with category-dtype columns cast to string.

    Non-categorical columns are copied through unchanged.
    """
    new_df = pd.DataFrame()
    for name in pdf.columns:
        series = pdf[name]
        is_categorical = series.dtype.name == "category"
        new_df[name] = series.astype("string") if is_categorical else series
    return new_df
def pd_to_spark_clean(pdf):
    """Apply all Spark-compatibility cleaning steps to a pandas DataFrame.

    Runs, in order: tuple→list conversion, NaN→None replacement, and
    category→string casting. Returns the cleaned dataframe.
    """
    cleaned = replace_tuple_columns(pdf)
    cleaned = replace_nan_with_None(cleaned)
    return replace_categorical_columns(cleaned)
| 1,228 | 23.58 | 87 | py |
featuretools | featuretools-main/featuretools/utils/entry_point.py | import time
from functools import wraps
from inspect import signature
import pkg_resources
def entry_point(name):
    """Decorator factory that notifies plugins registered under entry-point
    group ``name`` around each call of the wrapped function.

    Each registered entry point is loaded and instantiated per call, then
    receives ``on_call(kwargs)`` before the function runs, and either
    ``on_return(return_value, runtime)`` or ``on_error(error, runtime)``
    afterwards.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Fold positional args into a single kwargs dict so hooks see
            # every argument by name.
            call_kwargs = dict(kwargs)
            for value, param_name in zip(args, signature(func).parameters):
                call_kwargs[param_name] = value

            # Collect and initialize all registered entry points.
            hooks = [ep.load()() for ep in pkg_resources.iter_entry_points(name)]

            for hook in hooks:
                hook.on_call(call_kwargs)

            start = time.time()
            try:
                result = func(*args, **kwargs)
            except Exception as err:
                elapsed = time.time() - start
                for hook in hooks:
                    hook.on_error(error=err, runtime=elapsed)
                raise err
            elapsed = time.time() - start
            for hook in hooks:
                hook.on_return(return_value=result, runtime=elapsed)
            return result

        return wrapper

    return decorator
| 1,551 | 30.04 | 72 | py |
class Trie(object):
    """
    A trie (prefix tree) where the keys are sequences of hashable objects.
    It behaves similarly to a dictionary, except that the keys can be lists or
    other sequences.
    Examples:
        >>> from featuretools.utils import Trie
        >>> trie = Trie(default=str)
        >>> # Set a value
        >>> trie.get_node([1, 2, 3]).value = '123'
        >>> # Get a value
        >>> trie.get_node([1, 2, 3]).value
        '123'
        >>> # Getting a key that has not been set returns the default value.
        >>> trie.get_node([1, 2]).value
        ''
    """

    def __init__(self, default=lambda: None, path_constructor=list):
        """
        default: A function returning the value to use for new nodes.
        path_constructor: A function which constructs a path from a list. The
            path type must support addition (concatenation).
        """
        self.value = default()
        self._children = {}
        self._default = default
        self._path_constructor = path_constructor

    def children(self):
        """
        Return a list of (edge, child) pairs for this node's immediate
        children, in insertion order.
        """
        return [(edge, child) for edge, child in self._children.items()]

    def get_node(self, path):
        """
        Return the sub-trie at the given path, creating intermediate nodes
        (initialized with the default value) as needed.
        Examples:
            >>> from featuretools.utils import Trie
            >>> t = Trie()
            >>> t.get_node([1, 2, 3]).value = '123'
            >>> t.get_node([1, 2]).get_node([3]).value
            '123'
        """
        node = self
        for edge in path:
            if edge not in node._children:
                node._children[edge] = Trie(
                    default=node._default,
                    path_constructor=node._path_constructor,
                )
            node = node._children[edge]
        return node

    def __iter__(self):
        """
        Depth-first iteration over all nodes, yielding (path, value) tuples.
        The root is yielded first with an empty path.
        """
        yield self._path_constructor([]), self.value
        for edge, child in self._children.items():
            prefix = self._path_constructor([edge])
            for suffix, value in child:
                yield prefix + suffix, value
| 3,381 | 30.607477 | 79 | py |
featuretools | featuretools-main/featuretools/utils/time_utils.py | from datetime import datetime, timedelta
import numpy as np
import pandas as pd
def make_temporal_cutoffs(
    instance_ids,
    cutoffs,
    window_size=None,
    num_windows=None,
    start=None,
):
    """Makes a set of equally spaced cutoff times prior to a set of input cutoffs and instance ids.
    If window_size and num_windows are provided, then num_windows of size window_size will be created
    prior to each cutoff time
    If window_size and a start list is provided, then a variable number of windows will be created prior
    to each cutoff time, with the corresponding start time as the first cutoff.
    If num_windows and a start list is provided, then num_windows of variable size will be created prior
    to each cutoff time, with the corresponding start time as the first cutoff
    Args:
        instance_ids (list, np.ndarray, or pd.Series): list of instance ids. This function will make a
            new datetime series of multiple cutoff times for each value in this array.
        cutoffs (list, np.ndarray, or pd.Series): list of datetime objects associated with each instance id.
            Each one of these will be the last time in the new datetime series for each instance id
        window_size (pd.Timedelta, optional): amount of time between each datetime in each new cutoff series
        num_windows (int, optional): number of windows in each new cutoff series
        start (list, optional): list of start times for each instance id
    """
    if window_size is not None and num_windows is not None and start is not None:
        raise ValueError(
            "Only supply 2 of the 3 optional args, window_size, num_windows and start",
        )
    frames = []
    for idx, (instance_id, cutoff) in enumerate(zip(instance_ids, cutoffs)):
        freq = window_size
        range_start = None
        if start is not None:
            if window_size is None:
                # Derive an even spacing that puts the first cutoff at the
                # start time and the last at the cutoff time.
                freq = (cutoff - start[idx]) / (num_windows - 1)
            else:
                range_start = start[idx]
        times = pd.date_range(
            end=cutoff,
            periods=num_windows,
            freq=freq,
            start=range_start,
        )
        frames.append(
            pd.DataFrame(
                {"time": times, "instance_id": [instance_id] * len(times)},
            ),
        )
    return pd.concat(frames).reset_index(drop=True)
def convert_time_units(secs, unit):
    """
    Converts a time specified in seconds to a time in the given units
    Args:
        secs (integer): number of seconds. This function will convert the units of this number.
        unit(str): units to be converted to.
            acceptable values: years, months, days, hours, minutes, seconds, milliseconds, nanoseconds
    """
    seconds_per_unit = {
        "years": 31540000,
        "months": 2628000,
        "days": 86400,
        "hours": 3600,
        "minutes": 60,
        "seconds": 1,
        "milliseconds": 0.001,
        "nanoseconds": 0.000000001,
    }
    divisor = seconds_per_unit.get(unit)
    if divisor is None:
        raise ValueError("Invalid unit given, make sure it is plural")
    return secs / divisor
def convert_datetime_to_floats(x):
    """Convert a datetime Series to float values scaled by an inferred unit.

    The unit (days/hours/minutes/seconds) is chosen from the first element via
    ``find_dividend_by_unit`` on its epoch-seconds value.
    """
    first_seconds = int(x.iloc[0].value * 1e-9)
    divisor = find_dividend_by_unit(first_seconds)
    values = pd.to_numeric(x).astype(np.float64).values
    # Nanoseconds -> seconds, then scale down by the inferred unit.
    return values * (1e-9 / divisor)
def convert_timedelta_to_floats(x):
    """Convert a timedelta Series to float values scaled by an inferred unit.

    The unit (days/hours/minutes/seconds) is chosen from the first element's
    total seconds via ``find_dividend_by_unit``.
    """
    first_seconds = int(x.iloc[0].total_seconds())
    divisor = find_dividend_by_unit(first_seconds)
    return pd.TimedeltaIndex(x).total_seconds().astype(np.float64) / divisor
def find_dividend_by_unit(time):
    """Return the largest time-unit divisor that evenly divides ``time``.

    Checks days (86400), hours (3600), and minutes (60) in that order and
    falls back to 1 (seconds) when none divides evenly.
    """
    for seconds_per_unit in (86400, 3600, 60):
        quotient = time / seconds_per_unit
        if quotient == round(quotient):
            return seconds_per_unit
    return 1
def calculate_trend(series):
    """Return the slope of a linear fit of ``series`` values against its index.

    Datetime values (index or values) are converted to floats before fitting;
    timedelta values are converted via total seconds. Returns np.nan when
    fewer than three non-null points remain, and 0 when all x values are equal.
    """
    # numpy can't handle `Int64` values, so cast to float
    if series.dtype == "Int64":
        series = series.astype("float64")
    df = pd.DataFrame({"x": series.index, "y": series.values}).dropna()
    if df.shape[0] <= 2:
        return np.nan
    if isinstance(df["x"].iloc[0], (datetime, pd.Timestamp)):
        x = convert_datetime_to_floats(df["x"])
    else:
        x = df["x"].values
    if isinstance(df["y"].iloc[0], (datetime, pd.Timestamp)):
        y = convert_datetime_to_floats(df["y"])
    elif isinstance(df["y"].iloc[0], (timedelta, pd.Timedelta)):
        y = convert_timedelta_to_floats(df["y"])
    else:
        y = df["y"].values
    # Center both series so the fit is numerically stable.
    x = x - x.mean()
    y = y - y.mean()
    # prevent divide by zero error
    if len(np.unique(x)) == 1:
        return 0
    # consider scipy.stats.linregress for large n cases
    coefficients = np.polyfit(x, y, 1)
    return coefficients[0]
| 4,766 | 33.05 | 108 | py |
featuretools | featuretools-main/featuretools/utils/__init__.py | # flake8: noqa
from featuretools.utils.api import *
| 52 | 16.666667 | 36 | py |
# Common top-level domains. Longer TLDs are listed first so that matching does
# not stop at a shorter TLD that is a prefix of a longer one; remember to
# escape the periods when these entries are interpolated into patterns.
COMMON_TLDS = [
    "management",
    "technology",
    "solutions",
    "delivery",
    "services",
    "software",
    "digital",
    "finance",
    "monster",
    "network",
    "support",
    "systems",
    "website",
    "agency",
    "design",
    "events",
    "global",
    "health",
    "online",
    "stream",
    "studio",
    "travel",
    # five-letter TLDs
    "apple",
    "click",
    "cloud",
    "email",
    "games",
    "group",
    "media",
    "ninja",
    "press",
    "rocks",
    "space",
    "store",
    "today",
    "tools",
    "video",
    "works",
    "world",
    # four-letter TLDs
    "aero",
    "arpa",
    "asia",
    "bank",
    "best",
    "blog",
    "buzz",
    "care",
    "casa",
    "chat",
    "club",
    "coop",
    "cyou",
    "desi",
    "farm",
    "goog",
    "guru",
    "host",
    "info",
    "jobs",
    "life",
    "link",
    "live",
    "mobi",
    "name",
    "news",
    "page",
    "plus",
    "shop",
    "site",
    "team",
    "tech",
    "work",
    "zone",
    # three-letter TLDs
    "app",
    "aws",
    "bid",
    "biz",
    "box",
    "cam",
    "cat",
    "com",
    "dev",
    "edu",
    "eus",
    "fun",
    "gov",
    "icu",
    "int",
    "ltd",
    "mil",
    "net",
    "nyc",
    "one",
    "onl",
    "org",
    "ovh",
    "pro",
    "pub",
    "run",
    "sap",
    "top",
    "vip",
    "win",
    "xxx",
    "xyz",
    # two-letter (mostly country-code) TLDs
    "ac",
    "ad",
    "ae",
    "ag",
    "ai",
    "al",
    "am",
    "ar",
    "at",
    "au",
    "az",
    "ba",
    "bd",
    "be",
    "bg",
    "br",
    "by",
    "bz",
    "ca",
    "cc",
    "cf",
    "ch",
    "cl",
    "cm",
    "cn",
    "co",
    "cr",
    "cu",
    "cx",
    "cy",
    "cz",
    "de",
    "dk",
    "do",
    "ec",
    "ee",
    "eg",
    "es",
    "eu",
    "fi",
    "fm",
    "fr",
    "ga",
    "ge",
    "gg",
    "gl",
    "gq",
    "gr",
    "gs",
    "gt",
    "hk",
    "hn",
    "hr",
    "hu",
    "id",
    "ie",
    "il",
    "im",
    "in",
    "io",
    "ir",
    "is",
    "it",
    "jo",
    "jp",
    "ke",
    "kh",
    "ki",
    "kr",
    "kw",
    "kz",
    "la",
    "lb",
    "li",
    "lk",
    "lt",
    "lu",
    "lv",
    "ly",
    "ma",
    "md",
    "me",
    "mk",
    "ml",
    "mm",
    "mn",
    "ms",
    "mu",
    "mx",
    "my",
    "nf",
    "ng",
    "nl",
    "no",
    "np",
    "nu",
    "nz",
    "om",
    "pa",
    "pe",
    "ph",
    "pk",
    "pl",
    "pr",
    "ps",
    "pt",
    "pw",
    "py",
    "qa",
    "re",
    "ro",
    "rs",
    "ru",
    "sa",
    "sc",
    "se",
    "sg",
    "sh",
    "si",
    "sk",
    "so",
    "st",
    "su",
    "sv",
    "sx",
    "th",
    "tj",
    "tk",
    "tn",
    "to",
    "tr",
    "tt",
    "tv",
    "tw",
    "ua",
    "ug",
    "uk",
    "us",
    "uy",
    "vc",
    "ve",
    "vn",
    "ws",
    "za",
]
| 2,861 | 10.312253 | 89 | py |
featuretools | featuretools-main/featuretools/primitives/options_utils.py | import logging
import warnings
from itertools import permutations
from featuretools import primitives
from featuretools.feature_base import IdentityFeature
logger = logging.getLogger("featuretools")
def _get_primitive_options():
    """Return the mapping from every recognized primitive-option key to the
    function that validates that option's value type."""
    option_keys = [
        "ignore_dataframes",
        "include_dataframes",
        "ignore_columns",
        "include_columns",
        "ignore_groupby_dataframes",
        "include_groupby_dataframes",
        "ignore_groupby_columns",
        "include_groupby_columns",
    ]
    validators = [
        list_dataframe_check,
        list_dataframe_check,
        dict_to_list_column_check,
        dict_to_list_column_check,
        list_dataframe_check,
        list_dataframe_check,
        dict_to_list_column_check,
        dict_to_list_column_check,
    ]
    return dict(zip(option_keys, validators))
def dict_to_list_column_check(option, es):
    """Validate that ``option`` is a dict mapping dataframe names to lists of
    column names. Warns (but still returns True) for names not found in ``es``.
    """
    if not isinstance(option, dict):
        return False
    if any(not isinstance(columns, list) for columns in option.values()):
        return False
    for df_name, columns in option.items():
        if df_name not in es:
            warnings.warn("Dataframe '%s' not in entityset" % (df_name))
            continue
        for column in columns:
            if column not in es[df_name]:
                warnings.warn(
                    "Column '%s' not in dataframe '%s'" % (column, df_name),
                )
    return True
def list_dataframe_check(option, es):
    """Validate that ``option`` is a list of dataframe names. Warns (but still
    returns True) for names not found in ``es``.
    """
    if not isinstance(option, list):
        return False
    for df_name in option:
        if df_name not in es:
            warnings.warn("Dataframe '%s' not in entityset" % (df_name))
    return True
def generate_all_primitive_options(
    all_primitives,
    primitive_options,
    ignore_dataframes,
    ignore_columns,
    es,
):
    """Combine global ignore settings with per-primitive options.

    Builds a dict mapping each primitive in ``all_primitives`` to a list of
    fully-initialized option dicts, reconciling the global
    ``ignore_dataframes``/``ignore_columns`` defaults with any per-primitive
    overrides (explicit includes remove the corresponding global ignores).

    Args:
        all_primitives (list): Primitives to generate options for.
        primitive_options (dict): User-supplied per-primitive options.
        ignore_dataframes (set): Dataframe names ignored globally.
        ignore_columns (dict): Mapping of dataframe name to set of column
            names ignored globally.
        es (EntitySet): Entityset whose dataframes/columns validate options.

    Returns:
        tuple: (primitive_options, global_ignore_dataframes,
        global_ignore_columns) after reconciliation.
    """
    # Validation only needs dataframe/column names, not the full entityset.
    dataframe_dict = {
        dataframe.ww.name: [col for col in dataframe.columns]
        for dataframe in es.dataframes
    }
    primitive_options = _init_primitive_options(primitive_options, dataframe_dict)
    global_ignore_dataframes = ignore_dataframes
    global_ignore_columns = ignore_columns.copy()
    # for now, only use primitive names as option keys
    for primitive in all_primitives:
        # Instance-specific options take precedence over class-level options.
        if primitive in primitive_options and primitive.name in primitive_options:
            msg = (
                "Options present for primitive instance and generic "
                "primitive class (%s), primitive instance will not use generic "
                "options" % (primitive.name)
            )
            warnings.warn(msg)
        if primitive in primitive_options or primitive.name in primitive_options:
            options = primitive_options.get(
                primitive,
                primitive_options.get(primitive.name),
            )
            # Reconcile global options with individually-specified options
            included_dataframes = set().union(
                *[
                    option.get("include_dataframes", set()).union(
                        option.get("include_columns", {}).keys(),
                    )
                    for option in options
                ]
            )
            global_ignore_dataframes = global_ignore_dataframes.difference(
                included_dataframes,
            )
            for option in options:
                # don't globally ignore a column if it's included for a primitive
                if "include_columns" in option:
                    for dataframe, include_cols in option["include_columns"].items():
                        global_ignore_columns[dataframe] = global_ignore_columns[
                            dataframe
                        ].difference(include_cols)
                option["ignore_dataframes"] = option["ignore_dataframes"].union(
                    ignore_dataframes.difference(included_dataframes),
                )
            for dataframe, ignore_cols in ignore_columns.items():
                # if already ignoring columns for this dataframe, add globals
                for option in options:
                    if dataframe in option["ignore_columns"]:
                        option["ignore_columns"][dataframe] = option["ignore_columns"][
                            dataframe
                        ].union(ignore_cols)
                    # if no ignore_columns and dataframe is explicitly included, don't ignore the column
                    elif dataframe in included_dataframes:
                        continue
                    # Otherwise, keep the global option
                    else:
                        option["ignore_columns"][dataframe] = ignore_cols
        else:
            # no user specified options, just use global defaults
            primitive_options[primitive] = [
                {
                    "ignore_dataframes": ignore_dataframes,
                    "ignore_columns": ignore_columns,
                },
            ]
    return primitive_options, global_ignore_dataframes, global_ignore_columns
def _init_primitive_options(primitive_options, es):
    """Normalize user-supplied primitive options into a flat dict.

    Flattens tuple keys (options shared by several primitives), resolves
    primitive-name strings to primitive classes, wraps a single options dict
    into a one-element list, and rejects duplicate entries for a primitive.

    Args:
        primitive_options (dict): Mapping of primitive name/class/instance (or
            tuple of them) to an options dict or per-input list of options dicts.
        es: Mapping of dataframe name to its column names (see
            ``generate_all_primitive_options``), used to validate options.

    Returns:
        dict: Mapping of primitive name to a list of initialized option dicts.

    Raises:
        ValueError: If a primitive name string is unknown.
        KeyError: If the same primitive is given options more than once.
    """
    # Flatten all tuple keys, convert value lists into sets, check for
    # conflicting keys
    flattened_options = {}
    for primitive_keys, options in primitive_options.items():
        if not isinstance(primitive_keys, tuple):
            primitive_keys = (primitive_keys,)
        if isinstance(options, list):
            for primitive_key in primitive_keys:
                if isinstance(primitive_key, str):
                    primitive = primitives.get_aggregation_primitives().get(
                        primitive_key,
                    ) or primitives.get_transform_primitives().get(primitive_key)
                    if not primitive:
                        msg = "Unknown primitive with name '{}'".format(primitive_key)
                        raise ValueError(msg)
                else:
                    primitive = primitive_key
                # NOTE(review): validation via assert is stripped under -O.
                assert (
                    len(primitive.input_types[0]) == len(options)
                    if isinstance(primitive.input_types[0], list)
                    else len(primitive.input_types) == len(options)
                ), (
                    "Number of options does not match number of inputs for primitive %s"
                    % (primitive_key)
                )
            # Initialize once, after validating against every primitive in the
            # tuple: doing this inside the loop re-processed already-initialized
            # dicts (whose values are sets) on later iterations and raised a
            # spurious TypeError for tuple keys.
            options = [
                _init_option_dict(primitive_keys, option, es) for option in options
            ]
        else:
            options = [_init_option_dict(primitive_keys, options, es)]
        for primitive in primitive_keys:
            if isinstance(primitive, type):
                primitive = primitive.name
            # if primitive is specified more than once, raise error
            if primitive in flattened_options:
                raise KeyError("Multiple options found for primitive %s" % (primitive))
            flattened_options[primitive] = options
    return flattened_options
def _init_option_dict(key, option_dict, es):
    """Validate and normalize a single primitive options dict.

    List values become sets, dict values become dicts of sets, and
    ``ignore_columns``/``ignore_dataframes`` are guaranteed to be present.

    Args:
        key (tuple): Primitive identifiers, used only in error messages.
        option_dict (dict): Raw options to validate.
        es: Mapping of dataframe name to column names used for validation.

    Raises:
        KeyError: If an option key is unrecognized.
        TypeError: If an option value fails its type validator.
    """
    initialized_option_dict = {}
    primitive_options = _get_primitive_options()
    # verify all keys are valid and match expected type, convert lists to sets
    for option_key, option in option_dict.items():
        if option_key not in primitive_options:
            raise KeyError(
                "Unrecognized primitive option '%s' for %s"
                % (option_key, ",".join(key)),
            )
        if not primitive_options[option_key](option, es):
            raise TypeError(
                "Incorrect type formatting for '%s' for %s"
                % (option_key, ",".join(key)),
            )
        if isinstance(option, list):
            initialized_option_dict[option_key] = set(option)
        elif isinstance(option, dict):
            # NOTE: the comprehension variable shadows the `key` parameter,
            # which is not needed inside this expression.
            initialized_option_dict[option_key] = {
                key: set(option[key]) for key in option
            }
    # initialize ignore_dataframes and ignore_columns to empty sets if not present
    if "ignore_columns" not in initialized_option_dict:
        initialized_option_dict["ignore_columns"] = dict()
    if "ignore_dataframes" not in initialized_option_dict:
        initialized_option_dict["ignore_dataframes"] = set()
    return initialized_option_dict
def column_filter(f, options, groupby=False):
    """Decide whether feature ``f`` is allowed under one initialized options dict.

    Walks ``f`` plus all of its (deep) dependencies, applying the
    include/ignore column and dataframe rules to each identity feature. With
    ``groupby=True``, the groupby-specific option keys are used and the
    feature must carry a "category" or "foreign_key" semantic tag.

    Returns:
        bool: True if the feature passes every include/ignore rule.
    """
    if groupby and not f.column_schema.semantic_tags.intersection(
        {"category", "foreign_key"},
    ):
        return False
    # Select the regular or groupby-specific option keys up front.
    include_cols = "include_groupby_columns" if groupby else "include_columns"
    ignore_cols = "ignore_groupby_columns" if groupby else "ignore_columns"
    include_dataframes = (
        "include_groupby_dataframes" if groupby else "include_dataframes"
    )
    ignore_dataframes = "ignore_groupby_dataframes" if groupby else "ignore_dataframes"
    dependencies = f.get_dependencies(deep=True) + [f]
    for base_f in dependencies:
        if isinstance(base_f, IdentityFeature):
            # Column-level rules apply only to identity (origin) features.
            if (
                include_cols in options
                and base_f.dataframe_name in options[include_cols]
            ):
                if base_f.get_name() in options[include_cols][base_f.dataframe_name]:
                    continue  # this is a valid feature, go to next
                else:
                    return False  # this is not an included feature
            if ignore_cols in options and base_f.dataframe_name in options[ignore_cols]:
                if base_f.get_name() in options[ignore_cols][base_f.dataframe_name]:
                    return False  # ignore this feature
        if include_dataframes in options:
            return base_f.dataframe_name in options[include_dataframes]
        elif (
            ignore_dataframes in options
            and base_f.dataframe_name in options[ignore_dataframes]
        ):
            return False  # ignore the dataframe
    return True
def ignore_dataframe_for_primitive(options, dataframe, groupby=False):
    """Return True if at least one option dict directs the primitive to skip
    ``dataframe`` entirely (using groupby-specific keys when ``groupby``)."""
    # This logic handles whether given options ignore a dataframe or not
    def should_ignore_dataframe(option):
        if groupby:
            # Groupby include/ignore dataframe rules apply only when the
            # dataframe isn't explicitly listed under include_groupby_columns.
            if (
                "include_groupby_columns" not in option
                or dataframe.ww.name not in option["include_groupby_columns"]
            ):
                if (
                    "include_groupby_dataframes" in option
                    and dataframe.ww.name not in option["include_groupby_dataframes"]
                ):
                    return True
                elif (
                    "ignore_groupby_dataframes" in option
                    and dataframe.ww.name in option["ignore_groupby_dataframes"]
                ):
                    return True
        # An explicit column include overrides any dataframe-level ignore.
        if (
            "include_columns" in option
            and dataframe.ww.name in option["include_columns"]
        ):
            return False
        elif "include_dataframes" in option:
            return dataframe.ww.name not in option["include_dataframes"]
        elif dataframe.ww.name in option["ignore_dataframes"]:
            return True
        else:
            return False
    return any([should_ignore_dataframe(option) for option in options])
def filter_groupby_matches_by_options(groupby_matches, options):
    """Filter candidate groupby columns using groupby option semantics.

    Each match is wrapped in a one-element tuple so it can be processed by
    ``filter_matches_by_options`` with ``groupby=True``.
    """
    wrapped = [(groupby_match,) for groupby_match in groupby_matches]
    return filter_matches_by_options(wrapped, options, groupby=True)
def filter_matches_by_options(matches, options, groupby=False, commutative=False):
    """Return the sorted subset of ``matches`` allowed by ``options``.

    With multiple option dicts, each feature in a match is checked against its
    positional option; otherwise all features use the single option dict. When
    ``commutative`` is True, permutations of a rejected match are tried and
    the first valid ordering is kept. Results are sorted by the features'
    unique names for determinism.
    """
    if len(options) > 1:
        # One option dict per primitive input: pair them up positionally.
        def is_valid_match(match):
            return all(
                column_filter(feature, option, groupby)
                for feature, option in zip(match, options)
            )

    else:

        def is_valid_match(match):
            return all(column_filter(feature, options[0], groupby) for feature in match)

    valid_matches = set()
    for match in matches:
        if is_valid_match(match):
            valid_matches.add(match)
        elif commutative:
            for ordering in permutations(match):
                if is_valid_match(ordering):
                    valid_matches.add(ordering)
                    break
    return sorted(
        valid_matches,
        key=lambda features: [feature.unique_name() for feature in features],
    )
| 12,514 | 38.355346 | 104 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.