Development Workflow

Unit Tests

Fennel's Python client ships with an (inefficient) mock server built in. This makes it possible to develop locally and run unit tests against the mock server to verify correctness. This works even if you don't have any remote Fennel server - in fact, it works even without an internet connection.

The mock server has near parity with the actual server, with one notable exception - it doesn't support data connectors to external data systems (after all, it is completely local with zero remote dependencies!).

Let's first see how it works; a fully functional unit test example follows later.

import unittest

import pandas as pd

from fennel.testing import mock


class TestDataset(unittest.TestCase):
    @mock
    def test_dataset(self, client):
        # client talks to the mock server
        # ... do any setup
        # commit the dataset
        client.commit(message="add user dataset", datasets=[User])
        # ... some other stuff
        client.log("fennel_webhook", "User", pd.DataFrame(...))
        # ... some other stuff
        found = client.query(...)
        self.assertEqual(found, expected)


Here we import mock from fennel.testing. This decorator can be applied to test functions and supplies an extra argument called client to the test. Once the client object reaches the body of the test, you can do all the operations that are typically done on a real client - commit datasets/featuresets, log data, extract features, etc.

Since external data integration doesn't work in the mock server, the only way to bring data into a dataset is to explicitly log it to a webhook.
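
For instance, the User dataset referenced in the sketch above would be defined with a webhook source, roughly like this (a minimal sketch; the uid column is hypothetical):

from datetime import datetime

from fennel.connectors import source, Webhook
from fennel.datasets import dataset

webhook = Webhook(name="fennel_webhook")


# logging to the "User" endpoint of fennel_webhook feeds this dataset
@source(webhook.endpoint("User"), disorder="14d", cdc="append")
@dataset
class User:
    uid: int
    t: datetime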

Testing Datasets

For testing datasets, you can use the client's log method to add some local data to a dataset and then query this or any downstream dataset using the lookup method. Here is an end-to-end example. Suppose our regular, non-test code looks like this:

from datetime import datetime

from fennel.connectors import source, Webhook
from fennel.datasets import Count, Sum, Average
from fennel.datasets import dataset, field, pipeline, Dataset
from fennel.lib import includes, meta, inputs, outputs

__owner__ = "[email protected]"
webhook = Webhook(name="fennel_webhook")


@source(webhook.endpoint("RatingActivity"), disorder="14d", cdc="append")
@dataset
class RatingActivity:
    userid: int
    rating: float
    movie: str
    t: datetime


@dataset(index=True)
class MovieRating:
    movie: str = field(key=True)
    rating: float
    num_ratings: int
    sum_ratings: float
    t: datetime

    @pipeline
    @inputs(RatingActivity)
    def pipeline_aggregate(cls, activity: Dataset):
        return activity.groupby("movie").aggregate(
            num_ratings=Count(window="7d"),
            sum_ratings=Sum(window="28d", of="rating"),
            rating=Average(window="12h", of="rating"),
        )


Now suppose you want to test that data reaching the RatingActivity dataset correctly propagates to the MovieRating dataset via the pipeline. You could write the following unit test to do so:

import unittest
from datetime import datetime, timedelta, timezone

import pandas as pd
import requests

from fennel.testing import mock


class TestDataset(unittest.TestCase):
    @mock
    def test_dataset(self, client):
        # Commit the datasets
        client.commit(
            message="datasets: add RatingActivity and MovieRating",
            datasets=[MovieRating, RatingActivity],
        )
        now = datetime.now(timezone.utc)
        one_hour_ago = now - timedelta(hours=1)
        two_hours_ago = now - timedelta(hours=2)
        three_hours_ago = now - timedelta(hours=3)
        four_hours_ago = now - timedelta(hours=4)
        five_hours_ago = now - timedelta(hours=5)

        data = [
            [18231, 2, "Jumanji", five_hours_ago],
            [18231, 3, "Jumanji", four_hours_ago],
            [18231, 2, "Jumanji", three_hours_ago],
            [18231, 5, "Jumanji", five_hours_ago],
            [18231, 4, "Titanic", three_hours_ago],
            [18231, 3, "Titanic", two_hours_ago],
            [18231, 5, "Titanic", one_hour_ago],
            [18231, 5, "Titanic", now - timedelta(minutes=1)],
            [18231, 3, "Titanic", two_hours_ago],
        ]
        columns = ["userid", "rating", "movie", "t"]
        df = pd.DataFrame(data, columns=columns)
        response = client.log("fennel_webhook", "RatingActivity", df)
        assert response.status_code == requests.codes.OK

        # Do some lookups to verify pipeline_aggregate
        # is working as expected
        ts = pd.Series([now, now])
        names = pd.Series(["Jumanji", "Titanic"])
        df, _ = MovieRating.lookup(ts, movie=names)
        assert df.shape == (2, 5)
        assert df["movie"].tolist() == ["Jumanji", "Titanic"]
        assert df["rating"].tolist() == [3, 4]
        assert df["num_ratings"].tolist() == [4, 5]
        assert df["sum_ratings"].tolist() == [12, 20]

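The asserted values follow directly from the logged rows: as of now, Jumanji has 4 ratings within the 7-day count window, with sum 2 + 3 + 2 + 5 = 12 and 12-hour average 3, while Titanic has 5 ratings with sum 20 and average 4. Note also that lookup is an as-of query: passing earlier timestamps returns the aggregate state at those times, with only rows logged at or before each timestamp contributing. A sketch, continuing the same test (exact values depend on window boundary semantics, so none are asserted here):

# as-of lookup: the state of MovieRating three hours ago,
# before the later Titanic ratings arrived
earlier = pd.Series([three_hours_ago, three_hours_ago])
df_past, _ = MovieRating.lookup(earlier, movie=names)
# only rows with t at or before three_hours_ago contribute here
print(df_past[["movie", "num_ratings", "sum_ratings"]])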

Testing Featuresets

Extractors are simple Python functions and hence can be unit tested directly.

import unittest
from datetime import datetime

import pandas as pd

from fennel.featuresets import feature as F, featureset, extractor
from fennel.lib import inputs, meta, outputs


@meta(owner="[email protected]")
@featureset
class UserInfoFeatures:
    userid: int
    name: str
    age: int = F().meta(owner="[email protected]")
    age_squared: int
    age_cubed: int
    is_name_common: bool

    @extractor
    @inputs("age", "name")
    @outputs("age_squared", "age_cubed", "is_name_common")
    def get_age_and_name_features(
        cls, ts: pd.Series, user_age: pd.Series, name: pd.Series
    ):
        is_name_common = name.isin(["John", "Mary", "Bob"])
        df = pd.concat([user_age**2, user_age**3, is_name_common], axis=1)
        df.columns = [
            str(cls.age_squared),
            str(cls.age_cubed),
            str(cls.is_name_common),
        ]
        return df


# somewhere in the test file, you can write this
class TestSimpleExtractor(unittest.TestCase):
    def test_get_age_and_name_features(self):
        age = pd.Series([32, 24])
        name = pd.Series(["John", "Rahul"])
        ts = pd.Series([datetime(2020, 1, 1), datetime(2020, 1, 1)])
        df = UserInfoFeatures.get_age_and_name_features(
            UserInfoFeatures, ts, age, name
        )
        self.assertEqual(df.shape, (2, 3))
        self.assertEqual(
            df["UserInfoFeatures.age_squared"].tolist(), [1024, 576]
        )
        self.assertEqual(
            df["UserInfoFeatures.age_cubed"].tolist(), [32768, 13824]
        )
        self.assertEqual(
            df["UserInfoFeatures.is_name_common"].tolist(),
            [True, False],
        )


For extractors that depend on dataset lookups, the setup looks similar to the dataset testing shown above - create a mock client, commit some datasets/featuresets, log data to a dataset, and finally use the client to extract features. Here is an example (the get_country_geoid helper is given an illustrative body here so the snippet is self-contained):

import unittest
from datetime import datetime, timezone
from typing import Optional

import pandas as pd
import requests

from fennel.connectors import source, Webhook
from fennel.datasets import dataset, field
from fennel.featuresets import feature as F, featureset, extractor
from fennel.lib import includes, inputs, meta, outputs
from fennel.testing import mock

webhook = Webhook(name="fennel_webhook")


def get_country_geoid(country: str) -> int:
    # illustrative mapping from country name to a geo id; @includes
    # ships this helper to the server along with the extractor
    if country == "USA":
        return 1
    elif country == "Chile":
        return 3
    return 5


@meta(owner="[email protected]")
@source(webhook.endpoint("UserInfoDataset"), disorder="14d", cdc="upsert")
@dataset(index=True)
class UserInfoDataset:
    user_id: int = field(key=True)
    name: str
    age: Optional[int]
    timestamp: datetime = field(timestamp=True)
    country: str


@meta(owner="[email protected]")
@featureset
class UserInfoMultipleExtractor:
    userid: int
    name: str
    country_geoid: int
    age: int = F().meta(owner="[email protected]")
    age_squared: int
    age_cubed: int
    is_name_common: bool

    @extractor(deps=[UserInfoDataset])
    @inputs("userid")
    @outputs("age", "name")
    def get_user_age_and_name(cls, ts: pd.Series, user_id: pd.Series):
        df, _found = UserInfoDataset.lookup(ts, user_id=user_id)
        return df[["age", "name"]]

    @extractor
    @inputs("age", "name")
    @outputs("age_squared", "age_cubed", "is_name_common")
    def get_age_and_name_features(
        cls, ts: pd.Series, user_age: pd.Series, name: pd.Series
    ):
        is_name_common = name.isin(["John", "Mary", "Bob"])
        df = pd.concat([user_age**2, user_age**3, is_name_common], axis=1)
        df.columns = [
            "age_squared",
            "age_cubed",
            "is_name_common",
        ]
        return df

    @extractor(deps=[UserInfoDataset])
    @includes(get_country_geoid)
    @inputs("userid")
    @outputs("country_geoid")
    def get_country_geoid_extractor(cls, ts: pd.Series, user_id: pd.Series):
        df, _found = UserInfoDataset.lookup(ts, user_id=user_id)  # type: ignore
        df["country_geoid"] = df["country"].apply(get_country_geoid)
        return df["country_geoid"]


# this is your test code in some test module
class TestExtractorDAGResolution(unittest.TestCase):
    @mock
    def test_dag_resolution(self, client):
        client.commit(
            message="user: add info datasets, featuresets",
            datasets=[UserInfoDataset],
            featuresets=[UserInfoMultipleExtractor],
        )
        now = datetime.now(timezone.utc)
        data = [
            [18232, "John", 32, "USA", now],
            [18234, "Monica", 24, "Chile", now],
        ]
        columns = ["user_id", "name", "age", "country", "timestamp"]
        df = pd.DataFrame(data, columns=columns)
        response = client.log("fennel_webhook", "UserInfoDataset", df)
        assert response.status_code == requests.codes.OK, response.json()

        feature_df = client.query(
            outputs=[UserInfoMultipleExtractor],
            inputs=[UserInfoMultipleExtractor.userid],
            input_dataframe=pd.DataFrame(
                {"UserInfoMultipleExtractor.userid": [18232, 18234]}
            ),
        )
        self.assertEqual(feature_df.shape, (2, 7))

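As the test's name suggests, the client resolves the extractor DAG for you: you can request just derived features, and the mock client chains get_user_age_and_name and get_age_and_name_features automatically. A sketch of such a query, continuing the same test body:

# request only a couple of derived features; the client figures out
# that it must first run get_user_age_and_name to obtain age & name
feature_df = client.query(
    outputs=[
        UserInfoMultipleExtractor.age_squared,
        UserInfoMultipleExtractor.is_name_common,
    ],
    inputs=[UserInfoMultipleExtractor.userid],
    input_dataframe=pd.DataFrame(
        {"UserInfoMultipleExtractor.userid": [18232, 18234]}
    ),
)
self.assertEqual(feature_df.shape, (2, 2))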
