Merge branch 'master' into mariadb_offline_store
tmihalac authored May 13, 2024
2 parents 387c56a + bf99640 commit c9d8184
Showing 50 changed files with 2,536 additions and 1,552 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/master_only.yml
@@ -142,7 +142,7 @@ jobs:
SNOWFLAKE_CI_WAREHOUSE: ${{ secrets.SNOWFLAKE_CI_WAREHOUSE }}
run: pytest --verbose --color=yes sdk/python/tests --integration --benchmark --benchmark-autosave --benchmark-save-data --durations=5
- name: Upload Benchmark Artifact to S3
run: aws s3 cp --recursive .benchmarks s3://feast-ci-pytest-benchmarks
run: aws s3 cp --recursive .benchmarks s3://feast-ci-pytest-benchmark

build-all-docker-images:
if: github.repository == 'feast-dev/feast'
38 changes: 28 additions & 10 deletions Makefile
@@ -47,7 +47,7 @@ install-python-ci-dependencies-uv:
python setup.py build_python_protos --inplace

lock-python-ci-dependencies:
python -m piptools compile -U --extra ci --output-file sdk/python/requirements/py$(PYTHON)-ci-requirements.txt
uv pip compile --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py$(PYTHON)-ci-requirements.txt

package-protos:
cp -r ${ROOT_DIR}/protos ${ROOT_DIR}/sdk/python/feast/protos
@@ -60,13 +60,15 @@ install-python:
python setup.py develop

lock-python-dependencies:
python -m piptools compile -U --output-file sdk/python/requirements/py$(PYTHON)-requirements.txt
uv pip compile --system --no-strip-extras setup.py --output-file sdk/python/requirements/py$(PYTHON)-requirements.txt

lock-python-dependencies-all:
pixi run --environment py39 --manifest-path infra/scripts/pixi/pixi.toml "python -m piptools compile -U --output-file sdk/python/requirements/py3.9-requirements.txt"
pixi run --environment py39 --manifest-path infra/scripts/pixi/pixi.toml "python -m piptools compile -U --extra ci --output-file sdk/python/requirements/py3.9-ci-requirements.txt"
pixi run --environment py310 --manifest-path infra/scripts/pixi/pixi.toml "python -m piptools compile -U --output-file sdk/python/requirements/py3.10-requirements.txt"
pixi run --environment py310 --manifest-path infra/scripts/pixi/pixi.toml "python -m piptools compile -U --extra ci --output-file sdk/python/requirements/py3.10-ci-requirements.txt"
pixi run --environment py39 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.9-requirements.txt"
pixi run --environment py39 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.9-ci-requirements.txt"
pixi run --environment py310 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.10-requirements.txt"
pixi run --environment py310 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.10-ci-requirements.txt"
pixi run --environment py311 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile --system --no-strip-extras setup.py --output-file sdk/python/requirements/py3.11-requirements.txt"
pixi run --environment py311 --manifest-path infra/scripts/pixi/pixi.toml "uv pip compile --system --no-strip-extras setup.py --extra ci --output-file sdk/python/requirements/py3.11-ci-requirements.txt"

benchmark-python:
FEAST_USAGE=False IS_TEST=True python -m pytest --integration --benchmark --benchmark-autosave --benchmark-save-data sdk/python/tests
@@ -183,7 +185,7 @@ test-python-universal-athena:
ATHENA_DATA_SOURCE=AwsDataCatalog \
ATHENA_DATABASE=default \
ATHENA_WORKGROUP=primary \
ATHENA_S3_BUCKET_NAME=feast-integration-tests \
ATHENA_S3_BUCKET_NAME=feast-int-bucket \
python -m pytest -n 8 --integration \
-k "not test_go_feature_server and \
not test_logged_features_validation and \
@@ -319,6 +321,25 @@ test-python-universal-cassandra-no-cloud-providers:
not test_snowflake" \
sdk/python/tests

test-python-universal-elasticsearch-online:
PYTHONPATH='.' \
FULL_REPO_CONFIGS_MODULE=sdk.python.feast.infra.online_stores.contrib.elasticsearch_repo_configuration \
PYTEST_PLUGINS=sdk.python.tests.integration.feature_repos.universal.online_store.elasticsearch \
python -m pytest -n 8 --integration \
-k "not test_universal_cli and \
not test_go_feature_server and \
not test_feature_logging and \
not test_reorder_columns and \
not test_logged_features_validation and \
not test_lambda_materialization_consistency and \
not test_offline_write and \
not test_push_features_to_offline_store and \
not gcs_registry and \
not s3_registry and \
not test_universal_types and \
not test_snowflake" \
sdk/python/tests

test-python-universal:
python -m pytest -n 8 --integration sdk/python/tests

@@ -370,9 +391,6 @@ kill-trino-locally:
install-protoc-dependencies:
pip install --ignore-installed protobuf==4.24.0 "grpcio-tools>=1.56.2,<2" mypy-protobuf==3.1.0

install-feast-ci-locally:
pip install -e ".[ci]"

# Docker

build-docker: build-feature-server-python-aws-docker build-feature-transformation-server-docker build-feature-server-java-docker
1 change: 1 addition & 0 deletions docs/SUMMARY.md
@@ -81,6 +81,7 @@
* [Snowflake](reference/offline-stores/snowflake.md)
* [BigQuery](reference/offline-stores/bigquery.md)
* [Redshift](reference/offline-stores/redshift.md)
* [DuckDB](reference/offline-stores/duckdb.md)
* [Spark (contrib)](reference/offline-stores/spark.md)
* [PostgreSQL (contrib)](reference/offline-stores/postgres.md)
* [Trino (contrib)](reference/offline-stores/trino.md)
47 changes: 24 additions & 23 deletions docs/project/development-guide.md
@@ -123,43 +123,44 @@ Note that this means if you are midway through working through a PR and rebase,
Setting up your development environment for Feast Python SDK / CLI:
1. Ensure that you have Docker installed in your environment. Docker is used to provision service dependencies during testing, and build images for feature servers and other components.
- Please note that we use [Docker with BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/).
2. Ensure that you have `make`, Python (3.8 and above) with `pip`, installed.
- _Alternatively_ - To use [podman](https://podman.io/) on a Fedora or RHEL machine, follow this [guide](https://github.com/feast-dev/feast/issues/4190)
2. Ensure that you have `make` and Python (3.9 or above) installed.
3. _Recommended:_ Create a virtual environment to isolate development dependencies to be installed
```sh
# create & activate a virtual environment
python -m venv venv/
source venv/bin/activate
```
4. Upgrade `pip` if outdated
```sh
pip install --upgrade pip
```
5. (M1 Mac only): Follow the [dev guide](https://github.com/feast-dev/feast/issues/2105)
6. Install pip-tools
```sh
pip install pip-tools
```
7. (Optional): Install Node & Yarn. Then run the following to build Feast UI artifacts for use in `feast ui`
4. (M1 Mac only): Follow the [dev guide](https://github.com/feast-dev/feast/issues/2105)
5. Install uv
It is recommended to use uv for managing Python dependencies.
```sh
curl -LsSf https://astral.sh/uv/install.sh | sh
```
or
```sh
pip install uv
```
6. (Optional): Install Node & Yarn. Then run the following to build Feast UI artifacts for use in `feast ui`
```
make build-ui
```
8. Install mysql (needed for ci dependencies)
7. (Optional): Install pixi
Pixi is needed to run step 8 for all Python versions at once.
```sh
brew install mysql
curl -fsSL https://pixi.sh/install.sh | bash
```
9. Install development dependencies for Feast Python SDK / CLI
8. (Optional): Recompile Python lock files
If you make changes to requirements or simply want to update the Python lock files to reflect the latest versions.
```sh
pip install -e ".[dev]"
```

This will allow the installed feast version to automatically reflect changes to your local development version of Feast without needing to reinstall every time you make code changes.

10. Compile the protobufs
make lock-python-dependencies-all
```
9. Install development dependencies for Feast Python SDK / CLI
This will install package versions from the lock file, install an editable version of Feast, and compile the protobufs.
```sh
make compile-protos-python
make install-python-ci-dependencies-uv
```

11. Spin up Docker Image
10. Spin up Docker Image
```sh
docker build -t docker-whale -f ./sdk/python/feast/infra/feature_servers/multicloud/Dockerfile .
```
111 changes: 111 additions & 0 deletions docs/reference/alpha-vector-database.md
@@ -0,0 +1,111 @@
# [Alpha] Vector Database
**Warning**: This is an _experimental_ feature. To our knowledge, this is stable, but there are still rough edges in the experience. Contributions are welcome!

## Overview
A vector database allows users to store and retrieve embeddings. Feast provides general APIs to store and retrieve them.

## Integration
Below are the supported vector databases and the features each implements:

| Vector Database | Retrieval | Indexing |
|-----------------|-----------|----------|
| Pgvector | [x] | [ ] |
| Elasticsearch | [x] | [x] |
| Milvus | [ ] | [ ] |
| Faiss | [ ] | [ ] |


## Example

See [https://github.com/feast-dev/feast-workshop/blob/rag/module_4_rag](https://github.com/feast-dev/feast-workshop/blob/rag/module_4_rag) for an example of how to use a vector database.

### **Prepare offline embedding dataset**
Run the following commands to prepare the embedding dataset:
```shell
python pull_states.py
python batch_score_documents.py
```
The output will be stored in `data/city_wikipedia_summaries.csv`.

### **Initialize Feast feature store and materialize the data to the online store**
Use the `feature_store.yaml` file to initialize the feature store. This will use local files as the offline store and Pgvector as the online store.

```yaml
project: feast_demo_local
provider: local
registry:
  registry_type: sql
  path: postgresql://@localhost:5432/feast
online_store:
  type: postgres
  pgvector_enabled: true
  vector_len: 384
  host: 127.0.0.1
  port: 5432
  database: feast
  user: ""
  password: ""
offline_store:
  type: file
entity_key_serialization_version: 2
```
Run the following command in a terminal to apply the feature store configuration:
```shell
feast apply
```

Note that when you run `feast apply`, you register the following Feature View, which we will use for retrieval later:

```python
city_embeddings_feature_view = FeatureView(
    name="city_embeddings",
    entities=[item],
    schema=[
        Field(name="Embeddings", dtype=Array(Float32)),
    ],
    source=source,
    ttl=timedelta(hours=2),
)
```
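
The `item` entity and `source` referenced above come from the workshop repository; a minimal sketch of what they might look like is below (the entity name, join key, and file path are assumptions, not taken from that repository):

```python
from feast import Entity, FileSource

# Hypothetical definitions for illustration only; adjust names and paths
# to match your own feature repository.
item = Entity(name="item_id", join_keys=["item_id"])

source = FileSource(
    path="data/city_wikipedia_summaries_with_embeddings.parquet",  # assumed path
    timestamp_field="event_timestamp",
)
```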

Then run the following command in the terminal to materialize the data to the online store:

```shell
CURRENT_TIME=$(date -u +"%Y-%m-%dT%H:%M:%S")
feast materialize-incremental $CURRENT_TIME
```

### **Prepare a query embedding**
```python
from batch_score_documents import run_model, TOKENIZER, MODEL
from transformers import AutoTokenizer, AutoModel

question = "the most populous city in the U.S. state of Texas?"

tokenizer = AutoTokenizer.from_pretrained(TOKENIZER)
model = AutoModel.from_pretrained(MODEL)
query_embedding = run_model(question, tokenizer, model)
query = query_embedding.detach().cpu().numpy().tolist()[0]
```

### **Retrieve the top 5 similar documents**
First, create a feature store instance, then use the `retrieve_online_documents` API to retrieve the top 5 documents most similar to the specified query.

```python
from feast import FeatureStore

store = FeatureStore(repo_path=".")
features = store.retrieve_online_documents(
    feature="city_embeddings:Embeddings",
    query=query,
    top_k=5
).to_dict()

def print_online_features(features):
    for key, value in sorted(features.items()):
        print(key, " : ", value)

print_online_features(features)
```
6 changes: 1 addition & 5 deletions docs/reference/data-sources/file.md
Expand Up @@ -3,11 +3,7 @@
## Description

File data sources are files on disk or on S3.
Currently only Parquet files are supported.

{% hint style="warning" %}
FileSource is meant for development purposes only and is not optimized for production use.
{% endhint %}
Currently only Parquet and Delta formats are supported.

## Example
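
As a sketch of what a file data source definition can look like (the variable name, path, and timestamp field below are illustrative assumptions):

```python
from feast import FileSource
from feast.data_format import ParquetFormat

driver_stats_source = FileSource(
    file_format=ParquetFormat(),
    path="data/driver_stats.parquet",  # assumed local Parquet file
    timestamp_field="event_timestamp",
)
```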

4 changes: 2 additions & 2 deletions docs/reference/data-sources/overview.md
@@ -2,8 +2,8 @@

## Functionality

In Feast, each batch data source is associated with a corresponding offline store.
For example, a `SnowflakeSource` can only be processed by the Snowflake offline store.
In Feast, each batch data source is associated with one or more corresponding offline stores.
For example, a `SnowflakeSource` can only be processed by the Snowflake offline store, while a `FileSource` can be processed by both File and DuckDB offline stores.
Otherwise, the primary difference between batch data sources is the set of supported types.
Feast has an internal type system, and aims to support eight primitive types (`bytes`, `string`, `int32`, `int64`, `float32`, `float64`, `bool`, and `timestamp`) along with the corresponding array types.
However, not every batch data source supports all of these types.
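
For illustration, a feature view schema can mix several of these types; the field names below are assumptions, not taken from the docs:

```python
from feast import Field
from feast.types import Array, Float32, Int64, String

# Illustrative schema fields using a few of Feast's primitive and array types.
schema = [
    Field(name="driver_id", dtype=Int64),
    Field(name="conv_rate", dtype=Float32),
    Field(name="city", dtype=String),
    Field(name="recent_distances", dtype=Array(Float32)),
]
```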
4 changes: 4 additions & 0 deletions docs/reference/offline-stores/README.md
@@ -22,6 +22,10 @@ Please see [Offline Store](../../getting-started/architecture-and-components/off
[redshift.md](redshift.md)
{% endcontent-ref %}

{% content-ref url="duckdb.md" %}
[duckdb.md](duckdb.md)
{% endcontent-ref %}

{% content-ref url="spark.md" %}
[spark.md](spark.md)
{% endcontent-ref %}
56 changes: 56 additions & 0 deletions docs/reference/offline-stores/duckdb.md
@@ -0,0 +1,56 @@
# DuckDB offline store

## Description

The DuckDB offline store provides support for reading [FileSources](../data-sources/file.md). It can read both Parquet and Delta formats. The DuckDB offline store uses [ibis](https://ibis-project.org/) under the hood to translate offline store operations into DuckDB queries.

* Entity dataframes can be provided as Pandas dataframes.

## Getting started
In order to use this offline store, you'll need to run `pip install 'feast[duckdb]'`.

## Example

{% code title="feature_store.yaml" %}
```yaml
project: my_project
registry: data/registry.db
provider: local
offline_store:
type: duckdb
online_store:
path: data/online_store.db
```
{% endcode %}
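
With this configuration, historical retrieval goes through the standard Feast API; a minimal sketch is shown below (the feature view, feature, and entity names are assumptions):

```python
from datetime import datetime

import pandas as pd
from feast import FeatureStore

# Entity dataframe for the point-in-time join; IDs and timestamps are made up.
entity_df = pd.DataFrame(
    {
        "driver_id": [1001, 1002],
        "event_timestamp": [datetime(2024, 5, 1), datetime(2024, 5, 2)],
    }
)

store = FeatureStore(repo_path=".")
training_df = store.get_historical_features(
    entity_df=entity_df,
    features=["driver_hourly_stats:conv_rate"],
).to_df()
print(training_df.head())
```
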
## Functionality Matrix
The set of functionality supported by offline stores is described in detail [here](overview.md#functionality).
Below is a matrix indicating which functionality is supported by the DuckDB offline store.

| | DuckDB |
| :----------------------------------------------------------------- | :---- |
| `get_historical_features` (point-in-time correct join) | yes |
| `pull_latest_from_table_or_query` (retrieve latest feature values) | yes |
| `pull_all_from_table_or_query` (retrieve a saved dataset) | yes |
| `offline_write_batch` (persist dataframes to offline store) | yes |
| `write_logged_features` (persist logged features to offline store) | yes |

Below is a matrix indicating which functionality is supported by `IbisRetrievalJob`.

| | DuckDB |
| ----------------------------------------------------- | ----- |
| export to dataframe | yes |
| export to arrow table | yes |
| export to arrow batches | no |
| export to SQL | no |
| export to data lake (S3, GCS, etc.) | no |
| export to data warehouse | no |
| export as Spark dataframe | no |
| local execution of Python-based on-demand transforms | yes |
| remote execution of Python-based on-demand transforms | no |
| persist results in the offline store | yes |
| preview the query plan before execution | no |
| read partitioned data | yes |

To compare this set of functionality against other offline stores, please see the full [functionality matrix](overview.md#functionality-matrix).
28 changes: 14 additions & 14 deletions docs/reference/offline-stores/overview.md
@@ -42,17 +42,17 @@ Below is a matrix indicating which offline stores support which methods.

Below is a matrix indicating which `RetrievalJob`s support what functionality.

| | File | BigQuery | Snowflake | Redshift | Postgres | Spark | Trino |
| --------------------------------- | --- | --- | --- | --- | --- | --- | --- |
| export to dataframe | yes | yes | yes | yes | yes | yes | yes |
| export to arrow table | yes | yes | yes | yes | yes | yes | yes |
| export to arrow batches | no | no | no | yes | no | no | no |
| export to SQL | no | yes | yes | yes | yes | no | yes |
| export to data lake (S3, GCS, etc.) | no | no | yes | no | yes | no | no |
| export to data warehouse | no | yes | yes | yes | yes | no | no |
| export as Spark dataframe | no | no | yes | no | no | yes | no |
| local execution of Python-based on-demand transforms | yes | yes | yes | yes | yes | no | yes |
| remote execution of Python-based on-demand transforms | no | no | no | no | no | no | no |
| persist results in the offline store | yes | yes | yes | yes | yes | yes | no |
| preview the query plan before execution | yes | yes | yes | yes | yes | yes | yes |
| read partitioned data | yes | yes | yes | yes | yes | yes | yes |
| | File | BigQuery | Snowflake | Redshift | Postgres | Spark | Trino | DuckDB |
| --------------------------------- | --- | --- | --- | --- | --- | --- | --- | --- |
| export to dataframe | yes | yes | yes | yes | yes | yes | yes | yes |
| export to arrow table | yes | yes | yes | yes | yes | yes | yes | yes |
| export to arrow batches | no | no | no | yes | no | no | no | no |
| export to SQL | no | yes | yes | yes | yes | no | yes | no |
| export to data lake (S3, GCS, etc.) | no | no | yes | no | yes | no | no | no |
| export to data warehouse | no | yes | yes | yes | yes | no | no | no |
| export as Spark dataframe | no | no | yes | no | no | yes | no | no |
| local execution of Python-based on-demand transforms | yes | yes | yes | yes | yes | no | yes | yes |
| remote execution of Python-based on-demand transforms | no | no | no | no | no | no | no | no |
| persist results in the offline store | yes | yes | yes | yes | yes | yes | no | yes |
| preview the query plan before execution | yes | yes | yes | yes | yes | yes | yes | no |
| read partitioned data | yes | yes | yes | yes | yes | yes | yes | yes |