diff --git a/performance_optimization_stack/.gitignore b/performance_optimization_stack/.gitignore new file mode 100644 index 0000000..bc83b30 --- /dev/null +++ b/performance_optimization_stack/.gitignore @@ -0,0 +1,163 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ + +# Desktop Services Store +.DS_Store \ No newline at end of file diff --git a/performance_optimization_stack/README.md b/performance_optimization_stack/README.md new file mode 100644 index 0000000..5e5fb12 --- /dev/null +++ b/performance_optimization_stack/README.md @@ -0,0 +1,294 @@ +# Performance Analytics Stack with Airbyte, dbt, and Datadog + +Welcome to the "Performance Analytics Stack" repository! 🚀 This guide will walk you through setting up a data integration pipeline between Datadog, Airbyte, and dbt. By the end, you'll be extracting performance metrics and logs from Datadog using Airbyte, then transforming and analyzing the data with dbt to optimize system performance and resource utilization. + +Let's get you started! + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Architecture](#architecture) +- [Setting Up an Environment for Your Project](#1-setting-up-an-environment-for-your-project) +- [Setting Up BigQuery to Work with Airbyte and dbt](#2-setting-up-bigquery) +- [Setting Up Airbyte Connectors with Terraform](#3-setting-up-airbyte-connectors-with-terraform) +- [Setting Up the dbt Project](#4-setting-up-the-dbt-project) +- [Orchestrating with Dagster](#5-orchestrating-with-dagster) +- [Next Steps](#next-steps) + +## Prerequisites + +Before you embark on this integration, ensure you have the following set up and ready: + +1. **Python 3.10 or later**: If not installed, download and install it from [Python's official website](https://www.python.org/downloads/). + +2. **Docker and Docker Compose (Docker Desktop)**: Install [Docker](https://docs.docker.com/get-docker/) following the official documentation for your specific OS. + +3. **Airbyte OSS version**: Deploy the open-source version of Airbyte. Follow the installation instructions from the [Airbyte Documentation](https://docs.airbyte.com/quickstart/deploy-airbyte/). + +4. **Terraform**: Terraform will help you provision and manage the Airbyte resources. If you haven't installed it, follow the [official Terraform installation guide](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli). + +5. **Google Cloud account with BigQuery**: You will need to grant the necessary permissions to allow Airbyte and dbt to access the data in BigQuery. A step-by-step guide is provided [below](#2-setting-up-bigquery). + +## Architecture + +![img](assets/a1.png) + +## 1. Setting Up an Environment for Your Project + +Get the project up and running on your local machine by following these steps: + +1. **Clone the repository (Clone only this quickstart)**: + + ```bash + git clone --filter=blob:none --sparse https://github.com/airbytehq/quickstarts.git + ``` + + ```bash + cd quickstarts + ``` + + ```bash + git sparse-checkout add performance_optimization_stack + ``` + +2. **Navigate to the directory**: + + ```bash + cd performance_optimization_stack + ``` + +3. **Set Up a Virtual Environment**: + + - For Mac: + ```bash + python3 -m venv venv + source venv/bin/activate + ``` + - For Windows: + ```bash + python -m venv venv + .\venv\Scripts\activate + ``` + +4. **Install Dependencies**: + ```bash + pip install -e ".[dev]" + ``` + +## 2. Setting Up BigQuery + +#### 1. **Create a Google Cloud Project** + +- If you have a Google Cloud project, you can skip this step. +- Go to the [Google Cloud Console](https://console.cloud.google.com/). +- Click on the "Select a project" dropdown at the top right and select "New Project". +- Give your project a name and follow the steps to create it. + +#### 2. 
**Create BigQuery Datasets** + +- In the Google Cloud Console, go to BigQuery. +- Create two new datasets: `raw_data` for Airbyte and `transformed_data` for dbt. + - If you pick different names, remember to update them in the code too. + +**How to create a dataset:** + +- In the left sidebar, click on your project name. +- Click “Create Dataset”. +- Enter the dataset ID (either `raw_data` or `transformed_data`). +- Click “Create Dataset”. + +#### 3. **Create Service Accounts and Assign Roles** + +- Go to “IAM & Admin” > “Service accounts” in the Google Cloud Console. +- Click “Create Service Account”. +- Name your service account (like `airbyte-service-account`). +- Assign the “BigQuery Data Editor” and “BigQuery Job User” roles to the service account. +- Follow the same steps to create another service account for dbt (like `dbt-service-account`) and assign the roles. + +**How to create a service account and assign roles:** + +- While creating the service account, under the “Grant this service account access to project” section, click the “Role” dropdown. +- Choose the “BigQuery Data Editor” and “BigQuery Job User” roles. +- Finish the creation process. + +#### 4. **Generate JSON Keys for Service Accounts** + +- For each service account, generate a JSON key so that it can authenticate. + +**How to generate a JSON key:** + +- Find the service account in the “Service accounts” list. +- Click on the service account name. +- In the “Keys” section, click “Add Key” and pick JSON. +- The key will download automatically. Keep it safe and don’t share it. +- Do this for the other service account too. + +If you prefer the command line, a `gcloud`/`bq` sketch of this BigQuery setup is included after section 5. + +## 3. Setting Up Airbyte Connectors with Terraform + +Airbyte allows you to create connectors for sources and destinations, facilitating data synchronization between various platforms. In this project, we're using Terraform to automate the creation of these connectors and the connections between them. Here's how you can set this up: + +1. **Navigate to the Airbyte Configuration Directory**: + + Change to the relevant directory containing the Terraform configuration for Airbyte: + + ```bash + cd infra/airbyte + ``` + +2. **Modify Configuration Files**: + + Within the `infra/airbyte` directory, you'll find three crucial Terraform files: + + - `provider.tf`: Defines the Airbyte provider. + - `main.tf`: Contains the main configuration for creating Airbyte resources. + - `variables.tf`: Holds various variables, including credentials. + + Adjust the configurations in these files to suit your project's needs. Specifically, provide credentials for your BigQuery connection. You can utilize the `variables.tf` file to manage these credentials; an environment-variable sketch for supplying them is included after section 5. + +3. **Initialize Terraform**: + + This step prepares Terraform to create the resources defined in your configuration files. + + ```bash + terraform init + ``` + +4. **Review the Plan**: + + Before applying any changes, review the plan to understand what Terraform will do. + + ```bash + terraform plan + ``` + +5. **Apply Configuration**: + + After reviewing and confirming the plan, apply the Terraform configurations to create the necessary Airbyte resources. + + ```bash + terraform apply + ``` + +6. **Verify in Airbyte UI**: + + Once Terraform completes its tasks, navigate to the [Airbyte UI](http://localhost:8000/). Here, you should see your source and destination connectors, as well as the connection between them, set up and ready to go. + +7. 
**Add Normalization to the Airbyte Connection**: + + At the time this quickstart was created, it was not possible to enable normalization via Terraform, so you need to do it manually. In the Airbyte UI, go to the "Connections" tab, select the "Datadog to BigQuery" connection, go to the "Transformation" tab, select "Normalized tabular data", and save your changes. + +## 4. Setting Up the dbt Project + +[dbt (data build tool)](https://www.getdbt.com/) allows you to transform your data by writing, documenting, and executing SQL workflows. Setting up the dbt project requires specifying connection details for your data platform, in this case, BigQuery. Here’s a step-by-step guide to help you set this up: + +1. **Navigate to the dbt Project Directory**: + + Change to the directory containing the dbt configuration: + + ```bash + cd dbt_project + ``` + +2. **Update Connection Details**: + + You'll find a `profiles.yml` file within the directory. This file contains configurations for dbt to connect with your data platform. Update this file with your BigQuery connection details. + +3. **Utilize Environment Variables (Optional but Recommended)**: + + To keep your credentials secure, you can leverage environment variables. An example is provided within the `profiles.yml` file, and a shell sketch is included after section 5. + +4. **Test the Connection**: + + Once you’ve updated the connection details, you can test the connection to your BigQuery instance using: + + ```bash + dbt debug + ``` + + If everything is set up correctly, this command should report a successful connection to BigQuery. + +5. **Run the Models**: + + If you would like to run the dbt models manually at this point, you can do so by executing: + + ```bash + dbt run + ``` + + You can verify the data has been transformed by going to BigQuery and checking the `transformed_data` dataset. + +## 5. Orchestrating with Dagster + +[Dagster](https://dagster.io/) is a modern data orchestrator designed to help you build, test, and monitor your data workflows. In this section, we'll walk you through setting up Dagster to oversee both the Airbyte and dbt workflows: + +1. **Navigate to the Orchestration Directory**: + + Switch to the directory containing the Dagster orchestration configurations: + + ```bash + cd orchestration + ``` + +2. **Set Environment Variables**: + + Dagster requires certain environment variables to be set to interact with other tools like dbt and Airbyte. Set the following variables: + + ```bash + export DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1 + export AIRBYTE_PASSWORD=password + ``` + + Note: The `AIRBYTE_PASSWORD` is set to `password` as a default for local Airbyte instances. If you've changed this during your Airbyte setup, ensure you use the appropriate password here. + +3. **Launch the Dagster UI**: + + With the environment variables in place, kick-start the Dagster UI: + + ```bash + dagster dev + ``` + +4. **Access Dagster in Your Browser**: + + Open your browser and navigate to: + + ``` + http://127.0.0.1:3000 + ``` + + Here, you should see assets for both Airbyte and dbt. To get an overview of how these assets interrelate, click on "view global asset lineage". This will give you a clear picture of the data lineage, visualizing how data flows between the tools. + +5. **Materialize Dagster Assets**: + In the Dagster UI, click on "Materialize all". This triggers the full pipeline: first the Airbyte sync extracts data from Datadog and loads it into BigQuery, and then dbt transforms the raw data, materializing the `staging` and `marts` models. 
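+ +If you prefer the command line to the Cloud Console, the BigQuery setup from section 2 can be scripted instead. The following is a minimal sketch, assuming the `gcloud` and `bq` CLIs are installed and authenticated; `your_project_id` and the key file name are placeholders: + + ```bash + # Create the datasets for Airbyte (raw) and dbt (transformed) + bq --location=US mk --dataset your_project_id:raw_data + bq --location=US mk --dataset your_project_id:transformed_data + + # Create the Airbyte service account and grant it the two BigQuery roles + gcloud iam service-accounts create airbyte-service-account --project=your_project_id + gcloud projects add-iam-policy-binding your_project_id --member="serviceAccount:airbyte-service-account@your_project_id.iam.gserviceaccount.com" --role="roles/bigquery.dataEditor" + gcloud projects add-iam-policy-binding your_project_id --member="serviceAccount:airbyte-service-account@your_project_id.iam.gserviceaccount.com" --role="roles/bigquery.jobUser" + + # Generate the JSON key the connector will authenticate with (keep it out of version control) + gcloud iam service-accounts keys create airbyte-key.json --iam-account=airbyte-service-account@your_project_id.iam.gserviceaccount.com + ``` + +Repeat the three `gcloud` commands for the dbt service account (e.g., `dbt-service-account`).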
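+ +Likewise, rather than hard-coding credentials for section 3, the variables declared in `infra/airbyte/variables.tf` can be supplied as `TF_VAR_` environment variables, which Terraform picks up automatically. A sketch with placeholder values: + + ```bash + export TF_VAR_workspace_id="your-airbyte-workspace-id" + export TF_VAR_project_id="your_project_id" + export TF_VAR_dataset_id="raw_data" + export TF_VAR_credentials_json="$(cat airbyte-key.json)" + export TF_VAR_datadog_api_key="your-datadog-api-key" + # api_token and email are declared in variables.tf but not referenced by this + # configuration; set them to empty strings so Terraform doesn't prompt for them + export TF_VAR_api_token="" + export TF_VAR_email="" + ``` 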
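+ +Finally, for section 4, the dbt profile reads the path to your service account key from an environment variable, and the dbt source definition reads your BigQuery project ID (see `dbt_project/profiles.yml` and `dbt_project/models/sources/datadog_source.yml`). A sketch with placeholder values: + + ```bash + export DBT_BIGQUERY_KEYFILE_PATH="/path/to/dbt-key.json" + export BIGQUERY_PROJECT_ID="your_project_id" + cd dbt_project && dbt debug + ``` 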
+ +## Next Steps + +Bravo on setting up the Performance Analytics Stack! 🥳 Here's what you can do next: + +### 1. **Dive into Performance Insights** + +- Explore the transformed data. What patterns can you recognize? How can these insights help optimize system performance? + +### 2. **Customize Your dbt Models** + +- Tailor your dbt models to better fit your performance analysis needs. Add more complex transformations or aggregations to uncover deeper insights. + +### 3. **Automate Alerting** + +- Integrate alerting systems to notify you of anomalies in performance metrics. This can help in proactive system optimization. + +### 4. **Enhance Data Quality and Testing** + +- Implement data quality tests in dbt to ensure the reliability and accuracy of your transformations. Use dbt's testing features to validate your data and catch issues early on. + +### 5. **Integrate More Data Sources** + +- Consider pulling data from other systems using Airbyte, enhancing the depth of your performance analytics. + +### 6. **Scale Up** + +- As your data grows, consider scaling up your infrastructure and dbt models to handle the increased load efficiently. + +### 7. **Contribute to the Community** + +- Share your learnings, new dbt models, or any other enhancements with the community. Your contributions can help others optimize their systems too! diff --git a/performance_optimization_stack/assets/a1.png b/performance_optimization_stack/assets/a1.png new file mode 100644 index 0000000..b9481b9 Binary files /dev/null and b/performance_optimization_stack/assets/a1.png differ diff --git a/performance_optimization_stack/dbt_project/.gitignore b/performance_optimization_stack/dbt_project/.gitignore new file mode 100644 index 0000000..eae4ddb --- /dev/null +++ b/performance_optimization_stack/dbt_project/.gitignore @@ -0,0 +1,6 @@ + +target/ +dbt_packages/ +logs/ + +.user.yml diff --git a/performance_optimization_stack/dbt_project/README.md b/performance_optimization_stack/dbt_project/README.md new file mode 100644 index 0000000..7874ac8 --- /dev/null +++ b/performance_optimization_stack/dbt_project/README.md @@ -0,0 +1,15 @@ +Welcome to your new dbt project! + +### Using the starter project + +Try running the following commands: +- dbt run +- dbt test + + +### Resources: +- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction) +- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers +- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support +- Find [dbt events](https://events.getdbt.com) near you +- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices diff --git a/performance_optimization_stack/dbt_project/analyses/.gitkeep b/performance_optimization_stack/dbt_project/analyses/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/performance_optimization_stack/dbt_project/dbt_project.yml b/performance_optimization_stack/dbt_project/dbt_project.yml new file mode 100644 index 0000000..125acd1 --- /dev/null +++ b/performance_optimization_stack/dbt_project/dbt_project.yml @@ -0,0 +1,39 @@ + +# Name your project! Project names should contain only lowercase characters +# and underscores. A good package name should reflect your organization's +# name or the intended use of these models +name: 'dbt_project' +version: '1.0.0' +config-version: 2 + +# This setting configures which "profile" dbt uses for this project. 
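+# It must match a profile name defined in profiles.yml.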
+profile: 'dbt_project' + +# These configurations specify where dbt should look for different types of files. +# The `model-paths` config, for example, states that models in this project can be +# found in the "models/" directory. You probably won't need to change these! +model-paths: ["models"] +analysis-paths: ["analyses"] +test-paths: ["tests"] +seed-paths: ["seeds"] +macro-paths: ["macros"] +snapshot-paths: ["snapshots"] + +clean-targets: # directories to be removed by `dbt clean` + - "target" + - "dbt_packages" + + +# Configuring models +# Full documentation: https://docs.getdbt.com/docs/configuring-models + +# In this config, we tell dbt to build all models in the staging/ and marts/ +# directories as views. These settings can be overridden in the individual model +# files using the `{{ config(...) }}` macro. +models: + dbt_project: + # Config indicated by + and applies to all files under models/staging/ and models/marts/ + staging: + +materialized: view + marts: + +materialized: view diff --git a/performance_optimization_stack/dbt_project/macros/.gitkeep b/performance_optimization_stack/dbt_project/macros/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/performance_optimization_stack/dbt_project/models/marts/daily_cpu_usage.sql b/performance_optimization_stack/dbt_project/models/marts/daily_cpu_usage.sql new file mode 100644 index 0000000..2e9c19e --- /dev/null +++ b/performance_optimization_stack/dbt_project/models/marts/daily_cpu_usage.sql @@ -0,0 +1,18 @@ +WITH daily_cpu AS ( + SELECT + DATE(metric_timestamp) AS metric_date, + AVG(value) AS avg_cpu_usage, + MAX(value) AS max_cpu_usage, + MIN(value) AS min_cpu_usage + FROM {{ ref('stg_datadog_metrics') }} + WHERE metric_name = 'cpu.usage' + GROUP BY 1 +) + +SELECT + metric_date, + ROUND(avg_cpu_usage, 2) AS avg_cpu, + ROUND(max_cpu_usage, 2) AS max_cpu, + ROUND(min_cpu_usage, 2) AS min_cpu +FROM daily_cpu +ORDER BY metric_date \ No newline at end of file diff --git a/performance_optimization_stack/dbt_project/models/marts/daily_error_logs.sql b/performance_optimization_stack/dbt_project/models/marts/daily_error_logs.sql new file mode 100644 index 0000000..3e4009e --- /dev/null +++ b/performance_optimization_stack/dbt_project/models/marts/daily_error_logs.sql @@ -0,0 +1,7 @@ +SELECT + DATE(log_timestamp) AS log_date, + COUNT(log_id) AS total_errors +FROM {{ ref('stg_datadog_logs') }} +WHERE log_level_normalized = 'ERROR' +GROUP BY 1 +ORDER BY 1 DESC diff --git a/performance_optimization_stack/dbt_project/models/marts/hourly_user_activity.sql b/performance_optimization_stack/dbt_project/models/marts/hourly_user_activity.sql new file mode 100644 index 0000000..770cdbc --- /dev/null +++ b/performance_optimization_stack/dbt_project/models/marts/hourly_user_activity.sql @@ -0,0 +1,8 @@ +SELECT + TIMESTAMP_TRUNC(log_timestamp, HOUR) AS log_hour, + source AS user_id, + COUNT(log_id) AS activity_count +FROM {{ ref('stg_datadog_logs') }} +WHERE service = 'user_activity' +GROUP BY 1, 2 +ORDER BY 1 DESC, 2 diff --git a/performance_optimization_stack/dbt_project/models/sources/datadog_source.yml b/performance_optimization_stack/dbt_project/models/sources/datadog_source.yml new file mode 100644 index 0000000..c20777a --- /dev/null +++ b/performance_optimization_stack/dbt_project/models/sources/datadog_source.yml @@ -0,0 +1,33 @@ +version: 2 + +sources: + - name: datadog + # Use your BigQuery project ID + database: "{{ env_var('BIGQUERY_PROJECT_ID', '') }}" + # Use your BigQuery dataset name + schema: datadog_airbyte + tables: + - name: logs + 
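# Raw tables created by the Airbyte sync; the staging models cast and rename these columns. + 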
columns: + - name: log_id + description: "Unique identifier for each log entry" + - name: timestamp + description: "Timestamp when the log was created" + - name: log_level + description: "Level of the log (e.g., ERROR, INFO)" + - name: message + description: "Log message details" + - name: service + description: "Associated service for the log" + - name: source + description: "Source or application that created the log" + - name: metrics + columns: + - name: metric_id + description: "Unique identifier for each metric entry" + - name: timestamp + description: "Timestamp when the metric was recorded" + - name: metric_name + description: "Name of the metric (e.g., cpu.usage)" + - name: value + description: "Value of the recorded metric" diff --git a/performance_optimization_stack/dbt_project/models/staging/stg_datadog_logs.sql b/performance_optimization_stack/dbt_project/models/staging/stg_datadog_logs.sql new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/performance_optimization_stack/dbt_project/models/staging/stg_datadog_logs.sql @@ -0,0 +1,10 @@ +-- Minimal staging model for Datadog logs; the columns follow models/sources/datadog_source.yml +-- and the marts models (uppercasing log_level into log_level_normalized is an assumption) +SELECT + log_id, + TIMESTAMP(timestamp) AS log_timestamp, + UPPER(log_level) AS log_level_normalized, + message, + service, + source +FROM {{ source('datadog', 'logs') }} diff --git a/performance_optimization_stack/dbt_project/models/staging/stg_datadog_metrics.sql b/performance_optimization_stack/dbt_project/models/staging/stg_datadog_metrics.sql new file mode 100644 index 0000000..385c750 --- /dev/null +++ b/performance_optimization_stack/dbt_project/models/staging/stg_datadog_metrics.sql @@ -0,0 +1,6 @@ +SELECT + metric_id, + TIMESTAMP(timestamp) AS metric_timestamp, + metric_name, + value +FROM {{ source('datadog', 'metrics') }} \ No newline at end of file diff --git a/performance_optimization_stack/dbt_project/profiles.yml b/performance_optimization_stack/dbt_project/profiles.yml new file mode 100644 index 0000000..65cc52f --- /dev/null +++ b/performance_optimization_stack/dbt_project/profiles.yml @@ -0,0 +1,16 @@ +dbt_project: + outputs: + dev: + dataset: transformed_data + job_execution_timeout_seconds: 300 + job_retries: 1 + # Use an env variable to indicate your JSON key file path + keyfile: "{{ env_var('DBT_BIGQUERY_KEYFILE_PATH', '') }}" + location: US + method: service-account + priority: interactive + # Indicate your BigQuery project ID + project: your_project_id + threads: 1 + type: bigquery + target: dev \ No newline at end of file diff --git a/performance_optimization_stack/dbt_project/seeds/.gitkeep b/performance_optimization_stack/dbt_project/seeds/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/performance_optimization_stack/dbt_project/snapshots/.gitkeep b/performance_optimization_stack/dbt_project/snapshots/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/performance_optimization_stack/dbt_project/tests/.gitkeep b/performance_optimization_stack/dbt_project/tests/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/performance_optimization_stack/infra/.gitignore b/performance_optimization_stack/infra/.gitignore new file mode 100644 index 0000000..dc82526 --- /dev/null +++ b/performance_optimization_stack/infra/.gitignore @@ -0,0 +1,34 @@ +# Local .terraform directories +**/.terraform/* + +# .tfstate files +*.tfstate +*.tfstate.* + +# Crash log files +crash.log +crash.*.log + +# Exclude all .tfvars files, which are likely to contain sensitive data, such as +# passwords, private keys, and other secrets. These should not be part of version +# control as they are data points which are potentially sensitive and subject +# to change depending on the environment. 
+*.tfvars +*.tfvars.json + +# Ignore override files as they are usually used to override resources locally and so +# are not checked in +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Include override files you do wish to add to version control using negated pattern +# !example_override.tf + +# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan +# example: *tfplan* + +# Ignore CLI configuration files +.terraformrc +terraform.rc \ No newline at end of file diff --git a/performance_optimization_stack/infra/airbyte/.terraform.lock.hcl b/performance_optimization_stack/infra/airbyte/.terraform.lock.hcl new file mode 100644 index 0000000..e8d9f86 --- /dev/null +++ b/performance_optimization_stack/infra/airbyte/.terraform.lock.hcl @@ -0,0 +1,25 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/airbytehq/airbyte" { + version = "0.3.3" + constraints = "0.3.3" + hashes = [ + "h1:a6g5uWP/pt1/popVNlKwnTssWNfdYY4KVFPMisN/yvU=", + "zh:0efa470b34d9b912b47efe4469c51713bfc3c2413e52c17e1e903f2a3cddb2f6", + "zh:1bddd69fa2c2d4f3e239d60555446df9bc4ce0c0cabbe7e092fe1d44989ab004", + "zh:2e20540403a0010007b53456663fb037b24e30f6c8943f65da1bcf7fa4dfc8a6", + "zh:2f415369ad884e8b7115a5c5ff229d052f7af1fca27abbfc8ebef379ed11aec4", + "zh:46fd9a906f4b6461112dcc5a5aa01a3fcd7a19a72d4ad0b2e37790da37701fe1", + "zh:83503ebb77bb6d6941c42ba323cf22380d08a1506554a2dcc8ac54e74c0886a1", + "zh:890df766e9b839623b1f0437355032a3c006226a6c200cd911e15ee1a9014e9f", + "zh:8fd770eff726826d3a63b9e3733c5455b5cde004027b04ee3f75888eb8538c90", + "zh:b0fc890ed4f9b077bf70ed121cc3550e7a07d16e7798ad517623274aa62ad7b0", + "zh:c2a01612362da9b73cd5958f281e1aa7ff09af42182e463097d11ed78e778e72", + "zh:c64b2bb1887a0367d64ba3393d4b3a16c418cf5b1792e2e7aae7c0b5413eb334", + "zh:ce14ebbf0ed91913ec62655a511763dec62b5779de9a209bd6f1c336640cddc0", + "zh:e0662ca837eee10f7733ea9a501d995281f56bd9b410ae13ad03eb106011db14", + "zh:e103d480fc6066004bc98e9e04a141a1f55b918cc2912716beebcc6fc4c872fb", + "zh:e2507049098f0f1b21cb56870f4a5ef624bcf6d3959e5612eada1f8117341648", + ] +} diff --git a/performance_optimization_stack/infra/airbyte/main.tf b/performance_optimization_stack/infra/airbyte/main.tf new file mode 100644 index 0000000..bdb1a61 --- /dev/null +++ b/performance_optimization_stack/infra/airbyte/main.tf @@ -0,0 +1,53 @@ +// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs + + +// Sources +resource "airbyte_source_datadog" "my_source_datadog" { + configuration = { + source_type = "datadog" + credentials = { + source_datadog_authentication_api_key = { + api_key = var.datadog_api_key + } + } + start_date = "2020-10-15T00:00:00Z" + } + name = "Datadog Metrics and Logs" + workspace_id = var.workspace_id +} + +// Destinations +resource "airbyte_destination_bigquery" "bigquery" { + configuration = { + dataset_id = var.dataset_id + dataset_location = "US" + destination_type = "bigquery" + project_id = var.project_id + credentials_json = var.credentials_json + loading_method = { + destination_bigquery_loading_method_standard_inserts = { + method = "Standard" + } + } + } + name = "BigQuery" + workspace_id = var.workspace_id +} + +// Connections +resource "airbyte_connection" "datadog_bigquery" { + name = "Datadog to BigQuery" + source_id = airbyte_source_datadog.my_source_datadog.source_id + destination_id = airbyte_destination_bigquery.bigquery.destination_id 
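+ // Normalization can't be enabled through version 0.3.3 of the provider; enable it manually in the Airbyte UI (README section 3, step 7)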
+ configurations = { + streams = [ + { + name = "logs" + }, + { + name = "metrics" + }, + // Add other Datadog streams you are interested in + ] + } +} \ No newline at end of file diff --git a/performance_optimization_stack/infra/airbyte/provider.tf b/performance_optimization_stack/infra/airbyte/provider.tf new file mode 100644 index 0000000..98224e9 --- /dev/null +++ b/performance_optimization_stack/infra/airbyte/provider.tf @@ -0,0 +1,20 @@ +// Airbyte Terraform provider documentation: https://registry.terraform.io/providers/airbytehq/airbyte/latest/docs + +terraform { + required_providers { + airbyte = { + source = "airbytehq/airbyte" + version = "0.3.3" + } + } +} + +provider "airbyte" { + // If running locally (Airbyte OSS) with docker-compose using the airbyte-proxy, + // include the actual password/username you've set up (or use the defaults below) + username = "airbyte" + password = "password" + + // if running locally (Airbyte OSS), include the server url to the airbyte-api-server + server_url = "http://localhost:8006/v1" +} \ No newline at end of file diff --git a/performance_optimization_stack/infra/airbyte/variables.tf b/performance_optimization_stack/infra/airbyte/variables.tf new file mode 100644 index 0000000..6421def --- /dev/null +++ b/performance_optimization_stack/infra/airbyte/variables.tf @@ -0,0 +1,32 @@ +variable "api_token" { + type = string +} + +variable "email" { + type = string +} + +variable "workspace_id" { + type = string +} + +variable "dataset_id" { + type = string +} + +variable "project_id" { + type = string +} + +variable "credentials_json" { + type = string +} + +// Datadog API key used by the Datadog source in main.tf +variable "datadog_api_key" { + type = string +} + + + + diff --git a/performance_optimization_stack/orchestration/orchestration/__init__.py b/performance_optimization_stack/orchestration/orchestration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/performance_optimization_stack/orchestration/orchestration/assets.py b/performance_optimization_stack/orchestration/orchestration/assets.py new file mode 100644 index 0000000..3ebe97f --- /dev/null +++ b/performance_optimization_stack/orchestration/orchestration/assets.py @@ -0,0 +1,20 @@ +import os +from dagster import OpExecutionContext +from dagster_dbt import DbtCliResource, dbt_assets +from dagster_airbyte import AirbyteResource, load_assets_from_airbyte_instance + +from .constants import dbt_manifest_path + +@dbt_assets(manifest=dbt_manifest_path) +def dbt_project_dbt_assets(context: OpExecutionContext, dbt: DbtCliResource): + yield from dbt.cli(["build"], context=context).stream() + +airbyte_instance = AirbyteResource( + host="localhost", + port="8000", + # If using basic auth, include username and password: + username="airbyte", + password=os.getenv("AIRBYTE_PASSWORD") +) + +airbyte_assets = load_assets_from_airbyte_instance(airbyte_instance, key_prefix="datadog") \ No newline at end of file diff --git a/performance_optimization_stack/orchestration/orchestration/constants.py b/performance_optimization_stack/orchestration/orchestration/constants.py new file mode 100644 index 0000000..709888c --- /dev/null +++ b/performance_optimization_stack/orchestration/orchestration/constants.py @@ -0,0 +1,15 @@ +import os +from pathlib import Path + +from dagster_dbt import DbtCliResource + +dbt_project_dir = Path(__file__).joinpath("..", "..", "..", "dbt_project").resolve() +dbt = DbtCliResource(project_dir=os.fspath(dbt_project_dir)) + +# If DAGSTER_DBT_PARSE_PROJECT_ON_LOAD is set, a manifest will be created at runtime. 
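+# (README section 5 exports DAGSTER_DBT_PARSE_PROJECT_ON_LOAD=1, so this is the default path when following the guide.)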
+# Otherwise, we expect a manifest to be present in the project's target directory. +if os.getenv("DAGSTER_DBT_PARSE_PROJECT_ON_LOAD"): + dbt_parse_invocation = dbt.cli(["parse"], manifest={}).wait() + dbt_manifest_path = dbt_parse_invocation.target_path.joinpath("manifest.json") +else: + dbt_manifest_path = dbt_project_dir.joinpath("target", "manifest.json") \ No newline at end of file diff --git a/performance_optimization_stack/orchestration/orchestration/definitions.py b/performance_optimization_stack/orchestration/orchestration/definitions.py new file mode 100644 index 0000000..2d92dce --- /dev/null +++ b/performance_optimization_stack/orchestration/orchestration/definitions.py @@ -0,0 +1,16 @@ +import os + +from dagster import Definitions +from dagster_dbt import DbtCliResource + +from .assets import dbt_project_dbt_assets, airbyte_assets +from .constants import dbt_project_dir +from .schedules import schedules + +defs = Definitions( + assets=[dbt_project_dbt_assets, airbyte_assets], + schedules=schedules, + resources={ + "dbt": DbtCliResource(project_dir=os.fspath(dbt_project_dir)), + }, +) \ No newline at end of file diff --git a/performance_optimization_stack/orchestration/orchestration/schedules.py b/performance_optimization_stack/orchestration/orchestration/schedules.py new file mode 100644 index 0000000..9c0ac94 --- /dev/null +++ b/performance_optimization_stack/orchestration/orchestration/schedules.py @@ -0,0 +1,15 @@ +""" +To add a daily schedule that materializes your dbt assets, uncomment the following lines. +""" +from dagster_dbt import build_schedule_from_dbt_selection + +from .assets import dbt_project_dbt_assets + +schedules = [ +# build_schedule_from_dbt_selection( +# [dbt_project_dbt_assets], +# job_name="materialize_dbt_models", +# cron_schedule="0 0 * * *", +# dbt_select="fqn:*", +# ), +] \ No newline at end of file diff --git a/performance_optimization_stack/orchestration/pyproject.toml b/performance_optimization_stack/orchestration/pyproject.toml new file mode 100644 index 0000000..6e83f2f --- /dev/null +++ b/performance_optimization_stack/orchestration/pyproject.toml @@ -0,0 +1,7 @@ +[build-system] +requires = ["setuptools"] +build-backend = "setuptools.build_meta" + +[tool.dagster] +module_name = "orchestration.definitions" +code_location_name = "orchestration" \ No newline at end of file diff --git a/performance_optimization_stack/orchestration/setup.py b/performance_optimization_stack/orchestration/setup.py new file mode 100644 index 0000000..5b102a9 --- /dev/null +++ b/performance_optimization_stack/orchestration/setup.py @@ -0,0 +1,19 @@ +from setuptools import find_packages, setup + +setup( + name="orchestration", + version="0.0.1", + packages=find_packages(), + install_requires=[ + "dagster", + "dagster-cloud", + "dagster-dbt", + "dbt-core>=1.4.0", + "dbt-bigquery", + ], + extras_require={ + "dev": [ + "dagster-webserver", + ] + }, +) \ No newline at end of file diff --git a/performance_optimization_stack/setup.py b/performance_optimization_stack/setup.py new file mode 100644 index 0000000..2f6b135 --- /dev/null +++ b/performance_optimization_stack/setup.py @@ -0,0 +1,14 @@ +from setuptools import find_packages, setup + +setup( + name="airbyte-dbt-dagster", + packages=find_packages(), + install_requires=[ + "dbt-bigquery", + "dagster", + "dagster-cloud", + "dagster-dbt", + "dagster-airbyte", + ], + extras_require={"dev": ["dagit", "pytest"]}, +) \ No newline at end of file