# Workflow file for run: "Implement LLM response streaming (#859)" (PR #1127)

name: E2E Tests

# suppress warning raised by https://github.com/jupyter/jupyter_core/pull/292
env:
  JUPYTER_PLATFORM_DIRS: '1'

on:
  push:
    branches: main
  pull_request:
    branches: '*'

jobs:
  e2e-tests:
    name: Linux
    runs-on: ubuntu-latest
    env:
      # Cache Playwright browsers inside the workspace so the cache step below
      # can persist them between runs.
      PLAYWRIGHT_BROWSERS_PATH: ${{ github.workspace }}/pw-browsers
    steps:
      - name: Checkout
        # NOTE(review): actions/checkout@v2 runs on a deprecated Node runtime;
        # consider bumping to @v4.
        uses: actions/checkout@v2

      - name: Base Setup
        uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1

      - name: Install extension dependencies and build the extension
        run: ./scripts/install.sh

      - name: Install ui-tests dependencies
        working-directory: packages/jupyter-ai/ui-tests
        env:
          # Skip the browser download here; it is installed separately after
          # the browser cache has been restored.
          PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: '1'
        run: jlpm install

      - name: Set up browser cache
        # NOTE(review): actions/cache@v2 is deprecated; consider bumping to @v4.
        uses: actions/cache@v2
        with:
          path: |
            ${{ github.workspace }}/pw-browsers
          # Key on the ui-tests lockfile so the browser cache is invalidated
          # when the pinned Playwright version changes.
          key: ${{ runner.os }}-${{ hashFiles('packages/jupyter-ai/ui-tests/yarn.lock') }}

      - name: Install browser
        working-directory: packages/jupyter-ai/ui-tests
        run: jlpm install-chromium

      - name: Execute e2e tests
        working-directory: packages/jupyter-ai/ui-tests
        run: jlpm test

      - name: Upload Playwright Test report
        # Upload test artifacts even when the test step fails, so failures
        # can be inspected from the Playwright report.
        if: always()
        # NOTE(review): actions/upload-artifact@v2 is deprecated; consider
        # bumping to @v4.
        uses: actions/upload-artifact@v2
        with:
          name: jupyter-ai-playwright-tests-linux
          path: |
            packages/jupyter-ai/ui-tests/test-results
            packages/jupyter-ai/ui-tests/playwright-report