# Primary CI workflow.
#
# Only runs against packages that have changed files.
#
# Runs:
# - Linting (_lint.yml)
# - Unit Tests (_test.yml)
# - Pydantic compatibility tests (_test_pydantic.yml)
# - Integration test compilation checks (_compile_integration_test.yml)
# - Extended test suites that require additional dependencies
# - Codspeed benchmarks (if not labeled 'codspeed-ignore')
#
# Reports status to GitHub checks and PR status.
name: "🔧 CI"

on:
  push:
    branches: [master]
  pull_request:
  merge_group:

# Optimizes CI performance by canceling redundant workflow runs.
# If another push to the same PR or branch happens while this workflow is still
# running, cancel the earlier run in favor of the next run.
#
# There's no point in testing an outdated version of the code. GitHub only allows
# a limited number of job runners to be active at the same time, so it's better to
# cancel pointless jobs early so that more useful jobs can run sooner.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

permissions:
  contents: read

env:
  UV_FROZEN: "true"
  UV_NO_SYNC: "true"

jobs:
  # This job analyzes which files changed and creates a dynamic test matrix
  # to only run tests/lints for the affected packages, improving CI efficiency.
  build:
    name: "Detect Changes & Set Matrix"
    runs-on: ubuntu-latest
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'ci-ignore') }}
    steps:
      - name: "📋 Checkout Code"
        uses: actions/checkout@v5
      - name: "🐍 Setup Python 3.11"
        uses: actions/setup-python@v6
        with:
          python-version: "3.11"
      - name: "📂 Get Changed Files"
        id: files
        uses: Ana06/get-changed-files@v2.3.0
      - name: "🔍 Analyze Changed Files & Generate Build Matrix"
        id: set-matrix
        run: |
          python -m pip install packaging requests
          python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT
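    # Illustrative note: check_diff.py is expected to emit one `key=value` line per
    # output below to $GITHUB_OUTPUT, where each value is a JSON array of matrix
    # entries that downstream jobs read back via fromJson(). The entries here are
    # hypothetical examples only; real values depend on which files changed:
    #   lint=[{"working-directory": "libs/core", "python-version": "3.11"}]
    #   test-pydantic=[{"working-directory": "libs/core", "pydantic-version": "2"}]
    # An empty array ("[]") for a key causes the corresponding downstream job to be skipped.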
    outputs:
      lint: ${{ steps.set-matrix.outputs.lint }}
      test: ${{ steps.set-matrix.outputs.test }}
      extended-tests: ${{ steps.set-matrix.outputs.extended-tests }}
      compile-integration-tests: ${{ steps.set-matrix.outputs.compile-integration-tests }}
      dependencies: ${{ steps.set-matrix.outputs.dependencies }}
      test-pydantic: ${{ steps.set-matrix.outputs.test-pydantic }}
      codspeed: ${{ steps.set-matrix.outputs.codspeed }}

  # Run linting only on packages that have changed files
  lint:
    needs: [build]
    if: ${{ needs.build.outputs.lint != '[]' }}
    strategy:
      matrix:
        job-configs: ${{ fromJson(needs.build.outputs.lint) }}
      fail-fast: false
    uses: ./.github/workflows/_lint.yml
    with:
      working-directory: ${{ matrix.job-configs.working-directory }}
      python-version: ${{ matrix.job-configs.python-version }}
    secrets: inherit

  # Run unit tests only on packages that have changed files
  test:
    needs: [build]
    if: ${{ needs.build.outputs.test != '[]' }}
    strategy:
      matrix:
        job-configs: ${{ fromJson(needs.build.outputs.test) }}
      fail-fast: false
    uses: ./.github/workflows/_test.yml
    with:
      working-directory: ${{ matrix.job-configs.working-directory }}
      python-version: ${{ matrix.job-configs.python-version }}
    secrets: inherit

  # Test compatibility with different Pydantic versions for affected packages
  test-pydantic:
    needs: [build]
    if: ${{ needs.build.outputs.test-pydantic != '[]' }}
    strategy:
      matrix:
        job-configs: ${{ fromJson(needs.build.outputs.test-pydantic) }}
      fail-fast: false
    uses: ./.github/workflows/_test_pydantic.yml
    with:
      working-directory: ${{ matrix.job-configs.working-directory }}
      pydantic-version: ${{ matrix.job-configs.pydantic-version }}
    secrets: inherit

  # Verify integration tests compile without actually running them (faster feedback)
  compile-integration-tests:
    name: "Compile Integration Tests"
    needs: [build]
    if: ${{ needs.build.outputs.compile-integration-tests != '[]' }}
    strategy:
      matrix:
        job-configs: ${{ fromJson(needs.build.outputs.compile-integration-tests) }}
      fail-fast: false
    uses: ./.github/workflows/_compile_integration_test.yml
    with:
      working-directory: ${{ matrix.job-configs.working-directory }}
      python-version: ${{ matrix.job-configs.python-version }}
    secrets: inherit

  # Run extended test suites that require additional dependencies
  extended-tests:
    name: "Extended Tests"
    needs: [build]
    if: ${{ needs.build.outputs.extended-tests != '[]' }}
    strategy:
      matrix:
        # Note: uses a different output variable for extended test dirs
        job-configs: ${{ fromJson(needs.build.outputs.extended-tests) }}
      fail-fast: false
    runs-on: ubuntu-latest
    timeout-minutes: 20
    defaults:
      run:
        working-directory: ${{ matrix.job-configs.working-directory }}
    steps:
      - uses: actions/checkout@v5
      - name: "🐍 Set up Python ${{ matrix.job-configs.python-version }} + UV"
        uses: "./.github/actions/uv_setup"
        with:
          python-version: ${{ matrix.job-configs.python-version }}
          cache-suffix: extended-tests-${{ matrix.job-configs.working-directory }}
          working-directory: ${{ matrix.job-configs.working-directory }}
      - name: "📦 Install Dependencies & Run Extended Tests"
        shell: bash
        run: |
          echo "Running extended tests, installing dependencies with uv..."
          uv venv
          uv sync --group test
          VIRTUAL_ENV=.venv uv pip install -r extended_testing_deps.txt
          VIRTUAL_ENV=.venv make extended_tests
      - name: "🧹 Verify Clean Working Directory"
        shell: bash
        run: |
          set -eu
          STATUS="$(git status)"
          echo "$STATUS"
          # grep will exit non-zero if the target message isn't found,
          # and `set -e` above will cause the step to fail.
          echo "$STATUS" | grep 'nothing to commit, working tree clean'

  # Run CodSpeed benchmarks only on packages that have changed files
  codspeed:
    name: "⚡ CodSpeed Benchmarks"
    needs: [build]
    if: ${{ needs.build.outputs.codspeed != '[]' && !contains(github.event.pull_request.labels.*.name, 'codspeed-ignore') }}
    runs-on: ubuntu-latest
    strategy:
      matrix:
        job-configs: ${{ fromJson(needs.build.outputs.codspeed) }}
      fail-fast: false
    steps:
      - uses: actions/checkout@v5
      # We have to use 3.12 as 3.13 is not yet supported
      - name: "📦 Install UV Package Manager"
        uses: astral-sh/setup-uv@v7
        with:
          python-version: "3.12"
      - uses: actions/setup-python@v6
        with:
          python-version: "3.12"
      - name: "📦 Install Test Dependencies"
        run: uv sync --group test
        working-directory: ${{ matrix.job-configs.working-directory }}
      - name: "⚡ Run Benchmarks: ${{ matrix.job-configs.working-directory }}"
        uses: CodSpeedHQ/action@v4
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          ANTHROPIC_FILES_API_IMAGE_ID: ${{ secrets.ANTHROPIC_FILES_API_IMAGE_ID }}
          ANTHROPIC_FILES_API_PDF_ID: ${{ secrets.ANTHROPIC_FILES_API_PDF_ID }}
          AZURE_OPENAI_API_VERSION: ${{ secrets.AZURE_OPENAI_API_VERSION }}
          AZURE_OPENAI_API_BASE: ${{ secrets.AZURE_OPENAI_API_BASE }}
          AZURE_OPENAI_API_KEY: ${{ secrets.AZURE_OPENAI_API_KEY }}
          AZURE_OPENAI_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_CHAT_DEPLOYMENT_NAME }}
          AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LEGACY_CHAT_DEPLOYMENT_NAME }}
          AZURE_OPENAI_LLM_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_LLM_DEPLOYMENT_NAME }}
          AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME: ${{ secrets.AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT_NAME }}
          COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
          DEEPSEEK_API_KEY: ${{ secrets.DEEPSEEK_API_KEY }}
          EXA_API_KEY: ${{ secrets.EXA_API_KEY }}
          FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
          GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
          HUGGINGFACEHUB_API_TOKEN: ${{ secrets.HUGGINGFACEHUB_API_TOKEN }}
          MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
          NOMIC_API_KEY: ${{ secrets.NOMIC_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          PPLX_API_KEY: ${{ secrets.PPLX_API_KEY }}
          XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
        with:
          token: ${{ secrets.CODSPEED_TOKEN }}
          run: |
            cd ${{ matrix.job-configs.working-directory }}
            if [ "${{ matrix.job-configs.working-directory }}" = "libs/core" ]; then
              uv run --no-sync pytest ./tests/benchmarks --codspeed
            else
              uv run --no-sync pytest ./tests/ --codspeed
            fi
          mode: ${{ matrix.job-configs.working-directory == 'libs/core' && 'walltime' || 'instrumentation' }}

  # Final status check - ensures all required jobs passed before allowing merge
  ci_success:
    name: "✅ CI Success"
    needs:
      [
        build,
        lint,
        test,
        compile-integration-tests,
        extended-tests,
        test-pydantic,
        codspeed,
      ]
    if: |
      always()
    runs-on: ubuntu-latest
    env:
      JOBS_JSON: ${{ toJSON(needs) }}
      RESULTS_JSON: ${{ toJSON(needs.*.result) }}
      EXIT_CODE: ${{!contains(needs.*.result, 'failure') && !contains(needs.*.result, 'cancelled') && '0' || '1'}}
    steps:
      - name: "🎉 All Checks Passed"
        run: |
          echo $JOBS_JSON
          echo $RESULTS_JSON
          echo "Exiting with $EXIT_CODE"
          exit $EXIT_CODE