diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index f52b501..0000000 --- a/.claude/settings.local.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(python:*)", - "Bash(pytest:*)", - "Bash(pyright:*)", - "Bash(.venv/bin/python:*)", - "Bash(.venv/bin/pytest:*)", - "Bash(.venv/bin/pytest tests/*)", - "Bash(.venv/bin/pyright:*)", - "Bash(uv run python:*)", - "Bash(uv run pytest:*)", - "Bash(uv run pyright:*)", - "Bash(env BUBUS_LOG_LEVEL=DEBUG timeout 20 .venv/bin/python:*)", - "Bash(env BUBUS_LOG_LEVEL=DEBUG timeout 20 .venv/bin/pytest:*)", - "Bash(env BUBUS_LOG_LEVEL=DEBUG timeout 20 .venv/bin/pytest tests/*)", - "Bash(do)", - "Bash(done)", - "Bash(for)", - "Bash(echo:*)", - "Bash(grep:*)", - "Bash(rg:*)", - "Bash(.venv/bin/pytest tests/test_typed_event_results.py::test_builtin_type_casting -v -s --timeout=10)" - ], - "deny": [] - } -} diff --git a/.cursor/launch.json b/.cursor/launch.json deleted file mode 100644 index fec9446..0000000 --- a/.cursor/launch.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "version": "0.2.0", - "configurations": [ - { - "name": "Python Debugger: Current File", - "type": "debugpy", - "request": "launch", - "program": "${file}", - "justMyCode": false, - "env": { - "PYTHONPATH": "${workspaceFolder}" - }, - "console": "integratedTerminal" - }, - { - "name": "pytest: Debug Current File", - "type": "debugpy", - "request": "launch", - "module": "pytest", - "args": [ - "${file}", - "-v", - "--capture=no" - ], - "console": "integratedTerminal", - "justMyCode": false - } - ] -} diff --git a/.cursor/rules/bubus.mdc b/.cursor/rules/bubus.mdc deleted file mode 100644 index b6ecb6a..0000000 --- a/.cursor/rules/bubus.mdc +++ /dev/null @@ -1,5 +0,0 @@ ---- -description: -globs: -alwaysApply: true ---- diff --git a/.cursor/settings.json b/.cursor/settings.json deleted file mode 100644 index 718ae70..0000000 --- a/.cursor/settings.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - 
"python.analysis.typeCheckingMode": "strict", - "[python]": { - "editor.defaultFormatter": "charliermarsh.ruff", - "editor.formatOnSave": true, - "editor.codeActionsOnSave": { - "source.fixAll": "explicit", - "source.organizeImports": "explicit" - } - }, - "python.analysis.inlayHints.variableTypes": false -} diff --git a/.github/workflows/claude-code-review.yml b/.github/workflows/claude-code-review.yml deleted file mode 100644 index 30cd419..0000000 --- a/.github/workflows/claude-code-review.yml +++ /dev/null @@ -1,77 +0,0 @@ -name: Claude Code Review - -on: - pull_request: - types: [opened, synchronize] - # Optional: Only run on specific file changes - # paths: - # - "src/**/*.ts" - # - "src/**/*.tsx" - # - "src/**/*.js" - # - "src/**/*.jsx" - -jobs: - claude-review: - # Optional: Filter by PR author - # if: | - # github.event.pull_request.user.login == 'external-contributor' || - # github.event.pull_request.user.login == 'new-developer' || - # github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' - - runs-on: ubuntu-latest - permissions: - contents: read - pull-requests: read - issues: read - id-token: write - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 1 - - - name: Run Claude Code Review - id: claude-review - uses: anthropics/claude-code-action@beta - with: - anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} - - # Optional: Specify model (defaults to Claude Sonnet 4, uncomment for Claude Opus 4.1) - # model: "claude-opus-4-1-20250805" - - # Direct prompt for automated review (no @claude mention needed) - direct_prompt: | - Please review this pull request and provide feedback on: - - Code quality and best practices - - Potential bugs or issues - - Performance considerations - - Security concerns - - Test coverage - - Be constructive and helpful in your feedback. 
- - # Optional: Use sticky comments to make Claude reuse the same comment on subsequent pushes to the same PR - # use_sticky_comment: true - - # Optional: Customize review based on file types - # direct_prompt: | - # Review this PR focusing on: - # - For TypeScript files: Type safety and proper interface usage - # - For API endpoints: Security, input validation, and error handling - # - For React components: Performance, accessibility, and best practices - # - For tests: Coverage, edge cases, and test quality - - # Optional: Different prompts for different authors - # direct_prompt: | - # ${{ github.event.pull_request.author_association == 'FIRST_TIME_CONTRIBUTOR' && - # 'Welcome! Please review this PR from a first-time contributor. Be encouraging and provide detailed explanations for any suggestions.' || - # 'Please provide a thorough code review focusing on our coding standards and best practices.' }} - - # Optional: Add specific tools for running tests or linting - # allowed_tools: "Bash(npm run test),Bash(npm run lint),Bash(npm run typecheck)" - - # Optional: Skip review for certain conditions - # if: | - # !contains(github.event.pull_request.title, '[skip-review]') && - # !contains(github.event.pull_request.title, '[WIP]') diff --git a/.github/workflows/claude.yml b/.github/workflows/claude.yml index 72d2b29..e77eac9 100644 --- a/.github/workflows/claude.yml +++ b/.github/workflows/claude.yml @@ -5,8 +5,6 @@ on: types: [created] pull_request_review_comment: types: [created] - issues: - types: [opened, assigned] pull_request_review: types: [submitted] @@ -15,8 +13,7 @@ jobs: if: | (github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) || (github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) || - (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) || - (github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || 
contains(github.event.issue.title, '@claude'))) + (github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) runs-on: ubuntu-latest permissions: contents: read diff --git a/.github/workflows/pre-commit-hooks.yaml b/.github/workflows/pre-commit-hooks.yaml new file mode 100644 index 0000000..59c9861 --- /dev/null +++ b/.github/workflows/pre-commit-hooks.yaml @@ -0,0 +1,37 @@ +name: pre-commit-hooks + +on: + push: + branches: + - main + - stable + - 'releases/**' + tags: + - '*' + pull_request: + workflow_dispatch: + +jobs: + pre-commit-hooks: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + activate-environment: true + - run: uv sync --dev --all-extras + + - uses: pnpm/action-setup@v4 + with: + version: 10 + - uses: actions/setup-node@v4 + with: + node-version: 22 + cache: pnpm + cache-dependency-path: bubus-ts/pnpm-lock.yaml + - run: pnpm --dir bubus-ts install --frozen-lockfile + + - uses: j178/prek-action@v1 + - run: prek run --all-files diff --git a/.github/workflows/publish-npm.yml b/.github/workflows/publish-npm.yml new file mode 100644 index 0000000..384032b --- /dev/null +++ b/.github/workflows/publish-npm.yml @@ -0,0 +1,81 @@ +name: publish-npm + +on: + release: + types: [published] + workflow_dispatch: + inputs: + tag: + description: npm dist-tag to publish under + required: false + default: latest + +permissions: + contents: read + id-token: write + +jobs: + publish_to_npm: + runs-on: ubuntu-latest + defaults: + run: + working-directory: bubus-ts + steps: + - uses: actions/checkout@v4 + + - uses: pnpm/action-setup@v4 + with: + version: 10 + + - uses: actions/setup-node@v4 + with: + node-version: 22 + cache: pnpm + cache-dependency-path: bubus-ts/pnpm-lock.yaml + registry-url: https://registry.npmjs.org + + - name: Install bridge service binaries + run: | + sudo apt-get update + sudo apt-get install -y redis-server nats-server postgresql 
sqlite3 + + PG_INITDB_PATH="$(find /usr/lib/postgresql -type f -name initdb | head -n 1)" + PG_BINDIR="$(dirname "${PG_INITDB_PATH}")" + if [[ -z "${PG_BINDIR}" || ! -x "${PG_BINDIR}/initdb" || ! -x "${PG_BINDIR}/postgres" ]]; then + echo "Failed to locate PostgreSQL binaries (initdb/postgres)" > /dev/stderr + exit 1 + fi + echo "${PG_BINDIR}" >> "${GITHUB_PATH}" + export PATH="${PG_BINDIR}:${PATH}" + + redis-server --version + nats-server --version + initdb --version + postgres --version + sqlite3 --version + + - run: pnpm install --frozen-lockfile + - name: Verify bridge optional Node deps + run: | + node - <<'NODE' + const required = ['ioredis', 'nats', 'pg'] + for (const pkg of required) { + require.resolve(pkg) + } + console.log('optional bridge deps resolve OK') + NODE + - run: pnpm run typecheck + - run: pnpm test + - run: pnpm run build + + - name: Publish release tag + if: github.event_name == 'release' + run: pnpm publish --access public --no-git-checks + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} + + - name: Publish manual tag + if: github.event_name == 'workflow_dispatch' + run: pnpm publish --access public --tag "${{ inputs.tag }}" --no-git-checks + env: + NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index ba9ccaf..db9fe05 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -19,7 +19,35 @@ jobs: with: enable-cache: true activate-environment: true + - name: Install bridge service binaries + run: | + sudo apt-get update + sudo apt-get install -y redis-server nats-server postgresql sqlite3 + + PG_INITDB_PATH="$(find /usr/lib/postgresql -type f -name initdb | head -n 1)" + PG_BINDIR="$(dirname "${PG_INITDB_PATH}")" + if [[ -z "${PG_BINDIR}" || ! -x "${PG_BINDIR}/initdb" || ! 
-x "${PG_BINDIR}/postgres" ]]; then + echo "Failed to locate PostgreSQL binaries (initdb/postgres)" > /dev/stderr + exit 1 + fi + echo "${PG_BINDIR}" >> "${GITHUB_PATH}" + export PATH="${PG_BINDIR}:${PATH}" + + redis-server --version + nats-server --version + initdb --version + postgres --version + sqlite3 --version - run: uv sync + - name: Verify bridge optional Python deps + run: | + uv run python - <<'PY' + import importlib + modules = ['asyncpg', 'redis', 'nats'] + for module in modules: + importlib.import_module(module) + print('optional bridge deps import OK') + PY - run: uv run ruff check --no-fix --select PLE # quick check for syntax errors to avoid waiting time doing the rest of the build - run: uv build - run: uv run pytest tests # dont push the package to PyPI if the tests fail diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml deleted file mode 100644 index 7a1b98e..0000000 --- a/.github/workflows/test.yaml +++ /dev/null @@ -1,144 +0,0 @@ -name: test -permissions: - actions: read - contents: write - pull-requests: write # Allow writing comments on PRs - issues: write # Allow writing comments on issues - statuses: write # Allow writing statuses on PRs - discussions: write - -on: - push: - branches: - - main - - stable - - 'releases/**' - tags: - - '*' - pull_request: - workflow_dispatch: - -jobs: - find_tests: - runs-on: ubuntu-latest - outputs: - TEST_FILENAMES: ${{ steps.lsgrep.outputs.TEST_FILENAMES }} - # ["test_eventbus", ...] 
- steps: - - uses: actions/checkout@v4 - - id: lsgrep - run: | - TEST_FILENAMES="$(ls tests/test_*.py | sed 's|^tests/||' | sed 's|\.py$||' | jq -R -s -c 'split("\n")[:-1]')" - echo "TEST_FILENAMES=${TEST_FILENAMES}" >> "$GITHUB_OUTPUT" - echo "$TEST_FILENAMES" - # https://code.dblock.org/2021/09/03/generating-task-matrix-by-looping-over-repo-files-with-github-actions.html - - name: Check that at least one test file is found - run: | - if [ -z "${{ steps.lsgrep.outputs.TEST_FILENAMES }}" ]; then - echo "Failed to find any test_*.py files in tests/ folder!" > /dev/stderr - exit 1 - fi - - tests: - needs: find_tests - runs-on: ubuntu-latest - env: - IN_DOCKER: 'True' - strategy: - matrix: - test_filename: ${{ fromJson(needs.find_tests.outputs.TEST_FILENAMES || '["FAILED_TO_DISCOVER_TESTS"]') }} - # autodiscovers all the files in tests/test_*.py - # - test_eventbus - # ... and more - name: ${{ matrix.test_filename }} - steps: - - name: Check that the previous step managed to find some test files for us to run - run: | - if [[ "${{ matrix.test_filename }}" == "FAILED_TO_DISCOVER_TESTS" ]]; then - echo "Failed get list of test files in tests/test_*.py from find_tests job" > /dev/stderr - exit 1 - fi - - - uses: actions/checkout@v4 - - uses: astral-sh/setup-uv@v6 - with: - enable-cache: true - activate-environment: true - - - run: uv sync --dev --all-extras - - - run: pytest -x tests/${{ matrix.test_filename }}.py --cov=bubus --cov-report=term - - - name: Check coverage files - run: | - echo "Looking for coverage files..." 
- ls -la .coverage* 2>/dev/null || ls -la | grep coverage || echo "No coverage files found" - if [ -f .coverage ]; then - echo "Found .coverage file, size: $(stat -f%z .coverage 2>/dev/null || stat -c%s .coverage) bytes" - fi - - - name: Upload coverage data - uses: actions/upload-artifact@v4 - with: - name: coverage-${{ matrix.test_filename }} - path: .coverage - retention-days: 7 - include-hidden-files: true - if: always() - - coverage: - needs: tests - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - uses: astral-sh/setup-uv@v6 - with: - enable-cache: true - activate-environment: true - - - run: uv sync --dev --all-extras - - - name: Download all coverage data - uses: actions/download-artifact@v4 - with: - pattern: coverage-* - path: coverage-data/ - - - name: Combine coverage data - run: | - # Find all .coverage files and copy them with unique names - counter=1 - for coverage_file in $(find coverage-data -name ".coverage" -type f); do - cp "$coverage_file" ".coverage.$counter" - counter=$((counter + 1)) - done - - - name: Combine coverage & fail if it's <80% - run: | - uv tool install 'coverage[toml]' - - coverage combine - coverage html --skip-covered --skip-empty - - # Report and write to summary. - coverage report --format=markdown >> $GITHUB_STEP_SUMMARY - - # Report again and fail if under 80%. 
- coverage report --fail-under=80 - - - name: Upload combined coverage report - uses: actions/upload-artifact@v4 - with: - name: coverage-report - path: | - htmlcov/ - coverage.xml - retention-days: 7 - - - name: Upload coverage to Codecov (optional) - uses: codecov/codecov-action@v4 - with: - file: ./coverage.xml - fail_ci_if_error: false - continue-on-error: true diff --git a/.github/workflows/test_py.yaml b/.github/workflows/test_py.yaml new file mode 100644 index 0000000..7bea780 --- /dev/null +++ b/.github/workflows/test_py.yaml @@ -0,0 +1,323 @@ +name: test-py +permissions: + actions: read + contents: write + pull-requests: write # Allow writing comments on PRs + issues: write # Allow writing comments on issues + statuses: write # Allow writing statuses on PRs + discussions: write + +on: + push: + branches: + - main + - stable + - 'releases/**' + tags: + - '*' + pull_request: + workflow_dispatch: + +jobs: + lint_py: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + activate-environment: true + + - run: uv sync --dev --all-extras + - run: uv run ruff format --check + - run: uv run ruff check + - run: uv run pyright + + find_py_tests: + runs-on: ubuntu-latest + outputs: + PY_TASKS: ${{ steps.lsgrep.outputs.PY_TASKS }} + # [{ "kind": "test" | "example", "name": "test_eventbus" }, ...] + PY_TEST_TASKS: ${{ steps.lsgrep.outputs.PY_TEST_TASKS }} + # [{ "kind": "test", "name": "test_eventbus" }, ...] + steps: + - uses: actions/checkout@v4 + - id: lsgrep + run: | + PY_TEST_TASKS="$( + find tests -maxdepth 1 -type f -name 'test_*.py' ! 
-name 'test_eventbus_performance.py' \ + | sort \ + | sed 's|^tests/||' \ + | sed 's|\.py$||' \ + | jq -R -s -c 'split("\n")[:-1] | map({kind: "test", name: .})' + )" + PY_EXAMPLE_TASKS="$( + ( + if [[ -d examples ]]; then + find examples -maxdepth 1 -type f -name '*.py' | sort + fi + ) \ + | sed 's|^examples/||' \ + | sed 's|\.py$||' \ + | jq -R -s -c 'split("\n")[:-1] | map({kind: "example", name: .})' + )" + PY_TASKS="$(jq -cn --argjson tests "$PY_TEST_TASKS" --argjson examples "$PY_EXAMPLE_TASKS" '$tests + $examples')" + + echo "PY_TEST_TASKS=${PY_TEST_TASKS}" >> "$GITHUB_OUTPUT" + echo "PY_TASKS=${PY_TASKS}" >> "$GITHUB_OUTPUT" + echo "$PY_TASKS" + # https://code.dblock.org/2021/09/03/generating-task-matrix-by-looping-over-repo-files-with-github-actions.html + - name: Check that at least one test file is found + run: | + if [[ -z "${{ steps.lsgrep.outputs.PY_TEST_TASKS }}" || "${{ steps.lsgrep.outputs.PY_TEST_TASKS }}" == "[]" ]]; then + echo "Failed to find any test_*.py files in tests/ folder!" > /dev/stderr + exit 1 + fi + + tests: + needs: + - lint_py + - find_py_tests + runs-on: ubuntu-latest + env: + IN_DOCKER: 'True' + strategy: + matrix: + task: ${{ fromJson(needs.find_py_tests.outputs.PY_TASKS || '[{"kind":"error","name":"FAILED_TO_DISCOVER_TASKS"}]') }} + # autodiscovers files in tests/test_*.py and examples/*.py + # - { kind: "test", name: "test_eventbus" } + # - { kind: "example", name: "quickstart" } + # ... 
and more + name: ${{ matrix.task.kind }}-${{ matrix.task.name }} + steps: + - name: Check that the previous step managed to find some tasks for us to run + run: | + if [[ "${{ matrix.task.kind }}" == "error" ]]; then + echo "Failed get list of tasks in tests/test_*.py and examples/*.py from find_py_tests job" > /dev/stderr + exit 1 + fi + + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + activate-environment: true + + - uses: pnpm/action-setup@v4 + if: matrix.task.kind == 'test' && matrix.task.name == 'test_cross_runtime_roundtrip' + with: + version: 10 + + - uses: actions/setup-node@v4 + if: matrix.task.kind == 'test' && matrix.task.name == 'test_cross_runtime_roundtrip' + with: + node-version: 22 + cache: pnpm + cache-dependency-path: bubus-ts/pnpm-lock.yaml + + - name: Install bridge service binaries + if: matrix.task.kind == 'test' && matrix.task.name == 'test_bridges' + run: | + sudo apt-get update + sudo apt-get install -y redis-server nats-server postgresql sqlite3 + + PG_INITDB_PATH="$(find /usr/lib/postgresql -type f -name initdb | head -n 1)" + PG_BINDIR="$(dirname "${PG_INITDB_PATH}")" + if [[ -z "${PG_BINDIR}" || ! -x "${PG_BINDIR}/initdb" || ! 
-x "${PG_BINDIR}/postgres" ]]; then + echo "Failed to locate PostgreSQL binaries (initdb/postgres)" > /dev/stderr + exit 1 + fi + echo "${PG_BINDIR}" >> "${GITHUB_PATH}" + export PATH="${PG_BINDIR}:${PATH}" + + redis-server --version + nats-server --version + initdb --version + postgres --version + sqlite3 --version + + - run: uv sync --dev --all-extras + + - name: Build TypeScript ESM bundle for cross-runtime roundtrip tests + if: matrix.task.kind == 'test' && matrix.task.name == 'test_cross_runtime_roundtrip' + run: | + pnpm --dir bubus-ts install --frozen-lockfile + pnpm --dir bubus-ts run build + + - name: Verify bridge optional Python deps + if: matrix.task.kind == 'test' && matrix.task.name == 'test_bridges' + run: | + uv run python - <<'PY' + import importlib + modules = ['asyncpg', 'redis', 'nats'] + for module in modules: + importlib.import_module(module) + print('optional bridge deps import OK') + PY + + - name: Run test with coverage + if: matrix.task.kind == 'test' + run: uv run coverage run --parallel-mode --source=bubus -m pytest -x tests/${{ matrix.task.name }}.py + + - name: Run example + if: matrix.task.kind == 'example' + run: uv run coverage run --parallel-mode --source=bubus examples/${{ matrix.task.name }}.py + + - name: Check coverage files + if: always() + run: | + echo "Looking for coverage files..." + ls -la .coverage* 2>/dev/null || ls -la | grep coverage || echo "No coverage files found" + coverage_file="$(find . 
-maxdepth 1 -type f -name '.coverage*' | head -n 1)" + if [ -n "$coverage_file" ]; then + echo "Found coverage file ($coverage_file), size: $(stat -f%z "$coverage_file" 2>/dev/null || stat -c%s "$coverage_file") bytes" + fi + + - name: Upload coverage data + uses: actions/upload-artifact@v4 + with: + name: coverage-${{ matrix.task.kind }}-${{ matrix.task.name }} + path: | + .coverage* + pyproject.toml + retention-days: 7 + include-hidden-files: true + if: always() + + coverage: + needs: tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + activate-environment: true + + - run: uv sync --dev --all-extras + + - name: Download all coverage data + uses: actions/download-artifact@v4 + with: + pattern: coverage-* + path: coverage-data/ + + - name: Combine coverage data + run: | + # Find all .coverage* files and copy them with unique names + counter=1 + for coverage_file in $(find coverage-data -name ".coverage*" -type f); do + cp "$coverage_file" ".coverage.$counter" + counter=$((counter + 1)) + done + + - name: Combine coverage & fail if it's <50% + run: | + uv tool install 'coverage[toml]' + OMIT='bubus/bridge*.py' + + coverage combine + coverage html --skip-covered --skip-empty --omit="$OMIT" + coverage xml --omit="$OMIT" + + echo "### Python combined coverage" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + # Report and write a markdown table to summary. + coverage report --omit="$OMIT" --format=markdown >> $GITHUB_STEP_SUMMARY + + # Report again and fail if under 50%. 
+ coverage report --omit="$OMIT" --fail-under=50 + + - name: Upload combined coverage report + id: upload_py_coverage_report + uses: actions/upload-artifact@v4 + with: + name: coverage-report + path: | + htmlcov/ + coverage.xml + pyproject.toml + retention-days: 7 + + - name: Append Python coverage artifact link + run: | + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "[Download Python HTML coverage artifact (coverage-report)](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts/${{ steps.upload_py_coverage_report.outputs.artifact-id }})" >> "$GITHUB_STEP_SUMMARY" + + perf: + runs-on: ubuntu-latest + outputs: + perf_stats: ${{ steps.export_perf.outputs.perf_stats }} + steps: + - uses: actions/checkout@v4 + + - uses: astral-sh/setup-uv@v6 + with: + enable-cache: true + activate-environment: true + + - run: uv sync --dev --all-extras + - run: uv run pytest -x tests/test_eventbus_performance.py + - name: Run Python runtime perf + run: uv run python tests/performance_runtime.py | tee python_perf.log + - name: Export Python perf stats + id: export_perf + run: | + python - <<'PY' + import json + import os + import re + from pathlib import Path + + text = Path("python_perf.log").read_text(encoding="utf-8", errors="replace") + lines = [line.strip() for line in text.splitlines()] + stat_lines = [line for line in lines if re.match(r"^\[python\]\s.+:\s.*latency=", line)] + + if not stat_lines: + # Fallback: parse final JSON payload if present. 
+ for index, char in enumerate(text): + if char != "[": + continue + try: + payload = json.loads(text[index:]) + except Exception: + continue + if not isinstance(payload, list): + continue + compact = [] + for item in payload: + if not isinstance(item, dict): + continue + scenario = str(item.get("scenario_id", "unknown")) + latency = item.get("ms_per_event") + unit = str(item.get("ms_per_event_unit", "event")) + throughput = item.get("throughput") + peak_rss = item.get("peak_rss_kb_per_event") + parts = [f"{scenario}:"] + if isinstance(latency, (int, float)): + parts.append(f"latency={float(latency):.3f}ms/{unit}") + if isinstance(throughput, (int, float)): + parts.append(f"throughput={int(throughput)}/s") + if isinstance(peak_rss, (int, float)): + parts.append(f"peak_rss={float(peak_rss):.3f}kb/event") + compact.append(" ".join(parts)) + if compact: + stat_lines = compact + break + + if not stat_lines: + stat_lines = ["unable to parse python perf stats; see job log"] + + stats = "\n".join(stat_lines) + + with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as fh: + fh.write("perf_stats<> "$GITHUB_OUTPUT" + echo "TS_TASKS=${TS_TASKS}" >> "$GITHUB_OUTPUT" + echo "$TS_TASKS" + - name: Check that at least one test file is found + run: | + if [[ -z "${{ steps.lsgrep.outputs.TS_TEST_TASKS }}" || "${{ steps.lsgrep.outputs.TS_TEST_TASKS }}" == "[]" ]]; then + echo "Failed to find any *.test.ts files in bubus-ts/tests/ folder!" > /dev/stderr + exit 1 + fi + + tests: + needs: + - lint_ts + - find_ts_tests + runs-on: ubuntu-latest + strategy: + matrix: + task: ${{ fromJson(needs.find_ts_tests.outputs.TS_TASKS || '[{"kind":"error","name":"FAILED_TO_DISCOVER_TASKS"}]') }} + # autodiscovers all files in bubus-ts/tests/*.test.ts and bubus-ts/examples/*.ts + # - { kind: "test", name: "eventbus_basics" } + # - { kind: "example", name: "simple" } + # ... 
and more + name: ts-${{ matrix.task.kind }}-${{ matrix.task.name }} + defaults: + run: + working-directory: bubus-ts + steps: + - uses: actions/checkout@v4 + + - name: Check that the previous step managed to find some tasks for us to run + run: | + if [[ "${{ matrix.task.kind }}" == "error" ]]; then + echo "Failed get list of tasks from find_ts_tests job" > /dev/stderr + exit 1 + fi + + - uses: pnpm/action-setup@v4 + with: + version: 10 + + - uses: actions/setup-node@v4 + with: + node-version: 22 + cache: pnpm + cache-dependency-path: bubus-ts/pnpm-lock.yaml + + - name: Install bridge service binaries + if: matrix.task.kind == 'test' && matrix.task.name == 'bridges' + run: | + sudo apt-get update + sudo apt-get install -y redis-server nats-server postgresql sqlite3 + + PG_INITDB_PATH="$(find /usr/lib/postgresql -type f -name initdb | head -n 1)" + PG_BINDIR="$(dirname "${PG_INITDB_PATH}")" + if [[ -z "${PG_BINDIR}" || ! -x "${PG_BINDIR}/initdb" || ! -x "${PG_BINDIR}/postgres" ]]; then + echo "Failed to locate PostgreSQL binaries (initdb/postgres)" > /dev/stderr + exit 1 + fi + echo "${PG_BINDIR}" >> "${GITHUB_PATH}" + export PATH="${PG_BINDIR}:${PATH}" + + redis-server --version + nats-server --version + initdb --version + postgres --version + sqlite3 --version + + - run: pnpm install --frozen-lockfile + + - uses: astral-sh/setup-uv@v6 + if: matrix.task.kind == 'test' && matrix.task.name == 'cross_runtime_roundtrip' + with: + enable-cache: true + activate-environment: true + + - name: Install Python deps for cross-runtime roundtrip tests + if: matrix.task.kind == 'test' && matrix.task.name == 'cross_runtime_roundtrip' + run: | + cd .. 
+ uv sync --dev --all-extras + + - name: Verify bridge optional Node deps + if: matrix.task.kind == 'test' && matrix.task.name == 'bridges' + run: | + node - <<'NODE' + const required = ['ioredis', 'nats', 'pg'] + for (const pkg of required) { + require.resolve(pkg) + } + console.log('optional bridge deps resolve OK') + NODE + - name: Prepare coverage directory + run: | + rm -rf .v8-coverage + mkdir -p .v8-coverage + - name: Run test with coverage + if: matrix.task.kind == 'test' + run: NODE_V8_COVERAGE=.v8-coverage NODE_OPTIONS='--expose-gc' node --expose-gc --test --experimental-test-coverage --import tsx tests/${{ matrix.task.name }}.test.ts + - name: Run example + if: matrix.task.kind == 'example' + run: NODE_V8_COVERAGE=.v8-coverage NODE_OPTIONS='--expose-gc' node --expose-gc --import tsx examples/${{ matrix.task.name }}.ts + - name: Upload raw coverage data + uses: actions/upload-artifact@v4 + with: + name: ts-coverage-${{ matrix.task.kind }}-${{ matrix.task.name }} + path: | + bubus-ts/.v8-coverage + pyproject.toml + retention-days: 7 + include-hidden-files: true + if: always() + + coverage: + needs: tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - uses: pnpm/action-setup@v4 + with: + version: 10 + + - uses: actions/setup-node@v4 + with: + node-version: 22 + cache: pnpm + cache-dependency-path: bubus-ts/pnpm-lock.yaml + + - run: cd bubus-ts && pnpm install --frozen-lockfile + + - name: Download all coverage data + uses: actions/download-artifact@v4 + with: + pattern: ts-coverage-* + path: coverage-data/ + + - name: Combine coverage data + run: | + mkdir -p bubus-ts/.v8-coverage-merged + + counter=1 + while IFS= read -r -d '' coverage_file; do + cp "$coverage_file" "bubus-ts/.v8-coverage-merged/$counter-$(basename "$coverage_file")" + counter=$((counter + 1)) + done < <(find coverage-data -type f -name "*.json" -print0) + + if [[ "$counter" -eq 1 ]]; then + echo "No V8 coverage JSON files found in downloaded artifacts" > /dev/stderr 
+ exit 1 + fi + + - name: Build merged coverage report + run: | + cd bubus-ts + set -o pipefail + mkdir -p coverage + pnpm dlx c8 report \ + --temp-directory .v8-coverage-merged \ + --report-dir coverage \ + --reporter=html \ + --reporter=text \ + --reporter=json-summary \ + --exclude-after-remap \ + -n 'src/**/*.ts' \ + -x 'src/bridge*.ts' \ + -x 'src/optional_deps.ts' | tee coverage/text-report.txt + + node <<'NODE' + const fs = require('fs'); + const summaryPath = 'coverage/coverage-summary.json'; + const summary = JSON.parse(fs.readFileSync(summaryPath, 'utf8')); + const entries = Object.entries(summary); + const total = summary.total; + const files = entries + .filter(([name]) => name !== 'total') + .sort((a, b) => String(a[0]).localeCompare(String(b[0]))); + + const esc = (s) => String(s).replace(/\|/g, '\\|'); + const row = (name, m) => { + const stmtsTotal = Number(m.statements.total || 0); + const stmtsCovered = Number(m.statements.covered || 0); + const stmtsMiss = Math.max(stmtsTotal - stmtsCovered, 0); + return `| ${esc(name)} | ${stmtsTotal} | ${stmtsMiss} | ${Number(m.statements.pct || 0).toFixed(2)}% | ${Number(m.branches.pct || 0).toFixed(2)}% | ${Number(m.functions.pct || 0).toFixed(2)}% | ${Number(m.lines.pct || 0).toFixed(2)}% |`; + }; + + const lines = []; + lines.push('### TypeScript combined coverage'); + lines.push(''); + lines.push('| Name | Stmts | Miss | Cover | Branch | Funcs | Lines |'); + lines.push('| --- | ---: | ---: | ---: | ---: | ---: | ---: |'); + lines.push(row('TOTAL', total)); + for (const [name, metrics] of files) { + lines.push(row(name, metrics)); + } + lines.push(''); + + const summaryFile = process.env.GITHUB_STEP_SUMMARY; + fs.appendFileSync(summaryFile, lines.join('\n')); + NODE + + - name: Fail if TypeScript coverage is <50% + run: | + cd bubus-ts + pnpm dlx c8 report \ + --temp-directory .v8-coverage-merged \ + --reporter=text-summary \ + --exclude-after-remap \ + -n 'src/**/*.ts' \ + -x 'src/bridge*.ts' \ + -x 
'src/optional_deps.ts' \ + --check-coverage \ + --lines 50 > /dev/null + + - name: Upload merged coverage report + id: upload_ts_coverage_report + uses: actions/upload-artifact@v4 + with: + name: ts-coverage-report + path: | + bubus-ts/coverage/ + pyproject.toml + retention-days: 7 + + - name: Append TypeScript coverage artifact link + run: | + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "[Download TypeScript HTML coverage artifact (ts-coverage-report)](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts/${{ steps.upload_ts_coverage_report.outputs.artifact-id }})" >> "$GITHUB_STEP_SUMMARY" + + perf: + runs-on: ubuntu-latest + outputs: + perf_stats: ${{ steps.export_perf.outputs.perf_stats }} + defaults: + run: + working-directory: bubus-ts + steps: + - uses: actions/checkout@v4 + + - uses: pnpm/action-setup@v4 + with: + version: 10 + + - uses: actions/setup-node@v4 + with: + node-version: 22 + cache: pnpm + cache-dependency-path: bubus-ts/pnpm-lock.yaml + + - uses: oven-sh/setup-bun@v2 + + - uses: denoland/setup-deno@v2 + with: + deno-version: v2.x + + - run: pnpm install --frozen-lockfile + - name: Run TypeScript runtime perf + run: pnpm run perf | tee ts_perf.log + - name: Export TypeScript perf stats + id: export_perf + run: | + python - <<'PY' + import os + import re + from pathlib import Path + + text = Path("ts_perf.log").read_text(encoding="utf-8", errors="replace") + lines = [line.strip() for line in text.splitlines()] + stat_lines = [ + line + for line in lines + if re.match(r"^\[(node|bun|deno|browser)\]\s.+:\s.*latency=", line) + ] + + if not stat_lines: + stat_lines = ["unable to parse ts perf stats; see job log"] + + stats = "\n".join(stat_lines) + + with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as fh: + fh.write("perf_stats< -It's designed for quickly building event-driven applications with Python in a way that "just works" with async support, proper support for nested events, and real concurrency 
control. +[![DeepWiki: Python](https://img.shields.io/badge/DeepWiki-bbus%2FPython-yellow.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus) [![PyPI - Version](https://img.shields.io/pypi/v/bubus)](https://pypi.org/project/bubus/) [![GitHub License](https://img.shields.io/github/license/pirate/bbus)](https://github.com/pirate/bbus) ![GitHub last commit](https://img.shields.io/github/last-commit/pirate/bbus) -It provides a [pydantic](https://docs.pydantic.dev/latest/)-based API for implementing publish-subscribe patterns with type safety, async/sync handler support, and advanced features like event forwarding between buses. 
+[![DeepWiki: TS](https://img.shields.io/badge/DeepWiki-bbus%2FTypescript-blue.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus/3-typescript-implementation) [![NPM Version](https://img.shields.io/npm/v/bubus)](https://www.npmjs.com/package/bubus) -♾️ It's inspired by the simplicity of async and events in `JS`, we aim to bring a fully type-checked [`EventTarget`](https://developer.mozilla.org/en-US/docs/Web/API/EventTarget)-style API to Python. +Bubus is an in-memory event bus library for async Python and TS (node/browser). + +It's designed for quickly building resilient, predictable, complex event-driven apps. 
+ +It "just works" with an intuitive, but powerful event JSON format + emit API that's consistent across both languages and scales consistently from one event up to millions (~0.2ms/event): + +```python +class SomeEvent(BaseEvent): + some_data: int + +def handle_some_event(event: SomeEvent): + print('hi!') + +bus.on(SomeEvent, some_function) +await bus.emit(SomeEvent({some_data: 132})) +# "hi!"" +``` + +It's async native, has proper automatic nested event tracking, and powerful concurrency control options. The API is inspired by `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS, but it takes it a step further: + +- nice Pydantic / Zod schemas for events that can be exchanged between both languages +- automatic UUIDv7s and monotonic nanosecond timestamps for ordering events globally +- built in locking options to force strict global FIFO processing or fully parallel processing + +--- + +♾️ It's inspired by the simplicity of async and events in `JS` but with baked-in features that allow to eliminate most of the tedious repetitive complexity in event-driven codebases: + +- correct timeout enforcement across multiple levels of events, if a parent times out it correctly aborts all child event processing +- ability to strongly type hint and enforce the return type of event handlers at compile-time +- ability to queue events on the bus, or inline await them for immediate execution like a normal function call +- handles thousands of events/sec/core in both languages; see the runtime matrix below for current measured numbers
+ ## πŸ”’ Quickstart Install bubus and get started with a simple event-driven application: ```bash -pip install bubus +pip install bubus # see ./bubus-ts/README.md for JS instructions ``` ```python @@ -28,15 +60,15 @@ class UserLoginEvent(BaseEvent[str]): is_admin: bool async def handle_login(event: UserLoginEvent) -> str: - auth_request = await event.event_bus.dispatch(AuthRequestEvent(...)) # nested events supported - auth_response = await event.event_bus.expect(AuthResponseEvent, timeout=30.0) + auth_request = await event.event_bus.emit(AuthRequestEvent(...)) # nested events supported + auth_response = await event.event_bus.find(AuthResponseEvent, child_of=auth_request, future=30) return f"User {event.username} logged in admin={event.is_admin} with API response: {await auth_response.event_result()}" bus = EventBus() bus.on(UserLoginEvent, handle_login) bus.on(AuthRequestEvent, AuthAPI.post) -event = bus.dispatch(UserLoginEvent(username="alice", is_admin=True)) +event = bus.emit(UserLoginEvent(username="alice", is_admin=True)) print(await event.event_result()) # User alice logged in admin=True with API response: {...} ``` @@ -51,7 +83,9 @@ print(await event.event_result())
-### πŸ”Ž Event Pattern Matching +
+πŸ”Ž Event Pattern Matching + Subscribe to events using multiple patterns: @@ -68,7 +102,11 @@ bus.on('*', universal_handler)
-### πŸ”€ Async and Sync Handler Support +
+ +
+πŸ”€ Async and Sync Handler Support + Register both synchronous and asynchronous handlers for maximum flexibility: @@ -104,15 +142,19 @@ class SomeService: return 'this works too' # All usage patterns behave the same: -bus.on(SomeEvent, SomeClass().handlers_can_be_methods) -bus.on(SomeEvent, SomeClass.handler_can_be_classmethods) -bus.on(SomeEvent, SomeClass.handlers_can_be_staticmethods) +bus.on(SomeEvent, SomeService().handlers_can_be_methods) +bus.on(SomeEvent, SomeService.handler_can_be_classmethods) +bus.on(SomeEvent, SomeService.handlers_can_be_staticmethods) ```
-### πŸ”  Type-Safe Events with Pydantic +
+ +
+πŸ”  Type-Safe Events with Pydantic + Define events as Pydantic models with full type checking and validation: @@ -142,7 +184,11 @@ event = OrderCreatedEvent( -### ⏩ Forward `Events` Between `EventBus`s +
+ +
+⏩ Forward `Events` Between `EventBus`s + You can define separate `EventBus` instances in different "microservices" to separate different areas of concern. `EventBus`s can be set up to forward events between each other (with automatic loop prevention): @@ -154,21 +200,53 @@ auth_bus = EventBus(name='AuthBus') data_bus = EventBus(name='DataBus') # Share all or specific events between buses -main_bus.on('*', auth_bus.dispatch) # if main bus gets LoginEvent, will forward to AuthBus -auth_bus.on('*', data_bus.dispatch) # auth bus will forward everything to DataBus -data_bus.on('*', main_bus.dispatch) # don't worry! event will only be processed once by each, no infinite loop occurs +main_bus.on('*', auth_bus.emit) # if main bus gets LoginEvent, will forward to AuthBus +auth_bus.on('*', data_bus.emit) # auth bus will forward everything to DataBus +data_bus.on('*', main_bus.emit) # don't worry! event will only be processed once by each, no infinite loop occurs # Events flow through the hierarchy with tracking -event = main_bus.dispatch(LoginEvent()) +event = main_bus.emit(LoginEvent()) await event -print(event.event_path) # ['MainBus', 'AuthBus', 'DataBus'] # list of buses that have already procssed the event +print(event.event_path) # ['MainBus#ab12', 'AuthBus#cd34', 'DataBus#ef56'] # list of bus labels that already processed the event +``` + +
+ +
+ +
+Bridges + + +Bridges are optional extra connectors provided that allow you to send/receive events from an external service, and you do not need to use a bridge to use bubus since it's normally purely in-memory. These are just simple helpers to forward bubus events JSON to storage engines / other processes / other machines; they prevent loops automatically, but beyond that it's only basic forwarding with no handler pickling or anything fancy. + +Bridges all expose a very simple bus-like API with only `.emit()` and `.on()`. + +**Example usage: link a bus to a redis pub/sub channel** +```python +bridge = RedisEventBridge('redis://redis@localhost:6379') + +bus.on('*', bridge.emit) # listen for all events on bus and send them to redis channel +bridge.on('*', bus.emit) # listen for new events in redis channel and emit them to our bus ``` +- `SocketEventBridge('/tmp/bubus_events.sock')` +- `HTTPEventBridge(send_to='https://127.0.0.1:8001/bubus_events', listen_on='http://0.0.0.0:8002/bubus_events')` +- `JSONLEventBridge('/tmp/bubus_events.jsonl')` +- `SQLiteEventBridge('/tmp/bubus_events.sqlite3')` +- `PostgresEventBridge('postgresql://user:pass@localhost:5432/dbname/bubus_events')` +- `RedisEventBridge('redis://user:pass@localhost:6379/1/bubus_events')` +- `NATSEventBridge('nats://localhost:4222', 'bubus_events')` +
-### πŸ”± Event Results Aggregation +
-Collect and aggregate results from multiple handlers: +
+πŸ”± Event Results Aggregation + + +Collect results from multiple handlers: ```python async def load_user_config(event: GetConfigEvent) -> dict[str, Any]: @@ -180,39 +258,42 @@ async def load_system_config(event: GetConfigEvent) -> dict[str, Any]: bus.on(GetConfigEvent, load_user_config) bus.on(GetConfigEvent, load_system_config) -# Get a merger of all dict results -event = await bus.dispatch(GetConfigEvent()) -config = await event.event_results_flat_dict(raise_if_conflicts=False) -# {'debug': False, 'port': 8080, 'timeout': 30} +# Get all handler result values +event = await bus.emit(GetConfigEvent()) +results = await event.event_results_list() -# Or get individual results -await event.event_results_by_handler_id() -await event.event_results_list() +# Inspect per-handler metadata when needed +for handler_id, event_result in event.event_results.items(): + print(handler_id, event_result.handler_name, event_result.result) ```
-### 🚦 FIFO Event Processing +
+ +
+🚦 FIFO Event Processing + Events are processed in strict FIFO order, maintaining consistency: ```python -# Events are processed in the order they were dispatched +# Events are processed in the order they were emitted for i in range(10): - bus.dispatch(ProcessTaskEvent(task_id=i)) + bus.emit(ProcessTaskEvent(task_id=i)) # Even with async handlers, order is preserved await bus.wait_until_idle(timeout=30.0) ``` -If a handler dispatches and awaits any child events during execution, those events will jump the FIFO queue and be processed immediately: +If a handler emits and awaits any child events during execution, those events will jump the FIFO queue and be processed immediately: ```python def child_handler(event: SomeOtherEvent) -> str: return 'xzy123' def main_handler(event: MainEvent) -> str: # enqueue event for processing after main_handler exits - child_event = bus.dispatch(SomeOtherEvent()) + child_event = bus.emit(SomeOtherEvent()) # can also await child events to process immediately instead of adding to FIFO queue completed_child_event = await child_event @@ -221,13 +302,17 @@ def main_handler(event: MainEvent) -> str: bus.on(SomeOtherEvent, child_handler) bus.on(MainEvent, main_handler) -await bus.dispatch(MainEvent()).event_result() +await bus.emit(MainEvent()).event_result() # result from awaiting child event: xyz123 ```
-### πŸͺ† Dispatch Nested Child Events From Handlers +
+ +
+πŸͺ† Emit Nested Child Events From Handlers + Automatically track event relationships and causality tree: @@ -235,15 +320,15 @@ Automatically track event relationships and causality tree: async def parent_handler(event: BaseEvent): # handlers can emit more events to be processed asynchronously after this handler completes child = ChildEvent() - child_event_async = event.event_bus.dispatch(child) # equivalent to bus.dispatch(...) + child_event_async = event.event_bus.emit(child) # equivalent to bus.emit(...) assert child.event_status != 'completed' assert child_event_async.event_parent_id == event.event_id await child_event_async - # or you can dispatch an event and block until it finishes processing by awaiting the event + # or you can emit an event and block until it finishes processing by awaiting the event # this recursively waits for all handlers, including if event is forwarded to other buses # (note: awaiting an event from inside a handler jumps the FIFO queue and will process it immediately, before any other pending events) - child_event_sync = await bus.dispatch(ChildEvent()) + child_event_sync = await bus.emit(ChildEvent()) # ChildEvent handlers run immediately assert child_event_sync.event_status == 'completed' @@ -254,7 +339,7 @@ async def run_main(): bus.on(ChildEvent, child_handler) bus.on(ParentEvent, parent_handler) - parent_event = bus.dispatch(ParentEvent()) + parent_event = bus.emit(ParentEvent()) print(parent_event.event_children) # show all the child events emitted during handling of an event await parent_event print(bus.log_tree()) @@ -270,85 +355,131 @@ if __name__ == '__main__':

-### ⏳ Expect an Event to be Dispatched +
+ +
+πŸ”Ž Find Events in History or Wait for Future Events -Wait for specific events to be seen on a bus with optional filtering: + +`find()` is the single lookup API: search history, wait for future events, or combine both. ```python -# Block until a specific event is seen (with optional timeout) -request_event = await bus.dispatch(RequestEvent(id=123, table='invoices', request_id=999234)) -response_event = await bus.expect(ResponseEvent, timeout=30) +# Default: non-blocking history lookup (past=True, future=False) +existing = await bus.find(ResponseEvent) + +# Wait only for future matches +future = await bus.find(ResponseEvent, past=False, future=5) + +# Combine event predicate + event metadata filters +match = await bus.find( + ResponseEvent, + where=lambda e: e.request_id == my_id, + event_status='completed', + future=5, +) + +# Wildcard: match any event type, filtered by metadata/predicate +any_completed = await bus.find( + '*', + where=lambda e: e.event_type.endswith('ResultEvent'), + event_status='completed', + future=5, +) ``` -A more complex real-world example showing off all the features: +#### Finding Child Events + +When you emit an event that triggers child events, use `child_of` to find specific descendants: ```python -async def on_generate_invoice_pdf(event: GenerateInvoiceEvent) -> pdf: - request_event = await bus.dispatch(APIRequestEvent( # example: fire a backend request via some RPC client using bubus - method='invoices.generatePdf', - invoice_id=event.invoice_id, - request_id=uuid4(), - )) - # ...rpc client should send the request, then call event_bus.dispatch(APIResponseEvent(...)) when it gets a response ... 
+# Emit a parent event that triggers child events +nav_event = await bus.emit(NavigateToUrlEvent(url="https://example.com")) - # wait for the response event to be fired by the RPC client - is_our_response = lambda response_event: response_event.request_id == request_event.request_id - is_succesful = lambda response_event: response_event.invoice_id == event.invoice_id and response_event.invoice_url - try: - response_event: APIResponseEvent = await bus.expect( - APIResponseEvent, # wait for events of this type (also accepts str name) - include=lambda e: is_our_response(e) and is_succesful(e), # only include events that match a certain filter func - exclude=lambda e: e.status != 'retrying', # optionally exclude certain events, overrides include - timeout=30, # raises asyncio.TimeoutError if no match is seen within 30sec - ) - except TimeoutError: - await bus.dispatch(TimedOutError(msg='timed out while waiting for response from server', request_id=request_event.id)) +# Find a child event (already fired while NavigateToUrlEvent was being handled) +new_tab = await bus.find(TabCreatedEvent, child_of=nav_event, past=5) +if new_tab: + print(f"New tab created: {new_tab.tab_id}") +``` - return response_event.invoice_url +This solves race conditions where child events fire before you start waiting for them. -event_bus.on(GenerateInvoiceEvent, on_generate_invoice_pdf) -``` +See the `EventBus.find(...)` API section below for full parameter details. > [!IMPORTANT] -> `expect()` resolves when the event is first *dispatched* to the `EventBus`, not when it completes. `await response_event` to get the completed event. +> `find()` resolves when the event is first *emitted* to the `EventBus`, not when it completes. +> Use `await event` for immediate-await semantics (queue-jumps when called inside a handler), or `await event.event_completed()` to always wait in normal queue order. +> If no match is found (or future timeout elapses), `find()` returns `None`.
-### 🎯 Event Handler Return Values +
+ +
+πŸ” Event Debouncing + + +Avoid re-running expensive work by reusing recent events. The `find()` method makes debouncing simple: + +```python +# Simple debouncing: reuse event from last 10 seconds, or emit new +event = await ( + await bus.find(ScreenshotEvent, past=10, future=False) # Check last 10s of history (instant) + or bus.emit(ScreenshotEvent()) +) + +# Advanced: check history, wait briefly for new event to appear, fallback to emit new event +event = ( + await bus.find(SyncEvent, past=True, future=False) # Check all history (instant) + or await bus.find(SyncEvent, past=False, future=5) # Wait up to 5s for in-flight + or bus.emit(SyncEvent()) # Fallback: emit new +) +await event # get completed event +``` + +
+ +
+ +
+🎯 Event Handler Return Values + There are two ways to get return values from event handlers: **1. Have handlers return their values directly, which puts them in `event.event_results`:** ```python -class DoSomeMathEvent(BaseEvent[int]): # BaseEvent[int] = expect int returned from all event handlers +class DoSomeMathEvent(BaseEvent[int]): # BaseEvent[int] = handlers are validated as returning int a: int b: int + # int passed above gets saved to: + # event_result_type = int + def do_some_math(event: DoSomeMathEvent) -> int: return event.a + event.b event_bus.on(DoSomeMathEvent, do_some_math) -print(await event_bus.dispatch(DoSomeMathEvent(a=100, b=120)).event_result()) +print(await event_bus.emit(DoSomeMathEvent(a=100, b=120)).event_result()) # 220 ``` You can use these helpers to interact with the results returned by handlers: + - `BaseEvent.event_result()` -- `BaseEvent.event_results_list()`, `BaseEvent.event_results_filtered()` -- `BaseEvent.event_results_by_handler_id()`, `BaseEvent.event_results_by_handler_name()` -- `BaseEvent.event_results_flat_list()`, `BaseEvent.event_results_flat_dict()` +- `BaseEvent.event_results_list()` +- Inspect raw per-handler entries via `BaseEvent.event_results` -**2. Have the handler do the work, then dispatch another event containing the result value, which other code can expect:** +**2. 
Have the handler do the work, then emit another event containing the result value, which other code can find:** ```python def do_some_math(event: DoSomeMathEvent[int]) -> int: result = event.a + event.b - event.event_bus.dispatch(MathCompleteEvent(final_sum=result)) + event.event_bus.emit(MathCompleteEvent(final_sum=result)) event_bus.on(DoSomeMathEvent, do_some_math) -await event_bus.dispatch(DoSomeMathEvent(a=100, b=120)) -result_event = await event_bus.expect(MathCompleteEvent) +await event_bus.emit(DoSomeMathEvent(a=100, b=120)) +result_event = await event_bus.find(MathCompleteEvent, past=False, future=30) print(result_event.final_sum) # 220 ``` @@ -370,7 +501,7 @@ async def on_ScreenshotEvent(event: ScreenshotEvent) -> bytes: event_bus.on(ScreenshotEvent, on_ScreenshotEvent) # Handler return values are automatically validated against the bytes type -returned_bytes = await event_bus.dispatch(ScreenshotEvent(...)).event_result() +returned_bytes = await event_bus.emit(ScreenshotEvent(...)).event_result() assert isinstance(returned_bytes, bytes) ``` @@ -407,12 +538,85 @@ async def fetch_from_gmail(event: FetchInboxEvent) -> list[EmailMessage]: event_bus.on(FetchInboxEvent, fetch_from_gmail) # Return values are automatically validated as list[EmailMessage] -email_list = await event_bus.dispatch(FetchInboxEvent(account_id='124', ...)).event_result() +email_list = await event_bus.emit(FetchInboxEvent(account_id='124', ...)).event_result() ``` +For pure Python usage, `event_result_type` can be any Python/Pydantic type you want. For cross-language JSON roundtrips, object-like shapes (e.g. `TypedDict`, `dataclass`, model-like dict schemas) rehydrate on Python as Pydantic models, map keys are constrained to JSON object string keys, and fine-grained string constraints/custom field validator logic is not preserved. +
-### 🧹 Memory Management +
+ +
+🧡 ContextVar Propagation + + +ContextVars set before `emit()` are automatically propagated to event handlers. This is essential for request-scoped context like request IDs, user sessions, or tracing spans: + +```python +from contextvars import ContextVar + +# Define your context variables +request_id: ContextVar[str] = ContextVar('request_id', default='') +user_id: ContextVar[str] = ContextVar('user_id', default='') + +async def handler(event: MyEvent) -> str: + # Handler sees the context values that were set before emit() + print(f"Request: {request_id.get()}, User: {user_id.get()}") + return "done" + +bus.on(MyEvent, handler) + +# Set context before emit (e.g., in FastAPI middleware) +request_id.set('req-12345') +user_id.set('user-abc') + +# Handler will see request_id='req-12345' and user_id='user-abc' +await bus.emit(MyEvent()) +``` + +**Context propagates through nested handlers:** + +```python +async def parent_handler(event: ParentEvent) -> str: + # Context is captured at emit time + print(f"Parent sees: {request_id.get()}") # 'req-12345' + + # Child events inherit the same context + await bus.emit(ChildEvent()) + return "parent_done" + +async def child_handler(event: ChildEvent) -> str: + # Child also sees the original emit context + print(f"Child sees: {request_id.get()}") # 'req-12345' + return "child_done" +``` + +**Context isolation between emits:** + +Each emit captures its own context snapshot. Concurrent emits with different context values are properly isolated: + +```python +request_id.set('req-A') +event_a = bus.emit(MyEvent()) # Handler A sees 'req-A' + +request_id.set('req-B') +event_b = bus.emit(MyEvent()) # Handler B sees 'req-B' + +await event_a # Still sees 'req-A' +await event_b # Still sees 'req-B' +``` + +> [!NOTE] +> Context is captured at `emit()` time, not when the handler executes. This ensures handlers see the context from the call site, even if the event is processed later from a queue. + +
+ +
+ +
+🧹 Memory Management + EventBus includes automatic memory management to prevent unbounded growth in long-running applications: @@ -422,10 +626,18 @@ bus = EventBus(max_history_size=100) # Keep max 100 events in history # Or disable memory limits for unlimited history bus = EventBus(max_history_size=None) + +# Or keep only in-flight events in history (drop each event as soon as it completes) +bus = EventBus(max_history_size=0) + +# Or reject new emits when history is full (instead of dropping old history) +bus = EventBus(max_history_size=100, max_history_drop=False) ``` **Automatic Cleanup:** -- When `max_history_size` is set, EventBus automatically removes old events when the limit is exceeded +- When `max_history_size` is set and `max_history_drop=True`, EventBus removes old events when the limit is exceeded +- If `max_history_size=0`, history keeps only pending/started events and drops each event immediately after completion +- If `max_history_drop=True`, the bus may drop oldest history entries even if they are uncompleted events - Completed events are removed first (oldest first), then started events, then pending events - This ensures active events are preserved while cleaning up old completed events @@ -448,7 +660,11 @@ finally:
-### ⛓️ Parallel Handler Execution +
+ +
+⛓️ Parallel Handler Execution + > [!CAUTION] > **Not Recommended.** Only for advanced users willing to implement their own concurrency control. @@ -459,36 +675,79 @@ The harsh tradeoff is less deterministic ordering as handler execution order wil ```python # Create bus with parallel handler execution -bus = EventBus(parallel_handlers=True) +bus = EventBus(event_handler_concurrency='parallel') # Multiple handlers run concurrently for each event bus.on('DataEvent', slow_handler_1) # Takes 1 second bus.on('DataEvent', slow_handler_2) # Takes 1 second start = time.time() -await bus.dispatch(DataEvent()) +await bus.emit(DataEvent()) # Total time: ~1 second (not 2) ```
-### πŸ“ Write-Ahead Logging +
-Persist events automatically to a `jsonl` file for future replay and debugging: +
+🧩 Middlewares + + +Middlewares can observe or mutate the `EventResult` at each step, emit additional events, or trigger other side effects (metrics, retries, auth checks, etc.). ```python -# Enable WAL event log persistence (optional) -bus = EventBus(name='MyBus', wal_path='./events.jsonl') +from bubus import EventBus +from bubus.middlewares import LoggerEventBusMiddleware, WALEventBusMiddleware, SQLiteHistoryMirrorMiddleware, OtelTracingMiddleware + +bus = EventBus( + name='MyBus', + middlewares=[ + SQLiteHistoryMirrorMiddleware('./events.sqlite3'), + WALEventBusMiddleware('./events.jsonl'), + LoggerEventBusMiddleware('./events.log'), + OtelTracingMiddleware(), + # ... + ], +) -# All completed events are automatically appended as JSON lines to the end -bus.dispatch(SecondEventAbc(some_key="banana")) +await bus.emit(SecondEventAbc(some_key="banana")) +# will persist all events to sqlite + events.jsonl + events.log ``` -`./events.jsonl`: -```json -{"event_type": "FirstEventXyz", "event_created_at": "2025-07-10T20:39:56.462000+00:00", "some_key": "some_val", ...} -{"event_type": "SecondEventAbc", ..., "some_key": "banana"} -... +Built-in middlewares you can import from `bubus.middlewares.*`: + +- `AutoErrorEventMiddleware`: on handler error, fire-and-forget emits `OriginalEventTypeErrorEvent` with `{error, error_type}` (skips `*ErrorEvent`/`*ResultEvent` sources). Useful when downstream/remote consumers only see events and need explicit failure notifications. +- `AutoReturnEventMiddleware`: on non-`None` handler return, fire-and-forget emits `OriginalEventTypeResultEvent` with `{data}` (skips `*ErrorEvent`/`*ResultEvent` sources). Useful for bridges/remote systems since handler return values do not cross bridge boundaries, but events do. +- `AutoHandlerChangeEventMiddleware`: emits `BusHandlerRegisteredEvent({handler})` / `BusHandlerUnregisteredEvent({handler})` when handlers are added/removed via `.on()` / `.off()`. 
+- `OtelTracingMiddleware`: emits OpenTelemetry spans for events and handlers with parent-child linking; can be exported to Sentry via Sentry's OpenTelemetry integration. +- `WALEventBusMiddleware`: persists completed events to JSONL for replay/debugging. +- `LoggerEventBusMiddleware`: writes event/handler transitions to stdout and optionally to file. +- `SQLiteHistoryMirrorMiddleware`: mirrors event and handler snapshots into append-only SQLite `events_log` and `event_results_log` tables for auditing/debugging. + +#### Defining a custom middleware + +Handler middlewares subclass `EventBusMiddleware` and override whichever lifecycle hooks they need (`on_event_change`, `on_event_result_change`, `on_bus_handlers_change`): + +```python +from bubus.middlewares import EventBusMiddleware + +class AnalyticsMiddleware(EventBusMiddleware): + async def on_event_result_change(self, eventbus, event, event_result, status): + if status == 'started': + await analytics_bus.emit(HandlerStartedAnalyticsEvent(event_id=event_result.event_id)) + elif status == 'completed': + await analytics_bus.emit( + HandlerCompletedAnalyticsEvent( + event_id=event_result.event_id, + error=repr(event_result.error) if event_result.error else None, + ) + ) + + async def on_bus_handlers_change(self, eventbus, handler, registered): + await analytics_bus.emit( + HandlerRegistryChangedEvent(handler_id=handler.id, registered=registered, bus=eventbus.name) + ) ```
@@ -498,27 +757,47 @@ bus.dispatch(SecondEventAbc(some_key="banana"))
+
+ ## πŸ“š API Documentation -### `EventBus` +
+Review bus construction, defaults, and core lifecycle methods. + The main event bus class that manages event processing and handler execution. ```python EventBus( name: str | None = None, - wal_path: Path | str | None = None, - parallel_handlers: bool = False, - max_history_size: int | None = 50 + event_handler_concurrency: Literal['serial', 'parallel'] = 'serial', + event_handler_completion: Literal['all', 'first'] = 'all', + event_timeout: float | None = 60.0, + event_slow_timeout: float | None = 300.0, + event_handler_slow_timeout: float | None = 30.0, + event_handler_detect_file_paths: bool = True, + max_history_size: int | None = 50, + max_history_drop: bool = False, + middlewares: Sequence[EventBusMiddleware | type[EventBusMiddleware]] | None = None, ) ``` **Parameters:** - `name`: Optional unique name for the bus (auto-generated if not provided) -- `wal_path`: Path for write-ahead logging of events to a `jsonl` file (optional) -- `parallel_handlers`: If `True`, handlers run concurrently for each event, otherwise serially if `False` (the default) -- `max_history_size`: Maximum number of events to keep in history (default: 50, None = unlimited) +- `event_handler_concurrency`: Default handler execution mode for events on this bus: `'serial'` (default) or `'parallel'` (resolved at processing time when `event.event_handler_concurrency` is unset) +- `event_handler_completion`: Handler completion mode for each event: `'all'` (default, wait for all handlers) or `'first'` (complete once first successful non-`None` result is available) +- `event_timeout`: Default per-event timeout in seconds resolved at processing time when `event.event_timeout` is `None` +- `event_slow_timeout`: Default slow-event warning threshold in seconds +- `event_handler_slow_timeout`: Default slow-handler warning threshold in seconds +- `event_handler_detect_file_paths`: Whether to auto-detect handler source file paths at registration time (slightly slower when enabled) +- `max_history_size`: 
Maximum number of events to keep in history (default: 50, `None` = unlimited, `0` = keep only in-flight events and drop completed events immediately) +- `max_history_drop`: If `True`, drop oldest history entries when full (even uncompleted events). If `False` (default), reject new emits once history reaches `max_history_size` (except when `max_history_size=0`, which never rejects on history size) +- `middlewares`: Optional list of `EventBusMiddleware` subclasses or instances that hook into handler execution for analytics, logging, retries, etc. (see [Middlewares](#middlewares) for more info) + +Timeout precedence matches TS: +- Effective handler timeout = `min(resolved_handler_timeout, event_timeout)` where `resolved_handler_timeout` resolves in order: `handler.handler_timeout` -> `event.event_handler_timeout` -> `bus.event_timeout`. +- Slow handler warning threshold resolves in order: `handler.handler_slow_timeout` -> `event.event_handler_slow_timeout` -> `event.event_slow_timeout` -> `bus.event_handler_slow_timeout` -> `bus.event_slow_timeout`. #### `EventBus` Properties @@ -530,7 +809,6 @@ EventBus( - `events_completed`: List of completed events - `all_instances`: Class-level WeakSet tracking all active EventBus instances (for memory monitoring) - #### `EventBus` Methods ##### `on(event_type: str | Type[BaseEvent], handler: Callable)` @@ -543,30 +821,77 @@ bus.on(UserEvent, handler_func) # By event class bus.on('*', handler_func) # Wildcard - all events ``` -##### `dispatch(event: BaseEvent) -> BaseEvent` +##### `emit(event: BaseEvent) -> BaseEvent` Enqueue an event for processing and return the pending `Event` immediately (synchronous). 
```python -event = bus.dispatch(MyEvent(data="test")) -result = await event # await the pending Event to get the completed Event +event = bus.emit(MyEvent(data="test")) +result = await event # immediate-await path (queue-jumps when called inside a handler) +result_in_queue_order = await event.event_completed() # always waits in normal queue order ``` -**Note:** When `max_history_size` is set, EventBus enforces a hard limit of 100 pending events (queue + processing) to prevent runaway memory usage. Dispatch will raise `RuntimeError` if this limit is exceeded. +**Note:** Queueing is unbounded. History pressure is controlled by `max_history_size` + `max_history_drop`: -##### `expect(event_type: str | Type[BaseEvent], timeout: float | None=None, predicate: Callable[[BaseEvent], bool]=None) -> BaseEvent` +- `max_history_drop=True`: absorb new events and trim old history entries (even uncompleted events). +- `max_history_drop=False`: raise `RuntimeError` when history is full. +- `max_history_size=0`: keep pending/in-flight events only; completed events are immediately removed from history. -Wait for a specific event to occur. +##### `find(event_type: str | Literal['*'] | Type[BaseEvent], *, where: Callable[[BaseEvent], bool]=None, child_of: BaseEvent | None=None, past: bool | float | timedelta=True, future: bool | float=False, **event_fields) -> BaseEvent | None` + +Find an event matching criteria in history and/or future. This is the recommended unified method for event lookup. 
+ +**Parameters:** + +- `event_type`: The event type string, `'*'` wildcard, or model class to find +- `where`: Predicate function for filtering (default: matches all) +- `child_of`: Only match events that are descendants of this parent event +- `past`: Controls history search behavior (default: `True`) + - `True`: search all history + - `False`: skip history search + - `float`/`timedelta`: search events from last N seconds only +- `future`: Controls future wait behavior (default: `False`) + - `True`: wait forever for matching event + - `False`: don't wait for future events + - `float`: wait up to N seconds for matching event +- `**event_fields`: Optional equality filters for any event fields (for example `event_status='completed'`, `user_id='u-1'`) ```python -# Wait for any UserEvent -event = await bus.expect('UserEvent', timeout=30) +# Default call is non-blocking history lookup (past=True, future=False) +event = await bus.find(ResponseEvent) -# Wait with custom filter -event = await bus.expect( - 'UserEvent', - predicate=lambda e: e.user_id == 'specific_user' -) +# Find child of a specific parent event +child = await bus.find(ChildEvent, child_of=parent_event, future=5) + +# Wait only for future events (ignore history) +event = await bus.find(ResponseEvent, past=False, future=5) + +# Search recent history + optionally wait +event = await bus.find(ResponseEvent, past=5, future=5) + +# Filter by event metadata +completed = await bus.find(ResponseEvent, event_status='completed') + +# Wildcard match across all event types +any_completed = await bus.find('*', event_status='completed', past=True, future=False) +``` + +##### `event_is_child_of(event: BaseEvent, ancestor: BaseEvent) -> bool` + +Check if event is a descendant of ancestor (child, grandchild, etc.). 
+ +```python +if bus.event_is_child_of(child_event, parent_event): + print("child_event is a descendant of parent_event") +``` + +##### `event_is_parent_of(event: BaseEvent, descendant: BaseEvent) -> bool` + +Check if event is an ancestor of descendant (parent, grandparent, etc.). + +```python +if bus.event_is_parent_of(parent_event, child_event): + print("parent_event is an ancestor of child_event") ``` ##### `wait_until_idle(timeout: float | None=None)` @@ -591,7 +916,11 @@ await bus.stop(clear=True) # Stop and clear all event history and handlers to --- -### `BaseEvent` +
+ +
+Review event fields, runtime state, and result helper methods. + Base class for all events. Subclass `BaseEvent` to define your own events. @@ -603,75 +932,86 @@ Make sure none of your own event data fields start with `event_` or `model_` to T_EventResultType = TypeVar('T_EventResultType', bound=Any, default=None) class BaseEvent(BaseModel, Generic[T_EventResultType]): - # Framework-managed fields - event_type: str # Defaults to class name + # special config fields event_id: str # Unique UUID7 identifier, auto-generated if not provided - event_timeout: float = 60.0 # Maximum execution in seconds for each handler - event_schema: str # Module.Class@version (auto-set based on class & LIBRARY_VERSION env var) - event_parent_id: str # Parent event ID (auto-set) - event_path: list[str] # List of bus names traversed (auto-set) - event_created_at: datetime # When event was created, auto-generated - event_results: dict[str, EventResult] # Handler results - event_result_type: type[T_EventResultType] | None # Auto-detected from Generic[T] parameter + event_type: str # Defaults to class name e.g. 
'BaseEvent' + event_result_type: Any | None # Pydantic model/python type to validate handler return values, defaults to T_EventResultType + event_version: str # Defaults to '0.0.1' (override per class/instance for event payload versioning) + event_timeout: float | None = None # Event timeout in seconds (bus default resolved at processing time if None) + event_handler_timeout: float | None = None # Optional per-event handler timeout cap in seconds + event_handler_slow_timeout: float | None = None # Optional per-event slow-handler warning threshold + event_handler_concurrency: Literal['serial', 'parallel'] | None = None # optional per-event handler scheduling override (None -> bus default at processing time) + event_handler_completion: Literal['all', 'first'] | None = None # optional per-event completion override (None -> bus default at processing time) + + # runtime state fields + event_status: Literal['pending', 'started', 'completed'] # event processing status (auto-set) + event_created_at: str # Canonical ISO timestamp with 9 fractional digits (auto-set) + event_started_at: str | None # Set when first handler starts + event_completed_at: str | None # Set when event processing completes + event_parent_id: str | None # Parent event ID that led to this event during handling (auto-set) + event_path: list[str] # List of bus labels traversed, e.g. BusName#ab12 (auto-set) + event_results: dict[str, EventResult] # Handler results {: EventResult} (auto-set) + event_children: list[BaseEvent] # getter property to list any child events emitted during handling + event_bus: EventBus # getter property to get the bus the event was emitted on - # Data fields - # ... subclass BaseEvent to add your own event data fields here ... + # payload fields + # ... subclass BaseEvent to add your own event payload fields here ... # some_key: str # some_other_key: dict[str, int] # ... 
+ # (they should not start with event_* to avoid conflict with special built-in fields) ``` -`event.event_results` contains a dict of pending `EventResult` objects that will be completed once handlers finish executing. - - -#### `BaseEvent` Properties - -- `event_status`: `Literal['pending', 'started', 'complete']` Event status -- `event_started_at`: `datetime` When first handler started processing -- `event_completed_at`: `datetime` When all handlers completed processing -- `event_children`: `list[BaseEvent]` Get any child events emitted during handling of this event -- `event_bus`: `EventBus` Shortcut to get the bus currently processing this event -- `event_result_type`: `type[Any] | None` Expected handler return type (auto-detected from `BaseEvent[T]` generic parameter) - #### `BaseEvent` Methods ##### `await event` -Await the `Event` object directly to get the completed `Event` object once all handlers have finished executing. +Immediate-await path for the `Event` object. + +- Outside a handler: waits for normal completion and returns the completed event. +- Inside a handler: queue-jumps this child event so it can run immediately, then returns the completed event. ```python -event = bus.dispatch(MyEvent()) +event = bus.emit(MyEvent()) completed_event = await event raw_result_values = [(await event_result) for event_result in completed_event.event_results.values()] # equivalent to: completed_event.event_results_list() (see below) ``` -##### `event_result(timeout: float | None=None, include: EventResultFilter=None, raise_if_any: bool=True, raise_if_none: bool=True) -> Any` - -Utility method helper to execute all the handlers and return the first handler's raw result value. +##### `event_completed() -> Self` -**Parameters:** +Queue-order await path for an event. 
-- `timeout`: Maximum time to wait for handlers to complete (None = use default event timeout) -- `include`: Filter function to include only specific results (default: only non-None, non-exception results) -- `raise_if_any`: If `True`, raise exception if any handler raises any `Exception` (`default: True`) -- `raise_if_none`: If `True`, raise exception if results are empty / all results are `None` or `Exception` (`default: True`) +- Never queue-jumps. +- Waits until the event is completed by normal runloop queue order. ```python -# by default it returns the first successful non-None result value -result = await event.event_result() +event = bus.emit(MyEvent()) +completed_event = await event.event_completed() +``` -# Get result from first handler that returns a string -valid_result = await event.event_result(include=lambda r: isinstance(r.result, str) and len(r.result) > 100) +##### `first(timeout: float | None=None, *, raise_if_any: bool=False, raise_if_none: bool=False) -> Any` -# Get result but don't raise exceptions or error for 0 results, just return None -result_or_none = await event.event_result(raise_if_any=False, raise_if_none=False) +Set `event_handler_completion='first'`, wait for completion, and return the first successful non-`None` handler result. + +```python +event = bus.emit(MyEvent()) +value = await event.first() ``` -##### `event_results_by_handler_id(timeout: float | None=None, include: EventResultFilter=None, raise_if_any: bool=True, raise_if_none: bool=True) -> dict` +##### `reset() -> Self` + +Return a fresh event copy with runtime processing state reset back to pending. + +- Intended for re-emitting an already-seen event as a fresh event (for example after crossing a bridge boundary). +- The original event object is not mutated, it returns a new copy with some fields reset. 
+- A new UUIDv7 `event_id` is generated for the returned copy (to allow it to process as a separate event it needs a new unique uuid) +- Runtime completion state is cleared (`event_results`, completion signal/flags, processed timestamp, emit context). -Utility method helper to get all raw result values organized by `{handler_id: result_value}`. +##### `event_result(timeout: float | None=None, include: EventResultFilter=None, raise_if_any: bool=True, raise_if_none: bool=True) -> Any` + +Utility method helper to execute all the handlers and return the first handler's raw result value. **Parameters:** @@ -681,15 +1021,14 @@ Utility method helper to get all raw result values organized by `{handler_id: re - `raise_if_none`: If `True`, raise exception if results are empty / all results are `None` or `Exception` (`default: True`) ```python -# by default it returns all successful non-None result values -results = await event.event_results_by_handler_id() -# {'handler_id_1': result1, 'handler_id_2': result2} +# by default it returns the first successful non-None result value +result = await event.event_result() -# Only include results from handlers that returned integers -int_results = await event.event_results_by_handler_id(include=lambda r: isinstance(r.result, int)) +# Get result from first handler that returns a string +valid_result = await event.event_result(include=lambda r: isinstance(r.result, str) and len(r.result) > 100) -# Get all results including errors and None values -all_results = await event.event_results_by_handler_id(raise_if_any=False, raise_if_none=False) +# Get result but don't raise exceptions or error for 0 results, just return None +result_or_none = await event.event_result(raise_if_any=False, raise_if_none=False) ``` ##### `event_results_list(timeout: float | None=None, include: EventResultFilter=None, raise_if_any: bool=True, raise_if_none: bool=True) -> list[Any]` @@ -715,52 +1054,7 @@ filtered_results = await 
event.event_results_list(include=lambda r: isinstance(r all_results = await event.event_results_list(raise_if_any=False, raise_if_none=False) ``` -##### `event_results_flat_dict(timeout: float | None=None, include: EventResultFilter=None, raise_if_any: bool=True, raise_if_none: bool=False, raise_if_conflicts: bool=True) -> dict` - -Utility method helper to merge all raw result values that are `dict`s into a single flat `dict`. - -**Parameters:** - -- `timeout`: Maximum time to wait for handlers to complete (None = use default event timeout) -- `include`: Filter function to include only specific results (default: only non-None, non-exception results) -- `raise_if_any`: If `True`, raise exception if any handler raises any `Exception` (`default: True`) -- `raise_if_none`: If `True`, raise exception if results are empty / all results are `None` or `Exception` (`default: False`) -- `raise_if_conflicts`: If `True`, raise exception if dict keys conflict between handlers (`default: True`) - -```python -# by default it merges all successful dict results -results = await event.event_results_flat_dict() -# {'key1': 'value1', 'key2': 'value2'} - -# Merge only dicts with specific keys -config_dicts = await event.event_results_flat_dict(include=lambda r: isinstance(r.result, dict) and 'config' in r.result) - -# Allow conflicts, last handler wins -merged = await event.event_results_flat_dict(raise_if_conflicts=False) -``` - -##### `event_results_flat_list(timeout: float | None=None, include: EventResultFilter=None, raise_if_any: bool=True, raise_if_none: bool=True) -> list` - -Utility method helper to merge all raw result values that are `list`s into a single flat `list`. 
- -**Parameters:** - -- `timeout`: Maximum time to wait for handlers to complete (None = use default event timeout) -- `include`: Filter function to include only specific results (default: only non-None, non-exception results) -- `raise_if_any`: If `True`, raise exception if any handler raises any `Exception` (`default: True`) -- `raise_if_none`: If `True`, raise exception if results are empty / all results are `None` or `Exception` (`default: True`) - -```python -# by default it merges all successful list results -results = await event.event_results_flat_list() -# ['item1', 'item2', 'item3'] - -# Merge only lists with more than 2 items -long_lists = await event.event_results_flat_list(include=lambda r: isinstance(r.result, list) and len(r.result) > 2) - -# Get all list results without raising on errors -all_items = await event.event_results_flat_list(raise_if_any=False, raise_if_none=False) -``` +`event_results_list()` is the canonical collection helper for multiple handler return values. ##### `event_bus` (property) @@ -770,22 +1064,25 @@ Shortcut to get the `EventBus` that is currently processing this event. Can be u bus = EventBus() async def some_handler(event: MyEvent): - # You can always dispatch directly to any bus you have a reference to - child_event = bus.dispatch(ChildEvent()) + # You can always emit directly to any bus you have a reference to + child_event = bus.emit(ChildEvent()) # OR use the event.event_bus shortcut to get the current bus: - child_event = await event.event_bus.dispatch(ChildEvent()) + child_event = await event.event_bus.emit(ChildEvent()) ``` - --- -### `EventResult` +
+ +
+Review per-handler status, timing, outputs, and captured errors. + The placeholder object that represents the pending result from a single handler executing an event. `Event.event_results` contains a `dict[PythonIdStr, EventResult]` in the shape of `{handler_id: EventResult()}`. -You shouldn't need to ever directly use this class, it's an internal wrapper to track pending and completed results from each handler within `BaseEvent.event_results`. +You generally won't interact with this class directlyβ€”the bus instantiates and updates it for youβ€”but its API is documented here for advanced integrations and custom emit loops. #### `EventResult` Fields @@ -799,12 +1096,12 @@ class EventResult(BaseModel): status: str # 'pending', 'started', 'completed', 'error' result: Any # Handler return value - error: str | None # Error message if failed + error: BaseException | None # Captured exception if the handler failed - started_at: datetime # When handler started - completed_at: datetime # When handler completed - timeout: float # Handler timeout in seconds - child_events: list[BaseEvent] # list of child events emitted during handler execution + started_at: str | None # Canonical ISO timestamp when handler started + completed_at: str | None # Canonical ISO timestamp when handler completed + timeout: float | None # Handler timeout in seconds + event_children: list[BaseEvent] # child events emitted during handler execution ``` #### `EventResult` Methods @@ -818,32 +1115,80 @@ handler_result = event.event_results['handler_id'] value = await handler_result # Returns result or raises an exception if handler hits an error ``` +- `run_handler(event, handler, *, eventbus, timeout, enter_handler_context, exit_handler_context, format_exception_for_log)` + Low-level helper that runs the handler, updates timing/status fields, captures errors, and notifies its completion signal. 
`EventBus._run_handler()` (private/internal) delegates to this; you generally should not call either directly unless you are extending internals. + +
+ +
+Review handler metadata, registration fields, and serialization helpers. + + +Serializable metadata wrapper around a registered handler callable. + +You usually get an `EventHandler` back from `bus.on(...)`, can pass it to `bus.off(...)`, and may see it in middleware hooks like `on_bus_handlers_change(...)`. + +#### `EventHandler` Fields + +```python +class EventHandler(BaseModel): + id: str # Stable handler identifier + handler_name: str # Callable name + handler_file_path: str | None # Source file path (if known) + handler_timeout: float | None # Optional per-handler timeout override + handler_slow_timeout: float | None # Optional "slow handler" threshold + handler_registered_at: str # Registration timestamp (ISO string, 9 fractional digits) + event_pattern: str # Registered event pattern (type name or '*') + eventbus_name: str # Owning EventBus name + eventbus_id: str # Owning EventBus ID +``` + +The raw callable is stored on `handler`, but is excluded from JSON serialization (`model_dump(mode='json', exclude={'handler'})`). + +#### `EventHandler` Properties and Methods + +- `label` (property): Short display label like `my_handler#abcd`. +- `model_dump(mode='json', exclude={'handler'}) -> dict[str, Any]`: JSON-compatible metadata dict (callable excluded). +- `from_json_dict(data, handler=None) -> EventHandler`: Rebuilds metadata; optional callable reattachment. +- `from_callable(...) -> EventHandler`: Build a new handler entry from a callable plus bus/pattern metadata. + --- +
+
 ## 🧡 Advanced Concurrency Control
 
+### `EventBus`, `BaseEvent`, and `EventHandler` concurrency config fields
+
+These options can be set as bus-level defaults, event-level options, or as handler-specific options.
+They control the concurrency of how events are processed within a bus, across all buses, and how handlers execute within a single event.
+
+- `event_concurrency`: `'global-serial' | 'bus-serial' | 'parallel'` controls event-level scheduling (`None` on events defers to bus default)
+- `event_handler_concurrency`: `'serial' | 'parallel'` should handlers on a single event run in parallel or in sequential order
+- `event_handler_completion`: `'all' | 'first'` should all handlers run, or should we stop handler execution once any handler returns a non-`None` value
+
 ### `@retry` Decorator
 
-The `@retry` decorator provides automatic retry functionality with built-in concurrency control for any function, including event handlers. This is particularly useful when handlers interact with external services that may temporarily fail.
+The `@retry` decorator provides automatic retry functionality with built-in concurrency control for any function, including event handlers. This is particularly useful when handlers interact with external services that may temporarily fail. It can be used completely independently from the rest of the library, it does not require a bus and can be used more generally to control concurrency/timeouts/retries of any Python function. 
```python from bubus import EventBus, BaseEvent -from bubus.helpers import retry +from bubus.retry import retry bus = EventBus() -class FetchDataEvent(BaseEvent): +class FetchDataEvent(BaseEvent[dict[str, Any]]): url: str @retry( - wait=2, # Wait 2 seconds between retries - retries=3, # Retry up to 3 times after initial failure + retry_after=2, # Wait 2 seconds between retries + max_attempts=3, # Total attempts including initial call timeout=5, # Each attempt times out after 5 seconds semaphore_limit=5, # Max 5 concurrent executions - backoff_factor=1.5, # Exponential backoff: 2s, 3s, 4.5s - retry_on=(TimeoutError, ConnectionError) # Only retry on specific exceptions + retry_backoff_factor=1.5, # Exponential backoff: 2s, 3s, 4.5s + retry_on_errors=[TimeoutError, ConnectionError], # Only retry on specific exceptions ) -async def fetch_with_retry(event: FetchDataEvent): +async def fetch_with_retry(event: FetchDataEvent) -> dict[str, Any]: # This handler will automatically retry on network failures async with aiohttp.ClientSession() as session: async with session.get(event.url) as response: @@ -854,16 +1199,16 @@ bus.on(FetchDataEvent, fetch_with_retry) #### Retry Parameters -- **`timeout`**: Maximum amount of time function is allowed to take per attempt, in seconds (default: 5) -- **`retries`**: Number of additional retry attempts if function raises an exception (default: 3) -- **`retry_on`**: Tuple of exception types to retry on (default: `None` = retry on any `Exception`) -- **`wait`**: Base seconds to wait between retries (default: 3) -- **`backoff_factor`**: Multiplier for wait time after each retry (default: 1.0) +- **`timeout`**: Maximum amount of time function is allowed to take per attempt, in seconds (`None` = unbounded, default: `None`) +- **`max_attempts`**: Total attempts including the first attempt (minimum effective value: `1`, default: `1`) +- **`retry_on_errors`**: List of exception classes or compiled regex matchers. 
Regexes are matched against `f"{err.__class__.__name__}: {err}"` (default: `None` = retry on any `Exception`) +- **`retry_after`**: Base seconds to wait between retries (default: 0) +- **`retry_backoff_factor`**: Multiplier for wait time after each retry (default: 1.0) - **`semaphore_limit`**: Maximum number of concurrent calls that can run at the same time -- **`semaphore_scope`**: Scope for the semaphore: `class`, `self`, `global`, or `multiprocess` -- **`semaphore_timeout`**: Maximum time to wait for a semaphore slot before proceeding or failing +- **`semaphore_scope`**: Scope for the semaphore: `class`, `instance`, `global`, or `multiprocess` +- **`semaphore_timeout`**: Maximum time to wait for a semaphore slot before proceeding or failing. If omitted: `timeout * max(1, semaphore_limit - 1)` when `timeout` is set, otherwise wait forever - **`semaphore_lax`**: Continue anyway if semaphore fails to be acquired in within the given time -- **`semaphore_name`**: Unique semaphore name to allow sharing a semaphore between functions +- **`semaphore_name`**: Unique semaphore name (string) or callable getter that receives function args and returns a name #### Semaphore Options @@ -881,7 +1226,7 @@ class MyService: # Per-instance semaphore - each instance gets its own limit class MyService: - @retry(semaphore_limit=1, semaphore_scope='self') + @retry(semaphore_limit=1, semaphore_scope='instance') async def instance_limited_handler(self, event): ... 
# Cross-process semaphore - all processes share one limit @@ -902,15 +1247,15 @@ class DatabaseEvent(BaseEvent): class DatabaseService: @retry( - wait=1, - retries=5, + retry_after=1, + max_attempts=5, timeout=10, semaphore_limit=10, # Max 10 concurrent DB operations semaphore_scope='class', # Shared across all instances semaphore_timeout=30, # Wait up to 30s for semaphore semaphore_lax=False, # Fail if can't acquire semaphore - backoff_factor=2.0, # Exponential backoff: 1s, 2s, 4s, 8s, 16s - retry_on=(ConnectionError, TimeoutError) + retry_backoff_factor=2.0, # Exponential backoff: 1s, 2s, 4s, 8s, 16s + retry_on_errors=[ConnectionError, TimeoutError], ) async def execute_query(self, event: DatabaseEvent): # Automatically retries on connection failures @@ -925,6 +1270,22 @@ bus.on(DatabaseEvent, db_service.execute_query)
+--- + +
+
+## 🏃 Performance (Python)
+
+```bash
+uv run tests/performance_runtime.py # run the performance test suite in python
+```
+
+| Runtime | 1 bus x 50k events x 1 handler | 500 buses x 100 events x 1 handler | 1 bus x 1 event x 50k parallel handlers | 1 bus x 50k events x 50k one-off handlers | Worst case (N buses x N events x N handlers) |
+| ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
+| Python | `0.179ms/event`, `0.235kb/event` | `0.191ms/event`, `0.191kb/event` | `0.035ms/handler`, `8.164kb/handler` | `0.255ms/event`, `0.185kb/event` | `0.351ms/event`, `5.867kb/event` |
+
+ --- --- @@ -932,10 +1293,10 @@ bus.on(DatabaseEvent, db_service.execute_query) ## πŸ‘Ύ Development -Set up the development environment using `uv`: +Set up the python development environment using `uv`: ```bash -git clone https://github.com/browser-use/bubus && cd bubus +git clone https://github.com/pirate/bbus && cd bbus # Create virtual environment with Python 3.12 uv venv --python 3.12 @@ -949,6 +1310,13 @@ source .venv/bin/activate # On Unix/macOS uv sync --dev --all-extras ``` +Recommended once per clone: + +```bash +prek install # install pre-commit hooks +prek run --all-files # run pre-commit hooks on all files manually +``` + ```bash # Run linter & type checker uv run ruff check --fix @@ -960,12 +1328,21 @@ uv run pytest -vxs --full-trace tests/ # Run specific test file uv run pytest tests/test_eventbus.py + +# Run Python perf suite +uv run tests/performance_runtime.py + +# Run the entire lint+test+examples+perf suite for both python and ts +./test.sh ``` +> For Bubus-TS development see the `bubus-ts/README.md` `# Development` section. + ## πŸ”— Inspiration - https://www.cosmicpython.com/book/chapter_08_events_and_message_bus.html#message_bus_diagram ⭐️ - https://developer.mozilla.org/en-US/docs/Web/API/EventTarget ⭐️ +- https://github.com/sindresorhus/emittery ⭐️ (equivalent for JS), https://github.com/EventEmitter2/EventEmitter2, https://github.com/vitaly-t/sub-events - https://github.com/pytest-dev/pluggy ⭐️ - https://github.com/teamhide/fastapi-event ⭐️ - https://github.com/ethereum/lahja ⭐️ @@ -985,11 +1362,11 @@ uv run pytest tests/test_eventbus.py --- -> [🧠 DeepWiki Docs](https://deepwiki.com/browser-use/bubus) +> [🧠 DeepWiki Docs](https://deepwiki.com/pirate/bbus) > imageimage ## πŸ›οΈ License -This project is licensed under the MIT License. For more information, see the main browser-use repository: https://github.com/browser-use/browser-use +This project is licensed under the MIT License. 
diff --git a/TODO_event_history_stores.md b/TODO_event_history_stores.md new file mode 100644 index 0000000..3bdf519 --- /dev/null +++ b/TODO_event_history_stores.md @@ -0,0 +1,468 @@ +# TODO: Distributed EventHistory Stores (PostgreSQL + Redis + SQLite + NATS + Kafka) + +This plan defines a swappable `EventHistory` backend that owns both: +- event history state +- queue/claim/lock state + +Goal: allow multiple nodes/processes to collaborate on the same bus workload without conflicting claims. + +This document is implementation planning only. + +## Scope + +- Add a backend interface that supports in-memory, PostgreSQL, Redis, SQLite, NATS, and Kafka. +- Move queue claim/lease/lock state into `EventHistory`. +- Use one unified runtime model across all backends (no optional alternate mode). +- Preserve current runtime execution shape: + - EventBus level wrappers (`withEventLock` / `with_event_lock`) + - Event level processing wrappers + - EventResult level wrappers (`withHandlerLock` / `with_handler_lock`, timeout, slow monitor, error handling) +- Keep naming aligned across languages: + - TS methods in camelCase + - Python methods in snake_case + - same terminology and method intent in both runtimes + +## Non-goals + +- No middleware/backend plugin porting in this phase. +- No docs/examples migration in this phase beyond internal TODO/spec docs. +- No compatibility shims for old lock/history internals. + +## Hard Requirements + +1. Multi-node safety: +- Two workers must never execute the same event claim concurrently. +- Two workers must never execute the same handler claim concurrently. + +2. Crash safety: +- Stale claims must expire and become reclaimable. +- Stale workers must be unable to write final state after losing lease. + +3. Determinism: +- Claim semantics must be deterministic under contention. +- Tests must enforce no flaky behavior. + +4. Scale: +- Support very high churn (millions of bus instances over days). 
+- Avoid per-bus schema/key explosions where possible. + +## Architecture Summary + +Canonical state moves into `EventHistory` backend: +- event records +- handler result records +- runnable queue ordering +- claim leases +- lock leases (event-level and handler-level resource locks) + +Canonical entity names across backends: +- `event_buses` +- `event_handlers` +- `events` +- `event_results` +- `locks` + +`event_handlers` payload should use the same schema as `EventHandler` JSON in each runtime (`to_json_dict` / `toJSON`), with distributed-runtime lease fields stored alongside it. + +`LockManager` remains as runtime orchestration API surface, but delegates claim/lease operations to `EventHistory`. + +`EventBus` retains ownership of bus-local concerns: +- find waiters/futures +- middleware hooks +- process orchestration and nested wrappers + +For NATS/Kafka backends, this same model is represented as native broker messages/streams. +There is no separate runtime mode; broker-native mapping is part of the core design. 
+ +## Unified Store Interface (Conceptual) + +### Existing history operations + +Python: +- `add_event(event)` +- `get_event(event_id)` +- `remove_event(event_id)` +- `has_event(event_id)` +- `find(...)` +- `trim_event_history(...)` +- `cleanup_excess_events(...)` + +TypeScript: +- `addEvent(event)` +- `getEvent(event_id)` +- `removeEvent(event_id)` +- `hasEvent(event_id)` +- `find(...)` +- `trimEventHistory(...)` +- `cleanupExcessEvents(...)` + +### New distributed claim/lock operations + +Python: +- `claim_next_event(...)` +- `renew_event_claim(...)` +- `release_event_claim(...)` +- `claim_next_handler_result(...)` +- `renew_handler_claim(...)` +- `release_handler_claim(...)` +- `complete_handler_result(...)` +- `complete_event_if_done(...)` +- `claim_lock(resource_key, ...)` +- `renew_lock(resource_key, ...)` +- `release_lock(resource_key, ...)` + +TypeScript: +- `claimNextEvent(...)` +- `renewEventClaim(...)` +- `releaseEventClaim(...)` +- `claimNextHandlerResult(...)` +- `renewHandlerClaim(...)` +- `releaseHandlerClaim(...)` +- `completeHandlerResult(...)` +- `completeEventIfDone(...)` +- `claimLock(resourceKey, ...)` +- `renewLock(resourceKey, ...)` +- `releaseLock(resourceKey, ...)` + +### Claim model + +All claims/locks must use: +- owner id (`worker_id`) +- opaque claim token (`uuid`) +- lease expiry timestamp +- fencing token (monotonic integer) for stale-writer protection on lock resources + +## Resource Keys + +Standard lock resources: +- `event:global` +- `event:bus:{bus_id}` +- `handler:event:{event_id}` + +Lock policy: +- event concurrency mode resolves which event resource key is used. +- handler serial mode uses `handler:event:{event_id}`. +- handler parallel mode skips handler lock resource. + +## Lifecycle Semantics + +1. Event enqueue: +- Insert event into backend history. +- Mark runnable queue position. + +2. Event claim: +- Atomically claim next eligible event. +- Acquire/validate event-level lock resource claim. 
+- Return event claim token + lease metadata. + +3. Handler claim: +- Atomically claim next eligible handler result for that event. +- Acquire/validate handler lock claim if serial. +- Return handler claim token + lease metadata. + +4. Execution: +- Existing nested closure/context-manager runtime remains. +- Heartbeat renews event/handler claims while executing. + +5. Completion: +- Complete handler result with token/fence checks. +- Release handler claim and lock claim. +- Complete event when all handlers done. +- Release event claim and lock claim. + +## Broker Mapping (Required for NATS and Kafka Backends) + +EventBus dispatch must publish normal broker events that non-bubus consumers can consume directly. + +Public event lane (domain events): +- topic/subject pattern: `events.{event_type}` (or single stream + `event_type` header) +- payload: normal event JSON +- headers: `event_id`, `bus_id`, `event_type`, `event_parent_id`, tracing/correlation ids + +Bubus control lane (runtime coordination): +- `bubus.event_buses` +- `bubus.event_handlers` +- `bubus.events` +- `bubus.event_results` +- `bubus.locks` + +Rules: +- Public lane messages are first-class outputs of `bus.emit(...)`. +- Bubus workers consume both public + control lanes as needed. +- External consumers can consume only public lane and still see standard event traffic. +- Handler callable code stays local to each machine; only handler metadata/identity is distributed. + +## Backend-Specific Plan + +## PostgreSQL Backend + +Model: +- Shared tables keyed by `bus_id`, not per-bus tables. +- Use short transactions and lease-based claims. + +Core primitives: +- `UPDATE ... WHERE ... RETURNING` claim transitions. +- `INSERT ... ON CONFLICT ... WHERE expired RETURNING` for lock claims. +- Optional `FOR UPDATE SKIP LOCKED` for candidate selection. 
+ +Tables: +- `event_buses` +- `event_handlers` +- `events` +- `event_results` +- `locks` + +Indexes: +- events: `(bus_id, status, queue_seq)`, `(claim_expires_at)` +- handler results: `(event_id, status)`, `(claim_expires_at)` +- locks: `(resource_key)`, `(expires_at)` + +Pros: +- Strong transactional semantics. +- Good for durable multi-node coordination. + +Risks: +- Hot index contention under very high parallelism. +- Requires careful batching/trim policy. + +## Redis Backend + +Model: +- Lease claims and lock ownership via Lua scripts. +- Prefer bounded key cardinality per bus. + +Data layout (recommended), equivalent to SQL table names: +- `event_buses` namespace: + - `eh:event_buses:data` (hash `bus_id -> EventBus JSON`) +- `event_handlers` namespace: + - `eh:event_handlers:data` (hash `handler_id -> EventHandler JSON + lease metadata`) + - `eh:event_handlers:by_bus:{bus_id}` (set/zset of handler ids) +- `events` namespace: + - `eh:events:data` (hash `event_id -> Event JSON + claim metadata`) + - `eh:events:queue:{bus_id}` (zset queue order) +- `event_results` namespace: + - `eh:event_results:data` (hash `result_id -> EventResult JSON + claim metadata`) + - `eh:event_results:by_event:{event_id}` (set/zset of result ids) +- `locks` namespace: + - `eh:locks:data` (hash `resource_key -> owner/token/fence/expires`) + - `eh:locks:fence:{resource_key}` (counter key) + +Core primitives: +- `EVAL` Lua for atomic claim/release/renew. +- `SET NX PX` style lease acquisition wrapped in scripts. +- compare token before release/complete writes. + +Pros: +- High throughput, low-latency claims. +- Good for bursty worker pools. + +Risks: +- Key explosion if naive key-per-event forever. +- Needs strict TTL/cleanup discipline. + +## SQLite Backend + +Model: +- Lease + CAS updates only (no `SELECT FOR UPDATE` assumptions). +- Shared DB with WAL mode and short `BEGIN IMMEDIATE` transactions. + +Core primitives: +- CTE candidate selection + guarded `UPDATE ... RETURNING` claim. 
+- lock claim via UPSERT guarded by expiry timestamp.
+
+PRAGMAs:
+- `journal_mode=WAL`
+- `busy_timeout` configured
+- `synchronous=NORMAL` (or stricter if needed)
+
+Pros:
+- Simple deployment.
+- Works for local/single-host multi-process collaboration.
+
+Risks:
+- Write contention with many workers.
+- Not ideal for geographically distributed nodes.
+
+## NATS Backend
+
+Model:
+- JetStream streams/consumers are authoritative transport for both public and control lanes.
+- Handler registrations and claims are broker-native messages; local callable resolution stays in-process.
+
+Subjects/streams:
+- Public: `events.*`
+- Control:
+  - `bubus.event_buses.*`
+  - `bubus.event_handlers.*`
+  - `bubus.events.*`
+  - `bubus.event_results.*`
+  - `bubus.locks.*`
+
+Core primitives:
+- durable consumers for work distribution
+- idempotent claim/complete events keyed by `(event_id, handler_id, attempt)` or equivalent token
+- lock claim/renew/release via lock subjects with fencing semantics
+
+Pros:
+- Native event-first integration for existing NATS consumers.
+- Low latency fanout and delivery.
+
+Risks:
+- Exactly-once semantics not guaranteed; idempotency required.
+- Requires strict ordering and dedupe strategy in control lane consumers.
+
+## Kafka Backend
+
+Model:
+- Topics are authoritative transport for both public and control lanes.
+- Compacted control topics hold latest state snapshots while event topics preserve append log.
+
+Topics:
+- Public: `events.{event_type}` (or shared `events` topic keyed by `event_type`)
+- Control:
+  - `bubus.event_buses`
+  - `bubus.event_handlers`
+  - `bubus.events`
+  - `bubus.event_results`
+  - `bubus.locks`
+
+Core primitives:
+- consumer groups for distributed processing
+- compacted topics for registration/lock latest-state materialization
+- idempotent producer + transactional writes where available
+
+Pros:
+- Native event-first integration for existing Kafka consumers.
+- Strong ecosystem tooling for replay and audit.
+ +Risks: +- Partitioning strategy can create hot keys. +- Requires materialized control-state consumers for efficient claim decisions. + +## Scale Strategy (Millions of Buses Over Days) + +1. Shared storage namespaces only: +- No per-bus table creation. +- Bus id as indexed column/key prefix. + +2. Lifecycle cleanup: +- hard trim policies required (`max_history_size`/`max_history_drop`) +- backend-level TTL/archive for stopped buses + +3. Active set optimization: +- maintain small active-bus index for schedulers +- avoid scanning cold buses + +4. Batch operations: +- batched trim/delete and bounded cleanup loops +- avoid N+1 claim queries + +5. Cardinality controls: +- Redis: avoid permanent key-per-handler history where possible +- SQL: partition/compact on age if needed + +## Migration Plan + +## Phase 0: Contracts and invariants + +- Define shared claim/lock semantics and error taxonomy. +- Freeze wrapper nesting order and ownership boundaries. +- Add conformance tests for expected store behavior. + +## Phase 1: In-memory backend parity + +- Extend existing in-memory `EventHistory` to implement full claim interface. +- Rewire `LockManager` to delegate through store API. +- Keep behavior equivalent to current runtime. + +## Phase 2: Bus/event integration + +- Replace direct queue/in-flight mutation with store methods. +- Keep `find` waiter ownership on bus. +- Ensure event-level locks only acquired at bus layer, handler-level only at event/result layer. + +## Phase 3: PostgreSQL backend + +- Implement schema + migrations + claim SQL. +- Add lease renewal + fencing checks. +- Add deterministic contention tests across 2+ workers. + +## Phase 4: Redis backend + +- Implement Lua scripts for claim/release/renew/complete. +- Add stale-lease takeover tests. +- Add key-cardinality/load tests. + +## Phase 5: SQLite backend + +- Implement CAS claim SQL and lock UPSERT. +- Tune WAL and busy timeout defaults. +- Add local multi-process contention tests. 
+ +## Phase 6: NATS backend + +- Implement subject/topic mapping for public + control lanes. +- Implement deterministic claim/lock consumer logic. +- Validate interoperability with non-bubus NATS consumers on public lane. + +## Phase 7: Kafka backend + +- Implement topic mapping for public + control lanes. +- Implement compaction + idempotent producer strategy for control topics. +- Validate interoperability with non-bubus Kafka consumers on public lane. + +## Phase 8: Performance + reliability hardening + +- Stress tests at high event volume and worker count. +- Verify no flaky timing-dependent races. +- Ensure total suite time remains under project threshold. + +## Conformance Test Matrix (All Backends) + +1. Single-claim exclusivity: +- two workers racing for same event -> exactly one claim succeeds. + +2. Lease expiry takeover: +- worker A claims and stops renewing -> worker B claims after expiry. + +3. Stale writer rejection: +- worker A loses claim; completion write from A must fail. + +4. Handler serial lock correctness: +- only one handler runs at a time per event when serial. + +5. Handler parallel correctness: +- handler-level lock omitted in parallel mode. + +6. Event concurrency modes: +- `global-serial`, `bus-serial`, `parallel` all enforce intended lock resource semantics. + +7. Recovery: +- restart worker mid-event, ensure task is recoverable and completed once. + +8. Trim correctness: +- retention rules preserve expected history semantics and ordering. + +9. Broker interoperability: +- events emitted by bubus are consumable by normal non-bubus Kafka/NATS consumers. + +## Open Questions + +1. Cross-node `find(future=...)`: +- keep bus-local waiters only, or add distributed waiter mechanism? + +2. Lock lease defaults: +- choose default lease durations and heartbeat cadence per backend. + +3. Fairness policy: +- strict FIFO vs throughput-optimized batching under high contention. + +4. 
Durability policy: +- should transient pending/started snapshots be fully durable in all backends, or eventual for Redis mode? + +## Deliverables + +- Unified `EventHistory` claim/lock interface in Python + TS. +- In-memory + PostgreSQL + Redis + SQLite + NATS + Kafka implementations. +- Shared conformance test suite run against all backends. +- Deterministic stress/perf tests for multi-node collaborative processing. diff --git a/bubus-ts/.prettierignore b/bubus-ts/.prettierignore new file mode 100644 index 0000000..21e9636 --- /dev/null +++ b/bubus-ts/.prettierignore @@ -0,0 +1,6 @@ +dist/ +*.md +*.yaml +*.json +*.sh +.prettierignore diff --git a/bubus-ts/README.md b/bubus-ts/README.md new file mode 100644 index 0000000..b4eada0 --- /dev/null +++ b/bubus-ts/README.md @@ -0,0 +1,871 @@ +# `bubus`: πŸ“’ Production-ready multi-language event bus + +image + +[![DeepWiki: Python](https://img.shields.io/badge/DeepWiki-bbus%2FPython-yellow.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9
RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus) [![PyPI - Version](https://img.shields.io/pypi/v/bubus)](https://pypi.org/project/bubus/) [![GitHub License](https://img.shields.io/github/license/pirate/bbus)](https://github.com/pirate/bbus) ![GitHub last commit](https://img.shields.io/github/last-commit/pirate/bbus) + +[![DeepWiki: TS](https://img.shields.io/badge/DeepWiki-bbus%2FTypescript-blue.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus/3-typescript-implementation) [![NPM 
Version](https://img.shields.io/npm/v/bubus)](https://www.npmjs.com/package/bubus) + +Bubus is an in-memory event bus library for async Python and TS (node/bun/deno/browser). + +It's designed for quickly building resilient, predictable, complex event-driven apps. + +It "just works" with an intuitive, but powerful event JSON format + emit API that's consistent across both languages and scales consistently from one event to millions (~0.2ms/event): + +```python +bus.on(SomeEvent, some_function) +bus.emit(SomeEvent({some_data: 132})) +``` + +It's async native, has proper automatic nested event tracking, and powerful concurrency control options. The API is inspired by `EventEmitter` or [`emittery`](https://github.com/sindresorhus/emittery) in JS, but it takes it a step further: + +- nice Zod / Pydantic schemas for events that can be exchanged between both languages +- automatic UUIDv7s and monotonic nanosecond timestamps for ordering events globally +- built in locking options to force strict global FIFO processing or fully parallel processing + +--- + +♾️ It's inspired by the simplicity of async and events in `JS` but with baked-in features that allow to eliminate most of the tedious repetitive complexity in event-driven codebases: + +- correct timeout enforcement across multiple levels of events, if a parent times out it correctly aborts all child event processing +- ability to strongly type hint and enforce the return type of event handlers at compile-time +- ability to queue events on the bus, or inline await them for immediate execution like a normal function call +- handles ~5,000 events/sec/core in both languages, with ~2kb/event RAM consumed per event during active processing + +
+ +## πŸ”’ Quickstart + +```bash +npm install bubus +``` + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const CreateUserEvent = BaseEvent.extend('CreateUserEvent', { + email: z.string(), + event_result_type: z.object({ user_id: z.string() }), +}) + +const bus = new EventBus('MyAuthEventBus') + +bus.on(CreateUserEvent, async (event) => { + const user = await yourCreateUserLogic(event.email) + return { user_id: user.id } +}) + +const event = bus.emit(CreateUserEvent({ email: 'someuser@example.com' })) +await event.done() +console.log(event.event_result) // { user_id: 'some-user-uuid' } +``` + +
+ +--- + +
+ +## ✨ Features + +
+See the core TypeScript features and how they map to Python. + +The features offered in TS are broadly similar to the ones offered in the python library. + +- Typed events with Zod schemas (cross-compatible with Pydantic events from python library) +- FIFO event queueing with configurable concurrency +- Nested event support with automatic parent/child tracking +- Cross-bus forwarding with loop prevention +- Handler result tracking + validation + timeout enforcement +- History retention controls (`max_history_size`) for memory bounds +- Optional `@retry` decorator for easy management of per-handler retries, timeouts, and semaphore-limited execution + +See the [Python README](../README.md) for more details. + +
+ +
+ +--- + +
+ +## πŸ“š API Documentation + +
+Review bus construction, defaults, and core lifecycle methods. + +The main bus class that registers handlers, schedules events, and tracks results. + +Constructor: + +```ts +new EventBus(name?: string, options?: { + id?: string + max_history_size?: number | null + event_concurrency?: 'global-serial' | 'bus-serial' | 'parallel' | null + event_timeout?: number | null + event_slow_timeout?: number | null + event_handler_concurrency?: 'serial' | 'parallel' | null + event_handler_completion?: 'all' | 'first' + event_handler_slow_timeout?: number | null + event_handler_detect_file_paths?: boolean +}) +``` + +#### Constructor options + +| Option | Type | Default | Purpose | +| --------------------------------- | ------------------------------------------------------- | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | `string` | `uuidv7()` | Override bus UUID (mostly for serialization/tests). | +| `max_history_size` | `number \| null` | `100` | Max events kept in `event_history`; `null` = unbounded; `0` = keep only in-flight events and drop completed events immediately. | +| `max_history_drop` | `boolean` | `false` | If `true`, when history is full drop oldest history entries (including uncompleted if needed). If `false`, reject new emits when history reaches `max_history_size`. | +| `event_concurrency` | `'global-serial' \| 'bus-serial' \| 'parallel' \| null` | `'bus-serial'` | Event-level scheduling policy. | +| `event_handler_concurrency` | `'serial' \| 'parallel' \| null` | `'serial'` | Per-event handler scheduling policy. | +| `event_handler_completion` | `'all' \| 'first'` | `'all'` | Event completion mode if event does not override it. | +| `event_timeout` | `number \| null` | `60` | Default per-handler timeout budget in seconds (unless overridden). 
| +| `event_handler_slow_timeout` | `number \| null` | `30` | Slow handler warning threshold (seconds). | +| `event_slow_timeout` | `number \| null` | `300` | Slow event warning threshold (seconds). | +| `event_handler_detect_file_paths` | `boolean` | `true` | Capture source file:line for handlers (slower, better logs). | + +#### Runtime state properties + +- `id: string` +- `name: string` +- `label: string` (`${name}#${id.slice(-4)}`) +- `handlers: Map` +- `handlers_by_key: Map` +- `event_history: Map` +- `pending_event_queue: BaseEvent[]` +- `in_flight_event_ids: Set` +- `locks: LockManager` + +#### `on()` + +```ts +on( + event_pattern: string | '*' | EventClass, + handler: EventHandlerCallable, + options?: Partial +): EventHandler +``` + +Use during startup/composition to register handlers. + +Advanced `options` fields, these can be used to override defaults per-handler if needed: + +- `handler_timeout?: number | null` hard delay before handler execution is aborted with a `HandlerTimeoutError` +- `handler_slow_timeout?: number | null` delay before emitting a slow handler warning log line +- `handler_name?: string` optional name to use instead of `anonymous` if handler is an unnamed arrow function +- `handler_file_path?: string | null` optional path/to/source/file.js:lineno where the handler is defined, used for logging only +- `id?: string` unique UUID for the handler (normally a hash of bus_id + event_pattern + handler_name + handler_registered_at) + +Notes: + +- Prefer class/factory keys (`bus.on(MyEvent, handler)`) for typed payload/result inference. +- String and `'*'` matching are supported (`bus.on('MyEvent', ...)`, `bus.on('*', ...)`). +- Returns an `EventHandler` object you can later pass to `off()` to de-register the handler if needed. + +#### `off()` + +```ts +off( + event_pattern: EventPattern | '*', + handler?: EventHandlerCallable | string | EventHandler +): void +``` + +Use when tearing down subscriptions (tests, plugin unload, hot-reload). 
+ +- Omit `handler` to remove all handlers for `event_pattern`. +- Pass handler function reference to remove one by function identity. +- Pass handler id (`string`) or `EventHandler` object to remove by id. +- use `bus.off('*')` to remove _all_ registered handlers from the bus + +#### `emit()` + +```ts +emit(event: T): T +``` + +Behavior notes: + +- Per-event config fields stay on the event as provided; when unset (`null`/`undefined`), each bus resolves its own defaults at processing time. +- If same event ends up forwarded through multiple buses, it is loop-protected using `event_path`. +- Emit is synchronous and returns immediately with the same event object (`event.event_status` is initially `'pending'`). + +Normal lifecycle: + +1. Create event instance (`const event = MyEvent({...})`). +2. Emit (`const queued = bus.emit(event)`). +3. Await with `await queued.done()` (immediate/queue-jump semantics) or `await queued.eventCompleted()` (bus queue order). +4. Inspect `queued.event_results`, `queued.event_result`, `queued.event_errors`, etc. if you need to access handler return values + +#### `find()` + +```ts +find(event_pattern: EventPattern | '*', options?: FindOptions): Promise +find( + event_pattern: EventPattern | '*', + where: (event: T) => boolean, + options?: FindOptions +): Promise +``` + +Where: + +```ts +type FindOptions = { + past?: boolean | number // true to look through all past events, or number in seconds to filter time range + future?: boolean | number // true to wait for event to appear indefinitely, or number in seconds to wait for event to appear + child_of?: BaseEvent | null // filter to only match events that are a child_of: some_parent_event +} & { + // event_status: 'pending' | 'started' | 'completed' + // event_id: 'some-exact-event-uuid-here', + // event_started_at: string | null (exact iso datetime string or null) + // ... 
any event field can be passed to filter events using simple equality checks + [key: string]: unknown +} +``` + +`bus.find()` returns the first matching event (in emit timestamp order). +To find multiple matching events, iterate through `bus.event_history.filter((event) => ...some condition...)` manually. + +`where` behavior: +Any filter predicate function in the form of `(event) => true | false`, returning true to consider the event a match. + +```ts +const matching_event = bus.find(SomeEvent, (event) => event.some_field == 123) +// or to match all event types: +const matching_event = bus.find('*', (event) => event.some_field == 123) +``` + +`past` behavior: + +- `true`: search all history. +- `false`: skip searching past event history. +- `number`: search events emitted within last `N` seconds. + +`future` behavior: + +- `true`: wait forever for future match. +- `false`: do not wait. +- `number`: wait up to `N` seconds. + +Lifecycle use: + +- Use for idempotency / de-dupe before emit (`past: ...`). +- Use for synchronization/waiting (`future: ...`). +- Combine both to "check recent then wait". +- Add `child_of` to constrain by parent/ancestor event chain. +- Add any event field (e.g. `event_status`, `event_id`, `event_timeout`, `user_id`) to filter by strict equality. +- Use wildcard matching with predicates when you want to search all event types: `bus.find('*', (event) => ...)`. + +Debouncing expensive events with `find()`: + +```ts +const some_expensive_event = (await bus.find(ExpensiveEvent, { past: 15, future: 5 })) ?? bus.emit(ExpensiveEvent({})) +await some_expensive_event.done() +``` + +Important semantics: + +- Past lookup matches any emitted events, not just completed events. +- Past/future matches resolve as soon as event is emitted. If you need the completed event, await `event.done()` or pass `{event_status: 'completed'}` to filter only for completed events. +- If both `past` and `future` are omitted, defaults are `past: true, future: false`. 
+- If both `past` and `future` are `false`, it returns `null` immediately.
+- Detailed behavior matrix is covered in `bubus-ts/tests/eventbus_find.test.ts`.
+
+#### `waitUntilIdle(timeout?)`
+
+`await bus.waitUntilIdle()` is the normal "drain bus work" call to wait until bus is done processing everything queued.
+Pass an optional timeout in seconds (`await bus.waitUntilIdle(5)`) for a bounded wait.
+
+```ts
+bus.emit(OneEvent(...))
+bus.emit(TwoEvent(...))
+bus.emit(ThreeEvent(...))
+await bus.waitUntilIdle() // this resolves once all three events have finished processing
+await bus.waitUntilIdle(5) // wait up to 5 seconds, then continue even if work is still in-flight
+```
+
+#### Parent/child/event lookup helpers
+
+```ts
+eventIsChildOf(child_event: BaseEvent, parent_event: BaseEvent): boolean
+eventIsParentOf(parent_event: BaseEvent, child_event: BaseEvent): boolean
+findEventById(event_id: string): BaseEvent | null
+```
+
+#### `toString()` / `toJSON()` / `fromJSON()`
+
+```ts
+toString(): string
+toJSON(): EventBusJSON
+EventBus.fromJSON(data: unknown): EventBus
+```
+
+- `toString()` returns `BusName#abcd` style labels used in logs/errors.
+- `toJSON()` exports full bus state snapshot (config, handlers, indexes, event_history, pending queue, in-flight ids, find-waiter snapshots).
+- `fromJSON()` restores a new bus instance from that payload (handler functions are restored as no-op stubs).
+
+#### `logTree()`
+
+```ts
+logTree(): string
+```
+
+- `logTree()` returns a full event log hierarchy tree diagram for debugging.
+
+#### `destroy()`
+
+```ts
+destroy(): void
+```
+
+- `destroy()` clears handlers/history/locks and removes this bus from global weak registry.
+- `destroy()`/GC behavior is exercised in `bubus-ts/tests/eventbus.test.ts` and `bubus-ts/tests/eventbus_performance.test.ts`.
+ +
+Review event fields, runtime state, and helper methods. + +Base class + factory builder for typed event models. + +Define your own strongly typed events with `BaseEvent.extend('EventName', {...zod fields...})`: + +```ts +const MyEvent = BaseEvent.extend('MyEvent', { + some_key: z.string(), + some_other_key: z.number(), + // ... + // any other payload fields you want to include can go here + + // fields that start with event_* are reserved for metadata used by the library + event_result_type: z.string().optional(), + event_timeout: 60, + // ... +}) + +const pending_event = MyEvent({ some_key: 'abc', some_other_key: 234 }) +const queued_event = bus.emit(pending_event) +const completed_event = await queued_event.done() +``` + +API behavior and lifecycle examples: + +- `bubus-ts/examples/simple.ts` +- `bubus-ts/examples/immediate_event_processing.ts` +- `bubus-ts/examples/forwarding_between_busses.ts` +- `bubus-ts/tests/eventbus.test.ts` +- `bubus-ts/tests/eventbus_find.test.ts` +- `bubus-ts/tests/event_handler_first.test.ts` +- `bubus-ts/tests/base_event_event_bus_proxy.test.ts` +- `bubus-ts/tests/eventbus_timeout.test.ts` +- `bubus-ts/tests/event_result.test.ts` + +#### Event configuration fields + +Special configuration fields you can set on each event to control processing: + +- `event_result_type?: z.ZodTypeAny | String | Number | Boolean | Array | Object` +- `event_version?: string` (default: `'0.0.1'`; useful for your own schema/data migrations) +- `event_timeout?: number | null` +- `event_handler_timeout?: number | null` +- `event_handler_slow_timeout?: number | null` +- `event_concurrency?: 'global-serial' | 'bus-serial' | 'parallel' | null` +- `event_handler_concurrency?: 'serial' | 'parallel' | null` +- `event_handler_completion?: 'all' | 'first'` + +#### Runtime state fields + +- `event_id`, `event_type`, `event_version` +- `event_path: string[]` (bus labels like `BusName#ab12`) +- `event_parent_id: string | null` +- `event_emitted_by_handler_id: string | 
null` +- `event_status: 'pending' | 'started' | 'completed'` +- `event_results: Map` +- `event_pending_bus_count: number` +- `event_created_at: string` +- `event_started_at: string | null` +- `event_completed_at: string | null` + +#### Read-only attributes + +- `event_parent` -> `BaseEvent | undefined` +- `event_children` -> `BaseEvent[]` +- `event_descendants` -> `BaseEvent[]` +- `event_errors` -> `Error[]` +- `event_result` -> `EventResultType | undefined` + +#### `done()` + +```ts +done(): Promise +``` + +- If called from inside a running handler, it queue-jumps child processing immediately. +- If called outside handler context, it waits for normal completion (or processes immediately if already next). +- Rejects if event is not attached to a bus (`event has no bus attached`). +- Queue-jump behavior is demonstrated in `bubus-ts/examples/immediate_event_processing.ts` and `bubus-ts/tests/base_event_event_bus_proxy.test.ts`. + +#### `eventCompleted()` + +```ts +eventCompleted(): Promise +``` + +- Waits for completion in normal runloop order. +- Use inside handlers when you explicitly do not want queue-jump behavior. + +#### `first()` + +```ts +first(): Promise | undefined> +``` + +- Forces `event_handler_completion = 'first'` for this run. +- Returns temporally first non-`undefined` successful handler result. +- Cancels pending/running losing handlers on the same bus. +- Returns `undefined` when no handler produces a successful non-`undefined` value. +- Cancellation and winner-selection behavior is covered in `bubus-ts/tests/event_handler_first.test.ts`. 
+ +#### `eventResultsList(include?, options?)` + +```ts +eventResultsList( + include?: (result: EventResultType | undefined, event_result: EventResult) => boolean, + options?: { + timeout?: number | null + include?: (result: EventResultType | undefined, event_result: EventResult) => boolean + raise_if_any?: boolean + raise_if_none?: boolean + } +): Promise | undefined>> +``` + +- Returns handler result values in `event_results` order. +- Default filter includes completed non-`null`/non-`undefined` non-error, non-forwarded (`BaseEvent`) values. +- `raise_if_any` defaults to `true` and throws when any handler result has an error. +- `raise_if_none` defaults to `true` and throws when no results match `include`. +- `timeout` is in seconds and bounds how long to wait for completion. +- Examples: + - `await event.eventResultsList({ raise_if_any: false, raise_if_none: false })` + - `await event.eventResultsList((result) => typeof result === 'object', { raise_if_any: false })` + +#### `eventResultUpdate(handler, options?)` + +```ts +eventResultUpdate( + handler: EventHandler | EventHandlerCallable, + options?: { + eventbus?: EventBus + status?: 'pending' | 'started' | 'completed' | 'error' + result?: EventResultType | BaseEvent | undefined + error?: unknown + } +): EventResult +``` + +- Creates (if missing) or updates one `event_results` entry for the given handler id. +- Useful for deterministic seeding/rehydration paths before resuming normal dispatch. +- Example: + - `const seeded = event.eventResultUpdate(handler_entry, { eventbus: bus, status: 'pending' })` + - `seeded.update({ status: 'completed', result: 'seeded' })` + +#### `reset()` + +```ts +reset(): this +``` + +- Returns a fresh event copy with runtime state reset to pending so it can be emitted again safely. +- Original event object is unchanged. +- Generates a new UUIDv7 `event_id` for the returned copy. 
+- Clears runtime completion state (`event_results`, status/timestamps, captured async context, done signal, local bus binding). + +#### `toString()` / `toJSON()` / `fromJSON()` + +```ts +toString(): string +toJSON(): BaseEventData +BaseEvent.fromJSON(data: unknown): BaseEvent +EventFactory.fromJSON?.(data: unknown): TypedEvent +``` + +- JSON format is cross-language compatible with Python implementation. +- `event_result_type` is serialized as JSON Schema when possible and rehydrated on `fromJSON`. +- In TypeScript-only usage, `event_result_type` can be any Zod schema shape or base type like `number | string | boolean | etc.`. For cross-language roundtrips, object-like schemas (including Python `TypedDict`/`dataclass`-style shapes) are reconstructed on Python as Pydantic models, JSON object keys are always strings, and some fine-grained string-shape constraints may be normalized between Zod and Pydantic. +- Round-trip coverage is in `bubus-ts/tests/event_result_typed_results.test.ts` and `bubus-ts/tests/eventbus.test.ts`. + +
+ +
+Review per-handler status, timing, outputs, and captured errors.
+
+Each handler execution creates one `EventResult` stored in `event.event_results`.
+
+#### Main fields
+
+- `id: string` (uuidv7 string)
+- `status: 'pending' | 'started' | 'completed' | 'error'`
+- `event: BaseEvent`
+- `handler: EventHandler`
+- `result: EventResultType | undefined`
+- `error: unknown | undefined`
+- `started_at: string | null` (ISO datetime string)
+- `completed_at: string | null` (ISO datetime string)
+- `event_children: BaseEvent[]`
+
+#### Read-only getters
+
+- `event_id` -> `string` uuidv7 of the event the result is for
+- `bus` -> `EventBus` instance it's associated with
+- `handler_id` -> `string` uuidv5 of the `EventHandler`
+- `handler_name` -> `string | 'anonymous'` function name of the handler method
+- `handler_file_path` -> `string | null` path/to/file.js:lineno where the handler method is defined
+- `eventbus_name` -> `string` name, same as `this.bus.name`
+- `eventbus_id` -> `string` uuidv7, same as `this.bus.id`
+- `eventbus_label` -> `string` label, same as `this.bus.label`
+- `value` -> `EventResultType | undefined` alias of `this.result`
+- `raw_value` -> `any` raw result value before schema validation, available when handler return value validation fails
+- `handler_timeout` -> `number` seconds before handler execution is aborted (precedence: handler config -> event config -> bus level defaults)
+- `handler_slow_timeout` -> `number` seconds before logging a slow execution warning (same precedence as `handler_timeout`)
+
+#### `toString()` / `toJSON()` / `fromJSON()`
+
+```ts
+toString(): string
+toJSON(): EventResultJSON
+EventResult.fromJSON(event, data): EventResult
+```
+
+
+ +
+Review handler metadata, registration fields, and serialization helpers. + +Represents one registered handler entry on a bus. You usually get these from `bus.on(...)`, then pass them to `bus.off(...)` to remove. + +#### Main fields + +- `id` unique handler UUIDv5 (deterministic hash from bus/event/handler metadata unless overridden) +- `handler` function reference that executes for matching events +- `handler_name` function name (or `'anonymous'`) +- `handler_file_path` detected source path (`~/path/file.ts:line`) or `null` +- `handler_timeout` optional timeout override in seconds (`null` disables timeout limit) +- `handler_slow_timeout` optional slow-warning threshold in seconds (`null` disables slow warning) +- `handler_registered_at` ISO timestamp +- `event_pattern` subscribed key (`'SomeEvent'` or `'*'`) +- `eventbus_name` bus name where this handler was registered +- `eventbus_id` bus UUID where this handler was registered + +#### `toString()` / `toJSON()` / `fromJSON()` + +```ts +toString(): string +toJSON(): EventHandlerJSON +EventHandler.fromJSON(data: unknown, handler?: EventHandlerCallable): EventHandler +``` + +- `toString()` returns `handlerName() (path:line)` when path/name are available, otherwise `function#abcd()`. +- `toJSON()` emits only serializable handler metadata (never function bodies). +- `fromJSON()` reconstructs the handler entry and accepts an optional real function to re-bind execution behavior. + +
+ +--- + +
+ +
+ +## 🧡 Advanced Concurrency Control + +### Concurrency Config Options + +#### Bus-level config options (`new EventBus(name, {...options...})`) + +- `max_history_size?: number | null` (default: `100`) + - Max events kept in history. `null` = unlimited. `bus.find(...)` uses this log to query recently emitted events + - `0` keeps only pending/in-flight events; each event is removed from history immediately after completion. +- `max_history_drop?: boolean` (default: `false`) + - If `true`, drop oldest history entries when history is full (including uncompleted entries if needed). + - If `false`, reject new emits when history is full. +- `event_concurrency?: 'global-serial' | 'bus-serial' | 'parallel' | null` (default: `'bus-serial'`) + - Event-level scheduling policy (`global-serial`: FIFO across all buses, `bus-serial`: FIFO per bus, `parallel`: concurrent events per bus). +- `event_handler_concurrency?: 'serial' | 'parallel' | null` (default: `'serial'`) + - Handler-level scheduling policy for each event (`serial`: one handler at a time per event, `parallel`: all handlers for the event can run concurrently). +- `event_handler_completion?: 'all' | 'first'` (default: `'all'`) + - Completion strategy (`all`: wait for all handlers, `first`: stop after first non-`undefined` result). +- `event_timeout?: number | null` (default: `60`) + - Default handler timeout budget in seconds. +- `event_handler_slow_timeout?: number | null` (default: `30`) + - Slow-handler warning threshold in seconds. +- `event_slow_timeout?: number | null` (default: `300`) + - Slow-event warning threshold in seconds. 
+ +#### Event-level config options + +Override the bus defaults on a per-event basis by using these special fields in the event: + +```ts +const event = MyEvent({ + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + event_handler_completion: 'first', + event_timeout: 10, + event_handler_timeout: 3, +}) +``` + +Notes: + +- `null` means "inherit/fall back to bus default" for event-level concurrency and timeout fields. +- Forwarded events are processed under the target bus's config; source bus config is not inherited. +- `event_handler_completion` is independent from handler scheduling mode (`serial` vs `parallel`). + +#### Handler-level config options + +Set at registration: + +```ts +bus.on(MyEvent, handler, { handler_timeout: 2 }) // max time in seconds this handler is allowed to run before it's aborted +``` + +#### Precedence and interaction + +Event and handler concurrency precedence: + +1. Event instance override (`event.event_concurrency`, `event.event_handler_concurrency`) +2. Bus defaults (`EventBus` options) +3. Built-in defaults (`bus-serial`, `serial`) + +Timeout resolution for each handler run: + +1. Resolve handler timeout source: + - `bus.on(..., { handler_timeout })` + - else `event.event_handler_timeout` + - else bus `event_timeout` +2. Apply event cap: + - effective timeout is `min(resolved_handler_timeout, event.event_timeout)` when both are non-null + - if either is `null`, the non-null value wins; both null means no timeout + +Additional timeout nuance: + +- `BaseEvent.event_timeout` starts as `null` unless set; each processing bus resolves its own `event_timeout` default when still unset. +- Bus/event timeouts are outer budgets for handler execution; use `@retry({ timeout })` for per-attempt timeouts. + +Use `@retry` for per-handler execution timeout/retry/backoff/semaphore control. Keep bus/event timeouts as outer execution budgets. + +### Runtime lifecycle (bus -> event -> handler) + +Emit flow: + +1. 
`emit()` normalizes to original event and captures async context when available. +2. Bus appends itself to `event_path` and records runtime ownership for this processing pass. +3. Event enters `event_history`, `pending_event_queue`, and runloop starts. +4. Runloop dequeues and calls `processEvent()`. +5. Event-level semaphore (`event_concurrency`) is applied. +6. Handler results are created and executed under handler-level semaphore (`event_handler_concurrency`), with timeout/concurrency defaults resolved at processing time on the current bus when event fields are unset. +7. Event completion and child completion propagate through `event_pending_bus_count` and result states. +8. History trimming evicts completed events first; if still over limit, oldest pending events can be dropped (with warning), then cleanup runs. + +Locking model: + +- Global event semaphore: `global-serial` +- Bus event semaphore: `bus-serial` +- Per-event handler semaphore: `serial` handler mode + +### Queue-jumping (`await event.done()` inside handlers) + +Want to emit and await an event like a function call? simply `await event.done()`. +When called inside a handler, the awaited event is processed immediately (queue-jump behavior) before normal queued work continues. + +### `@retry` Decorator + +`retry()` adds retry logic and optional semaphore-based concurrency limiting to async functions/handlers. + +#### Why retry is handler-level + +Retry and timeout belong on handlers, not emit sites: + +- Handlers fail; events are messages. +- Handler-level retries preserve replay semantics (one event emit, internal retry attempts). +- Bus concurrency and retry concerns are orthogonal and compose cleanly. 
+ +#### Recommended pattern: `@retry()` on class methods + +```ts +import { retry, EventBus } from 'bubus' + +class ScreenshotService { + constructor(private bus: InstanceType) { + bus.on(ScreenshotRequestEvent, this.onScreenshot.bind(this)) + } + + @retry({ + max_attempts: 4, + retry_on_errors: [/timeout/i], + timeout: 5, + semaphore_scope: 'global', + semaphore_name: 'Screenshots', + semaphore_limit: 2, + }) + async onScreenshot(event: InstanceType): Promise { + return await takeScreenshot(event.data.url) + } +} + +const ev = bus.emit(ScreenshotRequestEvent({ url: 'https://example.com' })) +await ev.done() +``` + +#### Also works: inline HOF + +```ts +bus.on( + MyEvent, + retry({ max_attempts: 3, timeout: 10 })(async (event) => { + await riskyOperation(event.data) + }) +) +``` + +#### Options + +| Option | Type | Default | Description | +| ---------------------- | ----------------------------------------- | ----------- | ----------------------------------------------- | +| `max_attempts` | `number` | `1` | Total attempts including first call. | +| `retry_after` | `number` | `0` | Seconds between retries. | +| `retry_backoff_factor` | `number` | `1.0` | Multiplier for retry delay. | +| `retry_on_errors` | `(ErrorClass \| string \| RegExp)[]` | `undefined` | Retry filter. `undefined` retries on any error. | +| `timeout` | `number \| null` | `undefined` | Per-attempt timeout in seconds. | +| `semaphore_limit` | `number \| null` | `undefined` | Max concurrent executions sharing semaphore. | +| `semaphore_name` | `string \| ((...args) => string) \| null` | fn name | Semaphore key. | +| `semaphore_lax` | `boolean` | `true` | Continue if semaphore acquisition times out. | +| `semaphore_scope` | `'global' \| 'class' \| 'instance'` | `'global'` | Scope for semaphore identity. | +| `semaphore_timeout` | `number \| null` | `undefined` | Max seconds waiting for semaphore. | + +#### Error types + +- `RetryTimeoutError`: per-attempt timeout exceeded. 
+- `SemaphoreTimeoutError`: semaphore acquisition timeout (`semaphore_lax=false`). + +#### Re-entrancy + +On Node.js/Bun, `AsyncLocalStorage` tracks held semaphores and avoids deadlocks for nested calls using the same semaphore. +In browsers, this tracking is unavailable, avoid recursive/nested same-semaphore patterns there. + +#### Interaction with bus concurrency + +Execution order when used on bus handlers: + +1. Bus acquires handler semaphore (`event_handler_concurrency`) +2. `retry()` acquires retry semaphore (if configured) +3. Handler executes (with retries) +4. `retry()` releases retry semaphore +5. Bus releases handler semaphore + +Use bus/event timeouts for outer deadlines and `retry({ timeout })` for per-handler-attempt deadlines. + +#### Discouraged: retrying emit sites + +Avoid wrapping `emit()/done()` in `retry()` unless you intentionally want multiple event emits (a new event for every retry). +Keep retries on handlers so that your logs represent the original high-level intent, with a single event per call even if handling it took multiple tries. +Emitting a new event for each retry is only recommended if you are using the logs for debugging more than for replayability / time-travel. + +
+ +--- + +
+
+## Bridges
+
+Bridges are optional extra connectors that allow you to send/receive events from an external service, and you do not need to use a bridge to use bubus since it's normally purely in-memory. These are just simple helpers to forward bubus events JSON to storage engines / other processes / other machines; they prevent loops automatically, but beyond that it's only basic forwarding with no handler pickling or anything fancy.
+
+Bridges all expose a very simple bus-like API with only `.emit()` and `.on()`.
+
+**Example usage: link a bus to a redis pub/sub channel**
+
+```ts
+const bridge = new RedisEventBridge('redis://redis@localhost:6379')
+
+bus.on('*', bridge.emit) // listen for all events on bus and send them to redis channel
+bridge.on('*', bus.emit) // listen for new events in redis channel and emit them on our bus
+```
+
+- `new SocketEventBridge('/tmp/bubus_events.sock')`
+- `new HTTPEventBridge({ send_to: 'https://127.0.0.1:8001/bubus_events', listen_on: 'http://0.0.0.0:8002/bubus_events' })`
+- `new JSONLEventBridge('/tmp/bubus_events.jsonl')`
+- `new SQLiteEventBridge('/tmp/bubus_events.sqlite3')`
+- `new PostgresEventBridge('postgresql://user:pass@localhost:5432/dbname/bubus_events')`
+- `new RedisEventBridge('redis://user:pass@localhost:6379/1/bubus_events')`
+- `new NATSEventBridge('nats://localhost:4222', 'bubus_events')`
+
+ +--- + +
+ +## πŸƒ Runtimes + +`bubus-ts` supports all major JS runtimes. + +- Node.js (default development and test runtime) +- Browsers (ESM) +- Bun +- Deno + +### Browser support notes + +- The package output is ESM (`./dist/esm`) which is supported by all browsers [released after 2018](https://caniuse.com/?search=ESM) +- `AsyncLocalStorage` is preserved at emit and used during handling when available (Node/Bun), otel/tracing context will work normally in those environments + +### Performance comparison (local run, per-event) + +Measured locally on an `Apple M4 Pro` with: + +- `pnpm run perf:node` (`node v22.21.1`) +- `pnpm run perf:bun` (`bun v1.3.9`) +- `pnpm run perf:deno` (`deno v2.6.8`) +- `pnpm run perf:browser` (`chrome v145.0.7632.6`) + +| Runtime | 1 bus x 50k events x 1 handler | 500 buses x 100 events x 1 handler | 1 bus x 1 event x 50k parallel handlers | 1 bus x 50k events x 50k one-off handlers | Worst case (N buses x N events x N handlers) | +| ------------------ | ------------------------------ | ---------------------------------- | --------------------------------------- | ----------------------------------------- | -------------------------------------------- | +| Node | `0.015ms/event`, `0.6kb/event` | `0.058ms/event`, `0.1kb/event` | `0.021ms/handler`, `3.8kb/handler` | `0.028ms/event`, `0.6kb/event` | `0.442ms/event`, `0.9kb/event` | +| Bun | `0.011ms/event`, `2.5kb/event` | `0.054ms/event`, `1.0kb/event` | `0.006ms/handler`, `4.5kb/handler` | `0.019ms/event`, `2.8kb/event` | `0.441ms/event`, `3.1kb/event` | +| Deno | `0.018ms/event`, `1.2kb/event` | `0.063ms/event`, `0.4kb/event` | `0.024ms/handler`, `3.1kb/handler` | `0.064ms/event`, `2.6kb/event` | `0.461ms/event`, `7.9kb/event` | +| Browser (Chromium) | `0.030ms/event` | `0.197ms/event` | `0.022ms/handler` | `0.022ms/event` | `1.566ms/event` | + +Notes: + +- `kb/event` is peak RSS delta per event during active processing (most representative of OS-visible RAM in Activity Monitor / Task Manager, 
with `EventBus.max_history_size=1`) +- In `1 bus x 1 event x 50k parallel handlers` stats are shown per-handler for clarity, `0.02ms/handler * 50k handlers ~= 1000ms` for the entire event +- Browser runtime does not expose memory usage directly, in practice memory performance in-browser is comparable to Node (they both use V8) + +
+ +--- + +
+ +## πŸ‘Ύ Development + +```bash +git clone https://github.com/pirate/bbus bubus && cd bubus + +cd ./bubus-ts +pnpm install + +prek install # install pre-commit hooks +prek run --all-files # run pre-commit hooks on all files manually + +pnpm lint +pnpm test +``` diff --git a/bubus-ts/eslint.config.js b/bubus-ts/eslint.config.js new file mode 100644 index 0000000..458a8b7 --- /dev/null +++ b/bubus-ts/eslint.config.js @@ -0,0 +1,25 @@ +import ts_parser from '@typescript-eslint/parser' +import ts_eslint_plugin from '@typescript-eslint/eslint-plugin' + +export default [ + { + ignores: ['dist/**', 'README.md'], + }, + { + files: ['**/*.ts'], + languageOptions: { + parser: ts_parser, + parserOptions: { + sourceType: 'module', + ecmaVersion: 'latest', + }, + }, + plugins: { + '@typescript-eslint': ts_eslint_plugin, + }, + rules: { + 'no-unused-vars': 'off', + '@typescript-eslint/no-unused-vars': ['error', { argsIgnorePattern: '^_' }], + }, + }, +] diff --git a/bubus-ts/examples/concurrency_options.ts b/bubus-ts/examples/concurrency_options.ts new file mode 100755 index 0000000..0f587ad --- /dev/null +++ b/bubus-ts/examples/concurrency_options.ts @@ -0,0 +1,222 @@ +#!/usr/bin/env -S node --import tsx +// Run: node --import tsx examples/concurrency_options.ts + +import { z } from 'zod' +import { BaseEvent, EventBus, EventHandlerTimeoutError } from '../src/index.js' +const sleep = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +const makeLogger = (section: string) => { + const started_at = performance.now() + return (message: string) => { + const elapsed = (performance.now() - started_at).toFixed(1) + console.log(`[${section}] +${elapsed}ms ${message}`) + } +} +const WorkEvent = BaseEvent.extend('ConcurrencyOptionsWorkEvent', { lane: z.string(), order: z.number(), ms: z.number() }) +const HandlerEvent = BaseEvent.extend('ConcurrencyOptionsHandlerEvent', { label: z.string() }) +const OverrideEvent = 
BaseEvent.extend('ConcurrencyOptionsOverrideEvent', { label: z.string(), order: z.number(), ms: z.number() }) +const TimeoutEvent = BaseEvent.extend('ConcurrencyOptionsTimeoutEvent', { ms: z.number() }) + +// 1) Event concurrency at bus level: global-serial vs bus-serial. +// Observe how max in-flight events differs across two buses. +async function eventConcurrencyDemo(): Promise { + const global_log = makeLogger('event:global-serial') + const global_a = new EventBus('GlobalSerialA', { event_concurrency: 'global-serial', event_handler_concurrency: 'serial' }) + const global_b = new EventBus('GlobalSerialB', { event_concurrency: 'global-serial', event_handler_concurrency: 'serial' }) + let global_in_flight = 0 + let global_max = 0 + const global_handler = async (event: InstanceType) => { + global_in_flight += 1 + global_max = Math.max(global_max, global_in_flight) + global_log(`${event.lane}${event.order} start (global in-flight=${global_in_flight})`) + await sleep(event.ms) + global_log(`${event.lane}${event.order} end`) + global_in_flight -= 1 + } + global_a.on(WorkEvent, global_handler) + global_b.on(WorkEvent, global_handler) + global_a.emit(WorkEvent({ lane: 'A', order: 0, ms: 45 })) + global_b.emit(WorkEvent({ lane: 'B', order: 0, ms: 45 })) + global_a.emit(WorkEvent({ lane: 'A', order: 1, ms: 45 })) + global_b.emit(WorkEvent({ lane: 'B', order: 1, ms: 45 })) + await Promise.all([global_a.waitUntilIdle(), global_b.waitUntilIdle()]) + global_log(`max in-flight across both buses: ${global_max} (expect 1 in global-serial)`) + console.log('\n=== global_a.logTree() ===') + console.log(global_a.logTree()) + console.log('\n=== global_b.logTree() ===') + console.log(global_b.logTree()) + const bus_log = makeLogger('event:bus-serial') + const bus_a = new EventBus('BusSerialA', { event_concurrency: 'bus-serial', event_handler_concurrency: 'serial' }) + const bus_b = new EventBus('BusSerialB', { event_concurrency: 'bus-serial', event_handler_concurrency: 'serial' }) + 
const per_bus_in_flight = { A: 0, B: 0 } + const per_bus_max = { A: 0, B: 0 } + let mixed_global_in_flight = 0 + let mixed_global_max = 0 + const bus_handler = async (event: InstanceType) => { + const lane = event.lane as 'A' | 'B' + mixed_global_in_flight += 1 + mixed_global_max = Math.max(mixed_global_max, mixed_global_in_flight) + per_bus_in_flight[lane] += 1 + per_bus_max[lane] = Math.max(per_bus_max[lane], per_bus_in_flight[lane]) + bus_log(`${lane}${event.order} start (global=${mixed_global_in_flight}, lane=${per_bus_in_flight[lane]})`) + await sleep(event.ms) + bus_log(`${lane}${event.order} end`) + per_bus_in_flight[lane] -= 1 + mixed_global_in_flight -= 1 + } + bus_a.on(WorkEvent, bus_handler) + bus_b.on(WorkEvent, bus_handler) + bus_a.emit(WorkEvent({ lane: 'A', order: 0, ms: 45 })) + bus_b.emit(WorkEvent({ lane: 'B', order: 0, ms: 45 })) + bus_a.emit(WorkEvent({ lane: 'A', order: 1, ms: 45 })) + bus_b.emit(WorkEvent({ lane: 'B', order: 1, ms: 45 })) + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + bus_log(`max in-flight global=${mixed_global_max}, per-bus A=${per_bus_max.A}, B=${per_bus_max.B} (expect global >= 2, per-bus = 1)`) + console.log('\n=== bus_a.logTree() ===') + console.log(bus_a.logTree()) + console.log('\n=== bus_b.logTree() ===') + console.log(bus_b.logTree()) +} + +// 2) Handler concurrency at bus level: serial vs parallel on the same event. +// Observe handler overlap for one event with two handlers. 
+async function handlerConcurrencyDemo(): Promise { + const run_case = async (mode: 'serial' | 'parallel') => { + const log = makeLogger(`handler:${mode}`) + const bus = new EventBus(`HandlerMode-${mode}`, { event_concurrency: 'parallel', event_handler_concurrency: mode }) + let in_flight = 0 + let max_in_flight = 0 + const make_handler = (name: string, ms: number) => async (event: InstanceType) => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + log(`${event.label}:${name} start (handlers in-flight=${in_flight})`) + await sleep(ms) + log(`${event.label}:${name} end`) + in_flight -= 1 + } + bus.on(HandlerEvent, make_handler('slow', 60)) + bus.on(HandlerEvent, make_handler('fast', 20)) + const event = bus.emit(HandlerEvent({ label: mode })) + await event.done() + await bus.waitUntilIdle() + log(`max handler overlap: ${max_in_flight} (expect 1 for serial, >= 2 for parallel)`) + console.log(`\n=== ${bus.name}.logTree() ===`) + console.log(bus.logTree()) + } + await run_case('serial') + await run_case('parallel') +} + +// 3) Event-level overrides take precedence over bus defaults. +// Bus defaults are strict (bus-serial + serial), then we override both to parallel on event instances. +async function eventOverrideDemo(): Promise { + const log = makeLogger('override:precedence') + const bus = new EventBus('OverrideBus', { event_concurrency: 'bus-serial', event_handler_concurrency: 'serial' }) + let active_events = new Set() + let per_event_handlers = new Map() + let active_handlers = 0 + let max_handlers = 0 + let max_events = 0 + + const reset_metrics = () => { + active_events = new Set() + per_event_handlers = new Map() + active_handlers = 0 + max_handlers = 0 + max_events = 0 + } + const track_start = (event: InstanceType, handler_name: string, label: string) => { + active_handlers += 1 + max_handlers = Math.max(max_handlers, active_handlers) + const count = (per_event_handlers.get(event.event_id) ?? 
0) + 1 + per_event_handlers.set(event.event_id, count) + active_events.add(event.event_id) + max_events = Math.max(max_events, active_events.size) + log(`${label}:${event.order}:${handler_name} start (events=${active_events.size}, handlers=${active_handlers})`) + } + const track_end = (event: InstanceType, handler_name: string, label: string) => { + active_handlers -= 1 + const count = (per_event_handlers.get(event.event_id) ?? 1) - 1 + if (count <= 0) { + per_event_handlers.delete(event.event_id) + active_events.delete(event.event_id) + } else { + per_event_handlers.set(event.event_id, count) + } + log(`${label}:${event.order}:${handler_name} end`) + } + + const run_pair = async (label: string, use_override: boolean) => { + reset_metrics() + const handler_a = async (event: InstanceType) => { + track_start(event, 'A', label) + await sleep(event.ms) + track_end(event, 'A', label) + } + const handler_b = async (event: InstanceType) => { + track_start(event, 'B', label) + await sleep(event.ms) + track_end(event, 'B', label) + } + bus.off(OverrideEvent) + bus.on(OverrideEvent, handler_a) + bus.on(OverrideEvent, handler_b) + const overrides = use_override ? ({ event_concurrency: 'parallel', event_handler_concurrency: 'parallel' } as const) : {} + bus.emit(OverrideEvent({ label, order: 0, ms: 45, ...overrides })) + bus.emit(OverrideEvent({ label, order: 1, ms: 45, ...overrides })) + await bus.waitUntilIdle() + log(`${label} summary -> max events=${max_events}, max handlers=${max_handlers}`) + } + + await run_pair('bus-defaults', false) + await run_pair('event-overrides', true) + console.log('\n=== OverrideBus.logTree() ===') + console.log(bus.logTree()) +} + +// 4) Handler-level timeout via bus.on(..., { handler_timeout }). +// Observe one handler timing out while another succeeds on the same event. 
+async function handlerTimeoutDemo(): Promise { + const log = makeLogger('timeout:handler-option') + const bus = new EventBus('TimeoutBus', { event_concurrency: 'parallel', event_handler_concurrency: 'parallel', event_timeout: 0.2 }) + + const slow_entry = bus.on( + TimeoutEvent, + async (event) => { + log('slow handler start') + await sleep(event.ms) + log('slow handler finished body (but may already be timed out)') + return 'slow' + }, + { handler_timeout: 0.03 } + ) + bus.on( + TimeoutEvent, + async () => { + log('fast handler start') + await sleep(10) + log('fast handler end') + return 'fast' + }, + { handler_timeout: 0.1 } + ) + const event = bus.emit(TimeoutEvent({ ms: 60, event_handler_timeout: 0.5 })) + await event.done() + const slow_result = event.event_results.get(slow_entry.id) + const handler_timed_out = slow_result?.error instanceof EventHandlerTimeoutError + log(`slow handler status=${slow_result?.status}, timeout_error=${handler_timed_out ? 'yes' : 'no'}`) + await bus.waitUntilIdle() + console.log('\n=== TimeoutBus.logTree() ===') + console.log(bus.logTree()) +} + +async function main(): Promise { + await eventConcurrencyDemo() + await handlerConcurrencyDemo() + await eventOverrideDemo() + await handlerTimeoutDemo() +} +await main() diff --git a/bubus-ts/examples/forwarding_between_busses.ts b/bubus-ts/examples/forwarding_between_busses.ts new file mode 100755 index 0000000..49f7361 --- /dev/null +++ b/bubus-ts/examples/forwarding_between_busses.ts @@ -0,0 +1,96 @@ +#!/usr/bin/env -S node --import tsx +// Run: node --import tsx examples/forwarding_between_busses.ts + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ForwardedEvent = BaseEvent.extend('ForwardedEvent', { + message: z.string(), +}) + +async function main(): Promise { + const busA = new EventBus('BusA') + const busB = new EventBus('BusB') + const busC = new EventBus('BusC') + + const handleCounts = { + BusA: 0, + BusB: 0, + BusC: 0, + } + + const 
seenEventIds = { + BusA: new Set(), + BusB: new Set(), + BusC: new Set(), + } + + // Each bus handles the typed event locally. + // In a forwarding cycle, loop prevention should keep each bus to one handle. + busA.on(ForwardedEvent, (event) => { + handleCounts.BusA += 1 + seenEventIds.BusA.add(event.event_id) + console.log(`[BusA] handled ${event.event_id} (count=${handleCounts.BusA})`) + }) + + busB.on(ForwardedEvent, (event) => { + handleCounts.BusB += 1 + seenEventIds.BusB.add(event.event_id) + console.log(`[BusB] handled ${event.event_id} (count=${handleCounts.BusB})`) + }) + + busC.on(ForwardedEvent, (event) => { + handleCounts.BusC += 1 + seenEventIds.BusC.add(event.event_id) + console.log(`[BusC] handled ${event.event_id} (count=${handleCounts.BusC})`) + }) + + // Forward all events in a ring: + // A -> B -> C -> A + // Expected for one dispatch from A: event path becomes [A, B, C] and stops. + // The C -> A edge is skipped because A is already in event_path. + busA.on('*', busB.emit) + busB.on('*', busC.emit) + busC.on('*', busA.emit) + + console.log('Dispatching ForwardedEvent on BusA with cyclic forwarding A -> B -> C -> A') + + const event = busA.emit( + ForwardedEvent({ + message: 'hello across 3 buses', + }) + ) + + // done() waits for handlers on all forwarded buses, not just the origin bus. 
+ await event.done() + await Promise.all([busA.waitUntilIdle(), busB.waitUntilIdle(), busC.waitUntilIdle()]) + + const path = event.event_path + const totalHandles = handleCounts.BusA + handleCounts.BusB + handleCounts.BusC + + console.log('\nFinal propagation summary:') + console.log(`- event_id: ${event.event_id}`) + console.log(`- event_path: ${path.join(' -> ')}`) + console.log(`- handle counts: ${JSON.stringify(handleCounts)}`) + console.log(`- unique ids seen per bus: A=${seenEventIds.BusA.size}, B=${seenEventIds.BusB.size}, C=${seenEventIds.BusC.size}`) + console.log(`- total handles: ${totalHandles}`) + + const handledOncePerBus = handleCounts.BusA === 1 && handleCounts.BusB === 1 && handleCounts.BusC === 1 + const visitedThreeBuses = path.length === 3 + + if (handledOncePerBus && visitedThreeBuses) { + console.log('\nLoop prevention confirmed: each bus handled the event at most once.') + } else { + console.log('\nUnexpected forwarding result. Check handlers/forwarding setup.') + } + + console.log('\n=== BusA logTree() ===') + console.log(busA.logTree()) + console.log('\n=== BusB logTree() ===') + console.log(busB.logTree()) + console.log('\n=== BusC logTree() ===') + console.log(busC.logTree()) +} + +await main() diff --git a/bubus-ts/examples/immediate_event_processing.ts b/bubus-ts/examples/immediate_event_processing.ts new file mode 100755 index 0000000..7fd6be1 --- /dev/null +++ b/bubus-ts/examples/immediate_event_processing.ts @@ -0,0 +1,138 @@ +#!/usr/bin/env -S node --import tsx +// Run: node --import tsx examples/immediate_event_processing.ts + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +// Parent handler runs two scenarios: +// 1) await child.done() -> immediate queue-jump processing +// 2) await child.eventCompleted() -> normal queue processing +const ParentEvent = BaseEvent.extend('ImmediateProcessingParentEvent', { + mode: z.enum(['immediate', 'queued']), +}) + +const ChildEvent = 
BaseEvent.extend('ImmediateProcessingChildEvent', { + scenario: z.enum(['immediate', 'queued']), +}) + +const SiblingEvent = BaseEvent.extend('ImmediateProcessingSiblingEvent', { + scenario: z.enum(['immediate', 'queued']), +}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +type Scenario = 'immediate' | 'queued' + +async function main(): Promise { + // Two buses: bus_a is the source, bus_b is the forward target. + const bus_a = new EventBus('QueueJumpDemoA', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('QueueJumpDemoB', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + // Simple step counter so ordering is easy to read in stdout. + let step = 0 + const log = (message: string): void => { + step += 1 + console.log(`${String(step).padStart(2, '0')}. ${message}`) + } + + // Forwarding setup: both sibling/child events emitted on bus_a are forwarded to bus_b. + bus_a.on(ChildEvent, (event) => { + log(`[forward] ${event.event_type}(${event.scenario}) bus_a -> bus_b`) + bus_b.emit(event) + }) + bus_a.on(SiblingEvent, (event) => { + log(`[forward] ${event.event_type}(${event.scenario}) bus_a -> bus_b`) + bus_b.emit(event) + }) + + // Local handlers on bus_a. + bus_a.on(ChildEvent, async (event) => { + log(`[bus_a] child start (${event.scenario})`) + await delay(8) + log(`[bus_a] child end (${event.scenario})`) + }) + bus_a.on(SiblingEvent, async (event) => { + log(`[bus_a] sibling start (${event.scenario})`) + await delay(14) + log(`[bus_a] sibling end (${event.scenario})`) + }) + + // Forwarded handlers on bus_b. 
+ bus_b.on(ChildEvent, async (event) => { + log(`[bus_b] child start (${event.scenario})`) + await delay(4) + log(`[bus_b] child end (${event.scenario})`) + }) + bus_b.on(SiblingEvent, async (event) => { + log(`[bus_b] sibling start (${event.scenario})`) + await delay(6) + log(`[bus_b] sibling end (${event.scenario})`) + }) + + // Parent handler queues sibling first, then child, then compares await behavior. + bus_a.on(ParentEvent, async (event) => { + log(`[parent:${event.mode}] start`) + + // Queue a sibling first so normal queue order has sibling ahead of child. + event.bus?.emit(SiblingEvent({ scenario: event.mode })) + log(`[parent:${event.mode}] sibling queued`) + + // Queue child second; this is the event we await in two different ways. + const child = event.bus?.emit(ChildEvent({ scenario: event.mode }))! + log(`[parent:${event.mode}] child queued`) + + if (event.mode === 'immediate') { + // Queue-jump: child processes immediately while still inside parent handler. + log(`[parent:${event.mode}] await child.done()`) + await child.done() + log(`[parent:${event.mode}] child.done() resolved`) + } else { + // Normal queue wait: child waits its turn behind already-queued sibling work. + log(`[parent:${event.mode}] await child.eventCompleted()`) + await child.eventCompleted() + log(`[parent:${event.mode}] child.eventCompleted() resolved`) + } + + log(`[parent:${event.mode}] end`) + }) + + const runScenario = async (mode: Scenario): Promise => { + log(`----- scenario=${mode} -----`) + + // Parent event uses parallel concurrency so eventCompleted() in handler + // can wait safely while other queued events continue to run. 
+ const parent = bus_a.emit( + ParentEvent({ + mode, + event_concurrency: 'parallel', + }) + ) + + await parent.eventCompleted() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + log(`----- done scenario=${mode} -----`) + } + + await runScenario('immediate') + await runScenario('queued') + + console.log('\nExpected behavior:') + console.log('- immediate: child runs before sibling (queue-jump) and parent resumes right after child.') + console.log('- queued: sibling runs first, child waits in normal queue order, parent resumes later.') + console.log('\n=== bus_a.logTree() ===') + console.log(bus_a.logTree()) + console.log('\n=== bus_b.logTree() ===') + console.log(bus_b.logTree()) +} + +await main() diff --git a/bubus-ts/examples/log_tree_demo.ts b/bubus-ts/examples/log_tree_demo.ts new file mode 100755 index 0000000..e369011 --- /dev/null +++ b/bubus-ts/examples/log_tree_demo.ts @@ -0,0 +1,95 @@ +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const RootEvent = BaseEvent.extend('RootEvent', { + url: z.string(), + event_result_type: z.string(), +}) + +const ChildEvent = BaseEvent.extend('ChildEvent', { + tab_id: z.string(), + event_result_type: z.string(), +}) + +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', { + status: z.string(), + event_result_type: z.string(), +}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +async function main(): Promise { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + + async function forward_to_bus_b(event: InstanceType): Promise { + await delay(20) + bus_b.emit(event) + return 'forwarded_to_bus_b' + } + + bus_a.on('*', forward_to_bus_b) + + async function root_fast_handler(event: InstanceType): Promise { + await delay(10) + const child = event.bus?.emit(ChildEvent({ tab_id: 'tab-123', event_timeout: 0.1 })) + if (child) { + await child.done() + } + return 'root_fast_handler_ok' + } + + 
async function root_slow_handler(event: InstanceType): Promise { + event.bus?.emit(ChildEvent({ tab_id: 'tab-timeout', event_timeout: 0.1 })) + await delay(400) + return 'root_slow_handler_timeout' + } + + bus_a.on(RootEvent, root_fast_handler) + bus_a.on(RootEvent, root_slow_handler) + + async function child_slow_handler(_event: InstanceType): Promise { + await delay(150) + return 'child_slow_handler_done' + } + + async function child_fast_handler(event: InstanceType): Promise { + await delay(10) + const grandchild = event.bus?.emit(GrandchildEvent({ status: 'ok', event_timeout: 0.05 })) + if (grandchild) { + await grandchild.done() + } + return 'child_handler_ok' + } + + async function grandchild_fast_handler(): Promise { + await delay(5) + return 'grandchild_fast_handler_ok' + } + + async function grandchild_slow_handler(): Promise { + await delay(60) + return 'grandchild_slow_handler_timeout' + } + + bus_b.on(ChildEvent, child_slow_handler) + bus_b.on(ChildEvent, child_fast_handler) + bus_b.on(GrandchildEvent, grandchild_fast_handler) + bus_b.on(GrandchildEvent, grandchild_slow_handler) + + const root_event = bus_a.emit(RootEvent({ url: 'https://example.com', event_timeout: 0.25 })) + + await root_event.done() + + console.log('\n=== BusA logTree ===') + console.log(bus_a.logTree()) + + console.log('\n=== BusB logTree ===') + console.log(bus_b.logTree()) +} + +await main() diff --git a/bubus-ts/examples/parent_child_tracking.ts b/bubus-ts/examples/parent_child_tracking.ts new file mode 100755 index 0000000..7fd26a4 --- /dev/null +++ b/bubus-ts/examples/parent_child_tracking.ts @@ -0,0 +1,130 @@ +#!/usr/bin/env -S node --import tsx +// Run: node --import tsx examples/parent_child_tracking.ts + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +// Step 1: Define a tiny parent -> child -> grandchild event model. 
+const ParentEvent = BaseEvent.extend('ParentEvent', { + workflow: z.string(), +}) + +const ChildEvent = BaseEvent.extend('ChildEvent', { + stage: z.string(), +}) + +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', { + note: z.string(), +}) + +const shortId = (id?: string | null): string => (id ? id.slice(-8) : 'none') + +async function main(): Promise { + // Step 2: Create one bus so parent/child linkage is easy to inspect in one history. + const bus = new EventBus('ParentChildTrackingBus') + + // Step 3: Child handler dispatches a grandchild through event.bus. + // Because this runs inside ChildEvent handling, grandchild gets linked automatically. + bus.on(ChildEvent, async (event: InstanceType): Promise => { + console.log(`child handler start: ${event.event_type}#${shortId(event.event_id)}`) + + const grandchild = event.bus?.emit( + GrandchildEvent({ + note: `spawned by ${event.stage}`, + }) + ) + + if (grandchild) { + console.log( + ` child dispatched grandchild: ${grandchild.event_type}#${shortId(grandchild.event_id)} parent_id=${shortId(grandchild.event_parent_id)}` + ) + + // Step 4: Await a nested event so ordering and linkage are explicit in output. + await grandchild.done() + console.log(` child resumed after grandchild.done(): ${shortId(grandchild.event_id)}`) + } + + return `child_completed:${event.stage}` + }) + + // Step 5: Grandchild handler is simple; it just marks completion with a string result. + bus.on(GrandchildEvent, async (event: InstanceType): Promise => { + console.log(`grandchild handler: ${event.event_type}#${shortId(event.event_id)} note="${event.note}"`) + return `grandchild_completed:${event.note}` + }) + + // Step 6: Parent handler emits/dispatches child events via event.bus. + // One child is awaited with .done() to clearly show queue-jump + linkage behavior. 
+ bus.on(ParentEvent, async (event: InstanceType): Promise => { + console.log(`parent handler start: ${event.event_type}#${shortId(event.event_id)} workflow="${event.workflow}"`) + + const awaitedChild = event.bus?.emit(ChildEvent({ stage: 'awaited-child' })) + if (awaitedChild) { + console.log( + ` parent emitted child: ${awaitedChild.event_type}#${shortId(awaitedChild.event_id)} parent_id=${shortId(awaitedChild.event_parent_id)}` + ) + + // Required by this example: await at least one child so parent/child linkage is obvious. + await awaitedChild.done() + console.log(` parent resumed after awaited child.done(): ${shortId(awaitedChild.event_id)}`) + } + + const backgroundChild = event.bus?.emit(ChildEvent({ stage: 'background-child' })) + if (backgroundChild) { + console.log( + ` parent dispatched second child: ${backgroundChild.event_type}#${shortId(backgroundChild.event_id)} parent_id=${shortId(backgroundChild.event_parent_id)}` + ) + } + + // Parent also dispatches a GrandchildEvent type directly via event.bus. + // This is still automatically linked to the parent event. + const directGrandchild = event.bus?.emit(GrandchildEvent({ note: 'directly from parent' })) + if (directGrandchild) { + console.log( + ` parent dispatched grandchild type directly: ${directGrandchild.event_type}#${shortId(directGrandchild.event_id)} parent_id=${shortId(directGrandchild.event_parent_id)}` + ) + await directGrandchild.done() + } + + return 'parent_completed' + }) + + // Step 7: Dispatch parent and wait for full bus idle so history is complete. + const parent = bus.emit(ParentEvent({ workflow: 'demo-parent-child-tracking' })) + await parent.done() + await bus.waitUntilIdle() + + // Step 8: Print IDs + relationship checks from event history. 
+ console.log('\n=== Event History Relationships ===') + const history = Array.from(bus.event_history.values()).sort((a, b) => a.event_created_at.localeCompare(b.event_created_at)) + + for (const item of history) { + const parentEvent = item.event_parent + console.log( + [ + `${item.event_type}#${shortId(item.event_id)}`, + `parent=${parentEvent ? `${parentEvent.event_type}#${shortId(parentEvent.event_id)}` : 'none'}`, + `isChildOfRoot=${bus.eventIsChildOf(item, parent)}`, + `rootIsParentOf=${bus.eventIsParentOf(parent, item)}`, + ].join(' | ') + ) + } + + const firstChild = history.find((event) => event.event_type === 'ChildEvent') + const nestedGrandchild = history.find( + (event) => event.event_type === 'GrandchildEvent' && firstChild && event.event_parent_id === firstChild.event_id + ) + if (firstChild && nestedGrandchild) { + console.log( + `grandchild->child relationship check: ${nestedGrandchild.event_type}#${shortId(nestedGrandchild.event_id)} is child of ${firstChild.event_type}#${shortId(firstChild.event_id)} = ${bus.eventIsChildOf(nestedGrandchild, firstChild)}` + ) + } + + // Step 9: Print the built-in tree view from event history. + console.log('\n=== bus.logTree() ===') + const tree = bus.logTree() + console.log(tree) +} + +await main() diff --git a/bubus-ts/examples/simple.ts b/bubus-ts/examples/simple.ts new file mode 100755 index 0000000..d7274ea --- /dev/null +++ b/bubus-ts/examples/simple.ts @@ -0,0 +1,95 @@ +#!/usr/bin/env -S node --import tsx +// Run: node --import tsx examples/simple.ts + +import { BaseEvent, EventBus } from '../src/index.js' +import { z } from 'zod' + +// 1) Define typed events with BaseEvent.extend(...) +const RegisterUserEvent = BaseEvent.extend('RegisterUserEvent', { + email: z.string().email(), + plan: z.enum(['free', 'pro']), + // Handler return values for this event are validated against this schema. 
+ event_result_type: z.object({ + user_id: z.string(), + welcome_email_sent: z.boolean(), + }), +}) + +const AuditEvent = BaseEvent.extend('AuditEvent', { + message: z.string(), +}) + +async function main(): Promise { + const bus = new EventBus('SimpleExampleBus') + + // 2) Register a wildcard handler to observe every event flowing through this bus. + bus.on('*', (event: BaseEvent) => { + console.log(`[wildcard] ${event.event_type}#${event.event_id.slice(-8)}`) + }) + + // 3) Register by EventClass/factory (best type inference for payload + return type). + bus.on(RegisterUserEvent, async (event) => { + console.log(`[class handler] Creating account for ${event.email} (${event.plan})`) + return { + user_id: `user_${event.email.split('@')[0]}`, + welcome_email_sent: true, + } + }) + + // 4) Register by string event type (more dynamic, weaker compile-time checks). + bus.on('AuditEvent', (event: InstanceType) => { + console.log(`[string handler] Audit log: ${event.message}`) + }) + + // 5) Intentionally return an invalid result shape. + // This compiles because string-based registration is best-effort, but will fail + // at runtime because RegisterUserEvent has event_result_type enforcement. + bus.on('RegisterUserEvent', () => { + return { user_id: 123, welcome_email_sent: 'yes' } as unknown + }) + + // Dispatch a simple event handled by a string registration. + await bus.emit(AuditEvent({ message: 'Starting simple bubus example' })).done() + + // Dispatch the typed event; one handler returns valid data, one returns invalid data. + const register_event = bus.emit( + RegisterUserEvent({ + email: 'ada@example.com', + plan: 'pro', + }) + ) + await register_event.done() + + // 6) Inspect per-handler results (completed vs error) from event.event_results. 
+ console.log('\nRegisterUserEvent handler outcomes:') + for (const result of register_event.event_results.values()) { + if (result.status === 'completed') { + console.log(`- ${result.handler_name}: completed -> ${JSON.stringify(result.result)}`) + continue + } + if (result.status === 'error') { + const message = result.error instanceof Error ? result.error.message : String(result.error) + console.log(`- ${result.handler_name}: error -> ${message}`) + console.log(` raw invalid return: ${JSON.stringify(result.raw_value)}`) + continue + } + console.log(`- ${result.handler_name}: ${result.status}`) + } + + // 7) Convenience getters for aggregate inspection. + console.log('\nFirst valid parsed result:', register_event.event_result) + console.log(`Total event errors: ${register_event.event_errors.length}`) + for (const [index, error] of register_event.event_errors.entries()) { + const message = error instanceof Error ? error.message : String(error) + console.log(` ${index + 1}. ${message}`) + } + + await bus.waitUntilIdle() + console.log('\n=== bus.logTree() ===') + console.log(bus.logTree()) +} + +main().catch((error) => { + console.error('Example failed:', error) + process.exitCode = 1 +}) diff --git a/bubus-ts/package.json b/bubus-ts/package.json new file mode 100644 index 0000000..3f57712 --- /dev/null +++ b/bubus-ts/package.json @@ -0,0 +1,88 @@ +{ + "name": "bubus", + "version": "2.2.1", + "description": "Event bus library for browsers and ESM Node.js", + "type": "module", + "sideEffects": false, + "main": "./dist/esm/index.js", + "module": "./dist/esm/index.js", + "types": "./dist/types/index.d.ts", + "exports": { + ".": { + "types": "./dist/types/index.d.ts", + "import": "./dist/esm/index.js", + "default": "./dist/esm/index.js" + }, + "./*": { + "types": "./dist/types/*.d.ts", + "import": "./dist/esm/*.js", + "default": "./dist/esm/*.js" + } + }, + "files": [ + "dist/esm", + "dist/types" + ], + "scripts": { + "build": "pnpm run build:esm && pnpm run 
build:types", + "build:esm": "node ./scripts/build_esm.mjs", + "build:types": "tsc -p tsconfig.json --emitDeclarationOnly", + "typecheck": "tsc -p tsconfig.typecheck.json", + "prettier": "prettier --write .", + "eslint": "eslint .", + "lint": "pnpm run prettier && pnpm run eslint && pnpm run typecheck", + "test": "NODE_OPTIONS='--expose-gc' node --expose-gc --test --import tsx \"tests/**/*.test.ts\"", + "perf": "pnpm run perf:node && pnpm run perf:bun && pnpm run perf:deno && pnpm run perf:browser", + "debug:node": "NODE_OPTIONS='--expose-gc' node --expose-gc --import tsx", + "debug:bun": "bun --expose-gc run", + "debug:deno": "deno run --sloppy-imports --v8-flags=--expose-gc", + "perf:node": "pnpm run build && pnpm run debug:node -- tests/performance.runtime.ts --scenario 50k-events && pnpm run debug:node -- tests/performance.runtime.ts --scenario 500-buses-x-100-events && pnpm run debug:node -- tests/performance.runtime.ts --scenario 1-event-x-50k-parallel-handlers && pnpm run debug:node -- tests/performance.runtime.ts --scenario 50k-one-off-handlers && pnpm run debug:node -- tests/performance.runtime.ts --scenario worst-case-forwarding-timeouts && pnpm run debug:node -- tests/performance.runtime.ts --scenario cleanup-equivalence", + "perf:bun": "pnpm run build && pnpm run debug:bun -- tests/performance.runtime.ts --scenario 50k-events && pnpm run debug:bun -- tests/performance.runtime.ts --scenario 500-buses-x-100-events && pnpm run debug:bun -- tests/performance.runtime.ts --scenario 1-event-x-50k-parallel-handlers && pnpm run debug:bun -- tests/performance.runtime.ts --scenario 50k-one-off-handlers && pnpm run debug:bun -- tests/performance.runtime.ts --scenario worst-case-forwarding-timeouts && pnpm run debug:bun -- tests/performance.runtime.ts --scenario cleanup-equivalence", + "perf:deno": "pnpm run build && pnpm run debug:deno -- tests/performance.runtime.ts --scenario 50k-events && pnpm run debug:deno -- tests/performance.runtime.ts --scenario 
500-buses-x-100-events && pnpm run debug:deno -- tests/performance.runtime.ts --scenario 1-event-x-50k-parallel-handlers && pnpm run debug:deno -- tests/performance.runtime.ts --scenario 50k-one-off-handlers && pnpm run debug:deno -- tests/performance.runtime.ts --scenario worst-case-forwarding-timeouts && pnpm run debug:deno -- tests/performance.runtime.ts --scenario cleanup-equivalence", + "perf:browser": "pnpm run build && bash ./scripts/run_browser_perf.sh", + "release:dry-run": "pnpm publish --access public --dry-run --no-git-checks", + "release:check": "pnpm run typecheck && pnpm test && pnpm run build" + }, + "keywords": [], + "author": "", + "license": "MIT", + "packageManager": "pnpm@10.29.3", + "dependencies": { + "uuid": "^11.1.0", + "zod": "^4.3.6" + }, + "devDependencies": { + "@types/node": "^25.2.3", + "@typescript-eslint/eslint-plugin": "^8.55.0", + "@typescript-eslint/parser": "^8.55.0", + "esbuild": "^0.27.3", + "eslint": "^9.39.2", + "prettier": "^3.8.1", + "tsc-files": "^1.1.4", + "tsx": "^4.21.0", + "typescript": "^5.9.3" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/pirate/bbus.git", + "directory": "bubus-ts" + }, + "bugs": { + "url": "https://github.com/pirate/bbus/issues" + }, + "homepage": "https://github.com/pirate/bbus/tree/main/bubus-ts", + "publishConfig": { + "access": "public", + "registry": "https://registry.npmjs.org/" + }, + "pnpm": { + "onlyBuiltDependencies": [ + "esbuild" + ] + }, + "optionalDependencies": { + "ioredis": "^5.9.3", + "nats": "^2.29.3", + "pg": "^8.18.0" + } +} diff --git a/bubus-ts/playwright.perf.config.cjs b/bubus-ts/playwright.perf.config.cjs new file mode 100644 index 0000000..acdff63 --- /dev/null +++ b/bubus-ts/playwright.perf.config.cjs @@ -0,0 +1,20 @@ +const executablePath = process.env.PW_CHROMIUM_EXECUTABLE_PATH + +/** @type {import('playwright/test').PlaywrightTestConfig} */ +module.exports = { + projects: [ + { + name: 'browser-perf', + use: { + browserName: 'chromium', + 
...(executablePath + ? { + launchOptions: { + executablePath, + }, + } + : {}), + }, + }, + ], +} diff --git a/bubus-ts/pnpm-lock.yaml b/bubus-ts/pnpm-lock.yaml new file mode 100644 index 0000000..ac44e98 --- /dev/null +++ b/bubus-ts/pnpm-lock.yaml @@ -0,0 +1,1489 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + .: + dependencies: + uuid: + specifier: ^11.1.0 + version: 11.1.0 + zod: + specifier: ^4.3.6 + version: 4.3.6 + devDependencies: + '@types/node': + specifier: ^25.2.3 + version: 25.2.3 + '@typescript-eslint/eslint-plugin': + specifier: ^8.55.0 + version: 8.55.0(@typescript-eslint/parser@8.55.0(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/parser': + specifier: ^8.55.0 + version: 8.55.0(eslint@9.39.2)(typescript@5.9.3) + esbuild: + specifier: ^0.27.3 + version: 0.27.3 + eslint: + specifier: ^9.39.2 + version: 9.39.2 + prettier: + specifier: ^3.8.1 + version: 3.8.1 + tsc-files: + specifier: ^1.1.4 + version: 1.1.4(typescript@5.9.3) + tsx: + specifier: ^4.21.0 + version: 4.21.0 + typescript: + specifier: ^5.9.3 + version: 5.9.3 + optionalDependencies: + ioredis: + specifier: ^5.9.3 + version: 5.9.3 + nats: + specifier: ^2.29.3 + version: 2.29.3 + pg: + specifier: ^8.18.0 + version: 8.18.0 + +packages: + '@esbuild/aix-ppc64@0.27.3': + resolution: { integrity: sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg== } + engines: { node: '>=18' } + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.27.3': + resolution: { integrity: sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg== } + engines: { node: '>=18' } + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.27.3': + resolution: { integrity: sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA== } + engines: { node: '>=18' } + cpu: [arm] + os: [android] + + 
'@esbuild/android-x64@0.27.3': + resolution: { integrity: sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ== } + engines: { node: '>=18' } + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.27.3': + resolution: { integrity: sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg== } + engines: { node: '>=18' } + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.27.3': + resolution: { integrity: sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg== } + engines: { node: '>=18' } + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.27.3': + resolution: { integrity: sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w== } + engines: { node: '>=18' } + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.27.3': + resolution: { integrity: sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA== } + engines: { node: '>=18' } + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.27.3': + resolution: { integrity: sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg== } + engines: { node: '>=18' } + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.27.3': + resolution: { integrity: sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw== } + engines: { node: '>=18' } + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.27.3': + resolution: { integrity: sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg== } + engines: { node: '>=18' } + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.27.3': + resolution: { integrity: sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA== } + engines: { node: '>=18' } + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.27.3': + 
resolution: { integrity: sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw== } + engines: { node: '>=18' } + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.27.3': + resolution: { integrity: sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA== } + engines: { node: '>=18' } + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.27.3': + resolution: { integrity: sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ== } + engines: { node: '>=18' } + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.27.3': + resolution: { integrity: sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw== } + engines: { node: '>=18' } + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.27.3': + resolution: { integrity: sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA== } + engines: { node: '>=18' } + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.27.3': + resolution: { integrity: sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA== } + engines: { node: '>=18' } + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.27.3': + resolution: { integrity: sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA== } + engines: { node: '>=18' } + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.27.3': + resolution: { integrity: sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw== } + engines: { node: '>=18' } + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.27.3': + resolution: { integrity: sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ== } + engines: { node: '>=18' } + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.27.3': + resolution: { integrity: 
sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g== } + engines: { node: '>=18' } + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.27.3': + resolution: { integrity: sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA== } + engines: { node: '>=18' } + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.27.3': + resolution: { integrity: sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA== } + engines: { node: '>=18' } + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.27.3': + resolution: { integrity: sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q== } + engines: { node: '>=18' } + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.27.3': + resolution: { integrity: sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA== } + engines: { node: '>=18' } + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.9.1': + resolution: { integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ== } + engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 } + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.2': + resolution: { integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew== } + engines: { node: ^12.0.0 || ^14.0.0 || >=16.0.0 } + + '@eslint/config-array@0.21.1': + resolution: { integrity: sha512-aw1gNayWpdI/jSYVgzN5pL0cfzU02GT3NBpeT/DXbx1/1x7ZKxFPd9bwrzygx/qiwIQiJ1sw/zD8qY/kRvlGHA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/config-helpers@0.4.2': + resolution: { integrity: sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/core@0.17.0': + resolution: { integrity: 
sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/eslintrc@3.3.3': + resolution: { integrity: sha512-Kr+LPIUVKz2qkx1HAMH8q1q6azbqBAsXJUxBl/ODDuVPX45Z9DfwB8tPjTi6nNZ8BuM3nbJxC5zCAg5elnBUTQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/js@9.39.2': + resolution: { integrity: sha512-q1mjIoW1VX4IvSocvM/vbTiveKC4k9eLrajNEuSsmjymSDEbpGddtpfOoN7YGAqBK3NG+uqo8ia4PDTt8buCYA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/object-schema@2.1.7': + resolution: { integrity: sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@eslint/plugin-kit@0.4.1': + resolution: { integrity: sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@humanfs/core@0.19.1': + resolution: { integrity: sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA== } + engines: { node: '>=18.18.0' } + + '@humanfs/node@0.16.7': + resolution: { integrity: sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ== } + engines: { node: '>=18.18.0' } + + '@humanwhocodes/module-importer@1.0.1': + resolution: { integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== } + engines: { node: '>=12.22' } + + '@humanwhocodes/retry@0.4.3': + resolution: { integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ== } + engines: { node: '>=18.18' } + + '@ioredis/commands@1.5.0': + resolution: { integrity: sha512-eUgLqrMf8nJkZxT24JvVRrQya1vZkQh8BBeYNwGDqa5I0VUi8ACx7uFvAaLxintokpTenkK6DASvo/bvNbBGow== } + + '@types/estree@1.0.8': + resolution: { integrity: 
sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w== } + + '@types/json-schema@7.0.15': + resolution: { integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== } + + '@types/node@25.2.3': + resolution: { integrity: sha512-m0jEgYlYz+mDJZ2+F4v8D1AyQb+QzsNqRuI7xg1VQX/KlKS0qT9r1Mo16yo5F/MtifXFgaofIFsdFMox2SxIbQ== } + + '@typescript-eslint/eslint-plugin@8.55.0': + resolution: { integrity: sha512-1y/MVSz0NglV1ijHC8OT49mPJ4qhPYjiK08YUQVbIOyu+5k862LKUHFkpKHWu//zmr7hDR2rhwUm6gnCGNmGBQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + '@typescript-eslint/parser': ^8.55.0 + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/parser@8.55.0': + resolution: { integrity: sha512-4z2nCSBfVIMnbuu8uinj+f0o4qOeggYJLbjpPHka3KH1om7e+H9yLKTYgksTaHcGco+NClhhY2vyO3HsMH1RGw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/project-service@8.55.0': + resolution: { integrity: sha512-zRcVVPFUYWa3kNnjaZGXSu3xkKV1zXy8M4nO/pElzQhFweb7PPtluDLQtKArEOGmjXoRjnUZ29NjOiF0eCDkcQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/scope-manager@8.55.0': + resolution: { integrity: sha512-fVu5Omrd3jeqeQLiB9f1YsuK/iHFOwb04bCtY4BSCLgjNbOD33ZdV6KyEqplHr+IlpgT0QTZ/iJ+wT7hvTx49Q== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@typescript-eslint/tsconfig-utils@8.55.0': + resolution: { integrity: sha512-1R9cXqY7RQd7WuqSN47PK9EDpgFUK3VqdmbYrvWJZYDd0cavROGn+74ktWBlmJ13NXUQKlZ/iAEQHI/V0kKe0Q== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/type-utils@8.55.0': + resolution: { integrity: sha512-x1iH2unH4qAt6I37I2CGlsNs+B9WGxurP2uyZLRz6UJoZWDBx9cJL1xVN/FiOmHEONEg6RIufdvyT0TEYIgC5g== } + 
engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/types@8.55.0': + resolution: { integrity: sha512-ujT0Je8GI5BJWi+/mMoR0wxwVEQaxM+pi30xuMiJETlX80OPovb2p9E8ss87gnSVtYXtJoU9U1Cowcr6w2FE0w== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + '@typescript-eslint/typescript-estree@8.55.0': + resolution: { integrity: sha512-EwrH67bSWdx/3aRQhCoxDaHM+CrZjotc2UCCpEDVqfCE+7OjKAGWNY2HsCSTEVvWH2clYQK8pdeLp42EVs+xQw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/utils@8.55.0': + resolution: { integrity: sha512-BqZEsnPGdYpgyEIkDC1BadNY8oMwckftxBT+C8W0g1iKPdeqKZBtTfnvcq0nf60u7MkjFO8RBvpRGZBPw4L2ow== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 + typescript: '>=4.8.4 <6.0.0' + + '@typescript-eslint/visitor-keys@8.55.0': + resolution: { integrity: sha512-AxNRwEie8Nn4eFS1FzDMJWIISMGoXMb037sgCBJ3UR6o0fQTzr2tqN9WT+DkWJPhIdQCfV7T6D387566VtnCJA== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + acorn-jsx@5.3.2: + resolution: { integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== } + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.15.0: + resolution: { integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg== } + engines: { node: '>=0.4.0' } + hasBin: true + + ajv@6.12.6: + resolution: { integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== } + + ansi-styles@4.3.0: + resolution: { integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== } + engines: { node: '>=8' } + + argparse@2.0.1: + resolution: { integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== } 
+ + balanced-match@1.0.2: + resolution: { integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== } + + brace-expansion@1.1.12: + resolution: { integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg== } + + brace-expansion@2.0.2: + resolution: { integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ== } + + callsites@3.1.0: + resolution: { integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== } + engines: { node: '>=6' } + + chalk@4.1.2: + resolution: { integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== } + engines: { node: '>=10' } + + cluster-key-slot@1.1.2: + resolution: { integrity: sha512-RMr0FhtfXemyinomL4hrWcYJxmX6deFdCxpJzhDttxgO1+bcCnkk+9drydLVDmAMG7NE6aN/fl4F7ucU/90gAA== } + engines: { node: '>=0.10.0' } + + color-convert@2.0.1: + resolution: { integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== } + engines: { node: '>=7.0.0' } + + color-name@1.1.4: + resolution: { integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== } + + concat-map@0.0.1: + resolution: { integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== } + + cross-spawn@7.0.6: + resolution: { integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== } + engines: { node: '>= 8' } + + debug@4.4.3: + resolution: { integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA== } + engines: { node: '>=6.0' } + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + deep-is@0.1.4: + resolution: { integrity: 
sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ== } + + denque@2.1.0: + resolution: { integrity: sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw== } + engines: { node: '>=0.10' } + + esbuild@0.27.3: + resolution: { integrity: sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg== } + engines: { node: '>=18' } + hasBin: true + + escape-string-regexp@4.0.0: + resolution: { integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== } + engines: { node: '>=10' } + + eslint-scope@8.4.0: + resolution: { integrity: sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + eslint-visitor-keys@3.4.3: + resolution: { integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== } + engines: { node: ^12.22.0 || ^14.17.0 || >=16.0.0 } + + eslint-visitor-keys@4.2.1: + resolution: { integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + eslint@9.39.2: + resolution: { integrity: sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + hasBin: true + peerDependencies: + jiti: '*' + peerDependenciesMeta: + jiti: + optional: true + + espree@10.4.0: + resolution: { integrity: sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ== } + engines: { node: ^18.18.0 || ^20.9.0 || >=21.1.0 } + + esquery@1.7.0: + resolution: { integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g== } + engines: { node: '>=0.10' } + + esrecurse@4.3.0: + resolution: { integrity: 
sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== } + engines: { node: '>=4.0' } + + estraverse@5.3.0: + resolution: { integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== } + engines: { node: '>=4.0' } + + esutils@2.0.3: + resolution: { integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== } + engines: { node: '>=0.10.0' } + + fast-deep-equal@3.1.3: + resolution: { integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== } + + fast-json-stable-stringify@2.1.0: + resolution: { integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== } + + fast-levenshtein@2.0.6: + resolution: { integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== } + + fdir@6.5.0: + resolution: { integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg== } + engines: { node: '>=12.0.0' } + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + + file-entry-cache@8.0.0: + resolution: { integrity: sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ== } + engines: { node: '>=16.0.0' } + + find-up@5.0.0: + resolution: { integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== } + engines: { node: '>=10' } + + flat-cache@4.0.1: + resolution: { integrity: sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw== } + engines: { node: '>=16' } + + flatted@3.3.3: + resolution: { integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg== } + + fsevents@2.3.3: + resolution: { integrity: 
sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== } + engines: { node: ^8.16.0 || ^10.6.0 || >=11.0.0 } + os: [darwin] + + get-tsconfig@4.13.6: + resolution: { integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw== } + + glob-parent@6.0.2: + resolution: { integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== } + engines: { node: '>=10.13.0' } + + globals@14.0.0: + resolution: { integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ== } + engines: { node: '>=18' } + + has-flag@4.0.0: + resolution: { integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== } + engines: { node: '>=8' } + + ignore@5.3.2: + resolution: { integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== } + engines: { node: '>= 4' } + + ignore@7.0.5: + resolution: { integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg== } + engines: { node: '>= 4' } + + import-fresh@3.3.1: + resolution: { integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ== } + engines: { node: '>=6' } + + imurmurhash@0.1.4: + resolution: { integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== } + engines: { node: '>=0.8.19' } + + ioredis@5.9.3: + resolution: { integrity: sha512-VI5tMCdeoxZWU5vjHWsiE/Su76JGhBvWF1MJnV9ZtGltHk9BmD48oDq8Tj8haZ85aceXZMxLNDQZRVo5QKNgXA== } + engines: { node: '>=12.22.0' } + + is-extglob@2.1.1: + resolution: { integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== } + engines: { node: '>=0.10.0' } + + is-glob@4.0.3: + resolution: { integrity: 
sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== } + engines: { node: '>=0.10.0' } + + isexe@2.0.0: + resolution: { integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== } + + js-yaml@4.1.1: + resolution: { integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA== } + hasBin: true + + json-buffer@3.0.1: + resolution: { integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== } + + json-schema-traverse@0.4.1: + resolution: { integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== } + + json-stable-stringify-without-jsonify@1.0.1: + resolution: { integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== } + + keyv@4.5.4: + resolution: { integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw== } + + levn@0.4.1: + resolution: { integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== } + engines: { node: '>= 0.8.0' } + + locate-path@6.0.0: + resolution: { integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== } + engines: { node: '>=10' } + + lodash.defaults@4.2.0: + resolution: { integrity: sha512-qjxPLHd3r5DnsdGacqOMU6pb/avJzdh9tFX2ymgoZE27BmjXrNy/y4LoaiTeAb+O3gL8AfpJGtqfX/ae2leYYQ== } + + lodash.isarguments@3.1.0: + resolution: { integrity: sha512-chi4NHZlZqZD18a0imDHnZPrDeBbTtVN7GXMwuGdRH9qotxAjYs3aVLKc7zNOG9eddR5Ksd8rvFEBc9SsggPpg== } + + lodash.merge@4.6.2: + resolution: { integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== } + + minimatch@3.1.2: + resolution: { integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== 
} + + minimatch@9.0.5: + resolution: { integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== } + engines: { node: '>=16 || 14 >=14.17' } + + ms@2.1.3: + resolution: { integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== } + + nats@2.29.3: + resolution: { integrity: sha512-tOQCRCwC74DgBTk4pWZ9V45sk4d7peoE2njVprMRCBXrhJ5q5cYM7i6W+Uvw2qUrcfOSnuisrX7bEx3b3Wx4QA== } + engines: { node: '>= 14.0.0' } + + natural-compare@1.4.0: + resolution: { integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== } + + nkeys.js@1.1.0: + resolution: { integrity: sha512-tB/a0shZL5UZWSwsoeyqfTszONTt4k2YS0tuQioMOD180+MbombYVgzDUYHlx+gejYK6rgf08n/2Df99WY0Sxg== } + engines: { node: '>=10.0.0' } + + optionator@0.9.4: + resolution: { integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g== } + engines: { node: '>= 0.8.0' } + + p-limit@3.1.0: + resolution: { integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== } + engines: { node: '>=10' } + + p-locate@5.0.0: + resolution: { integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== } + engines: { node: '>=10' } + + parent-module@1.0.1: + resolution: { integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== } + engines: { node: '>=6' } + + path-exists@4.0.0: + resolution: { integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== } + engines: { node: '>=8' } + + path-key@3.1.1: + resolution: { integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== } + engines: { node: '>=8' } + + pg-cloudflare@1.3.0: + resolution: { integrity: 
sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ== } + + pg-connection-string@2.11.0: + resolution: { integrity: sha512-kecgoJwhOpxYU21rZjULrmrBJ698U2RxXofKVzOn5UDj61BPj/qMb7diYUR1nLScCDbrztQFl1TaQZT0t1EtzQ== } + + pg-int8@1.0.1: + resolution: { integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw== } + engines: { node: '>=4.0.0' } + + pg-pool@3.11.0: + resolution: { integrity: sha512-MJYfvHwtGp870aeusDh+hg9apvOe2zmpZJpyt+BMtzUWlVqbhFmMK6bOBXLBUPd7iRtIF9fZplDc7KrPN3PN7w== } + peerDependencies: + pg: '>=8.0' + + pg-protocol@1.11.0: + resolution: { integrity: sha512-pfsxk2M9M3BuGgDOfuy37VNRRX3jmKgMjcvAcWqNDpZSf4cUmv8HSOl5ViRQFsfARFn0KuUQTgLxVMbNq5NW3g== } + + pg-types@2.2.0: + resolution: { integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA== } + engines: { node: '>=4' } + + pg@8.18.0: + resolution: { integrity: sha512-xqrUDL1b9MbkydY/s+VZ6v+xiMUmOUk7SS9d/1kpyQxoJ6U9AO1oIJyUWVZojbfe5Cc/oluutcgFG4L9RDP1iQ== } + engines: { node: '>= 16.0.0' } + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + + pgpass@1.0.5: + resolution: { integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug== } + + picomatch@4.0.3: + resolution: { integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q== } + engines: { node: '>=12' } + + postgres-array@2.0.0: + resolution: { integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA== } + engines: { node: '>=4' } + + postgres-bytea@1.0.1: + resolution: { integrity: sha512-5+5HqXnsZPE65IJZSMkZtURARZelel2oXUEO8rH83VS/hxH5vv1uHquPg5wZs8yMAfdv971IU+kcPUczi7NVBQ== } + engines: { node: '>=0.10.0' } + + postgres-date@1.0.7: + resolution: { integrity: 
sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q== } + engines: { node: '>=0.10.0' } + + postgres-interval@1.2.0: + resolution: { integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ== } + engines: { node: '>=0.10.0' } + + prelude-ls@1.2.1: + resolution: { integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== } + engines: { node: '>= 0.8.0' } + + prettier@3.8.1: + resolution: { integrity: sha512-UOnG6LftzbdaHZcKoPFtOcCKztrQ57WkHDeRD9t/PTQtmT0NHSeWWepj6pS0z/N7+08BHFDQVUrfmfMRcZwbMg== } + engines: { node: '>=14' } + hasBin: true + + punycode@2.3.1: + resolution: { integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== } + engines: { node: '>=6' } + + redis-errors@1.2.0: + resolution: { integrity: sha512-1qny3OExCf0UvUV/5wpYKf2YwPcOqXzkwKKSmKHiE6ZMQs5heeE/c8eXK+PNllPvmjgAbfnsbpkGZWy8cBpn9w== } + engines: { node: '>=4' } + + redis-parser@3.0.0: + resolution: { integrity: sha512-DJnGAeenTdpMEH6uAJRK/uiyEIH9WVsUmoLwzudwGJUwZPp80PDBWPHXSAGNPwNvIXAbe7MSUB1zQFugFml66A== } + engines: { node: '>=4' } + + resolve-from@4.0.0: + resolution: { integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== } + engines: { node: '>=4' } + + resolve-pkg-maps@1.0.0: + resolution: { integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw== } + + semver@7.7.4: + resolution: { integrity: sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA== } + engines: { node: '>=10' } + hasBin: true + + shebang-command@2.0.0: + resolution: { integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== } + engines: { node: '>=8' } + + shebang-regex@3.0.0: + resolution: { integrity: 
sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== } + engines: { node: '>=8' } + + split2@4.2.0: + resolution: { integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg== } + engines: { node: '>= 10.x' } + + standard-as-callback@2.1.0: + resolution: { integrity: sha512-qoRRSyROncaz1z0mvYqIE4lCd9p2R90i6GxW3uZv5ucSu8tU7B5HXUP1gG8pVZsYNVaXjk8ClXHPttLyxAL48A== } + + strip-json-comments@3.1.1: + resolution: { integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== } + engines: { node: '>=8' } + + supports-color@7.2.0: + resolution: { integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== } + engines: { node: '>=8' } + + tinyglobby@0.2.15: + resolution: { integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ== } + engines: { node: '>=12.0.0' } + + ts-api-utils@2.4.0: + resolution: { integrity: sha512-3TaVTaAv2gTiMB35i3FiGJaRfwb3Pyn/j3m/bfAvGe8FB7CF6u+LMYqYlDh7reQf7UNvoTvdfAqHGmPGOSsPmA== } + engines: { node: '>=18.12' } + peerDependencies: + typescript: '>=4.8.4' + + tsc-files@1.1.4: + resolution: { integrity: sha512-RePsRsOLru3BPpnf237y1Xe1oCGta8rmSYzM76kYo5tLGsv5R2r3s64yapYorGTPuuLyfS9NVbh9ydzmvNie2w== } + hasBin: true + peerDependencies: + typescript: '>=3' + + tsx@4.21.0: + resolution: { integrity: sha512-5C1sg4USs1lfG0GFb2RLXsdpXqBSEhAaA/0kPL01wxzpMqLILNxIxIOKiILz+cdg/pLnOUxFYOR5yhHU666wbw== } + engines: { node: '>=18.0.0' } + hasBin: true + + tweetnacl@1.0.3: + resolution: { integrity: sha512-6rt+RN7aOi1nGMyC4Xa5DdYiukl2UWCbcJft7YhxReBGQD7OAM8Pbxw6YMo4r2diNEA8FEmu32YOn9rhaiE5yw== } + + type-check@0.4.0: + resolution: { integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== } + engines: { node: '>= 0.8.0' } + + typescript@5.9.3: + resolution: { integrity: 
sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw== } + engines: { node: '>=14.17' } + hasBin: true + + undici-types@7.16.0: + resolution: { integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw== } + + uri-js@4.4.1: + resolution: { integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== } + + uuid@11.1.0: + resolution: { integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A== } + hasBin: true + + which@2.0.2: + resolution: { integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== } + engines: { node: '>= 8' } + hasBin: true + + word-wrap@1.2.5: + resolution: { integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== } + engines: { node: '>=0.10.0' } + + xtend@4.0.2: + resolution: { integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== } + engines: { node: '>=0.4' } + + yocto-queue@0.1.0: + resolution: { integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== } + engines: { node: '>=10' } + + zod@4.3.6: + resolution: { integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg== } + +snapshots: + '@esbuild/aix-ppc64@0.27.3': + optional: true + + '@esbuild/android-arm64@0.27.3': + optional: true + + '@esbuild/android-arm@0.27.3': + optional: true + + '@esbuild/android-x64@0.27.3': + optional: true + + '@esbuild/darwin-arm64@0.27.3': + optional: true + + '@esbuild/darwin-x64@0.27.3': + optional: true + + '@esbuild/freebsd-arm64@0.27.3': + optional: true + + '@esbuild/freebsd-x64@0.27.3': + optional: true + + '@esbuild/linux-arm64@0.27.3': + optional: true + + '@esbuild/linux-arm@0.27.3': + optional: true + + 
'@esbuild/linux-ia32@0.27.3': + optional: true + + '@esbuild/linux-loong64@0.27.3': + optional: true + + '@esbuild/linux-mips64el@0.27.3': + optional: true + + '@esbuild/linux-ppc64@0.27.3': + optional: true + + '@esbuild/linux-riscv64@0.27.3': + optional: true + + '@esbuild/linux-s390x@0.27.3': + optional: true + + '@esbuild/linux-x64@0.27.3': + optional: true + + '@esbuild/netbsd-arm64@0.27.3': + optional: true + + '@esbuild/netbsd-x64@0.27.3': + optional: true + + '@esbuild/openbsd-arm64@0.27.3': + optional: true + + '@esbuild/openbsd-x64@0.27.3': + optional: true + + '@esbuild/openharmony-arm64@0.27.3': + optional: true + + '@esbuild/sunos-x64@0.27.3': + optional: true + + '@esbuild/win32-arm64@0.27.3': + optional: true + + '@esbuild/win32-ia32@0.27.3': + optional: true + + '@esbuild/win32-x64@0.27.3': + optional: true + + '@eslint-community/eslint-utils@4.9.1(eslint@9.39.2)': + dependencies: + eslint: 9.39.2 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.2': {} + + '@eslint/config-array@0.21.1': + dependencies: + '@eslint/object-schema': 2.1.7 + debug: 4.4.3 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@eslint/config-helpers@0.4.2': + dependencies: + '@eslint/core': 0.17.0 + + '@eslint/core@0.17.0': + dependencies: + '@types/json-schema': 7.0.15 + + '@eslint/eslintrc@3.3.3': + dependencies: + ajv: 6.12.6 + debug: 4.4.3 + espree: 10.4.0 + globals: 14.0.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.1 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@9.39.2': {} + + '@eslint/object-schema@2.1.7': {} + + '@eslint/plugin-kit@0.4.1': + dependencies: + '@eslint/core': 0.17.0 + levn: 0.4.1 + + '@humanfs/core@0.19.1': {} + + '@humanfs/node@0.16.7': + dependencies: + '@humanfs/core': 0.19.1 + '@humanwhocodes/retry': 0.4.3 + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/retry@0.4.3': {} + + '@ioredis/commands@1.5.0': + optional: true + 
+ '@types/estree@1.0.8': {} + + '@types/json-schema@7.0.15': {} + + '@types/node@25.2.3': + dependencies: + undici-types: 7.16.0 + + '@typescript-eslint/eslint-plugin@8.55.0(@typescript-eslint/parser@8.55.0(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 8.55.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/scope-manager': 8.55.0 + '@typescript-eslint/type-utils': 8.55.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/utils': 8.55.0(eslint@9.39.2)(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.55.0 + eslint: 9.39.2 + ignore: 7.0.5 + natural-compare: 1.4.0 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@8.55.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@typescript-eslint/scope-manager': 8.55.0 + '@typescript-eslint/types': 8.55.0 + '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3) + '@typescript-eslint/visitor-keys': 8.55.0 + debug: 4.4.3 + eslint: 9.39.2 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/project-service@8.55.0(typescript@5.9.3)': + dependencies: + '@typescript-eslint/tsconfig-utils': 8.55.0(typescript@5.9.3) + '@typescript-eslint/types': 8.55.0 + debug: 4.4.3 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@8.55.0': + dependencies: + '@typescript-eslint/types': 8.55.0 + '@typescript-eslint/visitor-keys': 8.55.0 + + '@typescript-eslint/tsconfig-utils@8.55.0(typescript@5.9.3)': + dependencies: + typescript: 5.9.3 + + '@typescript-eslint/type-utils@8.55.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@typescript-eslint/types': 8.55.0 + '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3) + '@typescript-eslint/utils': 8.55.0(eslint@9.39.2)(typescript@5.9.3) + debug: 4.4.3 + eslint: 9.39.2 + 
ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@8.55.0': {} + + '@typescript-eslint/typescript-estree@8.55.0(typescript@5.9.3)': + dependencies: + '@typescript-eslint/project-service': 8.55.0(typescript@5.9.3) + '@typescript-eslint/tsconfig-utils': 8.55.0(typescript@5.9.3) + '@typescript-eslint/types': 8.55.0 + '@typescript-eslint/visitor-keys': 8.55.0 + debug: 4.4.3 + minimatch: 9.0.5 + semver: 7.7.4 + tinyglobby: 0.2.15 + ts-api-utils: 2.4.0(typescript@5.9.3) + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.55.0(eslint@9.39.2)(typescript@5.9.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2) + '@typescript-eslint/scope-manager': 8.55.0 + '@typescript-eslint/types': 8.55.0 + '@typescript-eslint/typescript-estree': 8.55.0(typescript@5.9.3) + eslint: 9.39.2 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/visitor-keys@8.55.0': + dependencies: + '@typescript-eslint/types': 8.55.0 + eslint-visitor-keys: 4.2.1 + + acorn-jsx@5.3.2(acorn@8.15.0): + dependencies: + acorn: 8.15.0 + + acorn@8.15.0: {} + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + argparse@2.0.1: {} + + balanced-match@1.0.2: {} + + brace-expansion@1.1.12: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.2: + dependencies: + balanced-match: 1.0.2 + + callsites@3.1.0: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + cluster-key-slot@1.1.2: + optional: true + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + concat-map@0.0.1: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + debug@4.4.3: 
+ dependencies: + ms: 2.1.3 + + deep-is@0.1.4: {} + + denque@2.1.0: + optional: true + + esbuild@0.27.3: + optionalDependencies: + '@esbuild/aix-ppc64': 0.27.3 + '@esbuild/android-arm': 0.27.3 + '@esbuild/android-arm64': 0.27.3 + '@esbuild/android-x64': 0.27.3 + '@esbuild/darwin-arm64': 0.27.3 + '@esbuild/darwin-x64': 0.27.3 + '@esbuild/freebsd-arm64': 0.27.3 + '@esbuild/freebsd-x64': 0.27.3 + '@esbuild/linux-arm': 0.27.3 + '@esbuild/linux-arm64': 0.27.3 + '@esbuild/linux-ia32': 0.27.3 + '@esbuild/linux-loong64': 0.27.3 + '@esbuild/linux-mips64el': 0.27.3 + '@esbuild/linux-ppc64': 0.27.3 + '@esbuild/linux-riscv64': 0.27.3 + '@esbuild/linux-s390x': 0.27.3 + '@esbuild/linux-x64': 0.27.3 + '@esbuild/netbsd-arm64': 0.27.3 + '@esbuild/netbsd-x64': 0.27.3 + '@esbuild/openbsd-arm64': 0.27.3 + '@esbuild/openbsd-x64': 0.27.3 + '@esbuild/openharmony-arm64': 0.27.3 + '@esbuild/sunos-x64': 0.27.3 + '@esbuild/win32-arm64': 0.27.3 + '@esbuild/win32-ia32': 0.27.3 + '@esbuild/win32-x64': 0.27.3 + + escape-string-regexp@4.0.0: {} + + eslint-scope@8.4.0: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint-visitor-keys@4.2.1: {} + + eslint@9.39.2: + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@9.39.2) + '@eslint-community/regexpp': 4.12.2 + '@eslint/config-array': 0.21.1 + '@eslint/config-helpers': 0.4.2 + '@eslint/core': 0.17.0 + '@eslint/eslintrc': 3.3.3 + '@eslint/js': 9.39.2 + '@eslint/plugin-kit': 0.4.1 + '@humanfs/node': 0.16.7 + '@humanwhocodes/module-importer': 1.0.1 + '@humanwhocodes/retry': 0.4.3 + '@types/estree': 1.0.8 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.6 + debug: 4.4.3 + escape-string-regexp: 4.0.0 + eslint-scope: 8.4.0 + eslint-visitor-keys: 4.2.1 + espree: 10.4.0 + esquery: 1.7.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 8.0.0 + find-up: 5.0.0 + glob-parent: 6.0.2 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + json-stable-stringify-without-jsonify: 1.0.1 + 
lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + transitivePeerDependencies: + - supports-color + + espree@10.4.0: + dependencies: + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) + eslint-visitor-keys: 4.2.1 + + esquery@1.7.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + esutils@2.0.3: {} + + fast-deep-equal@3.1.3: {} + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + file-entry-cache@8.0.0: + dependencies: + flat-cache: 4.0.1 + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@4.0.1: + dependencies: + flatted: 3.3.3 + keyv: 4.5.4 + + flatted@3.3.3: {} + + fsevents@2.3.3: + optional: true + + get-tsconfig@4.13.6: + dependencies: + resolve-pkg-maps: 1.0.0 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + globals@14.0.0: {} + + has-flag@4.0.0: {} + + ignore@5.3.2: {} + + ignore@7.0.5: {} + + import-fresh@3.3.1: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + ioredis@5.9.3: + dependencies: + '@ioredis/commands': 1.5.0 + cluster-key-slot: 1.1.2 + debug: 4.4.3 + denque: 2.1.0 + lodash.defaults: 4.2.0 + lodash.isarguments: 3.1.0 + redis-errors: 1.2.0 + redis-parser: 3.0.0 + standard-as-callback: 2.1.0 + transitivePeerDependencies: + - supports-color + optional: true + + is-extglob@2.1.1: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + isexe@2.0.0: {} + + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + json-buffer@3.0.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash.defaults@4.2.0: + optional: true + + 
lodash.isarguments@3.1.0: + optional: true + + lodash.merge@4.6.2: {} + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.12 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.2 + + ms@2.1.3: {} + + nats@2.29.3: + dependencies: + nkeys.js: 1.1.0 + optional: true + + natural-compare@1.4.0: {} + + nkeys.js@1.1.0: + dependencies: + tweetnacl: 1.0.3 + optional: true + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + path-exists@4.0.0: {} + + path-key@3.1.1: {} + + pg-cloudflare@1.3.0: + optional: true + + pg-connection-string@2.11.0: + optional: true + + pg-int8@1.0.1: + optional: true + + pg-pool@3.11.0(pg@8.18.0): + dependencies: + pg: 8.18.0 + optional: true + + pg-protocol@1.11.0: + optional: true + + pg-types@2.2.0: + dependencies: + pg-int8: 1.0.1 + postgres-array: 2.0.0 + postgres-bytea: 1.0.1 + postgres-date: 1.0.7 + postgres-interval: 1.2.0 + optional: true + + pg@8.18.0: + dependencies: + pg-connection-string: 2.11.0 + pg-pool: 3.11.0(pg@8.18.0) + pg-protocol: 1.11.0 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.3.0 + optional: true + + pgpass@1.0.5: + dependencies: + split2: 4.2.0 + optional: true + + picomatch@4.0.3: {} + + postgres-array@2.0.0: + optional: true + + postgres-bytea@1.0.1: + optional: true + + postgres-date@1.0.7: + optional: true + + postgres-interval@1.2.0: + dependencies: + xtend: 4.0.2 + optional: true + + prelude-ls@1.2.1: {} + + prettier@3.8.1: {} + + punycode@2.3.1: {} + + redis-errors@1.2.0: + optional: true + + redis-parser@3.0.0: + dependencies: + redis-errors: 1.2.0 + optional: true + + resolve-from@4.0.0: {} + + resolve-pkg-maps@1.0.0: {} + + semver@7.7.4: {} + + shebang-command@2.0.0: + dependencies: + 
shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + split2@4.2.0: + optional: true + + standard-as-callback@2.1.0: + optional: true + + strip-json-comments@3.1.1: {} + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + ts-api-utils@2.4.0(typescript@5.9.3): + dependencies: + typescript: 5.9.3 + + tsc-files@1.1.4(typescript@5.9.3): + dependencies: + typescript: 5.9.3 + + tsx@4.21.0: + dependencies: + esbuild: 0.27.3 + get-tsconfig: 4.13.6 + optionalDependencies: + fsevents: 2.3.3 + + tweetnacl@1.0.3: + optional: true + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + typescript@5.9.3: {} + + undici-types@7.16.0: {} + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + uuid@11.1.0: {} + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + word-wrap@1.2.5: {} + + xtend@4.0.2: + optional: true + + yocto-queue@0.1.0: {} + + zod@4.3.6: {} diff --git a/bubus-ts/pnpm-workspace.yaml b/bubus-ts/pnpm-workspace.yaml new file mode 100644 index 0000000..e4a4b5b --- /dev/null +++ b/bubus-ts/pnpm-workspace.yaml @@ -0,0 +1,2 @@ +onlyBuiltDependencies: + - better-sqlite3 diff --git a/bubus-ts/prettier.config.js b/bubus-ts/prettier.config.js new file mode 100644 index 0000000..98b89f5 --- /dev/null +++ b/bubus-ts/prettier.config.js @@ -0,0 +1,8 @@ +const config = { + semi: false, + singleQuote: true, + trailingComma: 'es5', + printWidth: 140, +} + +export default config diff --git a/bubus-ts/scripts/build_esm.mjs b/bubus-ts/scripts/build_esm.mjs new file mode 100644 index 0000000..76b413c --- /dev/null +++ b/bubus-ts/scripts/build_esm.mjs @@ -0,0 +1,24 @@ +import { readdir, rm } from 'node:fs/promises' +import { join } from 'node:path' + +import { build } from 'esbuild' + +const src_dir = 'src' +const dist_dir = 'dist/esm' + +const entries = (await readdir(src_dir)) + .filter((name) => name.endsWith('.ts') && !name.endsWith('.test.ts')) + .map((name) => join(src_dir, name)) + 
+await rm(dist_dir, { recursive: true, force: true }) + +await build({ + entryPoints: entries, + bundle: false, + format: 'esm', + platform: 'neutral', + target: 'es2022', + sourcemap: true, + outbase: src_dir, + outdir: dist_dir, +}) diff --git a/bubus-ts/scripts/run_browser_perf.sh b/bubus-ts/scripts/run_browser_perf.sh new file mode 100755 index 0000000..37285ed --- /dev/null +++ b/bubus-ts/scripts/run_browser_perf.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -euo pipefail + +SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(cd -- "$SCRIPT_DIR/.." && pwd)" +cd "$ROOT_DIR" + +if [[ -x /usr/bin/chromium ]]; then + echo "[perf:browser] using system chromium executable: /usr/bin/chromium" + PW_CHROMIUM_EXECUTABLE_PATH=/usr/bin/chromium \ + npx --yes --package=playwright -c 'PW_BIN="$(command -v playwright)"; PW_NODE_MODULES="$(cd "$(dirname "$PW_BIN")/.." && pwd)"; NODE_PATH="$PW_NODE_MODULES" playwright test tests/performance.browser.spec.cjs --config=playwright.perf.config.cjs --project=browser-perf --workers=1 --reporter=line --output=/tmp/bubus-playwright-results' +else + echo "[perf:browser] /usr/bin/chromium not found; using Playwright-managed chromium" + npx --yes --package=playwright -c 'PW_BIN="$(command -v playwright)"; PW_NODE_MODULES="$(cd "$(dirname "$PW_BIN")/.." 
&& pwd)"; NODE_PATH="$PW_NODE_MODULES" playwright test tests/performance.browser.spec.cjs --config=playwright.perf.config.cjs --project=browser-perf --workers=1 --reporter=line --output=/tmp/bubus-playwright-results' +fi diff --git a/bubus-ts/src/async_context.ts b/bubus-ts/src/async_context.ts new file mode 100644 index 0000000..20834ea --- /dev/null +++ b/bubus-ts/src/async_context.ts @@ -0,0 +1,53 @@ +declare const process: { versions?: { node?: string } } | undefined + +type AsyncLocalStorageLike = { + getStore(): unknown + run(store: unknown, callback: () => T): T + enterWith?(store: unknown): void +} + +export type { AsyncLocalStorageLike } + +// Cache the AsyncLocalStorage constructor so multiple modules can create separate instances. +let _AsyncLocalStorageClass: (new () => AsyncLocalStorageLike) | null = null + +const is_node = typeof process !== 'undefined' && typeof process.versions !== 'undefined' && typeof process.versions.node === 'string' + +if (is_node) { + try { + const importer = new Function('specifier', 'return import(specifier)') as ( + specifier: string + ) => Promise<{ AsyncLocalStorage?: new () => AsyncLocalStorageLike }> + const mod = await importer('node:async_hooks') + if (mod?.AsyncLocalStorage) { + _AsyncLocalStorageClass = mod.AsyncLocalStorage + } + } catch { + _AsyncLocalStorageClass = null + } +} + +/** Create a new AsyncLocalStorage instance, or null if unavailable (e.g. in browsers). */ +export const createAsyncLocalStorage = (): AsyncLocalStorageLike | null => { + if (!_AsyncLocalStorageClass) return null + return new _AsyncLocalStorageClass() +} + +// The primary AsyncLocalStorage instance used for event dispatch context propagation. +export let async_local_storage: AsyncLocalStorageLike | null = _AsyncLocalStorageClass ? new _AsyncLocalStorageClass() : null + +export const captureAsyncContext = (): unknown | null => { + if (!async_local_storage) { + return null + } + return async_local_storage.getStore() ?? 
null +} + +export const _runWithAsyncContext = (context: unknown | null, fn: () => T): T => { + if (!async_local_storage) { + return fn() + } + return async_local_storage.run(context ?? undefined, fn) +} + +export const hasAsyncLocalStorage = (): boolean => async_local_storage !== null diff --git a/bubus-ts/src/base_event.ts b/bubus-ts/src/base_event.ts new file mode 100644 index 0000000..06b6749 --- /dev/null +++ b/bubus-ts/src/base_event.ts @@ -0,0 +1,1083 @@ +import { z } from 'zod' +import { v7 as uuidv7 } from 'uuid' + +import { EventBus } from './event_bus.js' +import { EventResult } from './event_result.js' +import { EventHandler, EventHandlerAbortedError, EventHandlerCancelledError, EventHandlerTimeoutError } from './event_handler.js' +import type { EventConcurrencyMode, EventHandlerConcurrencyMode, EventHandlerCompletionMode, Deferred } from './lock_manager.js' +import { + AsyncLock, + EVENT_CONCURRENCY_MODES, + EVENT_HANDLER_CONCURRENCY_MODES, + EVENT_HANDLER_COMPLETION_MODES, + withResolvers, +} from './lock_manager.js' +import { _runWithTimeout } from './timing.js' +import { extractZodShape, normalizeEventResultType, toJsonSchema } from './types.js' +import type { EventHandlerCallable, EventResultType } from './types.js' +import { monotonicDatetime } from './helpers.js' + +const RESERVED_USER_EVENT_FIELDS = new Set(['bus', 'first', 'toString', 'toJSON', 'fromJSON']) + +function assertNoReservedUserEventFields(data: Record, context: string): void { + for (const field_name of RESERVED_USER_EVENT_FIELDS) { + if (Object.prototype.hasOwnProperty.call(data, field_name)) { + throw new Error(`${context} field "${field_name}" is reserved for EventBus runtime context and cannot be set in event payload`) + } + } +} + +function assertNoUnknownEventPrefixedFields(data: Record, context: string): void { + for (const field_name of Object.keys(data)) { + if (field_name.startsWith('event_') && !KNOWN_BASE_EVENT_FIELDS.has(field_name)) { + throw new Error(`${context} 
field "${field_name}" starts with "event_" but is not a recognized BaseEvent field`) + } + } +} + +function assertNoModelPrefixedFields(data: Record, context: string): void { + for (const field_name of Object.keys(data)) { + if (field_name.startsWith('model_')) { + throw new Error(`${context} field "${field_name}" starts with "model_" and is reserved for model internals`) + } + } +} + +export const BaseEventSchema = z + .object({ + event_id: z.string().uuid(), + event_created_at: z.string().datetime(), + event_type: z.string(), + event_version: z.string().default('0.0.1'), + event_timeout: z.number().positive().nullable(), + event_slow_timeout: z.number().positive().nullable().optional(), + event_handler_timeout: z.number().positive().nullable().optional(), + event_handler_slow_timeout: z.number().positive().nullable().optional(), + event_parent_id: z.string().uuid().nullable().optional(), + event_path: z.array(z.string()).optional(), + event_result_type: z.unknown().optional(), + event_emitted_by_handler_id: z.string().uuid().nullable().optional(), + event_pending_bus_count: z.number().nonnegative().optional(), + event_status: z.enum(['pending', 'started', 'completed']).optional(), + event_started_at: z.string().datetime().nullable().optional(), + event_completed_at: z.string().datetime().nullable().optional(), + event_results: z.array(z.unknown()).optional(), + event_concurrency: z.enum(EVENT_CONCURRENCY_MODES).nullable().optional(), + event_handler_concurrency: z.enum(EVENT_HANDLER_CONCURRENCY_MODES).nullable().optional(), + event_handler_completion: z.enum(EVENT_HANDLER_COMPLETION_MODES).nullable().optional(), + }) + .loose() + +const KNOWN_BASE_EVENT_FIELDS = new Set(Object.keys(BaseEventSchema.shape)) + +export type BaseEventData = z.infer +export type BaseEventJSON = BaseEventData & Record +type BaseEventFields = Pick< + BaseEventData, + | 'event_id' + | 'event_created_at' + | 'event_type' + | 'event_version' + | 'event_timeout' + | 'event_slow_timeout' + | 
'event_handler_timeout' + | 'event_handler_slow_timeout' + | 'event_parent_id' + | 'event_path' + | 'event_result_type' + | 'event_emitted_by_handler_id' + | 'event_pending_bus_count' + | 'event_status' + | 'event_started_at' + | 'event_completed_at' + | 'event_results' + | 'event_concurrency' + | 'event_handler_concurrency' + | 'event_handler_completion' +> + +export type BaseEventInit> = TFields & Partial + +type BaseEventSchemaShape = typeof BaseEventSchema.shape + +export type EventSchema = z.ZodObject +type EventPayload = TShape extends Record ? {} : z.infer> + +type EventInput = z.input> +export type EventInit = Omit, keyof BaseEventFields> & Partial + +type EventWithResultSchema = BaseEvent & { __event_result_type__?: TResult } + +type ResultTypeFromEventResultTypeInput = TInput extends z.ZodTypeAny + ? z.infer + : TInput extends StringConstructor + ? string + : TInput extends NumberConstructor + ? number + : TInput extends BooleanConstructor + ? boolean + : TInput extends ArrayConstructor + ? unknown[] + : TInput extends ObjectConstructor + ? Record + : unknown + +type ResultSchemaFromShape = TShape extends { event_result_type: infer S } ? 
ResultTypeFromEventResultTypeInput : unknown +type EventResultsListInclude = ( + result: EventResultType | undefined, + event_result: EventResult +) => boolean +type EventResultsListOptions = { + timeout?: number | null + include?: EventResultsListInclude + raise_if_any?: boolean + raise_if_none?: boolean +} +type EventResultUpdateOptions = { + eventbus?: EventBus + status?: 'pending' | 'started' | 'completed' | 'error' + result?: EventResultType | BaseEvent | undefined + error?: unknown +} + +const EVENT_CLASS_DEFAULTS = new WeakMap>() +const ROOT_EVENTBUS_ID = '00000000-0000-0000-0000-000000000000' + +export type EventFactory = { + (data: EventInit): EventWithResultSchema & EventPayload + new (data: EventInit): EventWithResultSchema & EventPayload + schema: EventSchema + class?: new (data: EventInit) => EventWithResultSchema & EventPayload + event_type?: string + event_version?: string + event_result_type?: z.ZodTypeAny + fromJSON?: (data: unknown) => EventWithResultSchema & EventPayload +} + +type ZodShapeFrom> = { + [K in keyof TShape as K extends 'event_result_type' ? never : TShape[K] extends z.ZodTypeAny ? K : never]: Extract< + TShape[K], + z.ZodTypeAny + > +} + +export class BaseEvent { + // event metadata fields + event_id!: string // unique uuidv7 identifier for the event + event_created_at!: string + event_type!: string // should match the class name of the event, e.g. 
BaseEvent.extend("MyEvent").event_type === "MyEvent" + event_version!: string // event schema/version tag managed by callers for migration-friendly payload handling + event_timeout!: number | null // maximum time in seconds that the event is allowed to run before it is aborted + event_slow_timeout?: number | null // optional per-event slow warning threshold in seconds + event_handler_timeout?: number | null // optional per-event handler timeout override in seconds + event_handler_slow_timeout?: number | null // optional per-event slow handler warning threshold in seconds + event_parent_id!: string | null // id of the parent event that triggered this event, if this event was emitted during handling of another event, else null + event_path!: string[] // list of bus labels (name#id) that the event has been dispatched to, including the current bus + event_result_type?: z.ZodTypeAny // optional zod schema to enforce the shape of return values from handlers + event_results!: Map> // map of handler ids to EventResult objects for the event + event_emitted_by_handler_id!: string | null // if event was emitted inside a handler while it was running, this is set to the enclosing handler's handler id, else null + event_pending_bus_count!: number // number of buses that have accepted this event and not yet finished processing or removed it from their queues (for queue-jump processing) + event_status!: 'pending' | 'started' | 'completed' // processing status of the event as a whole, no separate 'error' state because events can not error, only individual handlers can + event_started_at!: string | null + event_completed_at!: string | null + event_concurrency?: EventConcurrencyMode | null // concurrency mode for the event as a whole in relation to other events + event_handler_concurrency?: EventHandlerConcurrencyMode | null // concurrency mode for the handlers within the event + event_handler_completion?: EventHandlerCompletionMode | null // completion strategy: 'all' (default) 
waits for every handler, 'first' returns earliest non-undefined result and cancels the rest + + static event_type?: string // class name of the event, e.g. BaseEvent.extend("MyEvent").event_type === "MyEvent" + static event_version = '0.0.1' + static schema = BaseEventSchema // zod schema for the event data fields, used to parse and validate event data when creating a new event + + // internal runtime state + bus?: EventBus // shortcut to the bus that dispatched this event, for event.bus.emit(event) auto-child tracking via proxy wrapping + _event_original?: BaseEvent // underlying event object that was dispatched, if this is a bus-scoped proxy wrapping it + _event_dispatch_context?: unknown | null // captured AsyncLocalStorage context at dispatch site, used to restore that context when running handlers + + _event_completed_signal: Deferred | null + _lock_for_event_handler: AsyncLock | null + + get event_bus(): EventBus { + return this.bus as EventBus + } + + constructor(data: BaseEventInit> = {}) { + assertNoReservedUserEventFields(data as Record, 'BaseEvent') + assertNoUnknownEventPrefixedFields(data as Record, 'BaseEvent') + assertNoModelPrefixedFields(data as Record, 'BaseEvent') + const ctor = this.constructor as typeof BaseEvent & { + event_version?: string + event_result_type?: z.ZodTypeAny + } + const ctor_defaults = EVENT_CLASS_DEFAULTS.get(ctor) ?? {} + const merged_data = { + ...ctor_defaults, + ...data, + } as BaseEventInit> + const event_type = merged_data.event_type ?? ctor.event_type ?? ctor.name + const event_version = merged_data.event_version ?? ctor.event_version ?? '0.0.1' + const raw_event_result_type = merged_data.event_result_type ?? ctor.event_result_type + const event_result_type = normalizeEventResultType(raw_event_result_type) + const event_id = merged_data.event_id ?? uuidv7() + const event_created_at = monotonicDatetime(merged_data.event_created_at) + const event_timeout = merged_data.event_timeout ?? 
null + + const base_data = { + ...merged_data, + event_id, + event_created_at, + event_type, + event_version, + event_timeout, + event_result_type, + } + + const schema = ctor.schema ?? BaseEventSchema + const parsed = schema.parse(base_data) as BaseEventData & Record + + Object.assign(this, parsed) + + const parsed_path = (parsed as { event_path?: string[] }).event_path + this.event_path = Array.isArray(parsed_path) ? [...parsed_path] : [] + + // load event results from potentially raw objects from JSON to proper EventResult objects + this.event_results = hydrateEventResults(this, (parsed as { event_results?: unknown }).event_results) + this.event_pending_bus_count = + typeof (parsed as { event_pending_bus_count?: unknown }).event_pending_bus_count === 'number' + ? Math.max(0, Number((parsed as { event_pending_bus_count?: number }).event_pending_bus_count)) + : 0 + const parsed_status = (parsed as { event_status?: unknown }).event_status + this.event_status = + parsed_status === 'pending' || parsed_status === 'started' || parsed_status === 'completed' ? parsed_status : 'pending' + + this.event_started_at = + parsed.event_started_at === null || parsed.event_started_at === undefined ? null : monotonicDatetime(parsed.event_started_at) + this.event_completed_at = + parsed.event_completed_at === null || parsed.event_completed_at === undefined ? null : monotonicDatetime(parsed.event_completed_at) + this.event_parent_id = + typeof (parsed as { event_parent_id?: unknown }).event_parent_id === 'string' + ? (parsed as { event_parent_id: string }).event_parent_id + : null + this.event_emitted_by_handler_id = + typeof (parsed as { event_emitted_by_handler_id?: unknown }).event_emitted_by_handler_id === 'string' + ? 
(parsed as { event_emitted_by_handler_id: string }).event_emitted_by_handler_id + : null + + this.event_result_type = event_result_type + + this._event_completed_signal = null + this._lock_for_event_handler = null + this._event_dispatch_context = undefined + } + + // "MyEvent#a48f" + toString(): string { + return `${this.event_type}#${this.event_id.slice(-4)}` + } + + // main entry point for users to define their own event types + // BaseEvent.extend("MyEvent", { some_custom_field: z.string(), event_result_type: z.string(), event_timeout: 25, ... }) -> MyEvent + static extend(event_type: string, shape?: TShape): EventFactory> + static extend>( + event_type: string, + shape?: TShape + ): EventFactory, ResultSchemaFromShape> + static extend>( + event_type: string, + shape: TShape = {} as TShape + ): EventFactory, ResultSchemaFromShape> { + const raw_shape = shape as Record + assertNoReservedUserEventFields(raw_shape, `BaseEvent.extend(${event_type})`) + assertNoUnknownEventPrefixedFields(raw_shape, `BaseEvent.extend(${event_type})`) + assertNoModelPrefixedFields(raw_shape, `BaseEvent.extend(${event_type})`) + const raw_event_result_type = raw_shape.event_result_type + const event_result_type = normalizeEventResultType(raw_event_result_type) + const event_version = typeof raw_shape.event_version === 'string' ? raw_shape.event_version : undefined + const event_defaults = Object.fromEntries( + Object.entries(raw_shape).filter( + ([key, value]) => key !== 'event_result_type' && key !== 'event_version' && !(value instanceof z.ZodType) + ) + ) + + const zod_shape = extractZodShape(raw_shape) + const full_schema = BaseEventSchema.extend(zod_shape) + + // create a new event class that extends BaseEvent and adds the custom fields + class ExtendedEvent extends BaseEvent { + static schema = full_schema as unknown as typeof BaseEvent.schema + static event_type = event_type + static event_version = event_version ?? 
BaseEvent.event_version + static event_result_type = event_result_type + + constructor(data: EventInit>) { + super(data as BaseEventInit>) + } + } + + type FactoryResult = EventWithResultSchema> & EventPayload> + + function EventFactory(data: EventInit>): FactoryResult { + return new ExtendedEvent(data) as FactoryResult + } + + EventFactory.schema = full_schema as EventSchema> + EventFactory.event_type = event_type + EventFactory.event_version = event_version ?? BaseEvent.event_version + EventFactory.event_result_type = event_result_type + EventFactory.class = ExtendedEvent as unknown as new ( + data: EventInit> + ) => EventWithResultSchema> & EventPayload> + EventFactory.fromJSON = (data: unknown) => (ExtendedEvent.fromJSON as (data: unknown) => FactoryResult)(data) + EventFactory.prototype = ExtendedEvent.prototype + EVENT_CLASS_DEFAULTS.set(ExtendedEvent, event_defaults) + + return EventFactory as unknown as EventFactory, ResultSchemaFromShape> + } + + static fromJSON(this: T, data: unknown): InstanceType { + if (!data || typeof data !== 'object') { + const schema = this.schema ?? BaseEventSchema + const parsed = schema.parse(data) + return new this(parsed) as InstanceType + } + const record = { ...(data as Record) } + if (record.event_result_type !== undefined && record.event_result_type !== null) { + record.event_result_type = normalizeEventResultType(record.event_result_type) + } + return new this(record as BaseEventInit>) as InstanceType + } + + static toJSONArray(events: Iterable): BaseEventJSON[] { + return Array.from(events, (event) => { + const original = event._event_original ?? 
event + return original.toJSON() + }) + } + + static fromJSONArray(data: unknown): BaseEvent[] { + if (!Array.isArray(data)) { + return [] + } + return data.map((item) => BaseEvent.fromJSON(item)) + } + + toJSON(): BaseEventJSON { + const record: Record = {} + for (const [key, value] of Object.entries(this as unknown as Record)) { + if (key.startsWith('_') || key === 'bus' || key === 'event_results') continue + if (value === undefined || typeof value === 'function') continue + record[key] = value + } + const event_results = Array.from(this.event_results.values()).map((result) => result.toJSON()) + + return { + ...record, + event_id: this.event_id, + event_type: this.event_type, + event_version: this.event_version, + event_result_type: this.event_result_type ? toJsonSchema(this.event_result_type) : this.event_result_type, + + // static configuration options + event_timeout: this.event_timeout, + event_slow_timeout: this.event_slow_timeout, + event_concurrency: this.event_concurrency, + event_handler_concurrency: this.event_handler_concurrency, + event_handler_completion: this.event_handler_completion, + event_handler_slow_timeout: this.event_handler_slow_timeout, + event_handler_timeout: this.event_handler_timeout, + + // mutable parent/child/bus tracking runtime state + event_parent_id: this.event_parent_id, + event_path: this.event_path, + event_emitted_by_handler_id: this.event_emitted_by_handler_id, + event_pending_bus_count: this.event_pending_bus_count, + + // mutable runtime status and timestamps + event_status: this.event_status, + event_created_at: this.event_created_at, + event_started_at: this.event_started_at ?? null, + event_completed_at: this.event_completed_at ?? null, + + // mutable result state + ...(event_results.length > 0 ? { event_results } : {}), + } + } + + _createSlowEventWarningTimer(): ReturnType | null { + const event_slow_timeout = this.event_slow_timeout ?? this.bus?.event_slow_timeout ?? 
null + const event_warn_ms = event_slow_timeout === null ? null : event_slow_timeout * 1000 + if (event_warn_ms === null) { + return null + } + const name = this.bus?.name ?? 'EventBus' + return setTimeout(() => { + if (this.event_status === 'completed') { + return + } + const running_handler_count = [...this.event_results.values()].filter((result) => result.status === 'started').length + const started_at = this.event_started_at ?? this.event_created_at + const elapsed_ms = Math.max(0, Date.now() - Date.parse(started_at)) + const elapsed_seconds = (elapsed_ms / 1000).toFixed(2) + console.warn( + `[bubus] Slow event processing: ${name}.on(${this.event_type}#${this.event_id.slice(-4)}, ${running_handler_count} handlers) still running after ${elapsed_seconds}s` + ) + }, event_warn_ms) + } + + eventResultUpdate(handler: EventHandler | EventHandlerCallable, options: EventResultUpdateOptions = {}): EventResult { + const original_event = (this._event_original ?? this) as this + let resolved_eventbus = options.eventbus + let handler_entry: EventHandler + + if (handler instanceof EventHandler) { + handler_entry = handler + if (!resolved_eventbus && handler_entry.eventbus_id !== ROOT_EVENTBUS_ID && original_event.bus) { + resolved_eventbus = + original_event.bus.all_instances.findBusById(handler_entry.eventbus_id) ?? + (original_event.bus.id === handler_entry.eventbus_id ? original_event.bus : undefined) + } + } else { + handler_entry = EventHandler.fromCallable({ + handler, + event_pattern: original_event.event_type, + eventbus_name: resolved_eventbus?.name ?? 'EventBus', + eventbus_id: resolved_eventbus?.id ?? ROOT_EVENTBUS_ID, + }) + } + + const scoped_event = resolved_eventbus ? resolved_eventbus._getEventProxyScopedToThisBus(original_event) : original_event + const handler_id = handler_entry.id + const existing = original_event.event_results.get(handler_id) + const event_result: EventResult = + existing ?? 
(new EventResult({ event: scoped_event as this, handler: handler_entry }) as EventResult) + if (!existing) { + original_event.event_results.set(handler_id, event_result) + } else { + if (existing.event !== scoped_event) { + existing.event = scoped_event as this + } + if (existing.handler.id !== handler_entry.id) { + existing.handler = handler_entry + } + } + + if (options.status !== undefined || options.result !== undefined || options.error !== undefined) { + event_result.update({ + status: options.status, + result: options.result, + error: options.error, + }) + if (event_result.status === 'started' && event_result.started_at !== null) { + original_event._markStarted(event_result.started_at, false) + } + if (options.status === 'pending' || options.status === 'started') { + original_event.event_completed_at = null + } + } + + return event_result + } + + _createPendingHandlerResults(bus: EventBus): Array<{ + handler: EventHandler + result: EventResult + }> { + const original_event = this._event_original ?? this + const scoped_event = bus._getEventProxyScopedToThisBus(original_event) + const handlers = bus._getHandlersForEvent(original_event) + return handlers.map((entry) => { + const handler_id = entry.id + const existing = original_event.event_results.get(handler_id) + const result = existing ?? 
new EventResult({ event: scoped_event, handler: entry }) + if (!existing) { + original_event.event_results.set(handler_id, result) + } else if (existing.event !== scoped_event) { + existing.event = scoped_event + } + return { handler: entry, result } + }) + } + + private _collectPendingResults( + original: BaseEvent, + pending_entries?: Array<{ + handler: EventHandler + result: EventResult + }> + ): EventResult[] { + if (pending_entries) { + return pending_entries.map((entry) => entry.result) + } + if (!this.bus?.id) { + return Array.from(original.event_results.values()) + } + return Array.from(original.event_results.values()).filter((result) => result.eventbus_id === this.bus!.id) + } + + private _isFirstModeWinningResult(entry: EventResult): boolean { + return entry.status === 'completed' && entry.result !== undefined && entry.result !== null && !(entry.result instanceof BaseEvent) + } + + private _markFirstModeWinnerIfNeeded(original: BaseEvent, entry: EventResult, first_state: { found: boolean }): void { + if (first_state.found || !this._isFirstModeWinningResult(entry)) { + return + } + first_state.found = true + original._markRemainingFirstModeResultCancelled(entry) + } + + private async _runHandlerWithLock(original: BaseEvent, entry: EventResult): Promise { + if (!this.bus) { + throw new Error('event has no bus attached') + } + await this.bus.locks._runWithHandlerLock(original, this.bus.event_handler_concurrency, async (handler_lock) => { + await entry.runHandler(handler_lock) + }) + } + + // Run all pending handler results for the current bus context. + async _runHandlers( + pending_entries?: Array<{ + handler: EventHandler + result: EventResult + }> + ): Promise { + const original = this._event_original ?? this + const pending_results = this._collectPendingResults(original, pending_entries) + if (pending_results.length === 0) { + return + } + const resolved_completion = original.event_handler_completion ?? this.bus?.event_handler_completion ?? 
'all' + if (resolved_completion === 'first') { + if (original._getHandlerLock(this.bus?.event_handler_concurrency) !== null) { + for (const entry of pending_results) { + await this._runHandlerWithLock(original, entry) + if (!this._isFirstModeWinningResult(entry)) { + continue + } + original._markRemainingFirstModeResultCancelled(entry) + break + } + return + } + const first_state = { found: false } + const handler_promises = pending_results.map((entry) => this._runHandlerWithLock(original, entry)) + const monitored = pending_results.map((entry, index) => + handler_promises[index].then(() => { + this._markFirstModeWinnerIfNeeded(original, entry, first_state) + }) + ) + await Promise.all(monitored) + return + } else { + const handler_promises = pending_results.map((entry) => this._runHandlerWithLock(original, entry)) + await Promise.all(handler_promises) + } + } + + _getHandlerLock(default_concurrency?: EventHandlerConcurrencyMode): AsyncLock | null { + const original = this._event_original ?? this + const resolved = original.event_handler_concurrency ?? default_concurrency ?? original.bus?.event_handler_concurrency ?? 'serial' + if (resolved === 'parallel') { + return null + } + if (!original._lock_for_event_handler) { + original._lock_for_event_handler = new AsyncLock(1) + } + return original._lock_for_event_handler + } + + _setHandlerLock(lock: AsyncLock | null): void { + const original = this._event_original ?? this + original._lock_for_event_handler = lock + } + + _getDispatchContext(): unknown | null | undefined { + const original = this._event_original ?? this + return original._event_dispatch_context + } + + _setDispatchContext(dispatch_context: unknown | null | undefined): void { + const original = this._event_original ?? this + original._event_dispatch_context = dispatch_context + } + + // Get parent event object from event_parent_id (checks across all buses) + get event_parent(): BaseEvent | undefined { + const original = this._event_original ?? 
this + const parent_id = original.event_parent_id + if (!parent_id) { + return undefined + } + return original.bus?.findEventById(parent_id) ?? undefined + } + + // get all direct children of this event + get event_children(): BaseEvent[] { + const children: BaseEvent[] = [] + const seen = new Set() + for (const result of this.event_results.values()) { + for (const child of result.event_children) { + if (!seen.has(child.event_id)) { + seen.add(child.event_id) + children.push(child) + } + } + } + return children + } + + // get all children grandchildren etc. recursively + get event_descendants(): BaseEvent[] { + const descendants: BaseEvent[] = [] + const visited = new Set() + const root_id = this.event_id + const stack = [...this.event_children] + + while (stack.length > 0) { + const child = stack.pop() + if (!child) { + continue + } + const child_id = child.event_id + if (child_id === root_id) { + continue + } + if (visited.has(child_id)) { + continue + } + visited.add(child_id) + descendants.push(child) + if (child.event_children.length > 0) { + stack.push(...child.event_children) + } + } + + return descendants + } + + // force-abort processing of all pending descendants of an event regardless of whether they have already started + _cancelPendingChildProcessing(reason: unknown): void { + const original = this._event_original ?? this + const cancellation_cause = + reason instanceof EventHandlerTimeoutError + ? reason + : reason instanceof EventHandlerCancelledError || reason instanceof EventHandlerAbortedError + ? reason.cause instanceof Error + ? reason.cause + : reason + : reason instanceof Error + ? reason + : new Error(String(reason)) + const visited = new Set() + const cancelChildEvent = (child: BaseEvent): void => { + const original_child = child._event_original ?? 
child + if (visited.has(original_child.event_id)) { + return + } + visited.add(original_child.event_id) + + // Depth-first: cancel grandchildren before parent so + // _areAllChildrenComplete() returns true when we get back up. + for (const grandchild of original_child.event_children) { + cancelChildEvent(grandchild) + } + + original_child._markCancelled(cancellation_cause) + + // Force-complete the child event. In JS we can't stop running async + // handlers, but _markCompleted() resolves the done() promise so callers + // aren't blocked waiting for background work to finish. The background + // handler's eventual _markCompleted/_markError is a no-op (terminal guard). + if (original_child.event_status !== 'completed') { + original_child._markCompleted() + } + } + + for (const child of original.event_children) { + cancelChildEvent(child) + } + } + + // Cancel all handler results for an event except the winner, used by first() mode. + // Cancels pending handlers immediately, aborts started handlers via _signalAbort(), + // and cancels any child events emitted by the losing handlers. + _markRemainingFirstModeResultCancelled(winner: EventResult): void { + const cause = new Error('first() resolved: another handler returned a result first') + const bus_id = winner.eventbus_id + + for (const result of this.event_results.values()) { + if (result === winner) continue + if (result.eventbus_id !== bus_id) continue + + if (result.status === 'pending') { + result._markError( + new EventHandlerCancelledError(`Cancelled: first() resolved`, { + event_result: result, + cause, + }) + ) + } else if (result.status === 'started') { + // Cancel child events emitted by this handler before aborting it + for (const child of result.event_children) { + const original_child = child._event_original ?? 
child + original_child._cancelPendingChildProcessing(cause) + original_child._markCancelled(cause) + } + + // Abort the handler itself + result._lock?.exitHandlerRun() + const aborted_error = new EventHandlerAbortedError(`Aborted: first() resolved`, { + event_result: result, + cause, + }) + result._markError(aborted_error) + result._signalAbort(aborted_error) + } + } + } + + // force-abort processing of this event regardless of whether it is pending or has already started + _markCancelled(cause: Error): void { + const original = this._event_original ?? this + if (!this.bus) { + if (original.event_status !== 'completed') { + original._markCompleted() + } + return + } + const path = Array.isArray(original.event_path) ? original.event_path : [] + const buses_to_cancel = new Set(path) + for (const bus of this.bus.all_instances) { + if (!buses_to_cancel.has(bus.label)) { + continue + } + + const handler_entries = original._createPendingHandlerResults(bus) + let updated = false + for (const entry of handler_entries) { + if (entry.result.status === 'pending') { + const cancelled_error = new EventHandlerCancelledError(`Cancelled pending handler due to parent error: ${cause.message}`, { + event_result: entry.result, + cause, + }) + entry.result._markError(cancelled_error) + updated = true + } else if (entry.result.status === 'started') { + entry.result._lock?.exitHandlerRun() + const aborted_error = new EventHandlerAbortedError(`Aborted running handler due to parent error: ${cause.message}`, { + event_result: entry.result, + cause, + }) + entry.result._markError(aborted_error) + entry.result._signalAbort(aborted_error) + updated = true + } + } + + const removed = bus.removeEventFromPendingQueue(original) + + if (removed > 0 && !bus.isEventInFlightOrQueued(original.event_id)) { + original.event_pending_bus_count = Math.max(0, original.event_pending_bus_count - 1) + } + + if (updated || removed > 0) { + original._markCompleted(false) + } + } + + if (original.event_status !== 
'completed') { + original._markCompleted() + } + } + + _notifyEventParentsOfCompletion(): void { + const original = this._event_original ?? this + if (!this.bus) { + return + } + const visited = new Set() + let parent_id = original.event_parent_id + while (parent_id && !visited.has(parent_id)) { + visited.add(parent_id) + const parent = this.bus.findEventById(parent_id) + if (!parent) { + break + } + parent._markCompleted(false, false) + if (parent.event_status !== 'completed') { + break + } + parent_id = parent.event_parent_id + } + } + + // awaitable that triggers immediate (queue-jump) processing of the event on all buses where it is queued + // use eventCompleted() to wait for normal queue-order completion without queue-jumping. + done(): Promise { + if (!this.bus) { + return Promise.reject(new Error('event has no bus attached')) + } + if (this.event_status === 'completed') { + return Promise.resolve(this) + } + // Always delegate to _processEventImmediately β€” it walks up the parent event tree + // to determine whether we're inside a handler (works cross-bus). If no + // ancestor handler is in-flight, it falls back to eventCompleted(). + return this.bus._processEventImmediately(this) + } + + // returns the first non-undefined handler result value, cancelling remaining handlers + // when any handler completes. Works with all event_handler_concurrency modes: + // parallel: races all handlers, returns first non-undefined, aborts the rest + // serial: runs handlers sequentially, returns first non-undefined, skips remaining + first(): Promise | undefined> { + if (!this.bus) { + return Promise.reject(new Error('event has no bus attached')) + } + const original = this._event_original ?? this + original.event_handler_completion = 'first' + return this.done().then((completed_event) => { + const orig = completed_event._event_original ?? 
completed_event + return Array.from(orig.event_results.values()) + .filter( + (result) => + result.status === 'completed' && result.result !== undefined && result.result !== null && !(result.result instanceof BaseEvent) + ) + .sort((a, b) => (a.completed_at ?? '').localeCompare(b.completed_at ?? '')) + .map((result) => result.result as EventResultType) + .at(0) + }) + } + + // returns handler result values in event_results insertion order. + // equivalent to await event.done(); Array.from(event.event_results.values()).map((entry) => entry.result) + eventResultsList( + include: EventResultsListInclude, + options?: EventResultsListOptions + ): Promise | undefined>> + eventResultsList(options?: EventResultsListOptions): Promise | undefined>> + async eventResultsList( + include_or_options?: EventResultsListInclude | EventResultsListOptions, + maybe_options?: EventResultsListOptions + ): Promise | undefined>> { + const default_include: EventResultsListInclude = (_result, event_result) => + event_result.status === 'completed' && + event_result.result !== undefined && + event_result.result !== null && + !(event_result.result instanceof Error) && + !(event_result.result instanceof BaseEvent) && + event_result.error === undefined + + let options: EventResultsListOptions + let include: EventResultsListInclude + if (typeof include_or_options === 'function') { + options = maybe_options ?? {} + include = include_or_options + } else { + options = include_or_options ?? {} + include = options.include ?? default_include + } + const raise_if_any = options.raise_if_any ?? true + const raise_if_none = options.raise_if_none ?? true + + const original = this._event_original ?? this + const resolved_timeout_seconds = options.timeout ?? original.event_timeout ?? this.bus?.event_timeout ?? 
null + let completed_event: this + + if (resolved_timeout_seconds === null) { + completed_event = await this.done() + } else { + completed_event = await _runWithTimeout( + resolved_timeout_seconds, + () => new Error(`Timed out waiting for ${original.event_type} results after ${resolved_timeout_seconds}s`), + () => this.done() + ) + } + + const all_results: EventResult[] = Array.from(completed_event.event_results.values()) + const error_results = all_results.filter((event_result) => event_result.error !== undefined || event_result.result instanceof Error) + + if (raise_if_any && error_results.length > 0) { + if (error_results.length === 1) { + const first_error = error_results[0] + if (first_error.error instanceof Error) { + throw first_error.error + } + if (first_error.result instanceof Error) { + throw first_error.result + } + throw new Error(String(first_error.error ?? first_error.result)) + } + + const errors = error_results.map((event_result) => { + if (event_result.error instanceof Error) { + return event_result.error + } + if (event_result.result instanceof Error) { + return event_result.result + } + return new Error(String(event_result.error ?? 
event_result.result)) + }) + throw new AggregateError( + errors, + `Event ${completed_event.event_type}#${completed_event.event_id.slice(-4)} had ${errors.length} handler error(s)` + ) + } + + const included_results = all_results.filter((event_result) => include(event_result.result, event_result)) + if (raise_if_none && included_results.length === 0) { + throw new Error( + `Expected at least one handler to return a non-null result, but none did: ${completed_event.event_type}#${completed_event.event_id.slice(-4)}` + ) + } + + return included_results.map((event_result) => event_result.result) + } + + // awaitable that waits for the event to be processed in normal queue order by the _runloop + eventCompleted(): Promise { + if (this.event_status === 'completed') { + return Promise.resolve(this) + } + this._notifyDoneListeners() + return this._event_completed_signal!.promise + } + + _markPending(): this { + const original = this._event_original ?? this + original.event_status = 'pending' + original.event_started_at = null + original.event_completed_at = null + original.event_results.clear() + original.event_pending_bus_count = 0 + original._setDispatchContext(undefined) + original._event_completed_signal = null + original._lock_for_event_handler = null + original.bus = undefined + return this + } + + eventReset(): this { + const original = this._event_original ?? this + const ctor = original.constructor as typeof BaseEvent + const fresh_event = ctor.fromJSON(original.toJSON()) as this + fresh_event.event_id = uuidv7() + return fresh_event._markPending() + } + + _markStarted(started_at: string | null = null, notify_hook: boolean = true): void { + const original = this._event_original ?? this + if (original.event_status !== 'pending') { + return + } + original.event_status = 'started' + original.event_started_at = started_at === null ? 
monotonicDatetime() : monotonicDatetime(started_at) + if (notify_hook && original.bus) { + const bus_for_hook = original.bus + const event_for_bus = bus_for_hook._getEventProxyScopedToThisBus(original) + void bus_for_hook.onEventChange(event_for_bus, 'started') + } + } + + _markCompleted(force: boolean = true, notify_parents: boolean = true): void { + const original = this._event_original ?? this + if (original.event_status === 'completed') { + return + } + if (!force) { + if (original.event_pending_bus_count > 0) { + return + } + if (!original._areAllChildrenComplete()) { + return + } + } + original.event_status = 'completed' + original.event_completed_at = monotonicDatetime() + if (original.bus) { + const bus_for_hook = original.bus + const event_for_bus = bus_for_hook._getEventProxyScopedToThisBus(original) + void bus_for_hook.onEventChange(event_for_bus, 'completed') + } + original._setDispatchContext(null) + original._notifyDoneListeners() + original._event_completed_signal!.resolve(original) + original._event_completed_signal = null + original.dropFromZeroHistoryBuses() + if (notify_parents && original.bus) { + original._notifyEventParentsOfCompletion() + } + } + + private dropFromZeroHistoryBuses(): void { + if (!this.bus) { + return + } + const original = this._event_original ?? this + for (const bus of this.bus.all_instances) { + if (bus.event_history.max_history_size !== 0) { + continue + } + bus.removeEventFromHistory(original.event_id) + } + } + + get event_errors(): unknown[] { + return ( + Array.from(this.event_results.values()) + // filter for events that have completed + have non-undefined error values + .filter((event_result) => event_result.error !== undefined && event_result.completed_at !== null) + // sort by completion time + .sort((event_result_a, event_result_b) => (event_result_a.completed_at ?? '').localeCompare(event_result_b.completed_at ?? 
'')) + // assemble array of flat error values + .map((event_result) => event_result.error) + ) + } + + // Returns the first non-undefined completed handler result, sorted by completion time. + // Useful after first() or done() to get the winning result value. + get event_result(): EventResultType | undefined { + return Array.from(this.event_results.values()) + .filter((event_result) => event_result.completed_at !== null && event_result.result !== undefined) + .sort((event_result_a, event_result_b) => (event_result_a.completed_at ?? '').localeCompare(event_result_b.completed_at ?? '')) + .map((event_result) => event_result.result as EventResultType) + .at(0) + } + + _areAllChildrenComplete(): boolean { + return this.event_descendants.every((descendant) => descendant.event_status === 'completed') + } + + private _notifyDoneListeners(): void { + if (this._event_completed_signal) { + return + } + this._event_completed_signal = withResolvers() + } + + // Break internal reference chains so a completed event can be GC'd when + // Evicted from event_history. Called by EventHistory.trimEventHistory(). + _gc(): void { + this._event_completed_signal = null + this._setDispatchContext(null) + this.bus = undefined + this._lock_for_event_handler = null + for (const result of this.event_results.values()) { + result.event_children = [] + } + this.event_results.clear() + } +} + +const hydrateEventResults = (event: TEvent, raw_event_results: unknown): Map> => { + const event_results = new Map>() + if (!Array.isArray(raw_event_results)) { + return event_results + } + for (const item of raw_event_results) { + const result = EventResult.fromJSON(event, item) + const map_key = typeof result.handler_id === 'string' && result.handler_id.length > 0 ? 
result.handler_id : result.id + event_results.set(map_key, result) + } + return event_results +} diff --git a/bubus-ts/src/bridge_jsonl.ts b/bubus-ts/src/bridge_jsonl.ts new file mode 100644 index 0000000..c320aa7 --- /dev/null +++ b/bubus-ts/src/bridge_jsonl.ts @@ -0,0 +1,174 @@ +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import type { EventClass, EventHandlerCallable, EventPattern, UntypedEventHandlerFunction } from './types.js' + +const isNodeRuntime = (): boolean => { + const maybe_process = (globalThis as { process?: { versions?: { node?: string } } }).process + return typeof maybe_process?.versions?.node === 'string' +} + +const importNodeModule = async (specifier: string): Promise => { + const dynamic_import = Function('module_name', 'return import(module_name)') as (module_name: string) => Promise + return dynamic_import(specifier) as Promise +} + +const randomSuffix = (): string => Math.random().toString(36).slice(2, 10) + +export class JSONLEventBridge { + readonly path: string + readonly poll_interval: number + readonly name: string + + private readonly inbound_bus: EventBus + private running: boolean + private byte_offset: number + private pending_line: string + private listener_task: Promise | null + + constructor(path: string, poll_interval: number = 0.25, name?: string) { + this.path = path + this.poll_interval = poll_interval + this.name = name ?? 
`JSONLEventBridge_${randomSuffix()}` + this.inbound_bus = new EventBus(this.name, { max_history_size: 0 }) + this.running = false + this.byte_offset = 0 + this.pending_line = '' + this.listener_task = null + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) + this.on = this.on.bind(this) + } + + on(event_pattern: EventClass, handler: EventHandlerCallable): void + on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void + on(event_pattern: EventPattern | '*', handler: EventHandlerCallable | UntypedEventHandlerFunction): void { + this.ensureStarted() + if (typeof event_pattern === 'string') { + this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction) + return + } + this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerCallable) + } + + async emit(event: T): Promise { + this.ensureStarted() + const fs = await this.loadFs() + await fs.promises.mkdir(this.dirname(this.path), { recursive: true }) + const payload = JSON.stringify(event.toJSON()) + '\n' + await fs.promises.appendFile(this.path, payload, 'utf8') + } + + async dispatch(event: T): Promise { + return this.emit(event) + } + + async start(): Promise { + if (this.running) return + const fs = await this.loadFs() + await fs.promises.mkdir(this.dirname(this.path), { recursive: true }) + await fs.promises.appendFile(this.path, '', 'utf8') + const stats = await fs.promises.stat(this.path) + this.byte_offset = Number(stats.size ?? 0) + this.pending_line = '' + this.running = true + this.listener_task = this.listenLoop() + } + + async close(): Promise { + this.running = false + await Promise.allSettled(this.listener_task ? 
[this.listener_task] : []) + this.listener_task = null + this.inbound_bus.destroy() + } + + private ensureStarted(): void { + if (this.running || this.listener_task) return + void this.start().catch((error: unknown) => { + console.error('[bubus] JSONLEventBridge failed to start', error) + }) + } + + private async listenLoop(): Promise { + while (this.running) { + try { + await this.pollNewLines() + } catch { + // Keep polling on transient errors. + } + await new Promise((resolve) => setTimeout(resolve, Math.max(1, this.poll_interval * 1000))) + } + } + + private async pollNewLines(): Promise { + const previous_offset = this.byte_offset + const { chunk, next_offset } = await this.readAppended(previous_offset) + this.byte_offset = next_offset + if (next_offset < previous_offset) { + this.pending_line = '' + } + if (!chunk) return + + const new_lines = (this.pending_line + chunk).split('\n') + this.pending_line = new_lines.pop() ?? '' + + for (const line of new_lines) { + const trimmed = line.trim() + if (!trimmed) continue + try { + const payload = JSON.parse(trimmed) + await this.dispatchInboundPayload(payload) + } catch { + // Ignore malformed line. + } + } + } + + private async dispatchInboundPayload(payload: unknown): Promise { + const event = BaseEvent.fromJSON(payload).eventReset() + this.inbound_bus.emit(event) + } + + private async readAppended(offset: number): Promise<{ chunk: string; next_offset: number }> { + const fs = await this.loadFs() + let size = 0 + try { + const stats = await fs.promises.stat(this.path) + size = Number(stats.size ?? 0) + } catch (error: unknown) { + const code = (error as { code?: string }).code + if (code === 'ENOENT') { + return { chunk: '', next_offset: 0 } + } + throw error + } + + const start_offset = size < offset ? 
0 : offset + if (size === start_offset) { + return { chunk: '', next_offset: size } + } + + const handle = await fs.promises.open(this.path, 'r') + try { + const byte_count = size - start_offset + const bytes = new Uint8Array(byte_count) + const { bytesRead } = await handle.read(bytes, 0, byte_count, start_offset) + const chunk = new TextDecoder().decode(bytes.subarray(0, Number(bytesRead ?? 0))) + return { chunk, next_offset: start_offset + Number(bytesRead ?? 0) } + } finally { + await handle.close() + } + } + + private dirname(path: string): string { + const idx = path.lastIndexOf('/') + return idx >= 0 ? path.slice(0, idx) || '.' : '.' + } + + private async loadFs(): Promise { + if (!isNodeRuntime()) { + throw new Error('JSONLEventBridge is only supported in Node.js runtimes') + } + return importNodeModule('node:fs') + } +} diff --git a/bubus-ts/src/bridge_nats.ts b/bubus-ts/src/bridge_nats.ts new file mode 100644 index 0000000..ffc5faa --- /dev/null +++ b/bubus-ts/src/bridge_nats.ts @@ -0,0 +1,104 @@ +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import { assertOptionalDependencyAvailable, importOptionalDependency, isNodeRuntime } from './optional_deps.js' +import type { EventClass, EventHandlerCallable, EventPattern, UntypedEventHandlerFunction } from './types.js' + +const randomSuffix = (): string => Math.random().toString(36).slice(2, 10) + +export class NATSEventBridge { + readonly server: string + readonly subject: string + readonly name: string + + private readonly inbound_bus: EventBus + private running: boolean + private nc: any | null + private sub_task: Promise | null + + constructor(server: string, subject: string, name?: string) { + assertOptionalDependencyAvailable('NATSEventBridge', 'nats') + + this.server = server + this.subject = subject + this.name = name ?? 
`NATSEventBridge_${randomSuffix()}` + this.inbound_bus = new EventBus(this.name, { max_history_size: 0 }) + this.running = false + this.nc = null + this.sub_task = null + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) + this.on = this.on.bind(this) + } + + on(event_pattern: EventClass, handler: EventHandlerCallable): void + on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void + on(event_pattern: EventPattern | '*', handler: EventHandlerCallable | UntypedEventHandlerFunction): void { + this.ensureStarted() + if (typeof event_pattern === 'string') { + this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction) + return + } + this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerCallable) + } + + async emit(event: T): Promise { + this.ensureStarted() + if (!this.nc) await this.start() + + const payload = JSON.stringify(event.toJSON()) + this.nc.publish(this.subject, new TextEncoder().encode(payload)) + } + + async dispatch(event: T): Promise { + return this.emit(event) + } + + async start(): Promise { + if (this.running) return + if (!isNodeRuntime()) { + throw new Error('NATSEventBridge is only supported in Node.js runtimes') + } + + const mod = await importOptionalDependency('NATSEventBridge', 'nats') + const connect = mod.connect + this.nc = await connect({ servers: this.server }) + const sub = this.nc.subscribe(this.subject) + + this.running = true + this.sub_task = (async () => { + for await (const msg of sub) { + try { + const payload = JSON.parse(new TextDecoder().decode(msg.data)) + await this.dispatchInboundPayload(payload) + } catch { + // Ignore malformed payloads. + } + } + })() + } + + async close(): Promise { + this.running = false + if (this.nc) { + await this.nc.drain() + await this.nc.close() + this.nc = null + } + await Promise.allSettled(this.sub_task ? 
[this.sub_task] : []) + this.sub_task = null + this.inbound_bus.destroy() + } + + private ensureStarted(): void { + if (this.running) return + void this.start().catch((error: unknown) => { + console.error('[bubus] NATSEventBridge failed to start', error) + }) + } + + private async dispatchInboundPayload(payload: unknown): Promise { + const event = BaseEvent.fromJSON(payload).eventReset() + this.inbound_bus.emit(event) + } +} diff --git a/bubus-ts/src/bridge_postgres.ts b/bubus-ts/src/bridge_postgres.ts new file mode 100644 index 0000000..d333d8c --- /dev/null +++ b/bubus-ts/src/bridge_postgres.ts @@ -0,0 +1,277 @@ +/** + * PostgreSQL LISTEN/NOTIFY + flat-table bridge for forwarding events. + */ +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import { assertOptionalDependencyAvailable, importOptionalDependency, isNodeRuntime } from './optional_deps.js' +import type { EventClass, EventHandlerCallable, EventPattern, UntypedEventHandlerFunction } from './types.js' + +const randomSuffix = (): string => Math.random().toString(36).slice(2, 10) +const IDENTIFIER_RE = /^[A-Za-z_][A-Za-z0-9_]*$/ +const DEFAULT_POSTGRES_TABLE = 'bubus_events' +const DEFAULT_POSTGRES_CHANNEL = 'bubus_events' +const EVENT_PAYLOAD_COLUMN = 'event_payload' + +const validateIdentifier = (value: string, label: string): string => { + if (!IDENTIFIER_RE.test(value)) { + throw new Error(`Invalid ${label}: ${JSON.stringify(value)}. Use only [A-Za-z0-9_] and start with a letter/_`) + } + return value +} + +const indexName = (table: string, suffix: string): string => validateIdentifier(`${table}_${suffix}`.slice(0, 63), 'index name') + +const parseTableUrl = (table_url: string): { dsn: string; table: string } => { + let parsed: URL + try { + parsed = new URL(table_url) + } catch { + throw new Error( + 'PostgresEventBridge URL must include at least database in path, e.g. 
postgresql://user:pass@host:5432/dbname[/tablename]' + ) + } + + const segments = parsed.pathname.split('/').filter(Boolean) + if (segments.length < 1) { + throw new Error( + 'PostgresEventBridge URL must include at least database in path, e.g. postgresql://user:pass@host:5432/dbname[/tablename]' + ) + } + + const db_name = segments[0] + const table = segments.length >= 2 ? validateIdentifier(segments[1], 'table name') : DEFAULT_POSTGRES_TABLE + const dsn_url = new URL(parsed.toString()) + dsn_url.pathname = `/${db_name}` + return { dsn: dsn_url.toString(), table } +} + +const splitBridgePayload = ( + payload: Record +): { event_fields: Record; event_payload: Record } => { + const event_fields: Record = {} + const event_payload: Record = { ...payload } + for (const [key, value] of Object.entries(payload)) { + if (key.startsWith('event_')) { + event_fields[key] = value + } + } + return { event_fields, event_payload } +} + +export class PostgresEventBridge { + readonly table_url: string + readonly dsn: string + readonly table: string + readonly channel: string + readonly name: string + + private readonly inbound_bus: EventBus + private running: boolean + private client: any | null + private table_columns: Set + private notification_handler: ((msg: { channel: string; payload?: string }) => void) | null + + constructor(table_url: string, channel?: string, name?: string) { + assertOptionalDependencyAvailable('PostgresEventBridge', 'pg') + + const parsed = parseTableUrl(table_url) + this.table_url = table_url + this.dsn = parsed.dsn + this.table = parsed.table + + const derived_channel = channel ?? DEFAULT_POSTGRES_CHANNEL + this.channel = validateIdentifier(derived_channel.slice(0, 63), 'channel name') + this.name = name ?? 
`PostgresEventBridge_${randomSuffix()}` + + this.inbound_bus = new EventBus(this.name, { max_history_size: 0 }) + this.running = false + this.client = null + this.table_columns = new Set(['event_id', 'event_created_at', 'event_type', EVENT_PAYLOAD_COLUMN]) + this.notification_handler = null + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) + this.on = this.on.bind(this) + } + + on(event_pattern: EventClass, handler: EventHandlerCallable): void + on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void + on(event_pattern: EventPattern | '*', handler: EventHandlerCallable | UntypedEventHandlerFunction): void { + this.ensureStarted() + if (typeof event_pattern === 'string') { + this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction) + return + } + this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerCallable) + } + + async emit(event: T): Promise { + this.ensureStarted() + if (!this.client) await this.start() + + const payload = event.toJSON() as Record + const { event_fields, event_payload } = splitBridgePayload(payload) + const write_payload: Record = { ...event_fields, [EVENT_PAYLOAD_COLUMN]: event_payload } + const keys = Object.keys(write_payload).sort() + await this.ensureColumns(keys) + + const columns_sql = keys.map((key) => `"${key}"`).join(', ') + const placeholders_sql = keys.map((_, index) => `$${index + 1}`).join(', ') + const values = keys.map((key) => + write_payload[key] === null || write_payload[key] === undefined ? 
null : JSON.stringify(write_payload[key]) + ) + + const update_fields = keys.filter((key) => key !== 'event_id') + let upsert_sql = `INSERT INTO "${this.table}" (${columns_sql}) VALUES (${placeholders_sql})` + if (update_fields.length > 0) { + const updates_sql = update_fields.map((key) => `"${key}" = EXCLUDED."${key}"`).join(', ') + upsert_sql += ` ON CONFLICT ("event_id") DO UPDATE SET ${updates_sql}` + } else { + upsert_sql += ' ON CONFLICT ("event_id") DO NOTHING' + } + + await this.client.query(upsert_sql, values) + await this.client.query('SELECT pg_notify($1, $2)', [this.channel, JSON.stringify(String(event.event_id))]) + } + + async dispatch(event: T): Promise { + return this.emit(event) + } + + async start(): Promise { + if (this.running) return + if (!isNodeRuntime()) { + throw new Error('PostgresEventBridge is only supported in Node.js runtimes') + } + + const mod = await importOptionalDependency('PostgresEventBridge', 'pg') + const Client = mod.Client ?? mod.default?.Client + this.client = new Client({ connectionString: this.dsn }) + this.client.on('error', () => {}) + await this.client.connect() + + await this.ensureTableExists() + await this.refreshColumnCache() + await this.ensureColumns(['event_id', 'event_created_at', 'event_type', EVENT_PAYLOAD_COLUMN]) + await this.ensureBaseIndexes() + + this.notification_handler = (msg: { channel: string; payload?: string }) => { + if (msg.channel !== this.channel || !msg.payload) return + void this.dispatchByEventId(msg.payload).catch(() => { + // Ignore transient shutdown races while closing connections. 
+ }) + } + + this.client.on('notification', this.notification_handler) + await this.client.query(`LISTEN ${this.channel}`) + this.running = true + } + + async close(): Promise { + this.running = false + if (this.client) { + try { + await this.client.query(`UNLISTEN ${this.channel}`) + } catch { + // ignore + } + if (this.notification_handler) { + this.client.off('notification', this.notification_handler) + this.notification_handler = null + } + await this.client.end() + this.client = null + } + this.inbound_bus.destroy() + } + + private ensureStarted(): void { + if (this.running) return + void this.start().catch((error: unknown) => { + console.error('[bubus] PostgresEventBridge failed to start', error) + }) + } + + private async dispatchByEventId(event_id: string): Promise { + if (!this.running || !this.client) return + const result = await this.client.query(`SELECT * FROM "${this.table}" WHERE "event_id" = $1`, [event_id]) + const row = result.rows?.[0] as Record | undefined + if (!row) return + + const payload: Record = {} + const raw_event_payload = row[EVENT_PAYLOAD_COLUMN] + if (typeof raw_event_payload === 'string') { + try { + const decoded_event_payload = JSON.parse(raw_event_payload) + if (decoded_event_payload && typeof decoded_event_payload === 'object' && !Array.isArray(decoded_event_payload)) { + Object.assign(payload, decoded_event_payload as Record) + } + } catch { + // ignore malformed payload column + } + } + + for (const [key, raw_value] of Object.entries(row)) { + if (key === EVENT_PAYLOAD_COLUMN || !key.startsWith('event_')) continue + if (raw_value === null || raw_value === undefined) continue + if (typeof raw_value !== 'string') { + payload[key] = raw_value + continue + } + try { + payload[key] = JSON.parse(raw_value) + } catch { + payload[key] = raw_value + } + } + + await this.dispatchInboundPayload(payload) + } + + private async dispatchInboundPayload(payload: unknown): Promise { + const event = BaseEvent.fromJSON(payload).eventReset() + 
this.inbound_bus.emit(event) + } + + private async ensureTableExists(): Promise { + if (!this.client) return + await this.client.query( + `CREATE TABLE IF NOT EXISTS "${this.table}" ("event_id" TEXT PRIMARY KEY, "event_created_at" TEXT, "event_type" TEXT, "event_payload" TEXT)` + ) + } + + private async ensureBaseIndexes(): Promise { + if (!this.client) return + + const event_created_at_idx = indexName(this.table, 'event_created_at_idx') + const event_type_idx = indexName(this.table, 'event_type_idx') + + await this.client.query(`CREATE INDEX IF NOT EXISTS "${event_created_at_idx}" ON "${this.table}" ("event_created_at")`) + await this.client.query(`CREATE INDEX IF NOT EXISTS "${event_type_idx}" ON "${this.table}" ("event_type")`) + } + + private async refreshColumnCache(): Promise { + if (!this.client) return + const result = await this.client.query( + `SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name = $1`, + [this.table] + ) + this.table_columns = new Set((result.rows as Array<{ column_name: string }>).map((row) => row.column_name)) + } + + private async ensureColumns(keys: string[]): Promise { + if (!this.client) return + for (const key of keys) { + validateIdentifier(key, 'event field name') + if (key !== EVENT_PAYLOAD_COLUMN && !key.startsWith('event_')) { + throw new Error(`Invalid event field name for bridge column: ${JSON.stringify(key)}. Only event_* fields become columns`) + } + } + + const missing = keys.filter((key) => !this.table_columns.has(key)) + for (const key of missing) { + await this.client.query(`ALTER TABLE "${this.table}" ADD COLUMN IF NOT EXISTS "${key}" TEXT`) + this.table_columns.add(key) + } + } +} diff --git a/bubus-ts/src/bridge_redis.ts b/bubus-ts/src/bridge_redis.ts new file mode 100644 index 0000000..a3c02f0 --- /dev/null +++ b/bubus-ts/src/bridge_redis.ts @@ -0,0 +1,194 @@ +/** + * Redis pub/sub bridge for forwarding events between runtimes. 
+ * + * Usage: + * // channel from URL path + * const bridge = new RedisEventBridge('redis://user:pass@localhost:6379/1/my_channel') + * + * // explicit channel override + * const bridge2 = new RedisEventBridge('redis://user:pass@localhost:6379/1', 'my_channel') + * + * URL format: + * redis://user:pass@host:6379// + */ +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import { assertOptionalDependencyAvailable, importOptionalDependency, isNodeRuntime } from './optional_deps.js' +import type { EventClass, EventHandlerCallable, EventPattern, UntypedEventHandlerFunction } from './types.js' + +const randomSuffix = (): string => Math.random().toString(36).slice(2, 10) +const DEFAULT_REDIS_CHANNEL = 'bubus_events' +const DB_INIT_KEY = '__bubus:bridge_init__' + +const parseRedisUrl = (redis_url: string, channel?: string): { url: string; channel: string } => { + let parsed: URL + try { + parsed = new URL(redis_url) + } catch { + throw new Error(`RedisEventBridge URL must be a valid redis:// or rediss:// URL, got: ${redis_url}`) + } + + const protocol = parsed.protocol.replace(/:$/, '').toLowerCase() + if (protocol !== 'redis' && protocol !== 'rediss') { + throw new Error(`RedisEventBridge URL must use redis:// or rediss://, got: ${redis_url}`) + } + + const segments = parsed.pathname.split('/').filter(Boolean) + if (segments.length > 2) { + throw new Error(`RedisEventBridge URL path must be / or //, got: ${parsed.pathname || '/'}`) + } + + let db_index = '0' + let channel_from_url: string | undefined + + if (segments.length > 0) { + db_index = segments[0] + if (!/^\d+$/.test(db_index)) { + throw new Error(`RedisEventBridge URL db path segment must be numeric, got: ${JSON.stringify(db_index)} in ${redis_url}`) + } + if (segments.length === 2) { + channel_from_url = segments[1] + } + } + + const resolved_channel = channel ?? channel_from_url ?? 
DEFAULT_REDIS_CHANNEL + if (!resolved_channel) { + throw new Error('RedisEventBridge channel must not be empty') + } + + const normalized = new URL(parsed.toString()) + normalized.pathname = `/${db_index}` + return { url: normalized.toString(), channel: resolved_channel } +} + +export class RedisEventBridge { + readonly url: string + readonly channel: string + readonly name: string + + private readonly inbound_bus: EventBus + private running: boolean + private start_promise: Promise | null + private redis_pub: any | null + private redis_sub: any | null + + constructor(redis_url: string, channel?: string, name?: string) { + assertOptionalDependencyAvailable('RedisEventBridge', 'ioredis') + + const parsed = parseRedisUrl(redis_url, channel) + this.url = parsed.url + this.channel = parsed.channel + this.name = name ?? `RedisEventBridge_${randomSuffix()}` + this.inbound_bus = new EventBus(this.name, { max_history_size: 0 }) + this.running = false + this.start_promise = null + this.redis_pub = null + this.redis_sub = null + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) + this.on = this.on.bind(this) + } + + on(event_pattern: EventClass, handler: EventHandlerCallable): void + on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void + on(event_pattern: EventPattern | '*', handler: EventHandlerCallable | UntypedEventHandlerFunction): void { + this.ensureStarted() + if (typeof event_pattern === 'string') { + this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction) + return + } + this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerCallable) + } + + async emit(event: T): Promise { + this.ensureStarted() + if (!this.redis_pub) await this.start() + const payload = JSON.stringify(event.toJSON()) + await this.redis_pub.publish(this.channel, payload) + } + + async dispatch(event: T): Promise { + return this.emit(event) + } + + async start(): Promise { + if (this.running) return + if 
(this.start_promise) { + await this.start_promise + return + } + + // `on(...)` auto-start and explicit `await start()` can happen back-to-back; use one in-flight + // startup promise so we do not leak extra Redis clients. + this.start_promise = (async () => { + if (!isNodeRuntime()) { + throw new Error('RedisEventBridge is only supported in Node.js runtimes') + } + + const mod = await importOptionalDependency('RedisEventBridge', 'ioredis') + const Redis = mod.default ?? mod.Redis ?? mod + const redis_pub = new Redis(this.url) + const redis_sub = new Redis(this.url) + + redis_pub.on('error', () => {}) + redis_sub.on('error', () => {}) + + // Redis logical DBs are created lazily; writing a short-lived key initializes/validates the selected DB. + await redis_pub.set(DB_INIT_KEY, '1', 'EX', 60, 'NX') + redis_sub.on('message', (channel_name: string, message: string) => { + if (channel_name !== this.channel) return + try { + const payload = JSON.parse(message) + void this.dispatchInboundPayload(payload) + } catch { + // Ignore malformed payloads. 
+ } + }) + await redis_sub.subscribe(this.channel) + this.redis_pub = redis_pub + this.redis_sub = redis_sub + this.running = true + })() + + try { + await this.start_promise + } finally { + this.start_promise = null + } + } + + async close(): Promise { + if (this.start_promise) { + await this.start_promise.catch(() => {}) + } + this.running = false + if (this.redis_sub) { + try { + await this.redis_sub.unsubscribe(this.channel) + } catch { + // ignore + } + await this.redis_sub.quit() + this.redis_sub = null + } + if (this.redis_pub) { + await this.redis_pub.quit() + this.redis_pub = null + } + this.inbound_bus.destroy() + } + + private ensureStarted(): void { + if (this.running) return + if (this.start_promise) return + void this.start().catch((error: unknown) => { + console.error('[bubus] RedisEventBridge failed to start', error) + }) + } + + private async dispatchInboundPayload(payload: unknown): Promise { + const event = BaseEvent.fromJSON(payload).eventReset() + this.inbound_bus.emit(event) + } +} diff --git a/bubus-ts/src/bridge_sqlite.ts b/bubus-ts/src/bridge_sqlite.ts new file mode 100644 index 0000000..e50c104 --- /dev/null +++ b/bubus-ts/src/bridge_sqlite.ts @@ -0,0 +1,289 @@ +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import { isNodeRuntime } from './optional_deps.js' +import type { EventClass, EventHandlerCallable, EventPattern, UntypedEventHandlerFunction } from './types.js' + +const randomSuffix = (): string => Math.random().toString(36).slice(2, 10) +const IDENTIFIER_RE = /^[A-Za-z_][A-Za-z0-9_]*$/ +const EVENT_PAYLOAD_COLUMN = 'event_payload' + +const validateIdentifier = (value: string, label: string): string => { + if (!IDENTIFIER_RE.test(value)) { + throw new Error(`Invalid ${label}: ${JSON.stringify(value)}. 
Use only [A-Za-z0-9_] and start with a letter/_`) + } + return value +} + +const loadNodeSqlite = async (): Promise => { + const dynamic_import = Function('module_name', 'return import(module_name)') as (module_name: string) => Promise + try { + return (await dynamic_import('node:sqlite')) as any + } catch { + throw new Error('SQLiteEventBridge requires Node.js with built-in "node:sqlite" support (Node 22+).') + } +} + +const splitBridgePayload = ( + payload: Record +): { event_fields: Record; event_payload: Record } => { + const event_fields: Record = {} + const event_payload: Record = { ...payload } + for (const [key, value] of Object.entries(payload)) { + if (key.startsWith('event_')) { + event_fields[key] = value + } + } + return { event_fields, event_payload } +} + +export class SQLiteEventBridge { + readonly path: string + readonly table: string + readonly poll_interval: number + readonly name: string + + private readonly inbound_bus: EventBus + private running: boolean + private last_seen_event_created_at: string + private last_seen_event_id: string + private listener_task: Promise | null + private start_task: Promise | null + private db: any | null + private table_columns: Set + + constructor(path: string, table: string = 'bubus_events', poll_interval: number = 0.25, name?: string) { + this.path = path + this.table = validateIdentifier(table, 'table name') + this.poll_interval = poll_interval + this.name = name ?? 
`SQLiteEventBridge_${randomSuffix()}` + this.inbound_bus = new EventBus(this.name, { max_history_size: 0 }) + this.running = false + this.last_seen_event_created_at = '' + this.last_seen_event_id = '' + this.listener_task = null + this.start_task = null + this.db = null + this.table_columns = new Set(['event_id', 'event_created_at', 'event_type', EVENT_PAYLOAD_COLUMN]) + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) + this.on = this.on.bind(this) + } + + on(event_pattern: EventClass, handler: EventHandlerCallable): void + on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void + on(event_pattern: EventPattern | '*', handler: EventHandlerCallable | UntypedEventHandlerFunction): void { + this.ensureStarted() + if (typeof event_pattern === 'string') { + this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction) + return + } + this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerCallable) + } + + async emit(event: T): Promise { + this.ensureStarted() + if (!this.running) { + await this.start() + } + if (!this.db) { + throw new Error('SQLiteEventBridge database not initialized') + } + + const payload = event.toJSON() as Record + const { event_fields, event_payload } = splitBridgePayload(payload) + const write_payload: Record = { ...event_fields, [EVENT_PAYLOAD_COLUMN]: event_payload } + const payload_keys = Object.keys(write_payload).sort() + this.ensureColumns(payload_keys) + + const columns_sql = payload_keys.map((key) => `"${key}"`).join(', ') + const placeholders_sql = payload_keys.map((key) => (key === EVENT_PAYLOAD_COLUMN ? 'json(?)' : '?')).join(', ') + const values = payload_keys.map((key) => + write_payload[key] === null || write_payload[key] === undefined ? 
null : JSON.stringify(write_payload[key]) + ) + + const update_fields = payload_keys.filter((key) => key !== 'event_id') + let upsert_sql = `INSERT INTO "${this.table}" (${columns_sql}) VALUES (${placeholders_sql})` + if (update_fields.length > 0) { + const updates_sql = update_fields.map((key) => `"${key}" = excluded."${key}"`).join(', ') + upsert_sql += ` ON CONFLICT("event_id") DO UPDATE SET ${updates_sql}` + } else { + upsert_sql += ' ON CONFLICT("event_id") DO NOTHING' + } + + this.db.prepare(upsert_sql).run(...values) + } + + async dispatch(event: T): Promise { + return this.emit(event) + } + + async start(): Promise { + if (this.running) return + if (this.start_task) { + await this.start_task + return + } + + this.start_task = (async (): Promise => { + if (!isNodeRuntime()) { + throw new Error('SQLiteEventBridge is only supported in Node.js runtimes') + } + + const mod = await loadNodeSqlite() + const Database = mod.DatabaseSync ?? mod.default?.DatabaseSync + if (typeof Database !== 'function') { + throw new Error('SQLiteEventBridge could not load DatabaseSync from node:sqlite. Please use Node.js 22+.') + } + this.db = new Database(this.path) + this.db.exec('PRAGMA journal_mode = WAL') + this.db + .prepare( + `CREATE TABLE IF NOT EXISTS "${this.table}" ("event_id" TEXT PRIMARY KEY, "event_created_at" TEXT, "event_type" TEXT, "event_payload" JSON)` + ) + .run() + + this.refreshColumnCache() + this.ensureColumns(['event_id', 'event_created_at', 'event_type', EVENT_PAYLOAD_COLUMN]) + this.ensureBaseIndexes() + this.setCursorToLatestRow() + + this.running = true + this.listener_task = this.listenLoop() + })() + + try { + await this.start_task + } finally { + this.start_task = null + } + } + + async close(): Promise { + await Promise.allSettled(this.start_task ? [this.start_task] : []) + this.running = false + await Promise.allSettled(this.listener_task ? 
[this.listener_task] : []) + this.listener_task = null + + if (this.db) { + this.db.close() + this.db = null + } + + this.inbound_bus.destroy() + } + + private ensureStarted(): void { + if (this.running || this.listener_task || this.start_task) return + void this.start().catch((error: unknown) => { + console.error('[bubus] SQLiteEventBridge failed to start', error) + }) + } + + private async listenLoop(): Promise { + while (this.running) { + try { + if (this.db) { + const rows = this.db + .prepare( + `SELECT * FROM "${this.table}" WHERE COALESCE("event_created_at", '') > ? OR (COALESCE("event_created_at", '') = ? AND COALESCE("event_id", '') > ?) ORDER BY COALESCE("event_created_at", '') ASC, COALESCE("event_id", '') ASC` + ) + .all(this.last_seen_event_created_at, this.last_seen_event_created_at, this.last_seen_event_id) as Array< + Record + > + + for (const row of rows) { + this.last_seen_event_created_at = String(row.event_created_at ?? '') + this.last_seen_event_id = String(row.event_id ?? '') + + const raw_payload_blob = row[EVENT_PAYLOAD_COLUMN] + const payload: Record = {} + if (typeof raw_payload_blob === 'string') { + try { + const decoded_event_payload = JSON.parse(raw_payload_blob) + if (decoded_event_payload && typeof decoded_event_payload === 'object' && !Array.isArray(decoded_event_payload)) { + Object.assign(payload, decoded_event_payload as Record) + } + } catch { + // ignore malformed payload column + } + } + + for (const [key, raw_value] of Object.entries(row)) { + if (key === EVENT_PAYLOAD_COLUMN || !key.startsWith('event_')) continue + if (raw_value === null || raw_value === undefined) continue + + if (typeof raw_value !== 'string') { + payload[key] = raw_value + continue + } + + try { + payload[key] = JSON.parse(raw_value) + } catch { + payload[key] = raw_value + } + } + + await this.dispatchInboundPayload(payload) + } + } + } catch { + // Keep polling on transient errors. 
+ } + await new Promise((resolve) => setTimeout(resolve, Math.max(1, this.poll_interval * 1000))) + } + } + + private async dispatchInboundPayload(payload: unknown): Promise { + const event = BaseEvent.fromJSON(payload).eventReset() + this.inbound_bus.emit(event) + } + + private refreshColumnCache(): void { + if (!this.db) return + const rows = this.db.prepare(`PRAGMA table_info("${this.table}")`).all() as Array<{ name: string }> + this.table_columns = new Set(rows.map((row) => String(row.name))) + } + + private ensureColumns(keys: string[]): void { + if (!this.db) return + + for (const key of keys) { + validateIdentifier(key, 'event field name') + if (key !== EVENT_PAYLOAD_COLUMN && !key.startsWith('event_')) { + throw new Error(`Invalid event field name for bridge column: ${JSON.stringify(key)}. Only event_* fields become columns`) + } + } + + const missing_columns = keys.filter((key) => !this.table_columns.has(key)) + for (const key of missing_columns) { + const column_type = key === EVENT_PAYLOAD_COLUMN ? 
'JSON' : 'TEXT' + this.db.prepare(`ALTER TABLE "${this.table}" ADD COLUMN "${key}" ${column_type}`).run() + this.table_columns.add(key) + } + } + + private ensureBaseIndexes(): void { + if (!this.db) return + + const event_created_at_index = `${this.table}_event_created_at_idx` + const event_type_index = `${this.table}_event_type_idx` + + this.db.prepare(`CREATE INDEX IF NOT EXISTS "${event_created_at_index}" ON "${this.table}" ("event_created_at")`).run() + this.db.prepare(`CREATE INDEX IF NOT EXISTS "${event_type_index}" ON "${this.table}" ("event_type")`).run() + } + + private setCursorToLatestRow(): void { + if (!this.db) return + + const row = this.db + .prepare( + `SELECT COALESCE("event_created_at", '') AS event_created_at, COALESCE("event_id", '') AS event_id FROM "${this.table}" ORDER BY COALESCE("event_created_at", '') DESC, COALESCE("event_id", '') DESC LIMIT 1` + ) + .get() as { event_created_at?: string; event_id?: string } | undefined + + this.last_seen_event_created_at = String(row?.event_created_at ?? '') + this.last_seen_event_id = String(row?.event_id ?? 
'') + } +} diff --git a/bubus-ts/src/bridges.ts b/bubus-ts/src/bridges.ts new file mode 100644 index 0000000..3bfbc4a --- /dev/null +++ b/bubus-ts/src/bridges.ts @@ -0,0 +1,376 @@ +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import type { EventClass, EventHandlerCallable, EventPattern, UntypedEventHandlerFunction } from './types.js' + +type EndpointScheme = 'unix' | 'http' | 'https' + +type ParsedEndpoint = { + raw: string + scheme: EndpointScheme + host?: string + port?: number + path?: string +} + +export type HTTPEventBridgeOptions = { + send_to?: string | null + listen_on?: string | null + name?: string +} + +const isNodeRuntime = (): boolean => { + const maybe_process = (globalThis as { process?: { versions?: { node?: string } } }).process + return typeof maybe_process?.versions?.node === 'string' +} + +const isBrowserRuntime = (): boolean => !isNodeRuntime() && typeof globalThis.window !== 'undefined' + +const randomSuffix = (): string => Math.random().toString(36).slice(2, 10) +const UNIX_SOCKET_MAX_PATH_CHARS = 90 + +const parseEndpoint = (raw_endpoint: string): ParsedEndpoint => { + let parsed: URL + try { + parsed = new URL(raw_endpoint) + } catch { + throw new Error(`Invalid endpoint URL: ${raw_endpoint}`) + } + + const protocol = parsed.protocol.replace(/:$/, '').toLowerCase() + if (protocol !== 'unix' && protocol !== 'http' && protocol !== 'https') { + throw new Error(`Unsupported endpoint scheme: ${raw_endpoint}`) + } + + if (protocol === 'unix') { + const socket_path = decodeURIComponent(parsed.pathname || '') + if (!socket_path) { + throw new Error(`Invalid unix endpoint (missing socket path): ${raw_endpoint}`) + } + const socket_path_len = new TextEncoder().encode(socket_path).length + if (socket_path_len > UNIX_SOCKET_MAX_PATH_CHARS) { + throw new Error(`Unix socket path is too long (${socket_path_len} chars), max is ${UNIX_SOCKET_MAX_PATH_CHARS}: ${socket_path}`) + } + return { raw: raw_endpoint, 
scheme: 'unix', path: socket_path } + } + + if (!parsed.hostname) { + throw new Error(`Invalid HTTP endpoint (missing hostname): ${raw_endpoint}`) + } + + const default_port = protocol === 'https' ? 443 : 80 + return { + raw: raw_endpoint, + scheme: protocol, + host: parsed.hostname, + port: parsed.port ? Number(parsed.port) : default_port, + path: `${parsed.pathname || '/'}${parsed.search || ''}`, + } +} + +const importNodeModule = async (specifier: string): Promise => { + const dynamic_import = Function('module_name', 'return import(module_name)') as (module_name: string) => Promise + return dynamic_import(specifier) as Promise +} + +class _EventBridge { + readonly send_to: ParsedEndpoint | null + readonly listen_on: ParsedEndpoint | null + readonly name: string + + protected readonly inbound_bus: EventBus + private start_promise: Promise | null + private node_server: any | null + + constructor(send_to?: string | null, listen_on?: string | null, name?: string) { + this.send_to = send_to ? parseEndpoint(send_to) : null + this.listen_on = listen_on ? parseEndpoint(listen_on) : null + this.name = name ?? 
`EventBridge_${randomSuffix()}` + this.inbound_bus = new EventBus(this.name, { max_history_size: 0 }) + this.start_promise = null + this.node_server = null + + if (this.listen_on && isBrowserRuntime()) { + throw new Error(`${this.constructor.name} listen_on is not supported in browser runtimes`) + } + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) + this.on = this.on.bind(this) + } + + on(event_pattern: EventClass, handler: EventHandlerCallable): void + on(event_pattern: string | '*', handler: UntypedEventHandlerFunction): void + on(event_pattern: EventPattern | '*', handler: EventHandlerCallable | UntypedEventHandlerFunction): void { + this.ensureListenerStarted() + if (typeof event_pattern === 'string') { + this.inbound_bus.on(event_pattern, handler as UntypedEventHandlerFunction) + return + } + this.inbound_bus.on(event_pattern as EventClass, handler as EventHandlerCallable) + } + + async emit(event: T): Promise { + if (!this.send_to) { + throw new Error(`${this.constructor.name}.emit() requires send_to`) + } + + const payload = event.toJSON() + + if (this.send_to.scheme === 'unix') { + await this.sendUnix(this.send_to, payload) + return + } + + await this.sendHttp(this.send_to, payload) + } + + async dispatch(event: T): Promise { + return this.emit(event) + } + + async start(): Promise { + if (!this.listen_on) return + if (this.node_server) return + if (this.start_promise) { + await this.start_promise + return + } + + if (!isNodeRuntime()) { + throw new Error(`${this.constructor.name} listen_on is only supported in Node.js runtimes`) + } + + const launch = (async () => { + const endpoint = this.listen_on + if (!endpoint) return + + if (endpoint.scheme === 'unix') { + await this.startUnixListener(endpoint) + return + } + + if (endpoint.scheme !== 'http') { + throw new Error(`listen_on only supports unix:// or http:// endpoints, got: ${endpoint.raw}`) + } + + await this.startHttpListener(endpoint) + })() + this.start_promise = 
launch + + try { + await launch + } finally { + if (this.start_promise === launch) { + this.start_promise = null + } + } + } + + async close(): Promise { + if (this.start_promise) { + await Promise.allSettled([this.start_promise]) + this.start_promise = null + } + + if (this.node_server) { + const server = this.node_server + await new Promise((resolve) => { + server.close(() => resolve()) + }) + this.node_server = null + } + + this.inbound_bus.destroy() + } + + private ensureListenerStarted(): void { + if (!this.listen_on || this.node_server || this.start_promise) { + return + } + void this.start().catch((error: unknown) => { + console.error('[bubus] EventBridge failed to start listener', error) + }) + } + + private async handleIncomingPayload(payload: unknown): Promise { + const event = BaseEvent.fromJSON(payload).eventReset() + this.inbound_bus.emit(event) + } + + private async sendHttp(endpoint: ParsedEndpoint, payload: unknown): Promise { + const response = await fetch(endpoint.raw, { + method: 'POST', + headers: { 'content-type': 'application/json' }, + body: JSON.stringify(payload), + }) + if (!response.ok) { + throw new Error(`IPC HTTP send failed with status ${response.status}: ${endpoint.raw}`) + } + } + + private async sendUnix(endpoint: ParsedEndpoint, payload: unknown): Promise { + if (!isNodeRuntime()) { + throw new Error('unix:// send_to is only supported in Node.js runtimes') + } + + const socket_path = endpoint.path + if (!socket_path) { + throw new Error(`Invalid unix endpoint: ${endpoint.raw}`) + } + + const node_net = await importNodeModule('node:net') + await new Promise((resolve, reject) => { + const socket = node_net.createConnection(socket_path, () => { + socket.end(`${JSON.stringify(payload)}\n`) + }) + socket.on('error', (error: unknown) => reject(error)) + socket.on('close', () => resolve()) + }) + } + + private async startHttpListener(endpoint: ParsedEndpoint): Promise { + const node_http = await importNodeModule('node:http') + const 
expected_path = endpoint.path || '/' + + this.node_server = node_http.createServer((req: any, res: any) => { + const method = (req.method || '').toUpperCase() + const request_url = String(req.url || '/') + + if (method !== 'POST') { + res.statusCode = 405 + res.end('method not allowed') + return + } + if (request_url !== expected_path) { + res.statusCode = 404 + res.end('not found') + return + } + + let body = '' + req.setEncoding('utf8') + req.on('data', (chunk: string) => { + body += chunk + }) + req.on('end', () => { + let parsed_payload: unknown + try { + parsed_payload = JSON.parse(body) + } catch { + res.statusCode = 400 + res.end('invalid json') + return + } + + void this.handleIncomingPayload(parsed_payload) + .then(() => { + res.statusCode = 202 + res.end('accepted') + }) + .catch((error: unknown) => { + res.statusCode = 500 + res.end('failed to process event') + console.error('[bubus] EventBridge HTTP listener error', error) + }) + }) + }) + + await new Promise((resolve, reject) => { + this.node_server.once('error', (error: unknown) => reject(error)) + this.node_server.listen(endpoint.port, endpoint.host, () => resolve()) + }) + } + + private async startUnixListener(endpoint: ParsedEndpoint): Promise { + const socket_path = endpoint.path + if (!socket_path) { + throw new Error(`Invalid unix endpoint: ${endpoint.raw}`) + } + + const node_net = await importNodeModule('node:net') + const node_fs = await importNodeModule('node:fs') + + try { + await node_fs.promises.unlink(socket_path) + } catch (error: unknown) { + const code = (error as { code?: string }).code + if (code !== 'ENOENT') { + throw error + } + } + + this.node_server = node_net.createServer((socket: any) => { + let buffer = '' + socket.setEncoding('utf8') + socket.on('data', (chunk: string) => { + buffer += chunk + while (true) { + const newline_index = buffer.indexOf('\n') + if (newline_index < 0) break + const line = buffer.slice(0, newline_index).trim() + buffer = buffer.slice(newline_index + 
1) + if (!line) continue + try { + const parsed_payload = JSON.parse(line) + void this.handleIncomingPayload(parsed_payload) + } catch { + // Ignore malformed lines and continue reading next frames. + } + } + }) + socket.on('end', () => { + const remainder = buffer.trim() + if (!remainder) return + try { + const parsed_payload = JSON.parse(remainder) + void this.handleIncomingPayload(parsed_payload) + } catch { + // Ignore malformed trailing frame. + } + }) + }) + + await new Promise((resolve, reject) => { + this.node_server.once('error', (error: unknown) => reject(error)) + this.node_server.listen(socket_path, () => resolve()) + }) + } +} + +export class HTTPEventBridge extends _EventBridge { + constructor(send_to?: string | null, listen_on?: string | null, name?: string) + constructor(options?: HTTPEventBridgeOptions) + constructor(send_to_or_options?: string | null | HTTPEventBridgeOptions, listen_on?: string | null, name?: string) { + const options: HTTPEventBridgeOptions = + typeof send_to_or_options === 'object' + ? (send_to_or_options ?? {}) + : { send_to: send_to_or_options ?? undefined, listen_on: listen_on ?? undefined, name } + + if (options.send_to && parseEndpoint(options.send_to).scheme === 'unix') { + throw new Error('HTTPEventBridge send_to must be http:// or https://') + } + if (options.listen_on && parseEndpoint(options.listen_on).scheme !== 'http') { + throw new Error('HTTPEventBridge listen_on must be http://') + } + + super(options.send_to, options.listen_on, options.name ?? `HTTPEventBridge_${randomSuffix()}`) + } +} + +export class SocketEventBridge extends _EventBridge { + constructor(path?: string | null, name?: string) { + const normalized = path ? (path.startsWith('unix://') ? path.slice(7) : path) : null + if (normalized === '') { + throw new Error('SocketEventBridge path must not be empty') + } + + const endpoint = normalized ? `unix://${normalized}` : null + super(endpoint, endpoint, name ?? 
`SocketEventBridge_${randomSuffix()}`) + } +} + +export { NATSEventBridge } from './bridge_nats.js' +export { RedisEventBridge } from './bridge_redis.js' +export { PostgresEventBridge } from './bridge_postgres.js' +export { JSONLEventBridge } from './bridge_jsonl.js' +export { SQLiteEventBridge } from './bridge_sqlite.js' diff --git a/bubus-ts/src/event_bus.ts b/bubus-ts/src/event_bus.ts new file mode 100644 index 0000000..2b254c0 --- /dev/null +++ b/bubus-ts/src/event_bus.ts @@ -0,0 +1,1298 @@ +import { BaseEvent, type BaseEventJSON } from './base_event.js' +import { EventHistory } from './event_history.js' +import { EventResult } from './event_result.js' +import { captureAsyncContext } from './async_context.js' +import { _runWithSlowMonitor, _runWithTimeout } from './timing.js' +import { + AsyncLock, + type EventConcurrencyMode, + type EventHandlerConcurrencyMode, + type EventHandlerCompletionMode, + LockManager, +} from './lock_manager.js' +import { + EventHandler, + EventHandlerAbortedError, + EventHandlerCancelledError, + EventHandlerTimeoutError, + type EphemeralFindEventHandler, + type EventHandlerJSON, +} from './event_handler.js' +import type { EventBusMiddleware, EventBusMiddlewareCtor, EventBusMiddlewareInput } from './middlewares.js' +import { logTree } from './logging.js' +import { v7 as uuidv7 } from 'uuid' +import { monotonicDatetime } from './helpers.js' + +import { normalizeEventPattern } from './types.js' +import type { EventClass, EventHandlerCallable, EventPattern, FindOptions, UntypedEventHandlerFunction } from './types.js' + +export type EventBusOptions = { + id?: string + max_history_size?: number | null + max_history_drop?: boolean + + // per-event options + event_concurrency?: EventConcurrencyMode | null + event_timeout?: number | null // default handler timeout in seconds, applied when event.event_timeout is undefined + event_slow_timeout?: number | null // threshold before a warning is logged about slow event processing + + // 
per-event-handler options + event_handler_concurrency?: EventHandlerConcurrencyMode | null + event_handler_completion?: EventHandlerCompletionMode + event_handler_slow_timeout?: number | null // threshold before a warning is logged about slow handler execution + event_handler_detect_file_paths?: boolean // autodetect source code file and lineno where handlers are defined for better logs (slightly slower because Error().stack introspection to fine files is expensive) + middlewares?: EventBusMiddlewareInput[] +} + +export type EventBusJSON = { + id: string + name: string + max_history_size: number | null + max_history_drop: boolean + event_concurrency: EventConcurrencyMode + event_timeout: number | null + event_slow_timeout: number | null + event_handler_concurrency: EventHandlerConcurrencyMode + event_handler_completion: EventHandlerCompletionMode + event_handler_slow_timeout: number | null + event_handler_detect_file_paths: boolean + handlers: Record + handlers_by_key: Record + event_history: Record + pending_event_queue: string[] +} + +// Global registry of all EventBus instances to allow for cross-bus coordination +// when global-serial concurrency mode is used. 
+export class GlobalEventBusRegistry { + private _bus_refs = new Set>() + + add(bus: EventBus): void { + this._bus_refs.add(new WeakRef(bus)) + } + + discard(bus: EventBus): void { + for (const ref of this._bus_refs) { + const current = ref.deref() + if (!current || current === bus) { + this._bus_refs.delete(ref) + } + } + } + + has(bus: EventBus): boolean { + for (const ref of this._bus_refs) { + const current = ref.deref() + if (!current) { + this._bus_refs.delete(ref) + continue + } + if (current === bus) { + return true + } + } + return false + } + + get size(): number { + let count = 0 + for (const ref of this._bus_refs) { + if (ref.deref()) { + count += 1 + } else { + this._bus_refs.delete(ref) + } + } + return count + } + + *[Symbol.iterator](): IterableIterator { + for (const ref of this._bus_refs) { + const bus = ref.deref() + if (bus) { + yield bus + } else { + this._bus_refs.delete(ref) + } + } + } + + findBusById(bus_id: string): EventBus | undefined { + for (const bus of this) { + if (bus.id === bus_id) { + return bus + } + } + return undefined + } + + findEventById(event_id: string): BaseEvent | null { + for (const bus of this) { + const event = bus.event_history.getEvent(event_id) + if (event) { + return event + } + } + return null + } +} + +export class EventBus { + private static _registry_by_constructor = new WeakMap() + private static _global_event_lock_by_constructor = new WeakMap() + + private static getRegistryForConstructor(constructor_fn: Function): GlobalEventBusRegistry { + const existing_registry = EventBus._registry_by_constructor.get(constructor_fn) + if (existing_registry) { + return existing_registry + } + const created_registry = new GlobalEventBusRegistry() + EventBus._registry_by_constructor.set(constructor_fn, created_registry) + return created_registry + } + + private static getGlobalEventLockForConstructor(constructor_fn: Function): AsyncLock { + const existing_lock = 
EventBus._global_event_lock_by_constructor.get(constructor_fn) + if (existing_lock) { + return existing_lock + } + const created_lock = new AsyncLock(1) + EventBus._global_event_lock_by_constructor.set(constructor_fn, created_lock) + return created_lock + } + + static get all_instances(): GlobalEventBusRegistry { + return EventBus.getRegistryForConstructor(this) + } + + get all_instances(): GlobalEventBusRegistry { + return EventBus.getRegistryForConstructor(this.constructor as Function) + } + + get _lock_for_event_global_serial(): AsyncLock { + return EventBus.getGlobalEventLockForConstructor(this.constructor as Function) + } + + id: string // unique uuidv7 identifier for the event bus + name: string // name of the event bus, recommended to include the word "Bus" in the name for clarity in logs + + // configuration options + event_timeout: number | null + event_concurrency: EventConcurrencyMode + event_handler_concurrency: EventHandlerConcurrencyMode + event_handler_completion: EventHandlerCompletionMode + event_handler_detect_file_paths: boolean + + // slow processing warning timeout settings + event_handler_slow_timeout: number | null + event_slow_timeout: number | null + + // public runtime state + handlers: Map // map of handler uuidv5 ids to EventHandler objects + handlers_by_key: Map // map of normalized event_pattern to ordered handler ids + event_history: EventHistory // map of event uuidv7 ids to processed BaseEvent objects + + // internal runtime state + pending_event_queue: BaseEvent[] // queue of events that have been emitted to the bus but not yet processed + in_flight_event_ids: Set // set of event ids that are currently being processed by the bus + runloop_running: boolean + locks: LockManager + find_waiters: Set // set of EphemeralFindEventHandler objects that are waiting for a matching future event + middlewares: EventBusMiddleware[] + + private static normalizeMiddlewares(middlewares?: EventBusMiddlewareInput[]): EventBusMiddleware[] { + const 
normalized: EventBusMiddleware[] = [] + for (const middleware of middlewares ?? []) { + if (!middleware) { + continue + } + if (typeof middleware === 'function') { + normalized.push(new (middleware as EventBusMiddlewareCtor)()) + } else { + normalized.push(middleware as EventBusMiddleware) + } + } + return normalized + } + + constructor(name: string = 'EventBus', options: EventBusOptions = {}) { + this.id = options.id ?? uuidv7() + this.name = name + + // set configuration options + this.event_concurrency = options.event_concurrency ?? 'bus-serial' + this.event_handler_concurrency = options.event_handler_concurrency ?? 'serial' + this.event_handler_completion = options.event_handler_completion ?? 'all' + this.event_handler_detect_file_paths = options.event_handler_detect_file_paths ?? true + this.event_timeout = options.event_timeout === undefined ? 60 : options.event_timeout + this.event_handler_slow_timeout = options.event_handler_slow_timeout === undefined ? 30 : options.event_handler_slow_timeout + this.event_slow_timeout = options.event_slow_timeout === undefined ? 300 : options.event_slow_timeout + + // initialize runtime state + this.runloop_running = false + this.handlers = new Map() + this.handlers_by_key = new Map() + this.find_waiters = new Set() + this.event_history = new EventHistory({ + max_history_size: options.max_history_size === undefined ? 100 : options.max_history_size, + max_history_drop: options.max_history_drop ?? 
false, + }) + this.pending_event_queue = [] + this.in_flight_event_ids = new Set() + this.locks = new LockManager(this) + this.middlewares = EventBus.normalizeMiddlewares(options.middlewares) + + this.all_instances.add(this) + + this.dispatch = this.dispatch.bind(this) + this.emit = this.emit.bind(this) + } + + toString(): string { + return `${this.name}#${this.id.slice(-4)}` + } + + scheduleMicrotask(fn: () => void): void { + if (typeof queueMicrotask === 'function') { + queueMicrotask(fn) + return + } + void Promise.resolve().then(fn) + } + + private async _runMiddlewareHook(hook: keyof EventBusMiddleware, args: unknown[]): Promise { + if (this.middlewares.length === 0) { + return + } + for (const middleware of this.middlewares) { + const callback = middleware[hook] + if (!callback) { + continue + } + await (callback as (...hook_args: unknown[]) => void | Promise).apply(middleware, args) + } + } + + async onEventChange(event: BaseEvent, status: 'pending' | 'started' | 'completed'): Promise { + await this._onEventChange(event, status) + } + + async onEventResultChange(event: BaseEvent, result: EventResult, status: 'pending' | 'started' | 'completed'): Promise { + await this._onEventResultChange(event, result, status) + } + + private async _onEventChange(event: BaseEvent, status: 'pending' | 'started' | 'completed'): Promise { + await this._runMiddlewareHook('onEventChange', [this, event, status]) + } + + private async _onEventResultChange(event: BaseEvent, result: EventResult, status: 'pending' | 'started' | 'completed'): Promise { + await this._runMiddlewareHook('onEventResultChange', [this, event, result, status]) + } + + private async _onBusHandlersChange(handler: EventHandler, registered: boolean): Promise { + await this._runMiddlewareHook('onBusHandlersChange', [this, handler, registered]) + } + + private _finalizeEventTimeout( + event: BaseEvent, + pending_entries: Array<{ + handler: EventHandler + result: EventResult + }>, + timeout_error: 
EventHandlerTimeoutError + ): void { + const timeout_seconds = timeout_error.timeout_seconds ?? event.event_timeout ?? null + event._cancelPendingChildProcessing(timeout_error) + + for (const entry of pending_entries) { + const result = entry.result + if (result.status === 'completed') { + continue + } + if (result.status === 'error') { + continue + } + if (result.status === 'started') { + result._lock?.exitHandlerRun() + result._releaseQueueJumpPauses() + const aborted_error = new EventHandlerAbortedError(`Aborted running handler due to event timeout`, { + event_result: result, + timeout_seconds, + cause: timeout_error, + }) + result._markError(aborted_error) + result._signalAbort(aborted_error) + continue + } + const cancelled_error = new EventHandlerCancelledError(`Cancelled pending handler due to event timeout`, { + event_result: result, + timeout_seconds, + cause: timeout_error, + }) + result._markError(cancelled_error) + } + + event.event_pending_bus_count = Math.max(0, event.event_pending_bus_count - 1) + event._markCompleted() + } + + private _createEventTimeoutError( + event: BaseEvent, + pending_entries: Array<{ + handler: EventHandler + result: EventResult + }>, + timeout_seconds: number + ): EventHandlerTimeoutError { + const timeout_anchor = + pending_entries.find((entry) => entry.result.status === 'started') ?? + pending_entries.find((entry) => entry.result.status === 'pending') ?? + pending_entries[0]! 
+ return new EventHandlerTimeoutError( + `${this.toString()}.on(${event.toString()}, ${timeout_anchor.result.handler.toString()}) timed out after ${timeout_seconds}s`, + { + event_result: timeout_anchor.result, + timeout_seconds, + } + ) + } + + private async _runHandlersWithTimeout( + event: BaseEvent, + pending_entries: Array<{ + handler: EventHandler + result: EventResult + }>, + event_timeout: number | null, + fn: () => Promise + ): Promise { + try { + if (event_timeout === null || pending_entries.length === 0) { + await fn() + } else { + await _runWithTimeout(event_timeout, () => this._createEventTimeoutError(event, pending_entries, event_timeout), fn) + } + } catch (error) { + if (error instanceof EventHandlerTimeoutError) { + this._finalizeEventTimeout(event, pending_entries, error) + return + } + throw error + } + } + + private _markEventCompletedIfNeeded(event: BaseEvent): void { + if (event.event_status !== 'completed') { + event.event_pending_bus_count = Math.max(0, event.event_pending_bus_count - 1) + event._markCompleted(false) + } + if ( + this.event_history.max_history_size !== null && + this.event_history.max_history_size > 0 && + this.event_history.size > this.event_history.max_history_size + ) { + this.event_history.trimEventHistory({ + is_event_complete: (candidate_event) => candidate_event.event_status === 'completed', + on_remove: (candidate_event) => candidate_event._gc(), + owner_label: this.toString(), + max_history_size: this.event_history.max_history_size, + max_history_drop: this.event_history.max_history_drop, + }) + } + } + + toJSON(): EventBusJSON { + const handlers: Record = {} + for (const [handler_id, handler] of this.handlers.entries()) { + handlers[handler_id] = handler.toJSON() + } + + const handlers_by_key: Record = {} + for (const [key, ids] of this.handlers_by_key.entries()) { + handlers_by_key[key] = [...ids] + } + + const event_history: Record = {} + for (const [event_id, event] of this.event_history.entries()) { + 
event_history[event_id] = event.toJSON() + } + + const pending_event_queue: string[] = [] + for (const event of this.pending_event_queue) { + const event_id = event.event_id + if (!event_history[event_id]) { + event_history[event_id] = event.toJSON() + } + pending_event_queue.push(event_id) + } + + return { + id: this.id, + name: this.name, + max_history_size: this.event_history.max_history_size, + max_history_drop: this.event_history.max_history_drop, + event_concurrency: this.event_concurrency, + event_timeout: this.event_timeout, + event_slow_timeout: this.event_slow_timeout, + event_handler_concurrency: this.event_handler_concurrency, + event_handler_completion: this.event_handler_completion, + event_handler_slow_timeout: this.event_handler_slow_timeout, + event_handler_detect_file_paths: this.event_handler_detect_file_paths, + handlers, + handlers_by_key, + event_history, + pending_event_queue, + } + } + + private static _stubHandlerFn(): EventHandlerCallable { + return (() => undefined) as EventHandlerCallable + } + + private static _upsertHandlerIndex(bus: EventBus, event_pattern: string, handler_id: string): void { + const ids = bus.handlers_by_key.get(event_pattern) + if (ids) { + if (!ids.includes(handler_id)) { + ids.push(handler_id) + } + return + } + bus.handlers_by_key.set(event_pattern, [handler_id]) + } + + private static _linkEventResultHandlers(event: BaseEvent, bus: EventBus): void { + for (const [map_key, result] of Array.from(event.event_results.entries())) { + const handler_id = result.handler_id + const existing_handler = bus.handlers.get(handler_id) + if (existing_handler) { + result.handler = existing_handler + } else { + const source = result.handler + const handler_entry = EventHandler.fromJSON( + { + ...source.toJSON(), + id: handler_id, + event_pattern: source.event_pattern || event.event_type, + eventbus_name: source.eventbus_name || bus.name, + eventbus_id: source.eventbus_id || bus.id, + }, + EventBus._stubHandlerFn() + ) + 
bus.handlers.set(handler_entry.id, handler_entry) + EventBus._upsertHandlerIndex(bus, handler_entry.event_pattern, handler_entry.id) + result.handler = handler_entry + } + + if (map_key !== handler_id) { + event.event_results.delete(map_key) + event.event_results.set(handler_id, result) + } + } + } + + static fromJSON(data: unknown): EventBus { + if (!data || typeof data !== 'object') { + throw new Error('EventBus.fromJSON(data) requires an object') + } + const record = data as Record + const name = typeof record.name === 'string' ? record.name : 'EventBus' + const options: EventBusOptions = {} + + if (typeof record.id === 'string') options.id = record.id + if (typeof record.max_history_size === 'number' || record.max_history_size === null) options.max_history_size = record.max_history_size + if (typeof record.max_history_drop === 'boolean') options.max_history_drop = record.max_history_drop + if ( + record.event_concurrency === 'global-serial' || + record.event_concurrency === 'bus-serial' || + record.event_concurrency === 'parallel' + ) { + options.event_concurrency = record.event_concurrency + } + if (typeof record.event_timeout === 'number' || record.event_timeout === null) options.event_timeout = record.event_timeout + if (typeof record.event_slow_timeout === 'number' || record.event_slow_timeout === null) + options.event_slow_timeout = record.event_slow_timeout + if (record.event_handler_concurrency === 'serial' || record.event_handler_concurrency === 'parallel') { + options.event_handler_concurrency = record.event_handler_concurrency + } + if (record.event_handler_completion === 'all' || record.event_handler_completion === 'first') { + options.event_handler_completion = record.event_handler_completion + } + if (typeof record.event_handler_slow_timeout === 'number' || record.event_handler_slow_timeout === null) { + options.event_handler_slow_timeout = record.event_handler_slow_timeout + } + if (typeof record.event_handler_detect_file_paths === 'boolean') { + 
options.event_handler_detect_file_paths = record.event_handler_detect_file_paths + } + const bus = new EventBus(name, options) + + if (!record.handlers || typeof record.handlers !== 'object' || Array.isArray(record.handlers)) { + throw new Error('EventBus.fromJSON(data) requires handlers as an id-keyed object') + } + for (const [handler_id, payload] of Object.entries(record.handlers as Record)) { + if (!payload || typeof payload !== 'object') { + continue + } + const parsed = EventHandler.fromJSON( + { + ...(payload as Record), + id: typeof (payload as { id?: unknown }).id === 'string' ? (payload as { id: string }).id : handler_id, + }, + EventBus._stubHandlerFn() + ) + bus.handlers.set(parsed.id, parsed) + } + + if (!record.handlers_by_key || typeof record.handlers_by_key !== 'object' || Array.isArray(record.handlers_by_key)) { + throw new Error('EventBus.fromJSON(data) requires handlers_by_key as an object') + } + bus.handlers_by_key.clear() + for (const [raw_key, raw_ids] of Object.entries(record.handlers_by_key as Record)) { + if (!Array.isArray(raw_ids)) { + continue + } + const ids = raw_ids.filter((id): id is string => typeof id === 'string') + bus.handlers_by_key.set(raw_key, ids) + } + + if (!record.event_history || typeof record.event_history !== 'object' || Array.isArray(record.event_history)) { + throw new Error('EventBus.fromJSON(data) requires event_history as an id-keyed object') + } + for (const [event_id, payload] of Object.entries(record.event_history as Record)) { + if (!payload || typeof payload !== 'object') { + continue + } + const event = BaseEvent.fromJSON({ + ...(payload as Record), + event_id: typeof (payload as { event_id?: unknown }).event_id === 'string' ? 
(payload as { event_id: string }).event_id : event_id, + }) + event.bus = bus + bus.event_history.set(event.event_id, event) + } + + if (!Array.isArray(record.pending_event_queue)) { + throw new Error('EventBus.fromJSON(data) requires pending_event_queue as an array of event ids') + } + const raw_pending_event_queue = record.pending_event_queue + const pending_event_ids: string[] = [] + for (const item of raw_pending_event_queue) { + if (typeof item === 'string') { + pending_event_ids.push(item) + } + } + bus.pending_event_queue = pending_event_ids + .map((event_id) => bus.event_history.get(event_id)) + .filter((event): event is BaseEvent => Boolean(event)) + + for (const event of bus.event_history.values()) { + EventBus._linkEventResultHandlers(event, bus) + } + + // Reset runtime execution state after restore. Queue/history/handlers are restored, + // but lock internals should always restart from a clean default state. + bus.in_flight_event_ids.clear() + bus.runloop_running = false + bus.locks.clear() + bus.find_waiters.clear() + + return bus + } + + get label(): string { + return `${this.name}#${this.id.slice(-4)}` + } + + removeEventFromPendingQueue(event: BaseEvent): number { + const original_event = event._event_original ?? event + let removed_count = 0 + for (let index = this.pending_event_queue.length - 1; index >= 0; index -= 1) { + const queued_event = this.pending_event_queue[index] + const queued_original = queued_event._event_original ?? queued_event + if (queued_original.event_id !== original_event.event_id) { + continue + } + this.pending_event_queue.splice(index, 1) + removed_count += 1 + } + return removed_count + } + + isEventInFlightOrQueued(event_id: string): boolean { + if (this.in_flight_event_ids.has(event_id)) { + return true + } + for (const queued_event of this.pending_event_queue) { + const queued_original = queued_event._event_original ?? 
queued_event + if (queued_original.event_id === event_id) { + return true + } + } + return false + } + + removeEventFromHistory(event_id: string): boolean { + return this.event_history.delete(event_id) + } + + // destroy the event bus and all its state to allow for garbage collection + destroy(): void { + this.all_instances.discard(this) + this.handlers.clear() + this.handlers_by_key.clear() + for (const event of this.event_history.values()) { + event._gc() + } + this.event_history.clear() + this.pending_event_queue.length = 0 + this.in_flight_event_ids.clear() + this.find_waiters.clear() + this.locks.clear() + } + + on(event_pattern: EventClass, handler: EventHandlerCallable, options?: Partial): EventHandler + on( + event_pattern: string | '*', + handler: UntypedEventHandlerFunction, + options?: Partial + ): EventHandler + on( + event_pattern: EventPattern | '*', + handler: EventHandlerCallable | UntypedEventHandlerFunction, + options: Partial = {} + ): EventHandler { + const normalized_key = normalizeEventPattern(event_pattern) // get string event_type or '*' + const handler_name = handler.name || 'anonymous' // get handler function name or 'anonymous' if the handler is an anonymous/arrow function + const handler_entry = new EventHandler({ + handler: handler as EventHandlerCallable, + handler_name, + handler_registered_at: monotonicDatetime(), + event_pattern: normalized_key, + eventbus_name: this.name, + eventbus_id: this.id, + ...options, + }) + if (this.event_handler_detect_file_paths) { + // optionally perform (expensive) file path detection for the handler using Error().stack introspection + // makes logs much more useful for debugging, but is expensive to do if not needed + handler_entry._detectHandlerFilePath() + } + + this.handlers.set(handler_entry.id, handler_entry) + const ids = this.handlers_by_key.get(handler_entry.event_pattern) + if (ids) ids.push(handler_entry.id) + else this.handlers_by_key.set(handler_entry.event_pattern, [handler_entry.id]) + 
this.scheduleMicrotask(() => { + void this._onBusHandlersChange(handler_entry, true) + }) + return handler_entry + } + + off(event_pattern: EventPattern | '*', handler?: EventHandlerCallable | string | EventHandler): void { + const normalized_key = normalizeEventPattern(event_pattern) + if (typeof handler === 'object' && handler instanceof EventHandler && handler.id !== undefined) { + handler = handler.id + } + const match_by_id = typeof handler === 'string' + for (const entry of this.handlers.values()) { + if (entry.event_pattern !== normalized_key) { + continue + } + const handler_id = entry.id + if (handler === undefined || (match_by_id ? handler_id === handler : entry.handler === (handler as EventHandlerCallable))) { + this.handlers.delete(handler_id) + this._removeIndexedHandler(entry.event_pattern, handler_id) + this.scheduleMicrotask(() => { + void this._onBusHandlersChange(entry, false) + }) + } + } + } + + emit(event: T): T { + const original_event = event._event_original ?? event // if event is a bus-scoped proxy already, get the original underlying event object + if (!original_event.bus) { + // if we are the first bus to emit this event, set the bus property on the original event object + original_event.bus = this + } + if (!Array.isArray(original_event.event_path)) { + original_event.event_path = [] + } + if (original_event._getDispatchContext() === undefined) { + // when used in fastify/nextjs/other contexts with tracing based on AsyncLocalStorage in node + // we want to capture the context at the emit site and use it when running handlers + // because events may be handled async in a separate context than the emit site + original_event._setDispatchContext(captureAsyncContext()) + } + if (original_event.event_path.includes(this.label) || this._hasProcessedEvent(original_event)) { + return this._getEventProxyScopedToThisBus(original_event) as T + } + + if (!original_event.event_path.includes(this.label)) { + original_event.event_path.push(this.label) + 
} + + if (!original_event.event_parent_id && !original_event.event_emitted_by_handler_id) { + this._resolveImplicitParentHandlerResult()?._linkEmittedChildEvent(original_event) + } + + if (original_event.event_parent_id && original_event.event_emitted_by_handler_id) { + const parent_result = original_event.event_parent?.event_results.get(original_event.event_emitted_by_handler_id) + if (parent_result) { + parent_result._linkEmittedChildEvent(original_event) + } + } + + if ( + this.event_history.max_history_size !== null && + this.event_history.max_history_size > 0 && + !this.event_history.max_history_drop && + this.event_history.size >= this.event_history.max_history_size + ) { + throw new Error( + `${this.toString()}.emit(${original_event.event_type}) rejected: history limit reached (${this.event_history.size}/${this.event_history.max_history_size}); set event_history.max_history_drop=true to drop old history instead.` + ) + } + + this.event_history.addEvent(original_event) + this.event_history.trimEventHistory({ + is_event_complete: (candidate_event) => candidate_event.event_status === 'completed', + on_remove: (candidate_event) => candidate_event._gc(), + owner_label: this.toString(), + max_history_size: this.event_history.max_history_size, + max_history_drop: this.event_history.max_history_drop, + }) + this._resolveFindWaiters(original_event) + + original_event.event_pending_bus_count += 1 + this.pending_event_queue.push(original_event) + this._startRunloop() + + return this._getEventProxyScopedToThisBus(original_event) as T + } + + // alias for emit + dispatch(event: T): T { + return this.emit(event) + } + + // find a recent event or wait for a future event that matches some criteria + find(event_pattern: '*', options?: FindOptions): Promise + find(event_pattern: '*', where: (event: BaseEvent) => boolean, options?: FindOptions): Promise + find(event_pattern: EventPattern, options?: FindOptions): Promise + find(event_pattern: EventPattern, where: (event: T) => 
boolean, options?: FindOptions): Promise + async find( + event_pattern: EventPattern | '*', + where_or_options: ((event: T) => boolean) | FindOptions = {}, + maybe_options: FindOptions = {} + ): Promise { + const where = typeof where_or_options === 'function' ? where_or_options : () => true + const options = typeof where_or_options === 'function' ? maybe_options : where_or_options + const match = await this.event_history.find(event_pattern as EventPattern | '*', where, { + ...options, + event_is_child_of: (event, ancestor) => this.eventIsChildOf(event, ancestor), + wait_for_future_match: (normalized_event_pattern, matches, future) => + this._waitForFutureMatch(normalized_event_pattern, matches, future), + }) + if (!match) { + return null + } + return this._getEventProxyScopedToThisBus(match) as T + } + + private async _waitForFutureMatch( + event_pattern: string | '*', + matches: (event: BaseEvent) => boolean, + future: boolean | number + ): Promise { + if (future === false) { + return null + } + return await new Promise((resolve) => { + const waiter: EphemeralFindEventHandler = { + event_pattern, + matches, + resolve: (event) => resolve(event), + } + if (future !== true) { + const timeout_ms = Math.max(0, Number(future)) * 1000 + waiter.timeout_id = setTimeout(() => { + this.find_waiters.delete(waiter) + resolve(null) + }, timeout_ms) + } + this.find_waiters.add(waiter) + }) + } + + async waitUntilIdle(timeout: number | null = null): Promise { + return await this.locks.waitForIdle(timeout) + } + + // Weak idle check: only checks if handlers are idle, doesnt check that the queue is empty + isIdle(): boolean { + for (const event of this.event_history.values()) { + for (const result of event.event_results.values()) { + if (result.eventbus_id !== this.id) { + continue + } + if (result.status === 'pending' || result.status === 'started') { + return false + } + } + } + return true // no handlers are pending or started + } + + // Stronger idle check: no queued work, no 
in-flight processing, _runloop not + // active, and no handlers pending/running for this bus. + isIdleAndQueueEmpty(): boolean { + return this.pending_event_queue.length === 0 && this.in_flight_event_ids.size === 0 && this.isIdle() && !this.runloop_running + } + + eventIsChildOf(child_event: BaseEvent, parent_event: BaseEvent): boolean { + if (child_event.event_id === parent_event.event_id) { + return false + } + + let current_parent_id = child_event.event_parent_id + while (current_parent_id) { + if (current_parent_id === parent_event.event_id) { + return true + } + const parent = this.event_history.get(current_parent_id) + if (!parent) { + return false + } + current_parent_id = parent.event_parent_id + } + return false + } + + eventIsParentOf(parent_event: BaseEvent, child_event: BaseEvent): boolean { + return this.eventIsChildOf(child_event, parent_event) + } + + // return a full detailed tree diagram of all events and results on this bus + logTree(): string { + return logTree(this) + } + + // Resolve an event id from this bus first, then across all known buses. + findEventById(event_id: string): BaseEvent | null { + return this.event_history.get(event_id) ?? this.all_instances.findEventById(event_id) + } + + // Walk up the parent event chain to find an in-flight ancestor handler result. + // Returns the result if found, null otherwise. Used by _processEventImmediately to detect + // cross-bus queue-jump scenarios where the calling handler is on a different bus. + _getParentEventResultAcrossAllBuses(event: BaseEvent): EventResult | null { + const original = event._event_original ?? 
event + let current_parent_id = original.event_parent_id + let current_handler_id = original.event_emitted_by_handler_id + while (current_handler_id && current_parent_id) { + const parent = this.all_instances.findEventById(current_parent_id) + if (!parent) break + const handler_result = parent.event_results.get(current_handler_id) + if (handler_result && handler_result.status === 'started') return handler_result + current_parent_id = parent.event_parent_id + current_handler_id = parent.event_emitted_by_handler_id + } + return null + } + + private _startRunloop(): void { + if (this.runloop_running) { + return + } + this.runloop_running = true + this.scheduleMicrotask(() => { + void this._runloop() + }) + } + + // schedule the processing of an event on the event bus by its normal _runloop + // optionally using a pre-acquired lock if we're inside handling of a parent event + private async _processEvent( + event: BaseEvent, + options: { + bypass_event_locks?: boolean + pre_acquired_lock?: AsyncLock | null + } = {} + ): Promise { + let pending_entries: Array<{ + handler: EventHandler + result: EventResult + }> = [] + try { + if (this._hasProcessedEvent(event)) { + return + } + const scoped_event = this._getEventProxyScopedToThisBus(event) + await this._onEventChange(scoped_event, 'pending') + event._markStarted() + pending_entries = event._createPendingHandlerResults(this) + const resolved_event_timeout = event.event_timeout ?? 
this.event_timeout + if (this.middlewares.length > 0) { + for (const entry of pending_entries) { + await this._onEventResultChange(scoped_event, entry.result, 'pending') + } + } + await this.locks._runWithEventLock( + event, + () => + this._runHandlersWithTimeout(event, pending_entries, resolved_event_timeout, () => + _runWithSlowMonitor(event._createSlowEventWarningTimer(), () => scoped_event._runHandlers(pending_entries)) + ), + options + ) + this._markEventCompletedIfNeeded(event) + } finally { + if (options.pre_acquired_lock) { + options.pre_acquired_lock.release() + } + this.in_flight_event_ids.delete(event.event_id) + this.locks._notifyIdleListeners() + } + } + + // Called when a handler does `await child.done()` β€” processes the child event + // immediately ("queue-jump") instead of waiting for the _runloop to pick it up. + // + // Yield-and-reacquire: if the calling handler holds a handler concurrency lock, + // we temporarily release it so child handlers on the same bus can acquire it + // (preventing deadlock for serial handler mode). We re-acquire after + // the child completes so the parent handler can continue with the lock held. + async _processEventImmediately(event: T, handler_result?: EventResult): Promise { + const original_event = event._event_original ?? event + // Find the parent handler's result: prefer the proxy-provided one (only if + // the handler is still running), then this bus's stack, then walk up the + // parent event tree (cross-bus case). If none found, we're not inside a + // handler and should fall back to eventCompleted(). + const proxy_result = handler_result?.status === 'started' ? handler_result : undefined + const currently_active_event_result = + proxy_result ?? this.locks._getActiveHandlerResult() ?? this._getParentEventResultAcrossAllBuses(original_event) ?? 
undefined + if (!currently_active_event_result) { + // Not inside any handler scope β€” avoid queue-jump, but if this event is + // next in line we can process it immediately without waiting on the _runloop. + // We must acquire/revalidate the event lock first to avoid racing the runloop + // and accidentally reordering/removing the wrong queue head. + const queue_index = this.pending_event_queue.indexOf(original_event) + const can_process_now = + queue_index === 0 && + !this.locks._isPaused() && + !this.in_flight_event_ids.has(original_event.event_id) && + !this._hasProcessedEvent(original_event) + if (can_process_now) { + const event_lock = this.locks.getLockForEvent(original_event) + let pre_acquired_lock: AsyncLock | null = null + if (event_lock) { + await event_lock.acquire() + pre_acquired_lock = event_lock + } + const queue_head = this.pending_event_queue[0] + const queue_head_original = queue_head?._event_original ?? queue_head + const still_can_process_now = + queue_head_original === original_event && + !this.locks._isPaused() && + !this.in_flight_event_ids.has(original_event.event_id) && + !this._hasProcessedEvent(original_event) + if (still_can_process_now) { + this.pending_event_queue.shift() + this.in_flight_event_ids.add(original_event.event_id) + await this._processEvent(original_event, { + bypass_event_locks: true, + pre_acquired_lock, + }) + if (original_event.event_status !== 'completed') { + await original_event.eventCompleted() + } + return event + } + if (pre_acquired_lock) { + pre_acquired_lock.release() + } + } + await original_event.eventCompleted() + return event + } + + // ensure a pause request is set so the bus _runloop pauses and (will resume when the handler exits) + currently_active_event_result._ensureQueueJumpPause(this) + if (original_event.event_status === 'completed') { + return event + } + + // re-endter event-level handler lock if needed + if (currently_active_event_result._lock) { + await 
currently_active_event_result._lock.runQueueJump(this._processEventImmediatelyAcrossBuses.bind(this, original_event)) + return event + } + + await this._processEventImmediatelyAcrossBuses(original_event) + return event + } + + // Processes a queue-jumped event across all buses that have it emitted. + // Called from _processEventImmediately after the parent handler's lock has been yielded. + private async _processEventImmediatelyAcrossBuses(event: BaseEvent): Promise { + // Use event_path ordering to pick candidate buses and filter out buses that + // haven't seen the event or already processed it. + const ordered: EventBus[] = [] + const seen = new Set() + const event_path = Array.isArray(event.event_path) ? event.event_path : [] + for (const label of event_path) { + for (const bus of this.all_instances) { + if (bus.label !== label) { + continue + } + if (!bus.event_history.has(event.event_id)) { + continue + } + if (bus._hasProcessedEvent(event)) { + continue + } + if (!seen.has(bus)) { + ordered.push(bus) + seen.add(bus) + } + } + } + if (!seen.has(this) && this.event_history.has(event.event_id)) { + ordered.push(this) + } + if (ordered.length === 0) { + await event.eventCompleted() + return + } + + // Determine which event lock the initiating bus resolves to, so we can + // detect when other buses share the same instance (global-serial). 
+ const initiating_event_lock = this.locks.getLockForEvent(event) + const pause_releases: Array<() => void> = [] + + try { + for (const bus of ordered) { + if (bus !== this) { + pause_releases.push(bus.locks._requestRunloopPause()) + } + } + + for (const bus of ordered) { + const index = bus.pending_event_queue.indexOf(event) + if (index >= 0) { + bus.pending_event_queue.splice(index, 1) + } + if (bus._hasProcessedEvent(event)) { + continue + } + if (bus.in_flight_event_ids.has(event.event_id)) { + continue + } + bus.in_flight_event_ids.add(event.event_id) + + // Bypass event lock on the initiating bus (we're already inside a handler + // that acquired it). For other buses, only bypass if they resolve to the same + // lock instance (global-serial shares one lock across all buses). + const bus_event_lock = bus.locks.getLockForEvent(event) + const should_bypass_event_lock = bus === this || (initiating_event_lock !== null && bus_event_lock === initiating_event_lock) + + await bus._processEvent(event, { + bypass_event_locks: should_bypass_event_lock, + }) + } + + if (event.event_status !== 'completed') { + await event.eventCompleted() + } + } finally { + for (const release of pause_releases) { + release() + } + } + } + + private async _runloop(): Promise { + for (;;) { + while (this.pending_event_queue.length > 0) { + await Promise.resolve() + if (this.locks._isPaused()) { + await this.locks._waitUntilRunloopResumed() + continue + } + const next_event = this.pending_event_queue[0] + if (!next_event) { + continue + } + const original_event = next_event._event_original ?? next_event + if (this._hasProcessedEvent(original_event)) { + this.pending_event_queue.shift() + continue + } + let pre_acquired_lock: AsyncLock | null = null + const event_lock = this.locks.getLockForEvent(original_event) + if (event_lock) { + await event_lock.acquire() + pre_acquired_lock = event_lock + } + // Queue head may have changed while waiting for the lock + // (e.g. 
done() processing the head immediately). Revalidate + // before mutating the queue to avoid removing the wrong event. + const current_head = this.pending_event_queue[0] + const current_head_original = current_head?._event_original ?? current_head + if (current_head_original !== original_event) { + if (pre_acquired_lock) { + pre_acquired_lock.release() + } + continue + } + this.pending_event_queue.shift() + if (this.in_flight_event_ids.has(original_event.event_id)) { + if (pre_acquired_lock) { + pre_acquired_lock.release() + } + continue + } + this.in_flight_event_ids.add(original_event.event_id) + void this._processEvent(original_event, { + bypass_event_locks: true, + pre_acquired_lock, + }) + await Promise.resolve() + } + this.runloop_running = false + if (this.pending_event_queue.length > 0) { + this._startRunloop() + return + } + this.locks._notifyIdleListeners() + return + } + } + + // check if an event has been processed (and completed) by this bus + _hasProcessedEvent(event: BaseEvent): boolean { + const results = Array.from(event.event_results.values()).filter((result) => result.eventbus_id === this.id) + if (results.length === 0) { + return false + } + return results.every((result) => result.status === 'completed' || result.status === 'error') + } + + private _resolveImplicitParentHandlerResult(): EventResult | null { + const active_on_target_bus = this.locks._getActiveHandlerResults().filter((result) => result.status === 'started') + if (active_on_target_bus.length === 1) { + return active_on_target_bus[0] + } + + const active_globally: EventResult[] = [] + for (const bus of this.all_instances) { + for (const result of bus.locks._getActiveHandlerResults()) { + if (result.status === 'started') { + active_globally.push(result) + } + } + } + if (active_globally.length === 1) { + return active_globally[0] + } + return null + } + + // get a proxy wrapper around an Event that will automatically link emitted child events to this bus and handler + // proxy is what 
gets passed into the handler, if handler does event.bus.emit(...) to dispatch child events, + // the proxy auto-sets event.parent_event_id and event.event_emitted_by_handler_id + _getEventProxyScopedToThisBus(event: T, handler_result?: EventResult): T { + const original_event = event._event_original ?? event + const bus = this + const parent_event_id = original_event.event_id + const bus_proxy = new Proxy(bus, { + get(target, prop, receiver) { + if (prop === '_processEventImmediately') { + const runner = Reflect.get(target, prop, receiver) as EventBus['_processEventImmediately'] + const process_event_immediately = (child_event: TChild): Promise => { + return runner.call(target, child_event, handler_result) as Promise + } + return process_event_immediately + } + if (prop === 'dispatch' || prop === 'emit') { + const emit_child_event = (child_event: TChild): TChild => { + const original_child = child_event._event_original ?? child_event + if (handler_result) { + handler_result._linkEmittedChildEvent(original_child) + } else if (!original_child.event_parent_id && original_child.event_id !== parent_event_id) { + // fallback for non-handler scoped emit/dispatch + original_child.event_parent_id = parent_event_id + } + const dispatcher = Reflect.get(target, prop, receiver) as EventBus['dispatch'] + const dispatched = dispatcher.call(target, original_child) + return target._getEventProxyScopedToThisBus(dispatched as TChild, handler_result) + } + return emit_child_event + } + return Reflect.get(target, prop, receiver) + }, + }) + const scoped = new Proxy(original_event, { + get(target, prop, receiver) { + if (prop === 'bus') { + return bus_proxy + } + if (prop === '_event_original') { + return target + } + return Reflect.get(target, prop, receiver) + }, + set(target, prop, value) { + if (prop === 'bus') { + return true + } + return Reflect.set(target, prop, value, target) + }, + has(target, prop) { + if (prop === 'bus') { + return true + } + if (prop === '_event_original') { 
+ return true + } + return Reflect.has(target, prop) + }, + }) + + return scoped as T + } + + private _resolveFindWaiters(event: BaseEvent): void { + for (const waiter of Array.from(this.find_waiters)) { + if ((waiter.event_pattern !== '*' && event.event_type !== waiter.event_pattern) || !waiter.matches(event)) { + continue + } + if (waiter.timeout_id) { + clearTimeout(waiter.timeout_id) + } + this.find_waiters.delete(waiter) + waiter.resolve(event) + } + } + + _getHandlersForEvent(event: BaseEvent): EventHandler[] { + const handlers: EventHandler[] = [] + for (const key of [event.event_type, '*']) { + const ids = this.handlers_by_key.get(key) + if (!ids) continue + for (const id of ids) { + const entry = this.handlers.get(id) + if (entry) handlers.push(entry) + } + } + return handlers + } + + private _removeIndexedHandler(event_pattern: string | '*', handler_id: string): void { + const ids = this.handlers_by_key.get(event_pattern) + if (!ids) return + const idx = ids.indexOf(handler_id) + if (idx < 0) return + ids.splice(idx, 1) + if (ids.length === 0) this.handlers_by_key.delete(event_pattern) + } +} diff --git a/bubus-ts/src/event_handler.ts b/bubus-ts/src/event_handler.ts new file mode 100644 index 0000000..37bf72c --- /dev/null +++ b/bubus-ts/src/event_handler.ts @@ -0,0 +1,348 @@ +import { z } from 'zod' +import { v5 as uuidv5 } from 'uuid' + +import { normalizeEventPattern, type EventHandlerCallable, type EventPattern } from './types.js' +import { BaseEvent } from './base_event.js' +import type { EventResult } from './event_result.js' +import { monotonicDatetime } from './helpers.js' + +const HANDLER_ID_NAMESPACE = uuidv5('bubus-handler', uuidv5.DNS) + +export type EphemeralFindEventHandler = { + // Similar to a handler, except it's for .find() calls. + // Resolved on dispatch, ephemeral, and never shows up in the processing tree. 
+ event_pattern: string | '*' + matches: (event: BaseEvent) => boolean + resolve: (event: BaseEvent) => void + timeout_id?: ReturnType +} + +export const FindWaiterJSONSchema = z + .object({ + event_pattern: z.union([z.string(), z.literal('*')]), + has_timeout: z.boolean(), + }) + .strict() + +export type FindWaiterJSON = z.infer + +export class FindWaiter { + static toJSON(waiter: EphemeralFindEventHandler): FindWaiterJSON { + return { + event_pattern: waiter.event_pattern, + has_timeout: waiter.timeout_id !== undefined, + } + } + + static fromJSON( + data: unknown, + overrides: { + matches?: (event: BaseEvent) => boolean + resolve?: (event: BaseEvent) => void + } = {} + ): EphemeralFindEventHandler { + const record = FindWaiterJSONSchema.parse(data) + const event_pattern = record.event_pattern + const defaultMatches = (event: BaseEvent): boolean => event_pattern === '*' || event.event_type === event_pattern + return { + event_pattern, + matches: overrides.matches ?? defaultMatches, + resolve: overrides.resolve ?? 
(() => {}), + } + } + + static toJSONArray(waiters: Iterable): FindWaiterJSON[] { + return Array.from(waiters, (waiter) => FindWaiter.toJSON(waiter)) + } + + static fromJSONArray( + data: unknown, + overrides: { + matches?: (event: BaseEvent) => boolean + resolve?: (event: BaseEvent) => void + } = {} + ): EphemeralFindEventHandler[] { + if (!Array.isArray(data)) { + return [] + } + return data.map((item) => FindWaiter.fromJSON(item, overrides)) + } +} + +export const EventHandlerJSONSchema = z + .object({ + id: z.string(), + eventbus_name: z.string(), + eventbus_id: z.string().uuid(), + event_pattern: z.union([z.string(), z.literal('*')]), + handler_name: z.string(), + handler_file_path: z.string().nullable().optional(), + handler_timeout: z.number().nullable().optional(), + handler_slow_timeout: z.number().nullable().optional(), + handler_registered_at: z.string().datetime(), + }) + .strict() + +export type EventHandlerJSON = z.infer + +// an entry in the list of event handlers that are registered on a bus +export class EventHandler { + id: string // unique uuidv5 based on hash of bus name, handler name, handler file path:lineno, registered at timestamp, and event key + handler: EventHandlerCallable // original callable passed to on() + handler_name: string // name of the handler function, or 'anonymous' if the handler is an anonymous/arrow function + handler_file_path: string | null // ~/path/to/source/file.ts:123, or null when unknown + handler_timeout?: number | null // maximum time in seconds that the handler is allowed to run before it is aborted, resolved at runtime if not set + handler_slow_timeout?: number | null // warning threshold in seconds for slow handler execution + handler_registered_at: string // ISO datetime used in the deterministic handler-id seed + event_pattern: string | '*' // event_type string to match against, or '*' to match all events + eventbus_name: string // name of the event bus that the handler is registered on + eventbus_id: string 
// uuidv7 identifier of the event bus that the handler is registered on + + constructor(params: { + id?: string + handler: EventHandlerCallable + handler_name: string + handler_file_path?: string | null + handler_timeout?: number | null + handler_slow_timeout?: number | null + handler_registered_at: string + event_pattern: string | '*' + eventbus_name: string + eventbus_id: string + }) { + const handler_registered_at = monotonicDatetime(params.handler_registered_at) + this.id = + params.id ?? + EventHandler.computeHandlerId({ + eventbus_id: params.eventbus_id, + handler_name: params.handler_name, + handler_file_path: params.handler_file_path, + handler_registered_at, + event_pattern: params.event_pattern, + }) + this.handler = params.handler + this.handler_name = params.handler_name + this.handler_file_path = params.handler_file_path ?? null + this.handler_timeout = params.handler_timeout + this.handler_slow_timeout = params.handler_slow_timeout + this.handler_registered_at = handler_registered_at + this.event_pattern = params.event_pattern + this.eventbus_name = params.eventbus_name + this.eventbus_id = params.eventbus_id + } + + get _handler_async(): EventHandlerCallable { + const handler = this.handler + if (Object.prototype.toString.call(handler) === '[object AsyncFunction]') { + return handler + } + return async (event: BaseEvent) => await handler(event) + } + + // compute globally unique handler uuid as a hash of the bus name, handler name, handler file path, registered at timestamp, and event key + static computeHandlerId(params: { + eventbus_id: string + handler_name: string + handler_file_path?: string | null + handler_registered_at: string + event_pattern: string | '*' + }): string { + const file_path = params.handler_file_path ?? 
'unknown' + const seed = `${params.eventbus_id}|${params.handler_name}|${file_path}|${params.handler_registered_at}|${params.event_pattern}` + return uuidv5(seed, HANDLER_ID_NAMESPACE) + } + + static fromCallable(params: { + handler: EventHandlerCallable + event_pattern: EventPattern | '*' + eventbus_name: string + eventbus_id: string + handler_name?: string + handler_file_path?: string | null + handler_timeout?: number | null + handler_slow_timeout?: number | null + handler_registered_at?: string + }): EventHandler { + return new EventHandler({ + handler: params.handler as EventHandlerCallable, + handler_name: params.handler_name || params.handler.name || 'anonymous', + handler_file_path: params.handler_file_path ?? null, + handler_timeout: params.handler_timeout, + handler_slow_timeout: params.handler_slow_timeout, + handler_registered_at: monotonicDatetime(params.handler_registered_at), + event_pattern: normalizeEventPattern(params.event_pattern), + eventbus_name: params.eventbus_name, + eventbus_id: params.eventbus_id, + }) + } + + // "someHandlerName() @ ~/path/to/source/file.ts:123" <- best case when file path is available and its a named function + // "function#1234()" <- worst case when no file path is available and its an anonymous/arrow function defined inline + toString(): string { + const label = this.handler_name && this.handler_name !== 'anonymous' ? `${this.handler_name}()` : `function#${this.id.slice(-4)}()` + return this.handler_file_path ? 
`${label} @ ${this.handler_file_path}` : label + } + + // autodetect the path/to/source/file.ts:lineno where the handler is defined for better logs + // optional (controlled by EventBus.event_handler_detect_file_paths) because it can slow down performance to introspect stack traces and find file paths + _detectHandlerFilePath(): void { + const line = new Error().stack + ?.split('\n') + .map((l) => l.trim()) + .filter(Boolean)[4] + if (!line) return + const resolved_path = + line.trim().match(/\(([^)]+)\)$/)?.[1] ?? + line.trim().match(/^\s*at\s+(.+)$/)?.[1] ?? + line.trim().match(/^[^@]+@(.+)$/)?.[1] ?? + line.trim() + const match = resolved_path.match(/^(.*?):(\d+)(?::\d+)?$/) + let normalized = match ? match[1] : resolved_path + const line_number = match?.[2] + if (normalized.startsWith('file://')) { + let path = normalized.slice('file://'.length) + if (path.startsWith('localhost/')) path = path.slice('localhost'.length) + if (!path.startsWith('/')) path = `/${path}` + try { + normalized = decodeURIComponent(path) + } catch { + normalized = path + } + } + normalized = normalized.replace(/\/users\/[^/]+\//i, '~/').replace(/\/home\/[^/]+\//i, '~/') + this.handler_file_path = line_number ? `${normalized}:${line_number}` : normalized + } + + toJSON(): EventHandlerJSON { + return { + id: this.id, + eventbus_name: this.eventbus_name, + eventbus_id: this.eventbus_id, + event_pattern: this.event_pattern, + handler_name: this.handler_name, + handler_file_path: this.handler_file_path, + handler_timeout: this.handler_timeout, + handler_slow_timeout: this.handler_slow_timeout, + handler_registered_at: this.handler_registered_at, + } + } + + static fromJSON(data: unknown, handler?: EventHandlerCallable): EventHandler { + const record = EventHandlerJSONSchema.parse(data) + const handler_fn = handler ?? 
((() => undefined) as EventHandlerCallable) + const handler_name = record.handler_name || handler_fn.name || 'anonymous' // 'anonymous' is the default name for anonymous/arrow functions + return new EventHandler({ + id: record.id, + handler: handler_fn, + handler_name, + handler_file_path: record.handler_file_path ?? null, + handler_timeout: record.handler_timeout, + handler_slow_timeout: record.handler_slow_timeout, + handler_registered_at: record.handler_registered_at, + event_pattern: record.event_pattern, + eventbus_name: record.eventbus_name, + eventbus_id: record.eventbus_id, + }) + } + + static toJSONArray(handlers: Iterable): EventHandlerJSON[] { + return Array.from(handlers, (handler) => handler.toJSON()) + } + + static fromJSONArray(data: unknown, handler?: EventHandlerCallable): EventHandler[] { + if (!Array.isArray(data)) { + return [] + } + return data.map((item) => EventHandler.fromJSON(item, handler)) + } + + get eventbus_label(): string { + return `${this.eventbus_name}#${this.eventbus_id.slice(-4)}` + } +} + +// Generic base TimeoutError used for EventHandlerTimeoutError.cause default value if +export class TimeoutError extends Error { + constructor(message: string) { + super(message) + this.name = 'TimeoutError' + } +} + +// Base class for all errors that can occur while running an event handler +export class EventHandlerError extends Error { + event_result: EventResult + timeout_seconds: number | null + cause: Error + + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message) + this.name = 'EventHandlerError' + this.event_result = params.event_result + this.cause = params.cause + this.timeout_seconds = params.timeout_seconds ?? this.event_result.event.event_timeout ?? 
null + } + + get event(): BaseEvent { + return this.event_result.event + } + + get event_type(): string { + return this.event.event_type + } + + get handler_name(): string { + return this.event_result.handler_name + } + + get handler_id(): string { + return this.event_result.handler_id + } + + get event_timeout(): number | null { + return this.event.event_timeout + } +} + +// When the handler itself timed out while executing (due to handler.handler_timeout being exceeded) +export class EventHandlerTimeoutError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause?: Error }) { + super(message, { + event_result: params.event_result, + timeout_seconds: params.timeout_seconds, + cause: params.cause ?? new TimeoutError(message), + }) + this.name = 'EventHandlerTimeoutError' + } +} + +// When a pending handler was cancelled and never run due to an error (e.g. timeout) in a parent scope +export class EventHandlerCancelledError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message, params) + this.name = 'EventHandlerCancelledError' + } +} + +// When a handler that was already running was aborted due to an error in the parent scope, not due to an error in its own logic / exceeding its own timeout +export class EventHandlerAbortedError extends EventHandlerError { + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error }) { + super(message, params) + this.name = 'EventHandlerAbortedError' + } +} + +// When a handler run successfully but returned a value that failed event_result_type validation +export class EventHandlerResultSchemaError extends EventHandlerError { + raw_value: unknown + + constructor(message: string, params: { event_result: EventResult; timeout_seconds?: number | null; cause: Error; raw_value: unknown }) { + 
super(message, params) + this.name = 'EventHandlerResultSchemaError' + this.raw_value = params.raw_value + } + + get expected_schema(): any { + return this.event_result.event.event_result_type + } +} diff --git a/bubus-ts/src/event_history.ts b/bubus-ts/src/event_history.ts new file mode 100644 index 0000000..42ced8a --- /dev/null +++ b/bubus-ts/src/event_history.ts @@ -0,0 +1,276 @@ +import { BaseEvent } from './base_event.js' +import type { EventPattern, FindWindow } from './types.js' +import { normalizeEventPattern } from './types.js' +import { monotonicDatetime } from './helpers.js' + +export type EventHistoryFindOptions = { + past?: FindWindow + future?: FindWindow + child_of?: BaseEvent | null + event_is_child_of?: (event: BaseEvent, ancestor: BaseEvent) => boolean + wait_for_future_match?: ( + event_pattern: string | '*', + matches: (event: BaseEvent) => boolean, + future: FindWindow + ) => Promise +} & Record + +export type EventHistoryTrimOptions = { + is_event_complete?: (event: TEvent) => boolean + on_remove?: (event: TEvent) => void + owner_label?: string + max_history_size?: number | null + max_history_drop?: boolean +} + +export class EventHistory implements Iterable<[string, TEvent]> { + max_history_size: number | null + max_history_drop: boolean + + private _events: Map + private _warned_about_dropping_uncompleted_events: boolean + + constructor(options: { max_history_size?: number | null; max_history_drop?: boolean } = {}) { + this.max_history_size = options.max_history_size === undefined ? 100 : options.max_history_size + this.max_history_drop = options.max_history_drop ?? 
false + this._events = new Map() + this._warned_about_dropping_uncompleted_events = false + } + + get size(): number { + return this._events.size + } + + [Symbol.iterator](): Iterator<[string, TEvent]> { + return this._events[Symbol.iterator]() + } + + entries(): IterableIterator<[string, TEvent]> { + return this._events.entries() + } + + keys(): IterableIterator { + return this._events.keys() + } + + values(): IterableIterator { + return this._events.values() + } + + clear(): void { + this._events.clear() + } + + get(event_id: string): TEvent | undefined { + return this._events.get(event_id) + } + + set(event_id: string, event: TEvent): this { + this._events.set(event_id, event) + return this + } + + has(event_id: string): boolean { + return this._events.has(event_id) + } + + delete(event_id: string): boolean { + return this._events.delete(event_id) + } + + addEvent(event: TEvent): void { + this._events.set(event.event_id, event) + } + + getEvent(event_id: string): TEvent | undefined { + return this._events.get(event_id) + } + + removeEvent(event_id: string): boolean { + return this._events.delete(event_id) + } + + hasEvent(event_id: string): boolean { + return this._events.has(event_id) + } + + static normalizeEventPattern(event_pattern: EventPattern | '*'): string | '*' { + return normalizeEventPattern(event_pattern) + } + + find(event_pattern: '*', where?: (event: TEvent) => boolean, options?: EventHistoryFindOptions): Promise + find( + event_pattern: EventPattern, + where?: (event: TMatch) => boolean, + options?: EventHistoryFindOptions + ): Promise + async find( + event_pattern: EventPattern | '*', + where: (event: TEvent) => boolean = () => true, + options: EventHistoryFindOptions = {} + ): Promise { + const past = options.past ?? true + const future = options.future ?? false + const child_of = options.child_of ?? null + const eventIsChildOf = options.event_is_child_of ?? 
((event: BaseEvent, ancestor: BaseEvent) => this.eventIsChildOf(event, ancestor)) + const waitForFutureMatch = options.wait_for_future_match + if (past === false && future === false) { + return null + } + + const event_key = EventHistory.normalizeEventPattern(event_pattern) + const cutoff_at = past === true ? null : monotonicDatetime(new Date(Date.now() - Math.max(0, Number(past)) * 1000).toISOString()) + + const event_field_filters = Object.entries(options).filter( + ([key, value]) => + key !== 'past' && + key !== 'future' && + key !== 'child_of' && + key !== 'event_is_child_of' && + key !== 'wait_for_future_match' && + value !== undefined + ) + + const matches = (event: BaseEvent): boolean => + (event_key === '*' || event.event_type === event_key) && + (!child_of || eventIsChildOf(event, child_of)) && + event_field_filters.every(([field_name, expected]) => (event as unknown as Record)[field_name] === expected) && + where(event as TEvent) + + if (past !== false) { + const history_values = Array.from(this._events.values()) + for (let i = history_values.length - 1; i >= 0; i -= 1) { + const event = history_values[i] + if (cutoff_at !== null && event.event_created_at < cutoff_at) { + continue + } + if (matches(event)) { + return event + } + } + } + + if (future === false || !waitForFutureMatch) { + return null + } + + return (await waitForFutureMatch(event_key, matches, future)) as TEvent | null + } + + cleanupExcessEvents(options: EventHistoryTrimOptions = {}): number { + const max_history_size = options.max_history_size ?? 
this.max_history_size + if (max_history_size === null) { + return 0 + } + if (max_history_size === 0) { + return this.trimEventHistory(options) + } + const remove_count = this.size - max_history_size + if (remove_count <= 0) { + return 0 + } + + const on_remove = options.on_remove + let removed_count = 0 + + for (const event_id of Array.from(this._events.keys()).slice(0, remove_count)) { + const event = this._events.get(event_id) + if (!event) { + continue + } + this._events.delete(event_id) + on_remove?.(event) + removed_count += 1 + } + + return removed_count + } + + trimEventHistory(options: EventHistoryTrimOptions = {}): number { + const max_history_size = options.max_history_size ?? this.max_history_size + const max_history_drop = options.max_history_drop ?? this.max_history_drop + if (max_history_size === null) { + return 0 + } + + const is_event_complete = options.is_event_complete ?? ((event: TEvent) => event.event_status === 'completed') + const on_remove = options.on_remove + + if (max_history_size === 0) { + let removed_count = 0 + for (const [event_id, event] of Array.from(this._events.entries())) { + if (!is_event_complete(event)) { + continue + } + this._events.delete(event_id) + on_remove?.(event) + removed_count += 1 + } + return removed_count + } + + if (!max_history_drop || this.size <= max_history_size) { + return 0 + } + + let remaining_overage = this.size - max_history_size + let removed_count = 0 + const remove_event = (event_id: string, event: TEvent): void => { + this._events.delete(event_id) + on_remove?.(event) + removed_count += 1 + } + + for (const [event_id, event] of Array.from(this._events.entries())) { + if (remaining_overage <= 0) { + break + } + if (!is_event_complete(event)) { + continue + } + remove_event(event_id, event) + remaining_overage -= 1 + } + + let dropped_uncompleted = 0 + for (const [event_id, event] of Array.from(this._events.entries())) { + if (remaining_overage <= 0) { + break + } + if (!is_event_complete(event)) { 
+ dropped_uncompleted += 1 + } + remove_event(event_id, event) + remaining_overage -= 1 + } + + if (dropped_uncompleted > 0 && !this._warned_about_dropping_uncompleted_events) { + this._warned_about_dropping_uncompleted_events = true + const owner_label = options.owner_label ?? 'EventBus' + console.error( + `[bubus] ⚠️ Bus ${owner_label} has exceeded max_history_size=${max_history_size} and is dropping oldest history entries (even uncompleted events). Increase max_history_size or set max_history_drop=false to reject.` + ) + } + + return removed_count + } + + private eventIsChildOf(event: BaseEvent, ancestor: BaseEvent): boolean { + let current_parent_id = event.event_parent_id + const visited = new Set() + + while (current_parent_id && !visited.has(current_parent_id)) { + if (current_parent_id === ancestor.event_id) { + return true + } + visited.add(current_parent_id) + const parent = this._events.get(current_parent_id) + if (!parent) { + return false + } + current_parent_id = parent.event_parent_id + } + + return false + } +} diff --git a/bubus-ts/src/event_result.ts b/bubus-ts/src/event_result.ts new file mode 100644 index 0000000..1852e1e --- /dev/null +++ b/bubus-ts/src/event_result.ts @@ -0,0 +1,485 @@ +import { v7 as uuidv7 } from 'uuid' + +import { z } from 'zod' + +import { BaseEvent } from './base_event.js' +import type { EventBus } from './event_bus.js' +import { EventHandler, EventHandlerCancelledError, EventHandlerResultSchemaError, EventHandlerTimeoutError } from './event_handler.js' +import { withResolvers, type HandlerLock } from './lock_manager.js' +import type { Deferred } from './lock_manager.js' +import type { EventHandlerCallable, EventResultType } from './types.js' +import { isZodSchema } from './types.js' +import { _runWithAsyncContext } from './async_context.js' +import { RetryTimeoutError } from './retry.js' +import { _runWithAbortMonitor, _runWithSlowMonitor, _runWithTimeout } from './timing.js' +import { monotonicDatetime } from 
'./helpers.js' + +// More precise than event.event_status, includes separate 'error' state for handlers that throw errors during execution +export type EventResultStatus = 'pending' | 'started' | 'completed' | 'error' + +export const EventResultJSONSchema = z + .object({ + id: z.string(), + status: z.enum(['pending', 'started', 'completed', 'error']), + event_id: z.string(), + handler_id: z.string(), + handler_name: z.string(), + handler_file_path: z.string().nullable().optional(), + handler_timeout: z.number().nullable().optional(), + handler_slow_timeout: z.number().nullable().optional(), + handler_registered_at: z.string().datetime().optional(), + handler_event_pattern: z.union([z.string(), z.literal('*')]).optional(), + eventbus_name: z.string(), + eventbus_id: z.string().uuid(), + started_at: z.string().datetime().nullable().optional(), + completed_at: z.string().datetime().nullable().optional(), + result: z.unknown().optional(), + error: z.unknown().optional(), + event_children: z.array(z.string()), + }) + .strict() + +export type EventResultJSON = z.infer + +// Object that tracks the pending or completed execution of a single event handler +export class EventResult { + id: string // unique uuidv7 identifier for the event result + status: EventResultStatus // 'pending', 'started', 'completed', or 'error' + event: TEvent // the Event that the handler is processing + handler: EventHandler // the EventHandler object that going to process the event + started_at: string | null + completed_at: string | null + result?: EventResultType // parsed return value from the event handler + error?: unknown // error object thrown by the event handler, or null if the handler completed successfully + event_children: BaseEvent[] // list of emitted child events + + // Abort signal: created when handler starts, rejected by _signalAbort() to + // interrupt runHandler's await via Promise.race. 
+ _abort: Deferred | null + // Handler lock: tracks ownership of the handler concurrency lock + // during handler execution. Set by runHandler(), used by + // _processEventImmediately for yield-and-reacquire during queue-jumps. + _lock: HandlerLock | null + // Runloop pause releases keyed by bus for queue-jump; released when handler exits. + _queue_jump_pause_releases: Map void> | null + + constructor(params: { event: TEvent; handler: EventHandler }) { + this.id = uuidv7() + this.status = 'pending' + this.event = params.event + this.handler = params.handler + this.started_at = null + this.completed_at = null + this.result = undefined + this.error = undefined + this.event_children = [] + this._abort = null + this._lock = null + this._queue_jump_pause_releases = null + } + + toString(): string { + return `${this.result ?? 'null'} (${this.status})` + } + + get event_id(): string { + return this.event.event_id + } + + get bus(): EventBus { + return this.event.bus! + } + + get handler_id(): string { + return this.handler.id + } + + get handler_name(): string { + return this.handler.handler_name + } + + get handler_file_path(): string | null { + return this.handler.handler_file_path + } + + get eventbus_name(): string { + return this.handler.eventbus_name + } + + get eventbus_id(): string { + return this.handler.eventbus_id + } + + get eventbus_label(): string { + return `${this.handler.eventbus_name}#${this.handler.eventbus_id.slice(-4)}` + } + + private getHookBus(): EventBus | undefined { + const root_bus = this.event.bus + if (!root_bus) { + return undefined + } + return root_bus.all_instances.findBusById(this.eventbus_id) ?? root_bus + } + + private async _notifyStatusHook(status: 'started' | 'completed'): Promise { + const hook_bus = this.getHookBus() + if (!hook_bus) { + return + } + const event_for_hook = hook_bus._getEventProxyScopedToThisBus(this.event._event_original ?? 
this.event, this) + await hook_bus.onEventResultChange(event_for_hook, this, status) + } + + // shortcut for the result value so users can do event_result.value instead of event_result.result + get value(): EventResultType | undefined { + return this.result + } + + // Per-result schema reference derives from the parent event schema. + // It is intentionally not serialized with each EventResult to avoid duplication. + get result_type(): TEvent['event_result_type'] { + const original_event = this.event._event_original ?? this.event + return original_event.event_result_type as TEvent['event_result_type'] + } + + // Link a child event emitted by this handler run to the parent event/result. + _linkEmittedChildEvent(child_event: BaseEvent): void { + const original_child = child_event._event_original ?? child_event + const parent_event = this.event._event_original ?? this.event + if (original_child.event_id === parent_event.event_id) { + return + } + if (!original_child.event_parent_id) { + original_child.event_parent_id = parent_event.event_id + } + if (!original_child.event_emitted_by_handler_id) { + original_child.event_emitted_by_handler_id = this.handler_id + } + if (!this.event_children.some((child) => child.event_id === original_child.event_id)) { + this.event_children.push(original_child) + } + } + + // Get the raw return value from the handler, even if it threw an error / failed validation + get raw_value(): EventResultType | undefined { + if (this.error && (this.error as any).raw_value !== undefined) { + return (this.error as any).raw_value + } + return this.result + } + + // Resolve handler timeout in seconds using precedence: handler -> event -> bus defaults. + get handler_timeout(): number | null { + const original = this.event._event_original ?? this.event + const resolved_event_timeout = original.event_timeout ?? 
this.bus.event_timeout + + let resolved_handler_timeout: number | null + if (this.handler.handler_timeout !== undefined) { + resolved_handler_timeout = this.handler.handler_timeout + } else if (original.event_handler_timeout !== undefined) { + resolved_handler_timeout = original.event_handler_timeout + } else { + resolved_handler_timeout = this.bus.event_timeout + } + + if (resolved_handler_timeout === null && resolved_event_timeout === null) { + return null + } + if (resolved_handler_timeout === null) { + return resolved_event_timeout + } + if (resolved_event_timeout === null) { + return resolved_handler_timeout + } + return Math.min(resolved_handler_timeout, resolved_event_timeout) + } + + // Resolve slow handler warning threshold in seconds using precedence: handler -> event -> bus defaults. + get handler_slow_timeout(): number | null { + const original = this.event._event_original ?? this.event + + if (this.handler.handler_slow_timeout !== undefined) { + return this.handler.handler_slow_timeout + } + if (original.event_handler_slow_timeout !== undefined) { + return original.event_handler_slow_timeout + } + const event_slow_timeout = (original as { event_slow_timeout?: number | null }).event_slow_timeout + if (event_slow_timeout !== undefined) { + return event_slow_timeout + } + if (this.bus?.event_handler_slow_timeout !== undefined) { + return this.bus.event_handler_slow_timeout + } + return this.bus?.event_slow_timeout ?? null + } + + // Create a slow-handler warning timer that logs if the handler runs too long. + _createSlowHandlerWarningTimer(effective_timeout: number | null): ReturnType | null { + const handler_warn_timeout = this.handler_slow_timeout + const warn_ms = handler_warn_timeout === null ? null : handler_warn_timeout * 1000 + const should_warn = warn_ms !== null && (effective_timeout === null || effective_timeout * 1000 > warn_ms) + if (!should_warn || warn_ms === null) { + return null + } + const event = this.event._event_original ?? 
this.event + const bus_name = this.handler.eventbus_name + const started_at_ms = performance.now() + return setTimeout(() => { + if (this.status !== 'started') { + return + } + const elapsed_ms = performance.now() - started_at_ms + const elapsed_seconds = (elapsed_ms / 1000).toFixed(1) + console.warn( + `[bubus] Slow event handler: ${bus_name}.on(${event.toString()}, ${this.handler.toString()}) still running after ${elapsed_seconds}s` + ) + }, warn_ms) + } + + _ensureQueueJumpPause(bus: EventBus): void { + if (!this._queue_jump_pause_releases) { + this._queue_jump_pause_releases = new Map() + } + if (this._queue_jump_pause_releases.has(bus)) { + return + } + this._queue_jump_pause_releases.set(bus, bus.locks._requestRunloopPause()) + } + + _releaseQueueJumpPauses(): void { + if (!this._queue_jump_pause_releases) { + return + } + for (const release of this._queue_jump_pause_releases.values()) { + release() + } + this._queue_jump_pause_releases.clear() + } + + update(params: { status?: EventResultStatus; result?: EventResultType | BaseEvent | undefined; error?: unknown }): this { + const has_status = params.status !== undefined + const has_result = params.result !== undefined || params.status === 'completed' + const has_error = params.error !== undefined + + if (has_status && params.status !== undefined) { + this.status = params.status + } + + if (has_result) { + const raw_result = params.result + this.status = 'completed' + if ( + this.event.event_result_type && + raw_result !== undefined && + !(raw_result instanceof BaseEvent) && + isZodSchema(this.event.event_result_type) + ) { + const parsed = this.event.event_result_type.safeParse(raw_result) + if (parsed.success) { + this.result = parsed.data as EventResultType + this.error = undefined + } else { + const error = new EventHandlerResultSchemaError( + `Event handler return value ${JSON.stringify(raw_result).slice(0, 20)}... 
did not match event_result_type: ${parsed.error.message}`, + { event_result: this, cause: parsed.error, raw_value: raw_result } + ) + this.error = error + this.result = undefined + this.status = 'error' + } + } else { + this.result = raw_result as EventResultType | undefined + this.error = undefined + } + } + + if (has_error) { + this.error = params.error + this.status = 'error' + } + + if (this.status !== 'pending' && this.started_at === null) { + this.started_at = monotonicDatetime() + } + if ((this.status === 'completed' || this.status === 'error') && this.completed_at === null) { + this.completed_at = monotonicDatetime() + } + + return this + } + + private _createHandlerTimeoutError(event: BaseEvent): EventHandlerTimeoutError { + return new EventHandlerTimeoutError( + `${this.bus.toString()}.on(${event.toString()}, ${this.handler.toString()}) timed out after ${this.handler_timeout}s`, + { + event_result: this, + timeout_seconds: this.handler_timeout, + } + ) + } + + private _handleHandlerError(event: BaseEvent, error: unknown): void { + const normalized_error = + error instanceof RetryTimeoutError + ? new EventHandlerTimeoutError(error.message, { event_result: this, timeout_seconds: error.timeout_seconds, cause: error }) + : error + if (normalized_error instanceof EventHandlerTimeoutError) { + this._markError(normalized_error, false) + event._cancelPendingChildProcessing(normalized_error) + } else { + this._markError(normalized_error, false) + } + } + + private _onHandlerExit(slow_handler_warning_timer: ReturnType | null): void { + this._abort = null + this._lock = null + this._releaseQueueJumpPauses() + if (slow_handler_warning_timer) { + clearTimeout(slow_handler_warning_timer) + } + } + + // Run one handler invocation with timeout/slow-monitor/error handling. + // Handler lock acquisition is owned by BaseEvent._runHandlers(...). 
+ async runHandler(handler_lock: HandlerLock | null): Promise { + if (this.status === 'error' && this.error instanceof EventHandlerCancelledError) { + return + } + + const event = this.event._event_original ?? this.event + const handler_event = this.bus._getEventProxyScopedToThisBus(event, this) + if (this._lock) { + this._lock.exitHandlerRun() + } + + let slow_handler_warning_timer: ReturnType | null = null + // if the result is already in an error or completed state, exit early + if (this.status === 'error' || this.status === 'completed') { + return + } + + this._lock = handler_lock + await this.bus.locks._runWithHandlerDispatchContext(this, async () => { + await _runWithAsyncContext(event._getDispatchContext() ?? null, async () => { + try { + const should_notify_started = this.status === 'pending' + const abort_signal = this._markStarted(false) + if (should_notify_started) { + await this._notifyStatusHook('started') + } + slow_handler_warning_timer = this._createSlowHandlerWarningTimer(this.handler_timeout) + const handler_result = await _runWithTimeout( + this.handler_timeout, + () => this._createHandlerTimeoutError(event), + () => + _runWithSlowMonitor(slow_handler_warning_timer, () => + _runWithAbortMonitor(() => this.handler._handler_async(handler_event), abort_signal) + ) + ) + this._markCompleted(handler_result as EventResultType | BaseEvent | undefined, false) + } catch (error) { + this._handleHandlerError(event, error) + } finally { + if (this.status === 'completed' || this.status === 'error') { + await this._notifyStatusHook('completed') + } + this._onHandlerExit(slow_handler_warning_timer) + } + }) + }) + } + + // Reject the abort promise, causing runHandler's Promise.race to + // throw immediately β€” even if the handler has no timeout. + _signalAbort(error: Error): void { + if (this._abort) { + this._abort.reject(error) + this._abort = null + } + } + + // Mark started and return the abort promise for Promise.race. 
+ _markStarted(notify_hook: boolean = true): Promise { + if (!this._abort) { + this._abort = withResolvers() + } + if (this.status === 'pending') { + this.update({ status: 'started' }) + if (notify_hook) { + void this._notifyStatusHook('started') + } + } + return this._abort.promise + } + + _markCompleted(result: EventResultType | BaseEvent | undefined, notify_hook: boolean = true): void { + if (this.status === 'completed' || this.status === 'error') return + this.update({ status: 'completed', result }) + if (notify_hook) { + void this._notifyStatusHook('completed') + } + } + + _markError(error: unknown, notify_hook: boolean = true): void { + if (this.status === 'completed' || this.status === 'error') return + this.update({ status: 'error', error }) + if (notify_hook) { + void this._notifyStatusHook('completed') + } + } + + toJSON(): EventResultJSON { + return { + id: this.id, + status: this.status, + event_id: this.event.event_id, + handler_id: this.handler_id, + handler_name: this.handler_name, + handler_file_path: this.handler_file_path, + handler_timeout: this.handler.handler_timeout, + handler_slow_timeout: this.handler.handler_slow_timeout, + handler_registered_at: this.handler.handler_registered_at, + handler_event_pattern: this.handler.event_pattern, + eventbus_name: this.eventbus_name, + eventbus_id: this.eventbus_id, + started_at: this.started_at, + completed_at: this.completed_at, + result: this.result, + error: this.error, + event_children: this.event_children.map((child) => child.event_id), + } + } + + static fromJSON(event: TEvent, data: unknown): EventResult { + const record = EventResultJSONSchema.parse(data) + const handler_record = { + id: record.handler_id, + eventbus_name: record.eventbus_name, + eventbus_id: record.eventbus_id, + event_pattern: record.handler_event_pattern ?? event.event_type, + handler_name: record.handler_name, + handler_file_path: record.handler_file_path ?? 
null, + handler_timeout: record.handler_timeout, + handler_slow_timeout: record.handler_slow_timeout, + handler_registered_at: record.handler_registered_at ?? event.event_created_at, + } as const + const handler_stub = EventHandler.fromJSON(handler_record, (() => undefined) as EventHandlerCallable) + + const result = new EventResult({ event, handler: handler_stub }) + result.id = record.id + result.status = record.status + result.started_at = record.started_at === null || record.started_at === undefined ? null : monotonicDatetime(record.started_at) + result.completed_at = record.completed_at === null || record.completed_at === undefined ? null : monotonicDatetime(record.completed_at) + if ('result' in record) { + result.result = record.result as EventResultType + } + if ('error' in record) { + result.error = record.error + } + result.event_children = [] + return result + } +} diff --git a/bubus-ts/src/events_suck.ts b/bubus-ts/src/events_suck.ts new file mode 100644 index 0000000..f562f10 --- /dev/null +++ b/bubus-ts/src/events_suck.ts @@ -0,0 +1,96 @@ +import { EventBus } from './event_bus.js' +import { BaseEvent } from './base_event.js' + +import type { EventClass, EventResultType } from './types.js' + +type EventMap = Record> +type AnyFn = (...args: any[]) => any +type FunctionMap = Record +type ExtraDict = Record + +type EventFieldsFromFn = + Parameters extends [infer TArg] ? (TArg extends Record ? TArg : ExtraDict) : ExtraDict + +type GeneratedEvent = { + ( + data: EventFieldsFromFn & ExtraDict + ): BaseEvent & EventFieldsFromFn & { __event_result_type__?: Awaited> } + new ( + data: EventFieldsFromFn & ExtraDict + ): BaseEvent & EventFieldsFromFn & { __event_result_type__?: Awaited> } + event_type?: string +} + +export type GeneratedEvents = { + by_name: { [K in keyof TEvents]: GeneratedEvent } +} & { + [K in keyof TEvents]: GeneratedEvent +} + +type EventInit> = + ConstructorParameters extends [infer TInit, ...unknown[]] ? 
TInit : never + +type EventMethodArgs> = + {} extends EventInit + ? [init?: EventInit, extra?: Record] + : [init: EventInit, extra?: Record] + +type EventMethodResult> = EventResultType> | undefined + +export type EventsSuckClient = { + bus: EventBus +} & { + [K in keyof TEvents]: (...args: EventMethodArgs) => Promise> +} + +export type EventsSuckClientClass = new (bus?: EventBus) => EventsSuckClient + +type DynamicWrappedClient = { + bus: EventBus +} & Record Promise> + +export const make_events = (events: TEvents): GeneratedEvents => { + const by_name = {} as { [K in keyof TEvents]: GeneratedEvent } + for (const [event_name] of Object.entries(events) as Array<[keyof TEvents, TEvents[keyof TEvents]]>) { + if (!/^[A-Za-z_$][\w$]*$/.test(String(event_name))) { + throw new Error(`Invalid event name: ${String(event_name)}`) + } + by_name[event_name] = BaseEvent.extend(String(event_name), {}) as unknown as GeneratedEvent + } + return Object.assign({ by_name }, by_name) as GeneratedEvents +} + +export const wrap = (class_name: string, methods: TEvents): EventsSuckClientClass => { + class WrappedClient { + bus: EventBus + + constructor(bus?: EventBus) { + this.bus = bus ?? new EventBus(`${class_name}Bus`) + } + } + + Object.defineProperty(WrappedClient, 'name', { value: class_name }) + + for (const [method_name, EventCtor] of Object.entries(methods)) { + Object.defineProperty(WrappedClient.prototype, method_name, { + value: async function (this: DynamicWrappedClient, init?: Record, extra?: Record) { + const payload = { ...(init ?? {}), ...(extra ?? {}) } + return await this.bus.emit(new EventCtor(payload)).first() + }, + writable: true, + configurable: true, + }) + } + + return WrappedClient as unknown as EventsSuckClientClass +} + +// Intentionally no make_event()/make_handler() helpers in TypeScript. 
+// Prefer the explicit inline pattern: +// const FooCreateEvent = BaseEvent.extend('FooCreateEvent', { +// id: z.string().nullable().optional(), +// name: z.string(), +// age: z.number(), +// }) +// bus.on(FooCreateEvent, ({ id, name, age, ...extra }) => impl.create(id, { name, age })) +export const events_suck = { make_events, wrap } as const diff --git a/bubus-ts/src/helpers.ts b/bubus-ts/src/helpers.ts new file mode 100644 index 0000000..2a7f6a4 --- /dev/null +++ b/bubus-ts/src/helpers.ts @@ -0,0 +1,65 @@ +const MONOTONIC_DATETIME_REGEX = /^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(?:\.(\d{1,9}))?(Z|[+-]\d{2}:\d{2})$/ +const MONOTONIC_DATETIME_LENGTH = 30 // YYYY-MM-DDTHH:MM:SS.fffffffffZ +const NS_PER_MS = 1_000_000n +const NS_PER_SECOND = 1_000_000_000n + +const has_performance_now = typeof performance !== 'undefined' && typeof performance.now === 'function' +const monotonic_clock_anchor_ms = has_performance_now ? performance.now() : 0 +const monotonic_epoch_anchor_ns = BigInt(Date.now()) * NS_PER_MS +let last_monotonic_datetime_ns = monotonic_epoch_anchor_ns + +function assertYearRange(date: Date, context: string): void { + const year = date.getUTCFullYear() + if (year <= 1990 || year >= 2500) { + throw new Error(`${context} year must be >1990 and <2500, got ${year}`) + } +} + +function formatEpochNs(epoch_ns: bigint): string { + const epoch_ms = Number(epoch_ns / NS_PER_MS) + const date = new Date(epoch_ms) + if (Number.isNaN(date.getTime())) { + throw new Error(`Failed to format datetime from epoch ns: ${epoch_ns.toString()}`) + } + assertYearRange(date, 'monotonicDatetime()') + const base = date.toISOString().slice(0, 19) + const fraction = (epoch_ns % NS_PER_SECOND).toString().padStart(9, '0') + const normalized = `${base}.${fraction}Z` + if (normalized.length !== MONOTONIC_DATETIME_LENGTH) { + throw new Error(`Expected canonical datetime length ${MONOTONIC_DATETIME_LENGTH}, got ${normalized.length}: ${normalized}`) + } + return normalized +} + 
+export function monotonicDatetime(isostring?: string): string { + if (isostring !== undefined) { + if (typeof isostring !== 'string') { + throw new Error(`monotonicDatetime(isostring?) requires string | undefined, got ${typeof isostring}`) + } + const match = MONOTONIC_DATETIME_REGEX.exec(isostring) + if (!match) { + throw new Error(`Invalid ISO datetime: ${isostring}`) + } + const parsed = new Date(isostring) + if (Number.isNaN(parsed.getTime())) { + throw new Error(`Invalid ISO datetime: ${isostring}`) + } + assertYearRange(parsed, 'monotonicDatetime(isostring)') + const base = parsed.toISOString().slice(0, 19) + const fraction = (match[7] ?? '').padEnd(9, '0') + const normalized = `${base}.${fraction}Z` + if (normalized.length !== MONOTONIC_DATETIME_LENGTH) { + throw new Error(`Expected canonical datetime length ${MONOTONIC_DATETIME_LENGTH}, got ${normalized.length}: ${normalized}`) + } + return normalized + } + + const elapsed_ms = has_performance_now ? performance.now() - monotonic_clock_anchor_ms : 0 + const elapsed_ns = BigInt(Math.max(0, Math.floor(elapsed_ms * 1_000_000))) + let epoch_ns = monotonic_epoch_anchor_ns + elapsed_ns + if (epoch_ns <= last_monotonic_datetime_ns) { + epoch_ns = last_monotonic_datetime_ns + 1n + } + last_monotonic_datetime_ns = epoch_ns + return formatEpochNs(epoch_ns) +} diff --git a/bubus-ts/src/index.ts b/bubus-ts/src/index.ts new file mode 100644 index 0000000..8f9ab2b --- /dev/null +++ b/bubus-ts/src/index.ts @@ -0,0 +1,35 @@ +export { BaseEvent, BaseEventSchema } from './base_event.js' +export { EventHistory } from './event_history.js' +export type { EventHistoryFindOptions, EventHistoryTrimOptions } from './event_history.js' +export { EventResult } from './event_result.js' +export { EventBus } from './event_bus.js' +export type { EventBusJSON, EventBusOptions } from './event_bus.js' +export { monotonicDatetime } from './helpers.js' +export type { EventBusMiddleware, EventBusMiddlewareCtor, EventBusMiddlewareInput } from 
'./middlewares.js' +export { + EventHandlerTimeoutError, + EventHandlerCancelledError, + EventHandlerAbortedError, + EventHandlerResultSchemaError, +} from './event_handler.js' +export type { + EventConcurrencyMode, + EventHandlerConcurrencyMode, + EventHandlerCompletionMode, + EventBusInterfaceForLockManager, +} from './lock_manager.js' +export type { EventClass, EventHandlerCallable as EventHandler, EventPattern, EventStatus, FindOptions, FindWindow } from './types.js' +export { retry, clearSemaphoreRegistry, RetryTimeoutError, SemaphoreTimeoutError } from './retry.js' +export type { RetryOptions } from './retry.js' +export { + HTTPEventBridge, + SocketEventBridge, + NATSEventBridge, + RedisEventBridge, + PostgresEventBridge, + JSONLEventBridge, + SQLiteEventBridge, +} from './bridges.js' +export type { HTTPEventBridgeOptions } from './bridges.js' +export { events_suck } from './events_suck.js' +export type { EventsSuckClient, EventsSuckClientClass, GeneratedEvents } from './events_suck.js' diff --git a/bubus-ts/src/lock_manager.ts b/bubus-ts/src/lock_manager.ts new file mode 100644 index 0000000..b8bd72f --- /dev/null +++ b/bubus-ts/src/lock_manager.ts @@ -0,0 +1,397 @@ +import type { BaseEvent } from './base_event.js' +import type { EventResult } from './event_result.js' + +// ─── Deferred / withResolvers ──────────────────────────────────────────────── + +export type Deferred = { + promise: Promise + resolve: (value: T | PromiseLike) => void + reject: (reason?: unknown) => void +} + +export const withResolvers = (): Deferred => { + if (typeof Promise.withResolvers === 'function') { + return Promise.withResolvers() + } + let resolve!: (value: T | PromiseLike) => void + let reject!: (reason?: unknown) => void + const promise = new Promise((resolve_fn, reject_fn) => { + resolve = resolve_fn + reject = reject_fn + }) + return { promise, resolve, reject } +} + +// ─── Concurrency modes ────────────────────────────────────────────────────── + +export const 
EVENT_CONCURRENCY_MODES = ['global-serial', 'bus-serial', 'parallel'] as const +export type EventConcurrencyMode = (typeof EVENT_CONCURRENCY_MODES)[number] + +export const EVENT_HANDLER_CONCURRENCY_MODES = ['serial', 'parallel'] as const +export type EventHandlerConcurrencyMode = (typeof EVENT_HANDLER_CONCURRENCY_MODES)[number] + +export const EVENT_HANDLER_COMPLETION_MODES = ['all', 'first'] as const +export type EventHandlerCompletionMode = (typeof EVENT_HANDLER_COMPLETION_MODES)[number] + +// ─── AsyncLock ─────────────────────────────────────────────────────────────── + +export class AsyncLock { + size: number + in_use: number + waiters: Array<() => void> + + constructor(size: number) { + this.size = size + this.in_use = 0 + this.waiters = [] + } + + async acquire(): Promise { + if (this.size === Infinity) { + return + } + if (this.in_use < this.size) { + this.in_use += 1 + return + } + await new Promise((resolve) => { + this.waiters.push(resolve) + }) + } + + release(): void { + if (this.size === Infinity) { + return + } + const next = this.waiters.shift() + if (next) { + // Handoff: keep permit accounted for and transfer directly to next waiter. + next() + return + } + this.in_use = Math.max(0, this.in_use - 1) + } +} + +export const runWithLock = async (lock: AsyncLock | null, fn: () => Promise): Promise => { + if (!lock) { + return await fn() + } + await lock.acquire() + try { + return await fn() + } finally { + lock.release() + } +} + +// ─── HandlerLock ───────────────────────────────────────────────────────────── + +export type HandlerExecutionState = 'held' | 'yielded' | 'closed' + +// Tracks a single handler execution's ownership of a handler lock. +// Reacquire is race-safe: if the handler exits while waiting to reclaim, +// the reclaimed lock is immediately released to avoid leaks. 
+export class HandlerLock { + private lock: AsyncLock | null + private state: HandlerExecutionState + + constructor(lock: AsyncLock | null) { + this.lock = lock + this.state = 'held' + } + + // used by EventBus._processEventImmediately to yield the parent handler's lock to the child event so it can be processed immediately + yieldHandlerLockForChildRun(): boolean { + if (!this.lock || this.state !== 'held') { + return false + } + this.state = 'yielded' + this.lock.release() + return true + } + + // used by EventBus._processEventImmediately to reacquire the handler lock after the child event has been processed + async reclaimHandlerLockIfRunning(): Promise { + if (!this.lock || this.state !== 'yielded') { + return false + } + await this.lock.acquire() + if (this.state !== 'yielded') { + // Handler exited while this reacquire was pending. + this.lock.release() + return false + } + this.state = 'held' + return true + } + + // used by EventResult.runHandler to exit the handler lock after the handler has finished executing + exitHandlerRun(): void { + if (this.state === 'closed') { + return + } + const should_release = !!this.lock && this.state === 'held' + this.state = 'closed' + if (should_release) { + this.lock!.release() + } + } + + // used by EventBus._processEventImmediately to yield the handler lock and reacquire it after the child event has been processed + async runQueueJump(fn: () => Promise): Promise { + const yielded = this.yieldHandlerLockForChildRun() + try { + return await fn() + } finally { + if (yielded) { + await this.reclaimHandlerLockIfRunning() + } + } + } +} + +// ─── LockManager ───────────────────────────────────────────────────────────── + +// Interface that must be implemented by the EventBus class to be used by the LockManager +export type EventBusInterfaceForLockManager = { + isIdleAndQueueEmpty: () => boolean + event_concurrency: EventConcurrencyMode + _lock_for_event_global_serial: AsyncLock +} + +export type LockManagerOptions = { + 
auto_schedule_idle_checks?: boolean +} + +// The LockManager is responsible for managing the concurrency of events and handlers +export class LockManager { + private bus: EventBusInterfaceForLockManager // Live bus reference; used to read defaults and idle state. + private auto_schedule_idle_checks: boolean + + readonly bus_event_lock: AsyncLock // Per-bus event lock; created with LockManager and never swapped. + private pause_depth: number // Re-entrant pause counter; increments on _requestRunloopPause, decrements on release. + private pause_waiters: Array<() => void> // Resolvers for _waitUntilRunloopResumed; drained when pause_depth hits 0. + private active_handler_results: EventResult[] // Stack of active handler results for "inside handler" detection. + + private idle_waiters: Array<(became_idle: boolean) => void> // Resolvers waiting for stable idle; cleared when idle confirmed. + private idle_check_pending: boolean // Debounce flag to avoid scheduling redundant idle checks. + private idle_check_streak: number // Counts consecutive idle checks; used to require two ticks of idle. + + constructor(bus: EventBusInterfaceForLockManager, options: LockManagerOptions = {}) { + this.bus = bus + this.auto_schedule_idle_checks = options.auto_schedule_idle_checks ?? true + this.bus_event_lock = new AsyncLock(1) // used for the bus-serial concurrency mode + + this.pause_depth = 0 + this.pause_waiters = [] + this.active_handler_results = [] + + this.idle_waiters = [] + this.idle_check_pending = false + this.idle_check_streak = 0 + } + + // Low-level runloop pause: increments a re-entrant counter and returns a release + // function. Used for broad, bus-scoped pauses during queue-jump across buses. 
+ _requestRunloopPause(): () => void { + this.pause_depth += 1 + let released = false + return () => { + if (released) { + return + } + released = true + this.pause_depth = Math.max(0, this.pause_depth - 1) + if (this.pause_depth !== 0) { + return + } + const waiters = this.pause_waiters + this.pause_waiters = [] + for (const resolve of waiters) { + resolve() + } + } + } + + _waitUntilRunloopResumed(): Promise { + if (this.pause_depth === 0) { + return Promise.resolve() + } + return new Promise((resolve) => { + this.pause_waiters.push(resolve) + }) + } + + _isPaused(): boolean { + return this.pause_depth > 0 + } + + async _runWithHandlerDispatchContext(result: EventResult, fn: () => Promise): Promise { + this.active_handler_results.push(result) + try { + return await fn() + } finally { + const idx = this.active_handler_results.indexOf(result) + if (idx >= 0) { + this.active_handler_results.splice(idx, 1) + } + } + } + + _getActiveHandlerResult(): EventResult | undefined { + return this.active_handler_results[this.active_handler_results.length - 1] + } + + _getActiveHandlerResults(): EventResult[] { + return [...this.active_handler_results] + } + + // Per-bus check: true only if this specific bus has a handler on its stack. + // For cross-bus queue-jumping, EventBus._processEventImmediately uses getParentEventResultAcrossAllBuses() + // to walk up the parent event tree, and the bus proxy passes handler_result + // to _processEventImmediately so it can yield/reacquire the correct lock. 
+ _isAnyHandlerActive(): boolean { + return this.active_handler_results.length > 0 + } + + waitForIdle(timeout_seconds: number | null = null): Promise { + return new Promise((resolve) => { + let done = false + let timeout_id: ReturnType | null = null + + const finish = (became_idle: boolean): void => { + if (done) { + return + } + done = true + if (timeout_id !== null) { + clearTimeout(timeout_id) + timeout_id = null + } + resolve(became_idle) + } + + this.idle_waiters.push(finish) + this.scheduleIdleCheck() + + if (timeout_seconds === null || timeout_seconds === undefined) { + return + } + + const timeout_ms = Math.max(0, Number(timeout_seconds)) * 1000 + if (!Number.isFinite(timeout_ms)) { + return + } + + timeout_id = setTimeout(() => { + const index = this.idle_waiters.indexOf(finish) + if (index >= 0) { + this.idle_waiters.splice(index, 1) + } + finish(false) + }, timeout_ms) + }) + } + + // Called by EventBus.markEventCompleted and EventBus.markHandlerCompleted to notify + // waitUntilIdle() callers that the bus may now be idle. + _notifyIdleListeners(): void { + // Fast-path: most completions have no waitUntilIdle() callers waiting, + // so skip expensive idle snapshot scans in that common case. + if (this.idle_waiters.length === 0) { + this.idle_check_streak = 0 + return + } + + if (!this.bus.isIdleAndQueueEmpty()) { + this.idle_check_streak = 0 + if (this.idle_waiters.length > 0) { + this.scheduleIdleCheck() + } + return + } + + this.idle_check_streak += 1 + if (this.idle_check_streak < 2) { + if (this.idle_waiters.length > 0) { + this.scheduleIdleCheck() + } + return + } + + this.idle_check_streak = 0 + const waiters = this.idle_waiters + this.idle_waiters = [] + for (const resolve of waiters) { + resolve(true) + } + } + + // get the bus-level lock that prevents/allows multiple events to be processed concurrently on the same bus + getLockForEvent(event: BaseEvent): AsyncLock | null { + const resolved = event.event_concurrency ?? 
this.bus.event_concurrency + if (resolved === 'parallel') { + return null + } + if (resolved === 'global-serial') { + return this.bus._lock_for_event_global_serial + } + return this.bus_event_lock + } + + async _runWithEventLock( + event: BaseEvent, + fn: () => Promise, + options: { bypass_event_locks?: boolean; pre_acquired_lock?: AsyncLock | null } = {} + ): Promise { + const pre_acquired = options.pre_acquired_lock ?? null + if (options.bypass_event_locks || pre_acquired) { + return await fn() + } + return await runWithLock(this.getLockForEvent(event), fn) + } + + async _runWithHandlerLock( + event: BaseEvent, + default_handler_concurrency: EventHandlerConcurrencyMode | undefined, + fn: (lock: HandlerLock | null) => Promise + ): Promise { + const lock = event._getHandlerLock(default_handler_concurrency) + if (lock) { + await lock.acquire() + } + const handler_lock = lock ? new HandlerLock(lock) : null + try { + return await fn(handler_lock) + } finally { + handler_lock?.exitHandlerRun() + } + } + + // Schedules a debounced idle check to run after a short delay. Used to gate + // waitUntilIdle() calls during handler execution and after event completion. 
+ private scheduleIdleCheck(): void { + if (!this.auto_schedule_idle_checks) { + return + } + if (this.idle_check_pending) { + return + } + this.idle_check_pending = true + setTimeout(() => { + this.idle_check_pending = false + this._notifyIdleListeners() + }, 0) + } + + // Reset all state to initial values + clear(): void { + this.pause_depth = 0 + this.pause_waiters = [] + this.active_handler_results = [] + this.idle_waiters = [] + this.idle_check_pending = false + this.idle_check_streak = 0 + } +} diff --git a/bubus-ts/src/logging.ts b/bubus-ts/src/logging.ts new file mode 100644 index 0000000..0112388 --- /dev/null +++ b/bubus-ts/src/logging.ts @@ -0,0 +1,247 @@ +import { BaseEvent } from './base_event.js' +import { EventResult } from './event_result.js' +import { EventHandlerCancelledError, EventHandlerTimeoutError } from './event_handler.js' + +type LogTreeBus = { + name: string + event_history: { + values(): IterableIterator + has(event_id: string): boolean + } + toString?: () => string +} + +export const logTree = (bus: LogTreeBus): string => { + const parent_to_children = new Map() + + const addChild = (parent_id: string, child: BaseEvent): void => { + const existing = parent_to_children.get(parent_id) ?? 
[] + existing.push(child) + parent_to_children.set(parent_id, existing) + } + + const root_events: BaseEvent[] = [] + const seen = new Set() + + for (const event of bus.event_history.values()) { + const parent_id = event.event_parent_id + if (!parent_id || parent_id === event.event_id || !bus.event_history.has(parent_id)) { + if (!seen.has(event.event_id)) { + root_events.push(event) + seen.add(event.event_id) + } + } + } + + if (root_events.length === 0) { + return '(No events in history)' + } + + const nodes_by_id = new Map() + for (const root of root_events) { + nodes_by_id.set(root.event_id, root) + for (const descendant of root.event_descendants) { + nodes_by_id.set(descendant.event_id, descendant) + } + } + + for (const node of nodes_by_id.values()) { + const parent_id = node.event_parent_id + if (!parent_id || parent_id === node.event_id) { + continue + } + if (!nodes_by_id.has(parent_id)) { + continue + } + addChild(parent_id, node) + } + + for (const children of parent_to_children.values()) { + children.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) + } + + const lines: string[] = [] + const bus_label = typeof bus.toString === 'function' ? bus.toString() : bus.name + lines.push(`πŸ“Š Event History Tree for ${bus_label}`) + lines.push('='.repeat(80)) + + root_events.sort((a, b) => (a.event_created_at < b.event_created_at ? -1 : a.event_created_at > b.event_created_at ? 1 : 0)) + const visited = new Set() + root_events.forEach((event, index) => { + lines.push(buildTreeLine(event, '', index === root_events.length - 1, parent_to_children, visited)) + }) + + lines.push('='.repeat(80)) + + return lines.join('\n') +} + +export const buildTreeLine = ( + event: BaseEvent, + indent: string, + is_last: boolean, + parent_to_children: Map, + visited: Set +): string => { + const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' + const status_icon = event.event_status === 'completed' ? 
'βœ…' : event.event_status === 'started' ? 'πŸƒ' : '⏳' + + const created_at = formatTimestamp(event.event_created_at) + let timing = `[${created_at}` + if (event.event_completed_at) { + const created_ms = Date.parse(event.event_created_at) + const completed_ms = Date.parse(event.event_completed_at) + if (!Number.isNaN(created_ms) && !Number.isNaN(completed_ms)) { + const duration = (completed_ms - created_ms) / 1000 + timing += ` (${duration.toFixed(3)}s)` + } + } + timing += ']' + + const line = `${indent}${connector}${status_icon} ${event.event_type}#${event.event_id.slice(-4)} ${timing}` + + if (visited.has(event.event_id)) { + return line + } + visited.add(event.event_id) + + const extension = is_last ? ' ' : 'β”‚ ' + const new_indent = indent + extension + + const result_items: Array<{ type: 'result'; result: EventResult } | { type: 'child'; child: BaseEvent }> = [] + for (const result of event.event_results.values()) { + result_items.push({ type: 'result', result }) + } + const children = parent_to_children.get(event.event_id) ?? [] + const printed_child_ids = new Set(event.event_results.size > 0 ? 
event.event_results.keys() : []) + for (const child of children) { + if (!printed_child_ids.has(child.event_id) && !child.event_emitted_by_handler_id) { + result_items.push({ type: 'child', child }) + printed_child_ids.add(child.event_id) + } + } + + if (result_items.length === 0) { + return line + } + + const child_lines: string[] = [] + result_items.forEach((item, index) => { + const is_last_item = index === result_items.length - 1 + if (item.type === 'result') { + child_lines.push(buildResultLine(item.result, new_indent, is_last_item, parent_to_children, visited)) + } else { + child_lines.push(buildTreeLine(item.child, new_indent, is_last_item, parent_to_children, visited)) + } + }) + + return [line, ...child_lines].join('\n') +} + +export const buildResultLine = ( + result: EventResult, + indent: string, + is_last: boolean, + parent_to_children: Map, + visited: Set +): string => { + const connector = is_last ? '└── ' : 'β”œβ”€β”€ ' + const status_icon = result.status === 'completed' ? 'βœ…' : result.status === 'error' ? '❌' : result.status === 'started' ? 'πŸƒ' : '⏳' + + const handler_label = + result.handler_name && result.handler_name !== 'anonymous' + ? result.handler_name + : result.handler_file_path + ? 
result.handler_file_path + : 'anonymous' + const handler_display = `${result.eventbus_label}.${handler_label}#${result.handler_id.slice(-4)}` + let line = `${indent}${connector}${status_icon} ${handler_display}` + + if (result.started_at) { + line += ` [${formatTimestamp(result.started_at)}` + if (result.completed_at) { + const started_ms = Date.parse(result.started_at) + const completed_ms = Date.parse(result.completed_at) + if (!Number.isNaN(started_ms) && !Number.isNaN(completed_ms)) { + const duration = (completed_ms - started_ms) / 1000 + line += ` (${duration.toFixed(3)}s)` + } + } + line += ']' + } + + if (result.status === 'error' && result.error) { + if (result.error instanceof EventHandlerTimeoutError) { + line += ` ⏱️ Timeout: ${result.error.message}` + } else if (result.error instanceof EventHandlerCancelledError) { + line += ` 🚫 Cancelled: ${result.error.message}` + } else { + const error_name = result.error instanceof Error ? result.error.name : 'Error' + const error_message = result.error instanceof Error ? result.error.message : String(result.error) + line += ` ☠️ ${error_name}: ${error_message}` + } + } else if (result.status === 'completed') { + line += ` β†’ ${formatResultValue(result.result)}` + } + + const extension = is_last ? ' ' : 'β”‚ ' + const new_indent = indent + extension + + const direct_children = result.event_children + if (direct_children.length === 0) { + return line + } + + const child_lines: string[] = [] + const parent_children = parent_to_children.get(result.event_id) ?? 
[] + const emitted_children = parent_children.filter((child) => child.event_emitted_by_handler_id === result.handler_id) + const children_by_id = new Map() + direct_children.forEach((child) => { + children_by_id.set(child.event_id, child) + }) + emitted_children.forEach((child) => { + if (!children_by_id.has(child.event_id)) { + children_by_id.set(child.event_id, child) + } + }) + const children_to_print = Array.from(children_by_id.values()).filter((child) => !visited.has(child.event_id)) + + children_to_print.forEach((child, index) => { + child_lines.push(buildTreeLine(child, new_indent, index === children_to_print.length - 1, parent_to_children, visited)) + }) + + return [line, ...child_lines].join('\n') +} + +export const formatTimestamp = (value?: string): string => { + if (!value) { + return 'N/A' + } + const date = new Date(value) + if (Number.isNaN(date.getTime())) { + return 'N/A' + } + return date.toISOString().slice(11, 23) +} + +export const formatResultValue = (value: unknown): string => { + if (value === null || value === undefined) { + return 'None' + } + if (value instanceof BaseEvent) { + return `Event(${value.event_type}#${value.event_id.slice(-4)})` + } + if (typeof value === 'string') { + return JSON.stringify(value) + } + if (typeof value === 'number' || typeof value === 'boolean') { + return String(value) + } + if (Array.isArray(value)) { + return `list(${value.length} items)` + } + if (typeof value === 'object') { + return `dict(${Object.keys(value as Record).length} items)` + } + return `${typeof value}(...)` +} diff --git a/bubus-ts/src/middlewares.ts b/bubus-ts/src/middlewares.ts new file mode 100644 index 0000000..8c9ccca --- /dev/null +++ b/bubus-ts/src/middlewares.ts @@ -0,0 +1,16 @@ +import type { BaseEvent } from './base_event.js' +import type { EventBus } from './event_bus.js' +import type { EventHandler } from './event_handler.js' +import type { EventResult } from './event_result.js' +import type { EventStatus } from './types.js' + 
+export type { EventStatus } from './types.js'
+
+export interface EventBusMiddleware {
+  onEventChange?(eventbus: EventBus, event: BaseEvent, status: EventStatus): void | Promise<void>
+  onEventResultChange?(eventbus: EventBus, event: BaseEvent, event_result: EventResult, status: EventStatus): void | Promise<void>
+  onBusHandlersChange?(eventbus: EventBus, handler: EventHandler, registered: boolean): void | Promise<void>
+}
+
+export type EventBusMiddlewareCtor = new () => EventBusMiddleware
+export type EventBusMiddlewareInput = EventBusMiddleware | EventBusMiddlewareCtor
diff --git a/bubus-ts/src/optional_deps.ts b/bubus-ts/src/optional_deps.ts
new file mode 100644
index 0000000..3a4e5be
--- /dev/null
+++ b/bubus-ts/src/optional_deps.ts
@@ -0,0 +1,35 @@
+export const isNodeRuntime = (): boolean => {
+  const maybe_process = (globalThis as { process?: { versions?: { node?: string } } }).process
+  return typeof maybe_process?.versions?.node === 'string'
+}
+
+const missingDependencyError = (bridge_name: string, package_name: string): Error =>
+  new Error(`${bridge_name} requires optional dependency "${package_name}".
Install it with: npm install ${package_name}`) + +export const assertOptionalDependencyAvailable = (bridge_name: string, package_name: string): void => { + if (!isNodeRuntime()) return + + const maybe_process = (globalThis as { process?: { getBuiltinModule?: (name: string) => any } }).process + const get_builtin_module = maybe_process?.getBuiltinModule + if (typeof get_builtin_module !== 'function') return + + const module_builtin = get_builtin_module('module') + const create_require = module_builtin?.createRequire + if (typeof create_require !== 'function') return + + const require_fn = create_require(import.meta.url) as { resolve: (specifier: string) => string } + try { + require_fn.resolve(package_name) + } catch { + throw missingDependencyError(bridge_name, package_name) + } +} + +export const importOptionalDependency = async (bridge_name: string, package_name: string): Promise => { + const dynamic_import = Function('module_name', 'return import(module_name)') as (module_name: string) => Promise + try { + return (await dynamic_import(package_name)) as any + } catch { + throw missingDependencyError(bridge_name, package_name) + } +} diff --git a/bubus-ts/src/retry.ts b/bubus-ts/src/retry.ts new file mode 100644 index 0000000..1520106 --- /dev/null +++ b/bubus-ts/src/retry.ts @@ -0,0 +1,381 @@ +import { createAsyncLocalStorage, type AsyncLocalStorageLike } from './async_context.js' + +// ─── Types ─────────────────────────────────────────────────────────────────── + +export interface RetryOptions { + /** Total number of attempts including the initial call (1 = no retry, 3 = up to 2 retries). Default: 1 */ + max_attempts?: number + + /** Seconds to wait between retries. Default: 0 */ + retry_after?: number + + /** Multiplier applied to retry_after after each attempt for exponential backoff. Default: 1.0 (constant delay) */ + retry_backoff_factor?: number + + /** Only retry when the thrown error matches one of these matchers. 
Accepts error class constructors, + * string error names (matched against error.name), or RegExp patterns (tested against String(error)). + * Default: undefined (retry on any error) */ + retry_on_errors?: Array<(new (...args: any[]) => Error) | string | RegExp> + + /** Per-attempt timeout in seconds. Default: undefined (no per-attempt timeout) */ + timeout?: number | null + + /** Maximum concurrent executions sharing this semaphore. Default: undefined (no concurrency limit) */ + semaphore_limit?: number | null + + /** Semaphore identifier. Functions with the same name share the same concurrency slot pool. Default: function name. + * If a function is provided, it receives the same arguments as the wrapped function. */ + semaphore_name?: string | ((...args: any[]) => string) | null + + /** If true, proceed without concurrency limit when semaphore acquisition times out. Default: true */ + semaphore_lax?: boolean + + /** Semaphore scoping strategy. Default: 'global' + * - 'global': all calls share one semaphore (keyed by semaphore_name) + * - 'class': all instances of the same class share one semaphore (keyed by className.semaphore_name) + * - 'instance': each object instance gets its own semaphore (keyed by instanceId.semaphore_name) + * 'class' and 'instance' require `this` to be an object; they fall back to 'global' for standalone calls. */ + semaphore_scope?: 'global' | 'class' | 'instance' + + /** Maximum seconds to wait for semaphore acquisition. Default: undefined β†’ timeout * max(1, limit - 1) */ + semaphore_timeout?: number | null +} + +// ─── Errors ────────────────────────────────────────────────────────────────── + +/** Thrown when a single attempt exceeds the per-attempt timeout. 
*/ +export class RetryTimeoutError extends Error { + timeout_seconds: number + attempt: number + + constructor(message: string, params: { timeout_seconds: number; attempt: number }) { + super(message) + this.name = 'RetryTimeoutError' + this.timeout_seconds = params.timeout_seconds + this.attempt = params.attempt + } +} + +/** Thrown (when semaphore_lax=false) if the semaphore cannot be acquired within the timeout. */ +export class SemaphoreTimeoutError extends Error { + semaphore_name: string + semaphore_limit: number + timeout_seconds: number + + constructor(message: string, params: { semaphore_name: string; semaphore_limit: number; timeout_seconds: number }) { + super(message) + this.name = 'SemaphoreTimeoutError' + this.semaphore_name = params.semaphore_name + this.semaphore_limit = params.semaphore_limit + this.timeout_seconds = params.timeout_seconds + } +} + +// ─── Re-entrancy tracking via AsyncLocalStorage ────────────────────────────── +// +// Prevents deadlocks when a retry()-wrapped function calls another retry()-wrapped +// function that shares the same semaphore (or calls itself recursively). +// +// Each async call stack tracks which semaphore names it currently holds. When a +// nested call encounters a semaphore it already holds, it skips acquisition and +// runs directly within the parent's slot. +// +// Uses the same AsyncLocalStorage polyfill as the rest of bubus (see async_context.ts) +// so it works in Node.js and gracefully degrades to a no-op in browsers. + +type ReentrantStore = Set + +// Separate AsyncLocalStorage instance for retry re-entrancy tracking. +// Created via the shared factory in async_context.ts (returns null in browsers). +const retry_context_storage: AsyncLocalStorageLike | null = createAsyncLocalStorage() + +function getHeldSemaphores(): ReentrantStore { + return (retry_context_storage?.getStore() as ReentrantStore | undefined) ?? 
new Set() +} + +function runWithHeldSemaphores(held: ReentrantStore, fn: () => T): T { + if (!retry_context_storage) return fn() + return retry_context_storage.run(held, fn) +} + +// ─── Semaphore scope helpers ───────────────────────────────────────────────── + +let _next_instance_id = 1 +const _instance_ids = new WeakMap() + +function scopedSemaphoreKey(base_name: string, scope: 'global' | 'class' | 'instance', context: unknown): string { + if (scope === 'class' && context && typeof context === 'object') { + return `${(context as object).constructor?.name ?? 'Object'}.${base_name}` + } + if (scope === 'instance' && context && typeof context === 'object') { + let id = _instance_ids.get(context as object) + if (id === undefined) { + id = _next_instance_id++ + _instance_ids.set(context as object, id) + } + return `${id}.${base_name}` + } + return base_name +} + +// ─── Global semaphore registry ─────────────────────────────────────────────── + +class RetrySemaphore { + readonly size: number + private inUse: number + private waiters: Array<() => void> + + constructor(size: number) { + this.size = size + this.inUse = 0 + this.waiters = [] + } + + async acquire(): Promise { + if (this.size === Infinity) { + return + } + if (this.inUse < this.size) { + this.inUse += 1 + return + } + await new Promise((resolve) => { + this.waiters.push(resolve) + }) + } + + release(): void { + if (this.size === Infinity) { + return + } + const next = this.waiters.shift() + if (next) { + // Handoff: keep the permit accounted for and transfer it directly to the waiter. 
+ next() + return + } + this.inUse = Math.max(0, this.inUse - 1) + } +} + +const SEMAPHORE_REGISTRY = new Map() + +function getOrCreateSemaphore(name: string, limit: number): RetrySemaphore { + const existing = SEMAPHORE_REGISTRY.get(name) + if (existing && existing.size === limit) return existing + const sem = new RetrySemaphore(limit) + SEMAPHORE_REGISTRY.set(name, sem) + return sem +} + +/** Reset the global semaphore registry. Useful in tests. */ +export function clearSemaphoreRegistry(): void { + SEMAPHORE_REGISTRY.clear() +} + +// ─── retry() decorator / higher-order wrapper ──────────────────────────────── +// +// Usage as a higher-order function (works on any async function): +// +// const fetchWithRetry = retry({ max_attempts: 3, retry_after: 1 })(async (url: string) => { +// return await fetch(url) +// }) +// +// Usage as a TC39 Stage 3 decorator on class methods (TS 5.0+): +// +// class ApiClient { +// @retry({ max_attempts: 3, retry_after: 1 }) +// async fetchData(): Promise { ... } +// } +// +// Usage on event bus handlers: +// +// bus.on(MyEvent, retry({ max_attempts: 3 })(async (event) => { +// await riskyOperation(event.data) +// })) + +export function retry(options: RetryOptions = {}) { + const { + max_attempts = 1, + retry_after = 0, + retry_backoff_factor = 1.0, + retry_on_errors, + timeout, + semaphore_limit, + semaphore_name: semaphore_name_option, + semaphore_lax = true, + semaphore_scope = 'global', + semaphore_timeout, + } = options + + return function decorator any>(target: T, _context?: ClassMethodDecoratorContext): T { + const fn_name = target.name || (_context?.name as string) || 'anonymous' + const effective_max_attempts = Math.max(1, max_attempts) + const effective_retry_after = Math.max(0, retry_after) + + async function retryWrapper(this: any, ...args: any[]): Promise { + const base_name = typeof semaphore_name_option === 'function' ? semaphore_name_option(...args) : (semaphore_name_option ?? 
fn_name) + const sem_name = typeof base_name === 'string' ? base_name : String(base_name) + // ── Resolve scoped semaphore key at call time (uses `this` for class/instance scopes) ── + const scoped_key = scopedSemaphoreKey(sem_name, semaphore_scope, this) + + // ── Check re-entrancy: skip semaphore if we already hold it in this async context ── + const held = getHeldSemaphores() + const needs_semaphore = semaphore_limit != null && semaphore_limit > 0 + const is_reentrant = needs_semaphore && held.has(scoped_key) + + // ── Semaphore acquisition (held across all retry attempts, skipped if re-entrant) ── + let semaphore: RetrySemaphore | null = null + let semaphore_acquired = false + + if (needs_semaphore && !is_reentrant) { + semaphore = getOrCreateSemaphore(scoped_key, semaphore_limit!) + + const effective_sem_timeout = + semaphore_timeout != null ? semaphore_timeout : timeout != null ? timeout * Math.max(1, semaphore_limit! - 1) : null + + if (effective_sem_timeout != null && effective_sem_timeout > 0) { + semaphore_acquired = await acquireWithTimeout(semaphore, effective_sem_timeout * 1000) + if (!semaphore_acquired) { + if (!semaphore_lax) { + throw new SemaphoreTimeoutError( + `Failed to acquire semaphore "${scoped_key}" within ${effective_sem_timeout}s (limit=${semaphore_limit})`, + { semaphore_name: scoped_key, semaphore_limit: semaphore_limit!, timeout_seconds: effective_sem_timeout } + ) + } + // lax mode: proceed without concurrency limit + } + } else { + // No timeout configured: wait indefinitely for a slot + await semaphore.acquire() + semaphore_acquired = true + } + } + + // ── Build the set of held semaphores for nested calls ── + const new_held = new Set(held) + if (semaphore_acquired) { + new_held.add(scoped_key) + } + + // ── Retry loop (runs inside the semaphore and re-entrancy context) ── + const runRetryLoop = async (): Promise => { + for (let attempt = 1; attempt <= effective_max_attempts; attempt++) { + try { + if (timeout != null && timeout > 
0) { + return await _runWithTimeout(() => Promise.resolve(target.apply(this, args)), timeout * 1000, attempt) + } else { + return await Promise.resolve(target.apply(this, args)) + } + } catch (error) { + // Check if this error type should trigger a retry + if (retry_on_errors && retry_on_errors.length > 0) { + const is_retryable = retry_on_errors.some((matcher) => + typeof matcher === 'string' + ? (error as Error)?.name === matcher + : matcher instanceof RegExp + ? matcher.test(String(error)) + : error instanceof matcher + ) + if (!is_retryable) throw error + } + + // Last attempt: rethrow + if (attempt >= effective_max_attempts) throw error + + // Wait before next attempt with exponential backoff + const delay_seconds = effective_retry_after * Math.pow(retry_backoff_factor, attempt - 1) + if (delay_seconds > 0) { + await sleep(delay_seconds * 1000) + } + } + } + + // Unreachable, but satisfies the type checker + throw new Error(`retry(${fn_name}): unexpected end of retry loop`) + } + + try { + return await runWithHeldSemaphores(new_held, runRetryLoop) + } finally { + if (semaphore_acquired && semaphore) { + semaphore.release() + } + } + } + + Object.defineProperty(retryWrapper, 'name', { value: fn_name, configurable: true }) + return retryWrapper as unknown as T + } +} + +// ─── Internal helpers ──────────────────────────────────────────────────────── + +/** + * Try to acquire a semaphore within a timeout. Returns true if acquired, false if timed out. + * If the semaphore is acquired after the timeout (due to the waiter remaining queued), + * it is immediately released to avoid leaking slots. 
+ */
+async function acquireWithTimeout(semaphore: RetrySemaphore, timeout_ms: number): Promise<boolean> {
+  return new Promise<boolean>((resolve) => {
+    let settled = false
+
+    const timer = setTimeout(() => {
+      if (!settled) {
+        settled = true
+        resolve(false)
+      }
+    }, timeout_ms)
+
+    semaphore.acquire().then(() => {
+      if (!settled) {
+        settled = true
+        clearTimeout(timer)
+        resolve(true)
+      } else {
+        // Acquired after timeout fired — release immediately to avoid slot leak
+        semaphore.release()
+      }
+    })
+  })
+}
+
+/** Run fn() with a timeout. Rejects with RetryTimeoutError if the timeout fires first. */
+async function _runWithTimeout<T>(fn: () => Promise<T>, timeout_ms: number, attempt: number): Promise<T> {
+  return new Promise<T>((resolve, reject) => {
+    let settled = false
+
+    const timer = setTimeout(() => {
+      if (!settled) {
+        settled = true
+        reject(
+          new RetryTimeoutError(`Timed out after ${timeout_ms / 1000}s (attempt ${attempt})`, {
+            timeout_seconds: timeout_ms / 1000,
+            attempt,
+          })
+        )
+      }
+    }, timeout_ms)
+
+    fn().then(
+      (value) => {
+        if (!settled) {
+          settled = true
+          clearTimeout(timer)
+          resolve(value)
+        }
+      },
+      (error) => {
+        if (!settled) {
+          settled = true
+          clearTimeout(timer)
+          reject(error)
+        }
+      }
+    )
+  })
+}
+
+function sleep(ms: number): Promise<void> {
+  return new Promise<void>((resolve) => setTimeout(resolve, ms))
+}
diff --git a/bubus-ts/src/timing.ts b/bubus-ts/src/timing.ts
new file mode 100644
index 0000000..6c603c7
--- /dev/null
+++ b/bubus-ts/src/timing.ts
@@ -0,0 +1,52 @@
+export async function _runWithTimeout<T>(timeout_seconds: number | null, on_timeout: () => Error, fn: () => Promise<T>): Promise<T> {
+  const task = Promise.resolve().then(fn)
+  if (timeout_seconds === null) {
+    return await task
+  }
+  const timeout_ms = timeout_seconds * 1000
+  return await new Promise<T>((resolve, reject) => {
+    let settled = false
+    const finishResolve = (value: T) => {
+      if (settled) {
+        return
+      }
+      settled = true
+      clearTimeout(timer)
+      resolve(value)
+    }
+    const finishReject =
(error: unknown) => { + if (settled) { + return + } + settled = true + clearTimeout(timer) + reject(error) + } + const timer = setTimeout(() => { + if (settled) { + return + } + settled = true + reject(on_timeout()) + void task.catch(() => undefined) + }, timeout_ms) + task.then(finishResolve).catch(finishReject) + }) +} + +export async function _runWithSlowMonitor(slow_timer: ReturnType | null, fn: () => Promise): Promise { + try { + return await fn() + } finally { + if (slow_timer) { + clearTimeout(slow_timer) + } + } +} + +export async function _runWithAbortMonitor(fn: () => T | Promise, abort_signal: Promise): Promise { + const task = Promise.resolve().then(fn) + const raced = Promise.race([task, abort_signal]) + void task.catch(() => undefined) + return await raced +} diff --git a/bubus-ts/src/type_inference.test.ts b/bubus-ts/src/type_inference.test.ts new file mode 100644 index 0000000..b1d3754 --- /dev/null +++ b/bubus-ts/src/type_inference.test.ts @@ -0,0 +1,132 @@ +/* eslint-disable @typescript-eslint/no-unused-vars */ +// Do not remove the unused type/const names below; they are used to test type inference at compile time. + +import { z } from 'zod' + +import { BaseEvent } from './base_event.js' +import { EventBus } from './event_bus.js' +import { events_suck } from './events_suck.js' +import type { EventResult } from './event_result.js' +import type { EventResultType } from './types.js' + +type IsEqual = (() => T extends A ? 1 : 2) extends () => T extends B ? 1 : 2 ? true : false +type Assert = T + +const InferableResultEvent = BaseEvent.extend('InferableResultEvent', { + target_id: z.string(), + event_result_type: z.object({ ok: z.boolean() }), +}) + +type InferableResult = EventResultType> +type _assert_inferable_result = Assert> +type InferableEventResultEntry = + InstanceType['event_results'] extends Map ? 
TResultEntry : never +type _assert_inferable_event_result_entry = Assert< + IsEqual>> +> +type InferableEventResultValue = InferableEventResultEntry extends { result?: infer TResultValue } ? TResultValue : never +type _assert_inferable_event_result_value = Assert> + +const NoSchemaEvent = BaseEvent.extend('NoSchemaEventForInference', {}) +type NoSchemaResult = EventResultType> +type _assert_no_schema_result = Assert> + +const ConstructorStringResultEvent = BaseEvent.extend('ConstructorStringResultEventForInference', { + event_result_type: String, +}) +type ConstructorStringResult = EventResultType> +type _assert_constructor_string_result = Assert> + +const ConstructorNumberResultEvent = BaseEvent.extend('ConstructorNumberResultEventForInference', { + event_result_type: Number, +}) +type ConstructorNumberResult = EventResultType> +type _assert_constructor_number_result = Assert> + +const ConstructorBooleanResultEvent = BaseEvent.extend('ConstructorBooleanResultEventForInference', { + event_result_type: Boolean, +}) +type ConstructorBooleanResult = EventResultType> +type _assert_constructor_boolean_result = Assert> + +const ConstructorArrayResultEvent = BaseEvent.extend('ConstructorArrayResultEventForInference', { + event_result_type: Array, +}) +type ConstructorArrayResult = EventResultType> +type _assert_constructor_array_result = Assert> + +const ConstructorObjectResultEvent = BaseEvent.extend('ConstructorObjectResultEventForInference', { + event_result_type: Object, +}) +type ConstructorObjectResult = EventResultType> +type _assert_constructor_object_result = Assert>> + +const bus = new EventBus('TypeInferenceBus') + +const find_by_class_call = bus.find(InferableResultEvent, { past: true, future: false }) +type FindByClassReturn = Awaited +type _assert_find_by_class_return = Assert | null>> + +const find_by_class_with_where_call = bus.find( + InferableResultEvent, + (event) => { + const target: string = event.target_id + return target.length > 0 + }, + { past: 
true, future: false } +) +type FindByClassWithWhereReturn = Awaited +type _assert_find_by_class_with_where_return = Assert | null>> + +const find_history_by_class_call = bus.event_history.find(InferableResultEvent, (event) => event.target_id.length > 0, { past: true }) +type FindHistoryByClassReturn = Awaited +type _assert_find_history_by_class_return = Assert | null>> + +const find_by_wildcard_call = bus.find('*', { past: true, future: false }) +type FindByWildcardReturn = Awaited +type _assert_find_by_wildcard_return = Assert> + +bus.on(InferableResultEvent, (event) => { + const target: string = event.target_id + return { ok: true } +}) + +bus.on(InferableResultEvent, () => undefined) + +// @ts-expect-error non-void return must match event_result_type for inferable event keys +bus.on(InferableResultEvent, () => 'not-ok') + +// String/wildcard keys remain best-effort and do not strongly enforce return shapes. +bus.on('InferableResultEvent', () => 'anything') +bus.on('*', () => 123) + +const WrappedClient = events_suck.wrap('WrappedClient', { + create: InferableResultEvent, + update: ConstructorBooleanResultEvent, +}) + +const wrapped_client = new WrappedClient(new EventBus('WrappedClientBus')) + +const wrapped_create_call = wrapped_client.create({ target_id: 'abc-123' }, { debug_tag: 'create' }) +type WrappedCreateReturn = Awaited +type _assert_wrapped_create_return = Assert> + +const wrapped_update_call = wrapped_client.update() +type WrappedUpdateReturn = Awaited +type _assert_wrapped_update_return = Assert> + +// @ts-expect-error missing required InferableResultEvent field +wrapped_client.create({}) + +const make_events_demo = events_suck.make_events({ + FooBarAPIObjEvent: (payload: { id: string; age?: number }) => payload.id.length > 0, +}) + +const generated_event = make_events_demo.FooBarAPIObjEvent({ id: 'abc' }) +const _generated_event_id: string = generated_event.id +bus.on(make_events_demo.FooBarAPIObjEvent, (event) => { + const id: string = event.id + 
return id.length > 0 +}) +// @ts-expect-error event_result_type inferred from make_events() function return type (boolean) +bus.on(make_events_demo.FooBarAPIObjEvent, () => 'not-boolean') diff --git a/bubus-ts/src/types.ts b/bubus-ts/src/types.ts new file mode 100644 index 0000000..26ab6c5 --- /dev/null +++ b/bubus-ts/src/types.ts @@ -0,0 +1,132 @@ +import { z } from 'zod' +import type { BaseEvent } from './base_event.js' + +export type EventStatus = 'pending' | 'started' | 'completed' + +export type EventClass = { event_type?: string } & (new (...args: any[]) => T) + +export type EventPattern = string | EventClass + +export type EventWithResultSchema = BaseEvent & { __event_result_type__?: TResult } + +export type EventResultType = TEvent extends { __event_result_type__?: infer TResult } ? TResult : unknown + +export type EventResultTypeConstructor = StringConstructor | NumberConstructor | BooleanConstructor | ArrayConstructor | ObjectConstructor + +export type EventResultTypeInput = z.ZodTypeAny | EventResultTypeConstructor | unknown + +export type EventHandlerReturn = EventResultType | BaseEvent | null | void + +export type EventHandlerCallable = (event: T) => EventHandlerReturn | Promise> + +// For string and wildcard subscriptions we cannot reliably infer which event +// type will arrive, so return type checking intentionally degrades to unknown. +export type UntypedEventHandlerFunction = ( + event: T +) => EventHandlerReturn | unknown | Promise | unknown> + +export type FindWindow = boolean | number + +type FindReservedOptionKeys = 'past' | 'future' | 'child_of' + +type EventFilterFields = { + [K in keyof T as string extends K + ? never + : number extends K + ? never + : symbol extends K + ? never + : K extends FindReservedOptionKeys + ? never + : T[K] extends (...args: any[]) => any + ? 
never + : K]?: T[K] +} + +export type FindOptions = { + past?: FindWindow + future?: FindWindow + child_of?: BaseEvent | null +} & EventFilterFields & + Record + +export const normalizeEventPattern = (event_pattern: EventPattern | '*'): string | '*' => { + if (event_pattern === '*') { + return '*' + } + if (typeof event_pattern === 'string') { + return event_pattern + } + const event_type = (event_pattern as { event_type?: unknown }).event_type + if (typeof event_type === 'string' && event_type.length > 0 && event_type !== 'BaseEvent') { + return event_type + } + const class_name = (event_pattern as { name?: unknown }).name + if (typeof class_name === 'string' && class_name.length > 0 && class_name !== 'BaseEvent') { + return class_name + } + let preview: string + try { + const encoded = JSON.stringify(event_pattern) + preview = typeof encoded === 'string' ? encoded.slice(0, 30) : String(event_pattern).slice(0, 30) + } catch { + preview = String(event_pattern).slice(0, 30) + } + throw new Error('bus.on(match_pattern, ...) 
must be a string event type, "*", or a BaseEvent class, got: ' + preview) +} + +export const isZodSchema = (value: unknown): value is z.ZodTypeAny => !!value && typeof (value as z.ZodTypeAny).safeParse === 'function' + +export const eventResultTypeFromConstructor = (value: unknown): z.ZodTypeAny | undefined => { + if (value === String) { + return z.string() + } + if (value === Number) { + return z.number() + } + if (value === Boolean) { + return z.boolean() + } + if (value === Array) { + return z.array(z.unknown()) + } + if (value === Object) { + return z.record(z.string(), z.unknown()) + } + return undefined +} + +export const extractZodShape = (raw: Record): z.ZodRawShape => { + const shape: Record = {} + for (const [key, value] of Object.entries(raw)) { + if (key === 'event_result_type') continue + if (isZodSchema(value)) shape[key] = value + } + return shape as z.ZodRawShape +} + +export const toJsonSchema = (schema: unknown): unknown => { + if (!schema || !isZodSchema(schema)) return schema + const zod_any = z as unknown as { toJSONSchema: (input: z.ZodTypeAny) => unknown } + // Cross-language roundtrips preserve core structural types; constraint keywords may not roundtrip exactly. 
+ return zod_any.toJSONSchema(schema) +} + +export const fromJsonSchema = (schema: unknown): z.ZodTypeAny => { + const zod_any = z as unknown as { fromJSONSchema: (input: unknown) => z.ZodTypeAny } + return zod_any.fromJSONSchema(schema) +} + +export const normalizeEventResultType = (value: EventResultTypeInput): z.ZodTypeAny | undefined => { + if (value === undefined || value === null) { + return undefined + } + if (isZodSchema(value)) { + return value + } + const constructor_schema = eventResultTypeFromConstructor(value) + if (constructor_schema) { + return constructor_schema + } + return fromJsonSchema(value) +} diff --git a/bubus-ts/tests/_perf_profile.ts b/bubus-ts/tests/_perf_profile.ts new file mode 100644 index 0000000..1334719 --- /dev/null +++ b/bubus-ts/tests/_perf_profile.ts @@ -0,0 +1,60 @@ +import { BaseEvent, EventBus } from '../src/index.js' + +const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) + +const total_events = 200_000 +// Keep full history to avoid trimming inflight events during perf profiling. 
+const bus = new EventBus('PerfBus', { max_history_size: total_events }) + +let processed_count = 0 +bus.on(SimpleEvent, () => { + processed_count += 1 +}) + +// Baseline memory +global.gc?.() +const mem_before = process.memoryUsage() +console.log(`Memory before: RSS=${(mem_before.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_before.heapUsed / 1024 / 1024).toFixed(1)}MB`) + +// Phase 1: Dispatch all events (measure dispatch throughput) +const t0 = performance.now() +const pending: Array> = [] +for (let i = 0; i < total_events; i++) { + pending.push(bus.emit(SimpleEvent({}))) +} +const t1 = performance.now() +console.log(`Dispatch ${total_events} events: ${(t1 - t0).toFixed(0)}ms (${(total_events / ((t1 - t0) / 1000)).toFixed(0)} events/s)`) + +const mem_after_dispatch = process.memoryUsage() +console.log( + `Memory after dispatch: RSS=${(mem_after_dispatch.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_after_dispatch.heapUsed / 1024 / 1024).toFixed(1)}MB` +) + +// Phase 2: Wait for all to complete +const t2 = performance.now() +await Promise.all(pending.map((e) => e.done())) +await bus.waitUntilIdle() +const t3 = performance.now() +console.log(`Await completion: ${(t3 - t2).toFixed(0)}ms`) +console.log(`Total: ${(t3 - t0).toFixed(0)}ms (${(total_events / ((t3 - t0) / 1000)).toFixed(0)} events/s)`) + +const mem_after = process.memoryUsage() +console.log( + `Memory after complete: RSS=${(mem_after.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_after.heapUsed / 1024 / 1024).toFixed(1)}MB` +) + +global.gc?.() +const mem_gc = process.memoryUsage() +console.log(`Memory after GC: RSS=${(mem_gc.rss / 1024 / 1024).toFixed(1)}MB, Heap=${(mem_gc.heapUsed / 1024 / 1024).toFixed(1)}MB`) + +const total_ms = t3 - t0 +console.log( + `Per-event: time=${(total_ms / total_events).toFixed(4)}ms, ` + + `heap=${((mem_after.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB, ` + + `heap_gc=${((mem_gc.heapUsed - mem_before.heapUsed) / total_events / 1024).toFixed(2)}KB` +) 
+ +console.log(`\nProcessed: ${processed_count}/${total_events}`) +console.log(`History size: ${bus.event_history.size} (max: ${bus.event_history.max_history_size})`) +console.log(`Heap delta (before GC): +${((mem_after.heapUsed - mem_before.heapUsed) / 1024 / 1024).toFixed(1)}MB`) +console.log(`Heap delta (after GC): +${((mem_gc.heapUsed - mem_before.heapUsed) / 1024 / 1024).toFixed(1)}MB`) diff --git a/bubus-ts/tests/base_event.test.ts b/bubus-ts/tests/base_event.test.ts new file mode 100644 index 0000000..ef7886b --- /dev/null +++ b/bubus-ts/tests/base_event.test.ts @@ -0,0 +1,314 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus, monotonicDatetime } from '../src/index.js' + +test('BaseEvent lifecycle transitions are explicit and awaitable', async () => { + const LifecycleEvent = BaseEvent.extend('BaseEventLifecycleTestEvent', {}) + + const standalone = LifecycleEvent({}) + assert.equal(standalone.event_status, 'pending') + assert.equal(standalone.event_started_at, null) + assert.equal(standalone.event_completed_at, null) + + standalone._markStarted() + assert.equal(standalone.event_status, 'started') + assert.equal(typeof standalone.event_started_at, 'string') + + standalone._markCompleted(false) + assert.equal(standalone.event_status, 'completed') + assert.equal(typeof standalone.event_completed_at, 'string') + await standalone.eventCompleted() +}) + +test('BaseEvent.eventResultUpdate creates and updates typed handler results', async () => { + const TypedEvent = BaseEvent.extend('BaseEventEventResultUpdateEvent', { event_result_type: z.string() }) + const bus = new EventBus('BaseEventEventResultUpdateBus') + const event = TypedEvent({}) + const handler_entry = bus.on(TypedEvent, async () => 'ok') + + const pending = event.eventResultUpdate(handler_entry, { eventbus: bus, status: 'pending' }) + assert.equal(event.event_results.get(handler_entry.id), pending) + 
assert.equal(pending.status, 'pending') + + const completed = event.eventResultUpdate(handler_entry, { eventbus: bus, status: 'completed', result: 'seeded' }) + assert.equal(completed, pending) + assert.equal(completed.status, 'completed') + assert.equal(completed.result, 'seeded') + + bus.destroy() +}) + +test('await event.done() queue-jumps child processing inside handlers', async () => { + const ParentEvent = BaseEvent.extend('BaseEventImmediateParentEvent', {}) + const ChildEvent = BaseEvent.extend('BaseEventImmediateChildEvent', {}) + const SiblingEvent = BaseEvent.extend('BaseEventImmediateSiblingEvent', {}) + + const bus = new EventBus('BaseEventImmediateQueueJumpBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const order: string[] = [] + + bus.on(ParentEvent, async (event) => { + order.push('parent_start') + event.bus?.emit(SiblingEvent({})) + const child = event.bus?.emit(ChildEvent({})) + assert.ok(child) + await child.done() + order.push('parent_end') + }) + + bus.on(ChildEvent, async () => { + order.push('child') + }) + + bus.on(SiblingEvent, async () => { + order.push('sibling') + }) + + await bus.emit(ParentEvent({})).done() + await bus.waitUntilIdle() + + assert.deepEqual(order, ['parent_start', 'child', 'parent_end', 'sibling']) + bus.destroy() +}) + +test('await event.eventCompleted() preserves normal queue order inside handlers', async () => { + const ParentEvent = BaseEvent.extend('BaseEventQueuedParentEvent', {}) + const ChildEvent = BaseEvent.extend('BaseEventQueuedChildEvent', {}) + const SiblingEvent = BaseEvent.extend('BaseEventQueuedSiblingEvent', {}) + + const bus = new EventBus('BaseEventQueueOrderBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + const order: string[] = [] + + bus.on(ParentEvent, async (event) => { + order.push('parent_start') + event.bus?.emit(SiblingEvent({})) + const child = event.bus?.emit(ChildEvent({})) + assert.ok(child) + await 
child.eventCompleted() + order.push('parent_end') + }) + + bus.on(ChildEvent, async () => { + order.push('child_start') + await new Promise((resolve) => setTimeout(resolve, 1)) + order.push('child_end') + }) + + bus.on(SiblingEvent, async () => { + order.push('sibling_start') + await new Promise((resolve) => setTimeout(resolve, 1)) + order.push('sibling_end') + }) + + await bus.emit(ParentEvent({})).done() + await bus.waitUntilIdle() + + assert.ok(order.indexOf('sibling_start') < order.indexOf('child_start')) + assert.ok(order.indexOf('child_end') < order.indexOf('parent_end')) + bus.destroy() +}) + +test('monotonicDatetime emits parseable, monotonic ISO timestamps', () => { + const first = monotonicDatetime() + const second = monotonicDatetime() + + assert.equal(typeof first, 'string') + assert.equal(typeof second, 'string') + assert.equal(Number.isInteger(Date.parse(first)), true) + assert.equal(Number.isInteger(Date.parse(second)), true) + assert.ok(second > first) +}) + +test('BaseEvent rejects reserved runtime fields in payload and event shape', () => { + const ReservedFieldEvent = BaseEvent.extend('BaseEventReservedFieldEvent', {}) + + assert.throws(() => { + void ReservedFieldEvent({ bus: 'payload_bus_field' } as unknown as never) + }, /field "bus" is reserved/i) + + assert.throws(() => { + void BaseEvent.extend('BaseEventReservedFieldShapeEvent', { bus: z.string() }) + }, /field "bus" is reserved/i) + + assert.throws(() => { + void ReservedFieldEvent({ first: 'payload_first_field' } as unknown as never) + }, /field "first" is reserved/i) + + assert.throws(() => { + void BaseEvent.extend('BaseEventReservedFirstShapeEvent', { first: z.string() }) + }, /field "first" is reserved/i) + + assert.throws(() => { + void ReservedFieldEvent({ toString: 'payload_to_string_field' } as unknown as never) + }, /field "toString" is reserved/i) + + assert.throws(() => { + void BaseEvent.extend('BaseEventReservedToStringShapeEvent', { toString: z.string() }) + }, /field 
"toString" is reserved/i) + + assert.throws(() => { + void ReservedFieldEvent({ toJSON: 'payload_to_json_field' } as unknown as never) + }, /field "toJSON" is reserved/i) + + assert.throws(() => { + void BaseEvent.extend('BaseEventReservedToJSONShapeEvent', { toJSON: z.string() }) + }, /field "toJSON" is reserved/i) + + assert.throws(() => { + void ReservedFieldEvent({ fromJSON: 'payload_from_json_field' } as unknown as never) + }, /field "fromJSON" is reserved/i) + + assert.throws(() => { + void BaseEvent.extend('BaseEventReservedFromJSONShapeEvent', { fromJSON: z.string() }) + }, /field "fromJSON" is reserved/i) +}) + +test('BaseEvent rejects unknown event_* fields while allowing known event_* overrides', () => { + const AllowedEvent = BaseEvent.extend('BaseEventAllowedEventConfigEvent', { + event_timeout: 123, + event_slow_timeout: 9, + event_handler_timeout: 45, + value: z.string(), + }) + + const event = AllowedEvent({ value: 'ok' }) + assert.equal(event.event_timeout, 123) + assert.equal(event.event_slow_timeout, 9) + assert.equal(event.event_handler_timeout, 45) + + assert.throws(() => { + void BaseEvent.extend('BaseEventUnknownEventShapeFieldEvent', { event_some_field_we_dont_recognize: 1 }) + }, /starts with "event_" but is not a recognized BaseEvent field/i) + + assert.throws(() => { + void AllowedEvent({ + value: 'ok', + event_some_field_we_dont_recognize: 1, + } as unknown as never) + }, /starts with "event_" but is not a recognized BaseEvent field/i) +}) + +test('BaseEvent rejects model_* fields in payload and event shape', () => { + const ModelReservedEvent = BaseEvent.extend('BaseEventModelReservedEvent', {}) + + assert.throws(() => { + void BaseEvent.extend('BaseEventModelReservedShapeEvent', { model_something_random: 1 }) + }, /starts with "model_" and is reserved/i) + + assert.throws(() => { + void ModelReservedEvent({ model_something_random: 1 } as unknown as never) + }, /starts with "model_" and is reserved/i) +}) + +test('BaseEvent 
toJSON/fromJSON roundtrips runtime fields and event_results', async () => { + const RuntimeEvent = BaseEvent.extend('BaseEventRuntimeSerializationEvent', { + event_result_type: z.string(), + }) + const bus = new EventBus('BaseEventRuntimeSerializationBus') + + bus.on(RuntimeEvent, () => 'ok') + + const event = bus.emit(RuntimeEvent({})) + await event.done() + + const json = event.toJSON() as Record + assert.equal(json.event_status, 'completed') + assert.equal(typeof json.event_created_at, 'string') + assert.equal(typeof json.event_started_at, 'string') + assert.equal(typeof json.event_completed_at, 'string') + assert.equal(json.event_pending_bus_count, 0) + + const json_results = json.event_results as Array> + assert.equal(json_results.length, 1) + assert.equal(json_results[0].status, 'completed') + assert.equal(json_results[0].result, 'ok') + + const restored = RuntimeEvent.fromJSON?.(json) ?? RuntimeEvent(json as never) + assert.equal(restored.event_status, 'completed') + assert.equal(restored.event_created_at, event.event_created_at) + assert.equal(restored.event_results.size, 1) + assert.equal(Array.from(restored.event_results.values())[0].result, 'ok') + + bus.destroy() +}) + +test('BaseEvent reset returns a fresh pending event that can be redispatched', async () => { + const ResetEvent = BaseEvent.extend('BaseEventResetEvent', { + label: z.string(), + }) + + const bus_a = new EventBus('BaseEventResetBusA') + const bus_b = new EventBus('BaseEventResetBusB') + + bus_a.on(ResetEvent, (event) => `a:${event.label}`) + bus_b.on(ResetEvent, (event) => `b:${event.label}`) + + const completed = await bus_a.emit(ResetEvent({ label: 'hello' })).done() + const fresh = completed.eventReset() + + assert.notEqual(fresh.event_id, completed.event_id) + assert.equal(fresh.event_status, 'pending') + assert.equal(fresh.event_results.size, 0) + assert.equal(fresh.event_started_at, null) + assert.equal(fresh.event_completed_at, null) + + const forwarded = await 
bus_b.emit(fresh).done() + assert.equal(forwarded.event_status, 'completed') + assert.equal( + Array.from(forwarded.event_results.values()).some((result) => result.result === 'b:hello'), + true + ) + + bus_a.destroy() + bus_b.destroy() +}) + +test('BaseEvent fromJSON preserves nullable parent/emitted metadata', () => { + const event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-00000000123a', + event_created_at: new Date('2025-01-01T00:00:00.000Z').toISOString(), + event_type: 'BaseEventFromJsonNullFieldsEvent', + event_parent_id: null, + event_emitted_by_handler_id: null, + event_timeout: null, + }) + + assert.equal(event.event_parent_id, null) + assert.equal(event.event_emitted_by_handler_id, null) + + const roundtrip = event.toJSON() as Record + assert.equal(roundtrip.event_parent_id, null) + assert.equal(roundtrip.event_emitted_by_handler_id, null) +}) + +test('BaseEvent status hooks capture bus reference before event gc', async () => { + const HookEvent = BaseEvent.extend('BaseEventHookCaptureEvent', {}) + + class HookCaptureBus extends EventBus { + seen_statuses: string[] = [] + + async onEventChange(_event: BaseEvent, status: 'pending' | 'started' | 'completed'): Promise { + this.seen_statuses.push(status) + } + } + + const bus = new HookCaptureBus('BaseEventHookCaptureBus') + const event = HookEvent({}) + event.bus = bus + + event._markStarted() + event._markCompleted() + event._gc() + + assert.deepEqual(bus.seen_statuses, ['started', 'completed']) + + bus.destroy() +}) diff --git a/bubus-ts/tests/base_event_event_bus_proxy.test.ts b/bubus-ts/tests/base_event_event_bus_proxy.test.ts new file mode 100644 index 0000000..0ab48e5 --- /dev/null +++ b/bubus-ts/tests/base_event_event_bus_proxy.test.ts @@ -0,0 +1,366 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' + +const MainEvent = BaseEvent.extend('MainEvent', {}) +const ChildEvent = 
BaseEvent.extend('ChildEvent', {}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {}) + +test('event.bus inside handler returns the dispatching bus', async () => { + const bus = new EventBus('TestBus') + + let handler_called = false + let handler_bus_name: string | undefined + let child_event: BaseEvent | undefined + + bus.on(MainEvent, (event) => { + handler_called = true + handler_bus_name = event.bus?.name + + // Should be able to dispatch child events using event.bus + child_event = event.bus?.emit(ChildEvent({})) + }) + + bus.on(ChildEvent, () => {}) + + bus.emit(MainEvent({})) + await bus.waitUntilIdle() + + assert.equal(handler_called, true) + assert.equal(handler_bus_name, 'TestBus') + assert.ok(child_event, 'child event should have been dispatched via event.bus') + assert.equal(child_event!.event_type, 'ChildEvent') +}) + +test('event.event_bus inside handler returns the dispatching bus', async () => { + const bus = new EventBus('EventBusPropertyBus') + let handler_bus_name: string | undefined + + bus.on(MainEvent, (event) => { + handler_bus_name = event.event_bus.name + assert.equal(event.event_bus, event.bus) + }) + + await bus.emit(MainEvent({})).done() + assert.equal(handler_bus_name, 'EventBusPropertyBus') +}) + +test('event.event_bus aliases event.bus for child events emitted in handler', async () => { + const bus = new EventBus('EventBusPropertyFallbackBus') + let child_bus_name: string | undefined + + bus.on(MainEvent, (event) => { + const child = event.event_bus.emit(ChildEvent({})) + child_bus_name = child.event_bus.name + assert.equal(child.event_bus, child.bus) + }) + bus.on(ChildEvent, () => {}) + + await bus.emit(MainEvent({})).done() + assert.equal(child_bus_name, 'EventBusPropertyFallbackBus') +}) + +test('event.event_bus aliases event.bus for detached events', async () => { + const bus = new EventBus('EventBusPropertyDetachedBus') + bus.on(MainEvent, () => {}) + + const original = bus.emit(MainEvent({})) + await 
original.done() + + const detached = BaseEvent.fromJSON(original.toJSON()) + assert.equal(detached.bus, undefined) + assert.deepEqual(detached.event_path, [bus.label]) + assert.equal(detached.event_bus, detached.bus) +}) + +test('event.event_bus aliases event.bus outside handler context', async () => { + const bus = new EventBus('EventBusPropertyOutsideHandlerBus') + const event = bus.emit(MainEvent({})) + await event.done() + + assert.equal(event.event_bus, event.bus) +}) + +test('event.bus returns correct bus when multiple buses exist', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') + + let handler1_bus_name: string | undefined + let handler2_bus_name: string | undefined + + bus1.on(MainEvent, (event) => { + handler1_bus_name = event.bus?.name + }) + + bus2.on(MainEvent, (event) => { + handler2_bus_name = event.bus?.name + }) + + bus1.emit(MainEvent({})) + await bus1.waitUntilIdle() + + bus2.emit(MainEvent({})) + await bus2.waitUntilIdle() + + assert.equal(handler1_bus_name, 'Bus1') + assert.equal(handler2_bus_name, 'Bus2') +}) + +test('event.bus reflects the currently-processing bus when forwarded', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') + + // Forward all events from bus1 to bus2 + bus1.on('*', bus2.emit) + + let bus2_handler_bus_name: string | undefined + + bus2.on(MainEvent, (event) => { + bus2_handler_bus_name = event.bus?.name + }) + + const event = bus1.emit(MainEvent({})) + await bus1.waitUntilIdle() + await bus2.waitUntilIdle() + + // The handler on bus2 should see bus2 as event.bus, not bus1 + assert.equal(bus2_handler_bus_name, 'Bus2') + assert.deepEqual(event.event_path, [bus1.label, bus2.label]) +}) + +test('event.bus in nested handlers sees the same bus', async () => { + const bus = new EventBus('MainBus') + + let outer_bus_name: string | undefined + let inner_bus_name: string | undefined + + bus.on(MainEvent, async (event) => { + outer_bus_name = event.bus?.name + 
+ // Dispatch child using event.bus + const child = event.bus!.emit(ChildEvent({})) + await child.done() + }) + + bus.on(ChildEvent, (event) => { + inner_bus_name = event.bus?.name + }) + + const parent = bus.emit(MainEvent({})) + await parent.done() + + assert.equal(outer_bus_name, 'MainBus') + assert.equal(inner_bus_name, 'MainBus') +}) + +test('event.bus.emit sets parent-child relationships through 3 levels', async () => { + const bus = new EventBus('MainBus') + + const execution_order: string[] = [] + let child_ref: BaseEvent | undefined + let grandchild_ref: BaseEvent | undefined + + bus.on(MainEvent, async (event) => { + execution_order.push('parent_start') + assert.equal(event.bus?.name, 'MainBus') + + child_ref = event.bus!.emit(ChildEvent({})) + await child_ref.done() + + execution_order.push('parent_end') + }) + + bus.on(ChildEvent, async (event) => { + execution_order.push('child_start') + assert.equal(event.bus?.name, 'MainBus') + + grandchild_ref = event.bus!.emit(GrandchildEvent({})) + await grandchild_ref.done() + + execution_order.push('child_end') + }) + + bus.on(GrandchildEvent, (event) => { + execution_order.push('grandchild_start') + assert.equal(event.bus?.name, 'MainBus') + execution_order.push('grandchild_end') + }) + + const parent_event = bus.emit(MainEvent({})) + await parent_event.done() + + // Child events should queue-jump and complete before their parents return + assert.deepEqual(execution_order, ['parent_start', 'child_start', 'grandchild_start', 'grandchild_end', 'child_end', 'parent_end']) + + // All events completed + assert.equal(parent_event.event_status, 'completed') + assert.ok(child_ref) + assert.equal(child_ref!.event_status, 'completed') + assert.ok(grandchild_ref) + assert.equal(grandchild_ref!.event_status, 'completed') + + // Parent-child relationships are set correctly + assert.equal(child_ref!.event_parent_id, parent_event.event_id) + assert.equal(grandchild_ref!.event_parent_id, child_ref!.event_id) + 
assert.equal(child_ref!.event_parent?.event_id, parent_event.event_id) + assert.equal(grandchild_ref!.event_parent?.event_id, child_ref!.event_id) +}) + +test('event.bus with forwarding: child dispatched via event.bus goes to the correct bus', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') + + // Forward all events from bus1 to bus2 + bus1.on('*', bus2.emit) + + let child_handler_bus_name: string | undefined + + // Handlers only on bus2 + bus2.on(MainEvent, async (event) => { + // Handler runs on bus2 (forwarded from bus1) + assert.equal(event.bus?.name, 'Bus2') + + // Child dispatched via event.bus should go to bus2 + const child = event.bus!.emit(ChildEvent({})) + await child.done() + }) + + bus2.on(ChildEvent, (event) => { + child_handler_bus_name = event.bus?.name + }) + + bus1.emit(MainEvent({})) + await bus1.waitUntilIdle() + await bus2.waitUntilIdle() + + // Child handler should have seen bus2 + assert.equal(child_handler_bus_name, 'Bus2') +}) + +test('event.bus is set on the event after dispatch (outside handler)', async () => { + const bus = new EventBus('TestBus') + + // Before dispatch, bus is not set + const raw_event = MainEvent({}) + assert.equal(raw_event.bus, undefined) + + // After dispatch, bus is set on the original event + const dispatched = bus.emit(raw_event) + assert.ok(dispatched.bus, 'event.bus should be set after dispatch') + + await bus.waitUntilIdle() +}) + +test('event.bus.emit from handler correctly attributes event_emitted_by_handler_id', async () => { + const bus = new EventBus('TestBus') + + bus.on(MainEvent, (event) => { + event.bus?.emit(ChildEvent({})) + }) + + bus.on(ChildEvent, () => {}) + + const parent = bus.emit(MainEvent({})) + await bus.waitUntilIdle() + + // Find the child event in history + const child = Array.from(bus.event_history.values()).find((e) => e.event_type === 'ChildEvent') + assert.ok(child, 'child event should be in history') + assert.equal(child!.event_parent_id, 
parent.event_id) + assert.equal(child!.event_parent?.event_id, parent.event_id) + + // The child should have event_emitted_by_handler_id set to the handler that emitted it + assert.ok(child!.event_emitted_by_handler_id, 'event_emitted_by_handler_id should be set on child events dispatched via event.bus') + + // The handler id should correspond to a handler result on the parent event + const parent_from_history = Array.from(bus.event_history.values()).find((e) => e.event_type === 'MainEvent') + assert.ok(parent_from_history) + const handler_result = parent_from_history!.event_results.get(child!.event_emitted_by_handler_id!) + assert.ok(handler_result, 'handler_id on child should match a handler result on the parent') +}) + +test('dispatch preserves explicit event_parent_id and does not override it', async () => { + const bus = new EventBus('ExplicitParentBus') + const explicit_parent_id = '018f8e40-1234-7000-8000-000000001234' + + bus.on(MainEvent, (event) => { + const child = ChildEvent({ + event_parent_id: explicit_parent_id, + }) + event.bus?.emit(child) + }) + + const parent = bus.emit(MainEvent({})) + await bus.waitUntilIdle() + + const child = Array.from(bus.event_history.values()).find((event) => event.event_type === 'ChildEvent') + assert.ok(child, 'child event should be in history') + assert.equal(child.event_parent_id, explicit_parent_id) + assert.notEqual(child.event_parent_id, parent.event_id) +}) + +// Consolidated from tests/parent_child.test.ts + +const LineageParentEvent = BaseEvent.extend('LineageParentEvent', {}) +const LineageChildEvent = BaseEvent.extend('LineageChildEvent', {}) +const LineageGrandchildEvent = BaseEvent.extend('LineageGrandchildEvent', {}) +const LineageUnrelatedEvent = BaseEvent.extend('LineageUnrelatedEvent', {}) + +test('eventIsChildOf and eventIsParentOf work for direct children', async () => { + const bus = new EventBus('ParentChildBus') + + bus.on(LineageParentEvent, (event) => { + event.bus?.emit(LineageChildEvent({})) + 
}) + + const parent_event = bus.emit(LineageParentEvent({})) + await bus.waitUntilIdle() + + const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'LineageChildEvent') + assert.ok(child_event) + + assert.equal(child_event.event_parent_id, parent_event.event_id) + assert.equal(child_event.event_parent?.event_id, parent_event.event_id) + assert.equal(bus.eventIsChildOf(child_event, parent_event), true) + assert.equal(bus.eventIsParentOf(parent_event, child_event), true) +}) + +test('eventIsChildOf works for grandchildren', async () => { + const bus = new EventBus('GrandchildBus') + + bus.on(LineageParentEvent, (event) => { + event.bus?.emit(LineageChildEvent({})) + }) + + bus.on(LineageChildEvent, (event) => { + event.bus?.emit(LineageGrandchildEvent({})) + }) + + const parent_event = bus.emit(LineageParentEvent({})) + await bus.waitUntilIdle() + + const child_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'LineageChildEvent') + const grandchild_event = Array.from(bus.event_history.values()).find((event) => event.event_type === 'LineageGrandchildEvent') + + assert.ok(child_event) + assert.ok(grandchild_event) + + assert.equal(bus.eventIsChildOf(child_event, parent_event), true) + assert.equal(bus.eventIsChildOf(grandchild_event, parent_event), true) + assert.equal(child_event.event_parent?.event_id, parent_event.event_id) + assert.equal(grandchild_event.event_parent?.event_id, child_event.event_id) + assert.equal(bus.eventIsParentOf(parent_event, grandchild_event), true) +}) + +test('eventIsChildOf returns false for unrelated events', async () => { + const bus = new EventBus('UnrelatedBus') + + const parent_event = bus.emit(LineageParentEvent({})) + const unrelated_event = bus.emit(LineageUnrelatedEvent({})) + await parent_event.done() + await unrelated_event.done() + + assert.equal(bus.eventIsChildOf(unrelated_event, parent_event), false) + assert.equal(bus.eventIsParentOf(parent_event, 
unrelated_event), false) +}) diff --git a/bubus-ts/tests/bridge_listener_worker.ts b/bubus-ts/tests/bridge_listener_worker.ts new file mode 100644 index 0000000..f413823 --- /dev/null +++ b/bubus-ts/tests/bridge_listener_worker.ts @@ -0,0 +1,56 @@ +import { readFileSync, writeFileSync } from 'node:fs' + +import { + HTTPEventBridge, + JSONLEventBridge, + NATSEventBridge, + PostgresEventBridge, + RedisEventBridge, + SQLiteEventBridge, + SocketEventBridge, +} from '../src/index.js' + +type WorkerConfig = { + kind: string + ready_path: string + output_path: string + endpoint?: string + path?: string + table?: string + url?: string + server?: string + subject?: string +} + +const makeListenerBridge = (config: WorkerConfig): any => { + if (config.kind === 'http') return new HTTPEventBridge({ listen_on: config.endpoint }) + if (config.kind === 'socket') return new SocketEventBridge(config.path) + if (config.kind === 'jsonl') return new JSONLEventBridge(config.path ?? '', 0.05) + if (config.kind === 'sqlite') return new SQLiteEventBridge(config.path ?? '', config.table ?? 'bubus_events', 0.05) + if (config.kind === 'redis') return new RedisEventBridge(config.url ?? '') + if (config.kind === 'nats') return new NATSEventBridge(config.server ?? '', config.subject ?? '') + if (config.kind === 'postgres') return new PostgresEventBridge(config.url ?? 
'') + throw new Error(`Unsupported bridge kind: ${config.kind}`) +} + +const main = async (): Promise => { + const config_path = process.argv[2] + const config = JSON.parse(readFileSync(config_path, 'utf8')) as WorkerConfig + const bridge = makeListenerBridge(config) + + let resolve_done: (() => void) | null = null + const done = new Promise((resolve) => { + resolve_done = resolve + }) + + await bridge.start() + bridge.on('*', (event: { toJSON: () => unknown }) => { + writeFileSync(config.output_path, JSON.stringify(event.toJSON()), 'utf8') + resolve_done?.() + }) + writeFileSync(config.ready_path, 'ready', 'utf8') + await Promise.race([done, new Promise((_, reject) => setTimeout(() => reject(new Error('worker timeout')), 30000))]) + await bridge.close() +} + +await main() diff --git a/bubus-ts/tests/bridges.test.ts b/bubus-ts/tests/bridges.test.ts new file mode 100644 index 0000000..3ccee2f --- /dev/null +++ b/bubus-ts/tests/bridges.test.ts @@ -0,0 +1,457 @@ +import assert from 'node:assert/strict' +import { spawn, spawnSync, type ChildProcess } from 'node:child_process' +import { existsSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from 'node:fs' +import { createConnection, createServer as createNetServer } from 'node:net' +import { tmpdir } from 'node:os' +import { dirname, join } from 'node:path' +import { fileURLToPath } from 'node:url' +import { test } from 'node:test' + +import { z } from 'zod' + +import { + BaseEvent, + HTTPEventBridge, + JSONLEventBridge, + NATSEventBridge, + PostgresEventBridge, + RedisEventBridge, + SQLiteEventBridge, + SocketEventBridge, +} from '../src/index.js' + +const tests_dir = dirname(fileURLToPath(import.meta.url)) +const TEST_RUN_ID = `${process.pid}-${Date.now().toString(36)}-${Math.random().toString(36).slice(2, 10)}` + +const makeTempDir = (prefix: string): string => mkdtempSync(join(tmpdir(), `${prefix}-${TEST_RUN_ID}-`)) + +const IPCPingEvent = BaseEvent.extend('IPCPingEvent', { + label: z.string(), +}) + +const 
getFreePort = async (): Promise => + await new Promise((resolve, reject) => { + const server = createNetServer() + server.once('error', reject) + server.listen(0, '127.0.0.1', () => { + const address = server.address() + if (!address || typeof address === 'string') { + server.close(() => reject(new Error('failed to allocate test port'))) + return + } + server.close(() => resolve(address.port)) + }) + }) + +const sleep = async (ms: number): Promise => await new Promise((resolve) => setTimeout(resolve, ms)) + +const importDynamicModule = async (module_name: string): Promise => { + const dynamic_import = Function('module_name', 'return import(module_name)') as (module_name: string) => Promise + return dynamic_import(module_name) as Promise +} + +const canonical = (payload: Record): Record => { + const normalized: Record = {} + for (const [key, value] of Object.entries(payload)) { + if (key.endsWith('_at') && typeof value === 'string') { + const ts = Date.parse(value) + if (!Number.isNaN(ts)) { + normalized[key] = ts + continue + } + } + normalized[key] = value + } + return normalized +} + +const normalizeRoundtripPayload = (payload: Record): Record => { + const normalized = canonical(payload) + const dynamic_keys = [ + 'event_id', + 'event_path', + 'event_result_type', + 'event_results', + 'event_pending_bus_count', + 'event_status', + 'event_started_at', + 'event_completed_at', + 'event_timeout', + 'event_handler_completion', + 'event_handler_concurrency', + 'event_handler_slow_timeout', + 'event_handler_timeout', + 'event_parent_id', + 'event_emitted_by_handler_id', + 'event_concurrency', + ] + for (const key of dynamic_keys) { + delete normalized[key] + } + for (const [key, value] of Object.entries(normalized)) { + if (value === undefined) { + delete normalized[key] + } + } + return normalized +} + +const waitForPort = async (port: number, timeout_ms = 30000): Promise => { + const started = Date.now() + while (Date.now() - started < timeout_ms) { + const ok = await 
new Promise((resolve) => { + const socket = createConnection({ host: '127.0.0.1', port }, () => { + socket.end() + resolve(true) + }) + socket.once('error', () => resolve(false)) + }) + if (ok) return + await sleep(50) + } + throw new Error(`port did not open in time: ${port}`) +} + +const waitForPath = async ( + path: string, + worker: ChildProcess, + stdout_log: { value: string }, + stderr_log: { value: string }, + timeout_ms = 30000 +): Promise => { + const started = Date.now() + while (Date.now() - started < timeout_ms) { + if (existsSync(path)) return + if (worker.exitCode !== null) { + throw new Error(`worker exited early (${worker.exitCode})\nstdout:\n${stdout_log.value}\nstderr:\n${stderr_log.value}`) + } + await sleep(50) + } + throw new Error(`path did not appear in time: ${path}`) +} + +const stopProcess = async (proc: ChildProcess): Promise => { + if (proc.exitCode !== null) return + proc.kill('SIGTERM') + await sleep(250) + if (proc.exitCode === null) { + proc.kill('SIGKILL') + await sleep(250) + } +} + +const runChecked = (cmd: string, args: string[], cwd?: string): void => { + const result = spawnSync(cmd, args, { cwd, encoding: 'utf8' }) + assert.equal(result.status, 0, `${cmd} failed\nstdout:\n${result.stdout ?? ''}\nstderr:\n${result.stderr ?? ''}`) +} + +const makeSenderBridge = (kind: string, config: Record, low_latency: boolean = false): any => { + if (kind === 'http') return new HTTPEventBridge({ send_to: config.endpoint }) + if (kind === 'socket') return new SocketEventBridge(config.path) + if (kind === 'jsonl') return new JSONLEventBridge(config.path, low_latency ? 0.001 : 0.05) + if (kind === 'sqlite') return new SQLiteEventBridge(config.path, config.table, low_latency ? 
0.001 : 0.05) + if (kind === 'redis') return new RedisEventBridge(config.url) + if (kind === 'nats') return new NATSEventBridge(config.server, config.subject) + if (kind === 'postgres') return new PostgresEventBridge(config.url) + throw new Error(`unsupported bridge kind: ${kind}`) +} + +const makeListenerBridge = (kind: string, config: Record, low_latency: boolean = false): any => { + if (kind === 'http') return new HTTPEventBridge({ listen_on: config.endpoint }) + if (kind === 'socket') return new SocketEventBridge(config.path) + if (kind === 'jsonl') return new JSONLEventBridge(config.path, low_latency ? 0.001 : 0.05) + if (kind === 'sqlite') return new SQLiteEventBridge(config.path, config.table, low_latency ? 0.001 : 0.05) + if (kind === 'redis') return new RedisEventBridge(config.url) + if (kind === 'nats') return new NATSEventBridge(config.server, config.subject) + if (kind === 'postgres') return new PostgresEventBridge(config.url) + throw new Error(`unsupported bridge kind: ${kind}`) +} + +const waitForEvent = async (event: Promise, timeout_ms: number): Promise => { + let timer: ReturnType | null = null + try { + await Promise.race([ + event, + new Promise((_, reject) => { + timer = setTimeout(() => reject(new Error(`timed out waiting for bridge event after ${timeout_ms}ms`)), timeout_ms) + }), + ]) + } finally { + if (timer) clearTimeout(timer) + } +} + +const measureWarmLatencyMs = async (kind: string, config: Record): Promise => { + const attempts = 3 + let last_error: unknown + + for (let attempt = 0; attempt < attempts; attempt += 1) { + const sender = makeSenderBridge(kind, config, true) + const receiver = makeListenerBridge(kind, config, true) + + const run_suffix = Math.random().toString(36).slice(2, 10) + const warmup_prefix = `warmup_${run_suffix}_` + const measured_prefix = `measured_${run_suffix}_` + const warmup_count_target = 5 + const measured_count_target = 1000 + + let warmup_seen_count = 0 + let measured_seen_count = 0 + let 
warmup_resolve: (() => void) | null = null + let measured_resolve: (() => void) | null = null + const warmup_seen = new Promise((resolve) => { + warmup_resolve = resolve + }) + const measured_seen = new Promise((resolve) => { + measured_resolve = resolve + }) + + const onEvent = (event: { label?: unknown }): void => { + const label = typeof event.label === 'string' ? event.label : '' + if (label.startsWith(warmup_prefix)) { + warmup_seen_count += 1 + if (warmup_seen_count >= warmup_count_target) { + warmup_resolve?.() + warmup_resolve = null + } + return + } + if (label.startsWith(measured_prefix)) { + measured_seen_count += 1 + if (measured_seen_count >= measured_count_target) { + measured_resolve?.() + measured_resolve = null + } + } + } + + const emitBatch = async (prefix: string, count: number): Promise => { + for (let i = 0; i < count; i += 1) { + await sender.emit(IPCPingEvent({ label: `${prefix}${i}` })) + } + } + + try { + await sender.start() + await receiver.start() + receiver.on('IPCPingEvent', onEvent) + await sleep(100) + + await emitBatch(warmup_prefix, warmup_count_target) + await waitForEvent(warmup_seen, 60000) + + const start_ms = performance.now() + await emitBatch(measured_prefix, measured_count_target) + await waitForEvent(measured_seen, 120000) + return (performance.now() - start_ms) / measured_count_target + } catch (error: unknown) { + last_error = error + } finally { + await sender.close() + await receiver.close() + } + + await sleep(200) + } + + throw new Error(`bridge latency measurement timed out after ${attempts} attempts: ${kind} (${String(last_error)})`) +} + +const assertRoundtrip = async (kind: string, config: Record): Promise => { + const temp_dir = makeTempDir(`bubus-bridge-${kind}`) + const ready_path = join(temp_dir, 'worker.ready') + const output_path = join(temp_dir, 'received.json') + const config_path = join(temp_dir, 'worker_config.json') + const worker_payload = { + ...config, + kind, + ready_path, + output_path, + } + 
writeFileSync(config_path, JSON.stringify(worker_payload), 'utf8') + + const sender = makeSenderBridge(kind, config) + + const worker = spawn(process.execPath, ['--import', 'tsx', join(tests_dir, 'bridge_listener_worker.ts'), config_path], { + cwd: tests_dir, + stdio: ['ignore', 'pipe', 'pipe'], + }) + const worker_stdout = { value: '' } + const worker_stderr = { value: '' } + worker.stdout?.on('data', (chunk) => { + worker_stdout.value += String(chunk) + }) + worker.stderr?.on('data', (chunk) => { + worker_stderr.value += String(chunk) + }) + + try { + await waitForPath(ready_path, worker, worker_stdout, worker_stderr) + if (kind === 'postgres') { + await sender.start() + } + const outbound = IPCPingEvent({ label: `${kind}_ok` }) + await sender.emit(outbound) + await waitForPath(output_path, worker, worker_stdout, worker_stderr) + const received_payload = JSON.parse(readFileSync(output_path, 'utf8')) as Record + assert.deepEqual(normalizeRoundtripPayload(received_payload), normalizeRoundtripPayload(outbound.toJSON() as Record)) + } finally { + await sender.close() + await stopProcess(worker) + rmSync(temp_dir, { recursive: true, force: true }) + } +} + +test('HTTPEventBridge roundtrip between processes', async () => { + const endpoint = `http://127.0.0.1:${await getFreePort()}/events` + await assertRoundtrip('http', { endpoint }) + const latency_ms = await measureWarmLatencyMs('http', { endpoint }) + console.log(`LATENCY ts http ${latency_ms.toFixed(3)}ms`) +}) + +test('SocketEventBridge roundtrip between processes', async () => { + const socket_path = `/tmp/bb-${TEST_RUN_ID}-${Math.random().toString(16).slice(2)}.sock` + await assertRoundtrip('socket', { path: socket_path }) + const latency_ms = await measureWarmLatencyMs('socket', { path: socket_path }) + console.log(`LATENCY ts socket ${latency_ms.toFixed(3)}ms`) +}) + +test('SocketEventBridge rejects long socket paths', async () => { + const long_path = `/tmp/${'a'.repeat(100)}.sock` + assert.throws(() => { + 
new SocketEventBridge(long_path) + }) +}) + +test('JSONLEventBridge roundtrip between processes', async () => { + const temp_dir = makeTempDir('bubus-jsonl') + try { + const config = { path: join(temp_dir, 'events.jsonl') } + await assertRoundtrip('jsonl', config) + const latency_ms = await measureWarmLatencyMs('jsonl', config) + console.log(`LATENCY ts jsonl ${latency_ms.toFixed(3)}ms`) + } finally { + rmSync(temp_dir, { recursive: true, force: true }) + } +}) + +test('SQLiteEventBridge roundtrip between processes', async () => { + const temp_dir = makeTempDir('bubus-sqlite') + try { + const sqlite_path = join(temp_dir, 'events.sqlite3') + const config = { path: sqlite_path, table: 'bubus_events' } + await assertRoundtrip('sqlite', config) + + const sqlite_mod = await importDynamicModule('node:sqlite') + const Database = sqlite_mod.DatabaseSync ?? sqlite_mod.default?.DatabaseSync + assert.equal(typeof Database, 'function', 'expected DatabaseSync from node:sqlite') + const db = new Database(sqlite_path) + try { + const columns = new Set( + (db.prepare('PRAGMA table_info("bubus_events")').all() as Array<{ name: string }>).map((row) => String(row.name)) + ) + assert.ok(columns.has('event_payload')) + assert.ok(!columns.has('label')) + for (const column of columns) { + assert.ok(column === 'event_payload' || column.startsWith('event_')) + } + + const row = db.prepare('SELECT event_payload FROM "bubus_events" ORDER BY COALESCE("event_created_at", \'\') DESC LIMIT 1').get() as + | { event_payload?: string } + | undefined + assert.ok(row?.event_payload, 'expected event_payload row') + const payload = JSON.parse(String(row.event_payload)) as Record + assert.equal(payload.label, 'sqlite_ok') + } finally { + db.close() + } + + const latency_ms = await measureWarmLatencyMs('sqlite', config) + console.log(`LATENCY ts sqlite ${latency_ms.toFixed(3)}ms`) + } finally { + rmSync(temp_dir, { recursive: true, force: true }) + } +}) + +test('RedisEventBridge roundtrip between 
processes', async () => { + const temp_dir = makeTempDir('bubus-redis') + const port = await getFreePort() + const redis = spawn( + 'redis-server', + ['--save', '', '--appendonly', 'no', '--bind', '127.0.0.1', '--port', String(port), '--dir', temp_dir], + { stdio: ['ignore', 'pipe', 'pipe'] } + ) + try { + await waitForPort(port) + const config = { url: `redis://127.0.0.1:${port}/1/bubus_events` } + await assertRoundtrip('redis', config) + const latency_ms = await measureWarmLatencyMs('redis', config) + console.log(`LATENCY ts redis ${latency_ms.toFixed(3)}ms`) + } finally { + await stopProcess(redis) + rmSync(temp_dir, { recursive: true, force: true }) + } +}) + +test('NATSEventBridge roundtrip between processes', async () => { + const port = await getFreePort() + const nats = spawn('nats-server', ['-a', '127.0.0.1', '-p', String(port)], { stdio: ['ignore', 'pipe', 'pipe'] }) + try { + await waitForPort(port) + const config = { server: `nats://127.0.0.1:${port}`, subject: 'bubus_events' } + await assertRoundtrip('nats', config) + const latency_ms = await measureWarmLatencyMs('nats', config) + console.log(`LATENCY ts nats ${latency_ms.toFixed(3)}ms`) + } finally { + await stopProcess(nats) + } +}) + +test('PostgresEventBridge roundtrip between processes', async () => { + const temp_dir = makeTempDir('bubus-postgres') + const data_dir = join(temp_dir, 'pgdata') + runChecked('initdb', ['-D', data_dir, '-A', 'trust', '-U', 'postgres']) + const port = await getFreePort() + const postgres = spawn('postgres', ['-D', data_dir, '-h', '127.0.0.1', '-p', String(port), '-k', '/tmp'], { + stdio: ['ignore', 'pipe', 'pipe'], + }) + try { + await waitForPort(port) + const config = { url: `postgresql://postgres@127.0.0.1:${port}/postgres/bubus_events` } + await assertRoundtrip('postgres', config) + + const pg_mod = await importDynamicModule('pg') + const Client = pg_mod.Client ?? 
pg_mod.default?.Client + assert.equal(typeof Client, 'function', 'expected pg Client') + const client = new Client({ connectionString: `postgresql://postgres@127.0.0.1:${port}/postgres` }) + await client.connect() + try { + const columns_result = await client.query( + `SELECT column_name FROM information_schema.columns WHERE table_schema = 'public' AND table_name = $1`, + ['bubus_events'] + ) + const columns = new Set((columns_result.rows as Array<{ column_name: string }>).map((row) => String(row.column_name))) + assert.ok(columns.has('event_payload')) + assert.ok(!columns.has('label')) + for (const column of columns) { + assert.ok(column === 'event_payload' || column.startsWith('event_')) + } + + const payload_result = await client.query( + `SELECT event_payload FROM "bubus_events" ORDER BY COALESCE("event_created_at", '') DESC LIMIT 1` + ) + const payload_raw = payload_result.rows?.[0]?.event_payload + assert.equal(typeof payload_raw, 'string') + const payload = JSON.parse(payload_raw) as Record + assert.equal(payload.label, 'postgres_ok') + } finally { + await client.end() + } + + const latency_ms = await measureWarmLatencyMs('postgres', config) + console.log(`LATENCY ts postgres ${latency_ms.toFixed(3)}ms`) + } finally { + await stopProcess(postgres) + rmSync(temp_dir, { recursive: true, force: true }) + } +}) diff --git a/bubus-ts/tests/comprehensive_patterns.test.ts b/bubus-ts/tests/comprehensive_patterns.test.ts new file mode 100644 index 0000000..19d4d29 --- /dev/null +++ b/bubus-ts/tests/comprehensive_patterns.test.ts @@ -0,0 +1,1317 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus, retry } from '../src/index.js' +import { z } from 'zod' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ImmediateChildEvent = BaseEvent.extend('ImmediateChildEvent', {}) +const QueuedChildEvent = BaseEvent.extend('QueuedChildEvent', {}) + +const delay = (ms: number): Promise => + new 
Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('comprehensive patterns: forwarding, async/sync dispatch, parent tracking', async () => { + const bus_1 = new EventBus('bus1') + const bus_2 = new EventBus('bus2') + + const results: Array<[number, string]> = [] + const execution_counter = { count: 0 } + + const child_bus2_event_handler = (event: BaseEvent): string => { + execution_counter.count += 1 + const seq = execution_counter.count + const event_type_short = event.event_type.replace(/Event$/, '') + results.push([seq, `bus2_handler_${event_type_short}`]) + return 'forwarded bus result' + } + + bus_2.on('*', child_bus2_event_handler) + bus_1.on('*', bus_2.emit) + + const parent_bus1_handler = async (event: BaseEvent): Promise => { + execution_counter.count += 1 + const seq = execution_counter.count + results.push([seq, 'parent_start']) + + const child_event_async = event.bus?.emit(QueuedChildEvent({}))! + assert.notEqual(child_event_async.event_status, 'completed') + + const child_event_sync = await event.bus?.emit(ImmediateChildEvent({})).done()! 
+ assert.equal(child_event_sync.event_status, 'completed') + + assert.ok(child_event_sync.event_path.includes(bus_2.label)) + assert.ok(Array.from(child_event_sync.event_results.values()).some((result) => result.handler_name.includes('emit'))) + + assert.equal(child_event_async.event_parent_id, event.event_id) + assert.equal(child_event_sync.event_parent_id, event.event_id) + + execution_counter.count += 1 + const end_seq = execution_counter.count + results.push([end_seq, 'parent_end']) + return 'parent_done' + } + + bus_1.on(ParentEvent, parent_bus1_handler) + + const parent_event = bus_1.emit(ParentEvent({})) + await parent_event.done() + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() + + const event_children = Array.from(bus_1.event_history.values()).filter( + (event) => event.event_type === 'ImmediateChildEvent' || event.event_type === 'QueuedChildEvent' + ) + assert.ok(event_children.length > 0) + assert.ok(event_children.every((event) => event.event_parent_id === parent_event.event_id)) + + const sorted_results = results.slice().sort((a, b) => a[0] - b[0]) + const execution_order = sorted_results.map((item) => item[1]) + + assert.equal(execution_order[0], 'parent_start') + assert.ok(execution_order.includes('bus2_handler_ImmediateChild')) + + if (execution_order.includes('parent_end')) { + const parent_end_idx = execution_order.indexOf('parent_end') + assert.ok(parent_end_idx > 1) + } + + assert.equal(execution_order.filter((value) => value === 'bus2_handler_ImmediateChild').length, 1) + assert.equal(execution_order.filter((value) => value === 'bus2_handler_QueuedChild').length, 1) + assert.equal(execution_order.filter((value) => value === 'bus2_handler_Parent').length, 1) +}) + +test('race condition stress', async () => { + const bus_1 = new EventBus('bus1') + const bus_2 = new EventBus('bus2') + const RootEvent = BaseEvent.extend('RootEvent', {}) + + const results: string[] = [] + + const child_handler = async (event: BaseEvent): Promise => { + 
const bus_label = event.event_path[event.event_path.length - 1] ?? 'unknown' + results.push(`child_${bus_label}`) + await delay(1) + return `child_done_${bus_label}` + } + + const parent_handler = async (event: BaseEvent): Promise => { + const children: BaseEvent[] = [] + + for (let i = 0; i < 3; i += 1) { + children.push(event.bus?.emit(QueuedChildEvent({}))!) + } + + for (let i = 0; i < 3; i += 1) { + const child = await event.bus?.emit(ImmediateChildEvent({})).done()! + assert.equal(child.event_status, 'completed') + children.push(child) + } + + assert.ok(children.every((child) => child.event_parent_id === event.event_id)) + return 'parent_done' + } + + const bad_handler = (_bad: BaseEvent): void => {} + + bus_1.on('*', bus_2.emit) + bus_1.on(QueuedChildEvent, child_handler) + bus_1.on(ImmediateChildEvent, child_handler) + bus_2.on(QueuedChildEvent, child_handler) + bus_2.on(ImmediateChildEvent, child_handler) + bus_1.on(RootEvent, parent_handler) + bus_1.on(RootEvent, bad_handler) + + for (let run = 0; run < 5; run += 1) { + results.length = 0 + + const event = bus_1.emit(RootEvent({})) + await event.done() + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() + + assert.equal( + results.filter((value) => value === `child_${bus_1.label}`).length, + 6, + `Run ${run}: Expected 6 child_${bus_1.label}, got ${results.filter((value) => value === `child_${bus_1.label}`).length}` + ) + assert.equal( + results.filter((value) => value === `child_${bus_2.label}`).length, + 6, + `Run ${run}: Expected 6 child_${bus_2.label}, got ${results.filter((value) => value === `child_${bus_2.label}`).length}` + ) + } +}) + +test('awaited child jumps queue without overshoot', async () => { + const bus = new EventBus('TestBus', { max_history_size: 100 }) + const execution_order: string[] = [] + const debug_order: Array<{ label: string; at: string }> = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = 
BaseEvent.extend('Event3', {}) + const LocalChildEvent = BaseEvent.extend('ChildEvent', {}) + + const event1_handler = async (_event: BaseEvent): Promise => { + execution_order.push('Event1_start') + debug_order.push({ label: 'Event1_start', at: new Date().toISOString() }) + const child = _event.bus?.emit(LocalChildEvent({}))! + execution_order.push('Child_dispatched') + debug_order.push({ label: 'Child_dispatched', at: new Date().toISOString() }) + await child.done() + execution_order.push('Child_await_returned') + debug_order.push({ label: 'Child_await_returned', at: new Date().toISOString() }) + execution_order.push('Event1_end') + debug_order.push({ label: 'Event1_end', at: new Date().toISOString() }) + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + debug_order.push({ label: 'Event2_start', at: new Date().toISOString() }) + execution_order.push('Event2_end') + debug_order.push({ label: 'Event2_end', at: new Date().toISOString() }) + return 'event2_done' + } + + const event3_handler = async (): Promise => { + execution_order.push('Event3_start') + debug_order.push({ label: 'Event3_start', at: new Date().toISOString() }) + execution_order.push('Event3_end') + debug_order.push({ label: 'Event3_end', at: new Date().toISOString() }) + return 'event3_done' + } + + const child_handler = async (): Promise => { + execution_order.push('Child_start') + debug_order.push({ label: 'Child_start', at: new Date().toISOString() }) + execution_order.push('Child_end') + debug_order.push({ label: 'Child_end', at: new Date().toISOString() }) + return 'child_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(LocalChildEvent, child_handler) + + const event_1 = bus.emit(Event1({})) + const event_2 = bus.emit(Event2({})) + const event_3 = bus.emit(Event3({})) + + // Wait for everything to complete + await event_1.done() + await bus.waitUntilIdle() + + 
// Core assertion: child jumped the queue and ran DURING Event1's handler + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) + const child_start_idx = execution_order.indexOf('Child_start') + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_start_idx < event1_end_idx, 'child must start before Event1 handler returns') + assert.ok(child_end_idx < event1_end_idx, 'child must end before Event1 handler returns') + + // No overshoot: Event2 and Event3 must only start AFTER Event1's handler fully completes. + // In JS, the microtask-based runloop processes them after Event1 completes (so they may + // already be done by this point), but the key guarantee is ordering, not timing. + const event2_start_idx = execution_order.indexOf('Event2_start') + const event3_start_idx = execution_order.indexOf('Event3_start') + assert.ok(event2_start_idx > event1_end_idx, 'Event2 must not start until Event1 handler returns') + assert.ok(event3_start_idx > event1_end_idx, 'Event3 must not start until Event1 handler returns') + + // FIFO preserved among queued events + assert.ok(event2_start_idx < event3_start_idx, 'Event2 must start before Event3 (FIFO)') + + // All events completed + assert.equal(event_1.event_status, 'completed') + assert.equal(event_2.event_status, 'completed') + assert.equal(event_3.event_status, 'completed') + + // Timestamp ordering confirms the same + const history_list = Array.from(bus.event_history.values()) + const child_event = history_list.find((event) => event.event_type === 'ChildEvent') + const event2_from_history = history_list.find((event) => event.event_type === 'Event2') + const event3_from_history = history_list.find((event) => event.event_type === 'Event3') + + assert.ok(child_event?.event_started_at) + assert.ok(event2_from_history?.event_started_at) + assert.ok(event3_from_history?.event_started_at) + + 
assert.ok(child_event!.event_started_at! <= event2_from_history!.event_started_at!) + assert.ok(child_event!.event_started_at! <= event3_from_history!.event_started_at!) +}) + +test('done() on non-proxied event keeps bus paused during queue-jump', async () => { + const bus = new EventBus('RawDoneBus', { max_history_size: 100 }) + const Event1 = BaseEvent.extend('Event1', {}) + const ChildEvent = BaseEvent.extend('RawChild', {}) + + let paused_after_done = false + + bus.on(ChildEvent, () => {}) + + bus.on(Event1, async (_event) => { + // Dispatch child via the raw bus (not the proxied event.bus) + const child = bus.emit(ChildEvent({})) + // Get the raw (non-proxied) event + const raw_child = child._event_original ?? child + // done() on raw event bypasses handler_result injection from proxy + await raw_child.done() + // After done() returns, bus should still be paused because + // we're still inside a handler doing queue-jump processing + // NOTE: Test-only exception to our no-private-access rule: + // we intentionally read internal pause state to verify this transient invariant. + paused_after_done = bus.locks._isPaused() + }) + + bus.emit(Event1({})) + await bus.waitUntilIdle() + + assert.equal(paused_after_done, true, 'bus should be paused after raw done() but before handler returns') +}) + +test('bus pause state clears after queue-jump completes', async () => { + const bus = new EventBus('DepthBalanceBus', { max_history_size: 100 }) + const Event1 = BaseEvent.extend('DepthEvent1', {}) + const ChildA = BaseEvent.extend('DepthChildA', {}) + const ChildB = BaseEvent.extend('DepthChildB', {}) + + let paused_during_handler = false + let paused_between_dones = false + let paused_after_second_done = false + + bus.on(ChildA, () => {}) + bus.on(ChildB, () => {}) + + bus.on(Event1, async (event) => { + // NOTE: Test-only exception to our no-private-access rule: + // we intentionally read internal pause state to verify transient pause-depth behavior. 
+ // First queue-jump + const child_a = event.bus?.emit(ChildA({}))! + await child_a.done() + paused_during_handler = bus.locks._isPaused() + + // Second queue-jump β€” bus should remain paused across both awaits. + const child_b = event.bus?.emit(ChildB({}))! + paused_between_dones = bus.locks._isPaused() + await child_b.done() + paused_after_second_done = bus.locks._isPaused() + }) + + bus.emit(Event1({})) + await bus.waitUntilIdle() + + // During handler, pause should still be held. + assert.equal(paused_during_handler, true, 'bus should remain paused after first done()') + + // Between done() calls, pause should still be held. + assert.equal(paused_between_dones, true, 'bus should remain paused between done() calls') + + // After second done(), pause is still held until handler returns. + assert.equal(paused_after_second_done, true, 'bus should remain paused after second done()') + + // After handler finishes and bus is idle, pause must be released. + // Intentional private-access exception for this test; verifies pause release at steady state. + assert.equal(bus.locks._isPaused(), false, 'bus should no longer be paused after handler completes') +}) + +test('isInsideHandler() is per-bus, not global', async () => { + const bus_a = new EventBus('InsideHandlerA', { max_history_size: 100 }) + const bus_b = new EventBus('InsideHandlerB', { max_history_size: 100 }) + + const EventA = BaseEvent.extend('InsideHandlerEventA', {}) + const EventB = BaseEvent.extend('InsideHandlerEventB', {}) + + let bus_a_inside_during_a_handler = false + let bus_b_inside_during_a_handler = false + let bus_a_inside_during_b_handler = false + let bus_b_inside_during_b_handler = false + + bus_a.on(EventA, () => { + // NOTE: Test-only exception to our no-private-access rule: + // we intentionally read internal handler-stack state for cross-bus invariants. 
+ bus_a_inside_during_a_handler = bus_a.locks._isAnyHandlerActive() + bus_b_inside_during_a_handler = bus_b.locks._isAnyHandlerActive() + }) + + bus_b.on(EventB, () => { + bus_a_inside_during_b_handler = bus_a.locks._isAnyHandlerActive() + bus_b_inside_during_b_handler = bus_b.locks._isAnyHandlerActive() + }) + + // Dispatch to bus_a first, wait for completion so bus_b has no active handlers + await bus_a.emit(EventA({})).done() + await bus_a.waitUntilIdle() + + // Then dispatch to bus_b so bus_a has no active handlers + await bus_b.emit(EventB({})).done() + await bus_b.waitUntilIdle() + + // During bus_a's handler: bus_a should report inside, bus_b should not + assert.equal(bus_a_inside_during_a_handler, true, 'bus_a.locks._isAnyHandlerActive() should be true during bus_a handler') + assert.equal(bus_b_inside_during_a_handler, false, 'bus_b.locks._isAnyHandlerActive() should be false during bus_a handler') + + // During bus_b's handler: bus_b should report inside, bus_a should not + assert.equal(bus_b_inside_during_b_handler, true, 'bus_b.locks._isAnyHandlerActive() should be true during bus_b handler') + assert.equal(bus_a_inside_during_b_handler, false, 'bus_a.locks._isAnyHandlerActive() should be false during bus_b handler') + + // After all handlers complete, neither bus should report inside + // Intentional private-access exception for this test; verifies post-idle handler-stack teardown. 
+ assert.equal(bus_a.locks._isAnyHandlerActive(), false, 'bus_a.locks._isAnyHandlerActive() should be false after idle') + assert.equal(bus_b.locks._isAnyHandlerActive(), false, 'bus_b.locks._isAnyHandlerActive() should be false after idle') +}) + +test('dispatch multiple, await one skips others until after handler completes', async () => { + const bus = new EventBus('MultiDispatchBus', { max_history_size: 100 }) + const execution_order: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = BaseEvent.extend('Event3', {}) + const ChildA = BaseEvent.extend('ChildA', {}) + const ChildB = BaseEvent.extend('ChildB', {}) + const ChildC = BaseEvent.extend('ChildC', {}) + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Event1_start') + + event.bus?.emit(ChildA({})) + execution_order.push('ChildA_dispatched') + + const child_b = event.bus?.emit(ChildB({}))! + execution_order.push('ChildB_dispatched') + + event.bus?.emit(ChildC({})) + execution_order.push('ChildC_dispatched') + + await child_b.done() + execution_order.push('ChildB_await_returned') + + execution_order.push('Event1_end') + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } + + const event3_handler = async (): Promise => { + execution_order.push('Event3_start') + execution_order.push('Event3_end') + return 'event3_done' + } + + const child_a_handler = async (): Promise => { + execution_order.push('ChildA_start') + execution_order.push('ChildA_end') + return 'child_a_done' + } + + const child_b_handler = async (): Promise => { + execution_order.push('ChildB_start') + execution_order.push('ChildB_end') + return 'child_b_done' + } + + const child_c_handler = async (): Promise => { + execution_order.push('ChildC_start') + execution_order.push('ChildC_end') + return 
'child_c_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildA, child_a_handler) + bus.on(ChildB, child_b_handler) + bus.on(ChildC, child_c_handler) + + const event_1 = bus.emit(Event1({})) + bus.emit(Event2({})) + bus.emit(Event3({})) + + await event_1.done() + + assert.ok(execution_order.includes('ChildB_start')) + assert.ok(execution_order.includes('ChildB_end')) + + const child_b_end_idx = execution_order.indexOf('ChildB_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_b_end_idx < event1_end_idx) + + if (execution_order.includes('ChildA_start')) { + const child_a_start_idx = execution_order.indexOf('ChildA_start') + assert.ok(child_a_start_idx > event1_end_idx) + } + if (execution_order.includes('ChildC_start')) { + const child_c_start_idx = execution_order.indexOf('ChildC_start') + assert.ok(child_c_start_idx > event1_end_idx) + } + if (execution_order.includes('Event2_start')) { + const event2_start_idx = execution_order.indexOf('Event2_start') + assert.ok(event2_start_idx > event1_end_idx) + } + if (execution_order.includes('Event3_start')) { + const event3_start_idx = execution_order.indexOf('Event3_start') + assert.ok(event3_start_idx > event1_end_idx) + } + + await bus.waitUntilIdle() + + const event2_start_idx = execution_order.indexOf('Event2_start') + const event3_start_idx = execution_order.indexOf('Event3_start') + const child_a_start_idx = execution_order.indexOf('ChildA_start') + const child_c_start_idx = execution_order.indexOf('ChildC_start') + + assert.ok(event2_start_idx < event3_start_idx) + assert.ok(event3_start_idx < child_a_start_idx) + assert.ok(child_a_start_idx < child_c_start_idx) +}) + +test('multi-bus queues are independent when awaiting child', async () => { + const bus_1 = new EventBus('Bus1', { max_history_size: 100 }) + const bus_2 = new EventBus('Bus2', { max_history_size: 100 }) + const execution_order: string[] = [] + 
+ const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Event3 = BaseEvent.extend('Event3', {}) + const Event4 = BaseEvent.extend('Event4', {}) + const LocalChildEvent = BaseEvent.extend('ChildEvent', {}) + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Bus1_Event1_start') + const child = event.bus?.emit(LocalChildEvent({}))! + execution_order.push('Child_dispatched_to_Bus1') + await child.done() + execution_order.push('Child_await_returned') + execution_order.push('Bus1_Event1_end') + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Bus1_Event2_start') + execution_order.push('Bus1_Event2_end') + return 'event2_done' + } + + const event3_handler = async (): Promise => { + execution_order.push('Bus2_Event3_start') + execution_order.push('Bus2_Event3_end') + return 'event3_done' + } + + const event4_handler = async (): Promise => { + execution_order.push('Bus2_Event4_start') + execution_order.push('Bus2_Event4_end') + return 'event4_done' + } + + const child_handler = async (): Promise => { + execution_order.push('Child_start') + execution_order.push('Child_end') + return 'child_done' + } + + bus_1.on(Event1, event1_handler) + bus_1.on(Event2, event2_handler) + bus_1.on(LocalChildEvent, child_handler) + + bus_2.on(Event3, event3_handler) + bus_2.on(Event4, event4_handler) + + const event_1 = bus_1.emit(Event1({})) + bus_1.emit(Event2({})) + bus_2.emit(Event3({})) + bus_2.emit(Event4({})) + + await delay(0) + + await event_1.done() + + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) + + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Bus1_Event1_end') + assert.ok(child_end_idx < event1_end_idx) + + const bus1_event2_start_idx = execution_order.indexOf('Bus1_Event2_start') + if (bus1_event2_start_idx !== -1) { + 
assert.ok(bus1_event2_start_idx > event1_end_idx) + } + + const bus2_event3_start_idx = execution_order.indexOf('Bus2_Event3_start') + const bus2_event4_start_idx = execution_order.indexOf('Bus2_Event4_start') + assert.ok(bus2_event3_start_idx !== -1 || bus2_event4_start_idx !== -1) + const bus2_start_idx = + bus2_event3_start_idx === -1 + ? bus2_event4_start_idx + : bus2_event4_start_idx === -1 + ? bus2_event3_start_idx + : Math.min(bus2_event3_start_idx, bus2_event4_start_idx) + assert.ok(bus2_start_idx < event1_end_idx) + + await bus_1.waitUntilIdle() + await bus_2.waitUntilIdle() + + assert.ok(execution_order.includes('Bus1_Event2_start')) + assert.ok(execution_order.includes('Bus2_Event3_start')) + assert.ok(execution_order.includes('Bus2_Event4_start')) +}) + +test('awaiting an already completed event is a no-op', async () => { + const bus = new EventBus('AlreadyCompletedBus', { max_history_size: 100 }) + const execution_order: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + + const event1_handler = async (): Promise => { + execution_order.push('Event1_start') + execution_order.push('Event1_end') + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + + const event_1 = await bus.emit(Event1({})).done() + assert.equal(event_1.event_status, 'completed') + + const event_2 = bus.emit(Event2({})) + + await event_1.done() + + assert.equal(event_2.event_status, 'pending') + + await bus.waitUntilIdle() +}) + +test('multiple awaits on same event', async () => { + const bus = new EventBus('MultiAwaitBus', { max_history_size: 100 }) + const execution_order: string[] = [] + const await_results: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const 
LocalChildEvent = BaseEvent.extend('ChildEvent', {}) + + const event1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Event1_start') + + const child = event.bus?.emit(LocalChildEvent({}))! + + const await_child = async (name: string): Promise => { + await child.done() + await_results.push(`${name}_completed`) + } + + await Promise.all([await_child('await1'), await_child('await2')]) + execution_order.push('Both_awaits_completed') + execution_order.push('Event1_end') + return 'event1_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } + + const child_handler = async (): Promise => { + execution_order.push('Child_start') + await delay(10) + execution_order.push('Child_end') + return 'child_done' + } + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(LocalChildEvent, child_handler) + + const event_1 = bus.emit(Event1({})) + bus.emit(Event2({})) + + await event_1.done() + + assert.equal(await_results.length, 2) + assert.ok(await_results.includes('await1_completed')) + assert.ok(await_results.includes('await2_completed')) + + assert.ok(execution_order.includes('Child_start')) + assert.ok(execution_order.includes('Child_end')) + const child_end_idx = execution_order.indexOf('Child_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child_end_idx < event1_end_idx) + + assert.ok(!execution_order.includes('Event2_start')) + + await bus.waitUntilIdle() +}) + +test('deeply nested awaited children', async () => { + const bus = new EventBus('DeepNestedBus', { max_history_size: 100 }) + const execution_order: string[] = [] + + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + const Child1 = BaseEvent.extend('Child1', {}) + const Child2 = BaseEvent.extend('Child2', {}) + + const event1_handler = async (event: BaseEvent): Promise => { + 
execution_order.push('Event1_start') + const child1 = event.bus?.emit(Child1({}))! + await child1.done() + execution_order.push('Event1_end') + return 'event1_done' + } + + const child1_handler = async (event: BaseEvent): Promise => { + execution_order.push('Child1_start') + const child2 = event.bus?.emit(Child2({}))! + await child2.done() + execution_order.push('Child1_end') + return 'child1_done' + } + + const child2_handler = async (): Promise => { + execution_order.push('Child2_start') + execution_order.push('Child2_end') + return 'child2_done' + } + + const event2_handler = async (): Promise => { + execution_order.push('Event2_start') + execution_order.push('Event2_end') + return 'event2_done' + } + + bus.on(Event1, event1_handler) + bus.on(Child1, child1_handler) + bus.on(Child2, child2_handler) + bus.on(Event2, event2_handler) + + const event_1 = bus.emit(Event1({})) + bus.emit(Event2({})) + + await event_1.done() + + assert.ok(execution_order.includes('Child1_start')) + assert.ok(execution_order.includes('Child1_end')) + assert.ok(execution_order.includes('Child2_start')) + assert.ok(execution_order.includes('Child2_end')) + + const child2_end_idx = execution_order.indexOf('Child2_end') + const child1_end_idx = execution_order.indexOf('Child1_end') + const event1_end_idx = execution_order.indexOf('Event1_end') + assert.ok(child2_end_idx < child1_end_idx) + assert.ok(child1_end_idx < event1_end_idx) + + assert.ok(!execution_order.includes('Event2_start')) + + await bus.waitUntilIdle() + + const event2_start_idx = execution_order.indexOf('Event2_start') + assert.ok(event2_start_idx > event1_end_idx) +}) + +// ============================================================================= +// Queue-Jump Concurrency Tests (Two-Bus) +// +// BUG: _processEventImmediately (queue-jump across buses) passes { bypass_handler_locks: true, +// bypass_event_locks: true } for ALL buses. This causes: +// 1. 
Handlers to run in parallel regardless of configured concurrency +// 2. Event locks on remote buses to be skipped +// +// The fix requires "yield-and-reacquire": +// - Before processing the child, temporarily RELEASE the lock the parent +// handler holds (the parent is suspended in `await child.done()` and isn't +// using it). +// - Process the child event NORMALLY β€” handlers acquire/release the real +// lock, serializing among themselves as configured. +// - After the child completes, RE-ACQUIRE the lock for the parent handler +// before it resumes. +// +// For event locks, only bypass on the initiating bus (where the parent holds +// the lock). On other buses, respect their event concurrency β€” bypass only +// if they resolve to the SAME lock instance (i.e. global-serial). +// +// All tests use two buses. The pattern is: +// bus_a: origin bus where TriggerEvent handler dispatches a child +// bus_b: forward bus that also handles the child event +// The trigger handler dispatches the child on bus_a and also to bus_b, +// then awaits child.done(), which queue-jumps the child on both buses. +// ============================================================================= + +test('BUG: queue-jump two-bus serial handlers should serialize on each bus', async () => { + const TriggerEvent = BaseEvent.extend('QJ2BS_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2BS_Child', {}) + + const bus_a = new EventBus('QJ2BS_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('QJ2BS_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + const log: string[] = [] + + // Two handlers per bus. handler_1 is slow (15ms), handler_2 is fast (5ms). + // With serial handlers, handler_1 must finish before handler_2 starts ON EACH BUS. + // With buggy parallel, both start simultaneously and handler_2 finishes first. 
+ const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.emit(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.emit(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // Bus A: handlers must serialize (a1 finishes before a2 starts) + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end >= 0 && a2_start >= 0, 'bus_a handlers should have run') + assert.ok(a1_end < a2_start, `bus_a (serial handlers): a1 should finish before a2 starts. Got: [${log.join(', ')}]`) + + // Bus B: handlers must serialize (b1 finishes before b2 starts) + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end >= 0 && b2_start >= 0, 'bus_b handlers should have run') + assert.ok(b1_end < b2_start, `bus_b (serial handlers): b1 should finish before b2 starts. Got: [${log.join(', ')}]`) +}) + +test('BUG: queue-jump two-bus global handler lock should serialize across both buses', async () => { + const TriggerEvent = BaseEvent.extend('QJ2GS_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2GS_Child', {}) + + // Global retry semaphore means ONE handler at a time GLOBALLY, across all buses. 
+ const bus_a = new EventBus('QJ2GS_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('QJ2GS_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + const log: string[] = [] + + const a_handler_1 = retry({ semaphore_scope: 'global', semaphore_name: 'qj2gs_handler', semaphore_limit: 1 })(async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + }) + const a_handler_2 = retry({ semaphore_scope: 'global', semaphore_name: 'qj2gs_handler', semaphore_limit: 1 })(async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + }) + const b_handler_1 = retry({ semaphore_scope: 'global', semaphore_name: 'qj2gs_handler', semaphore_limit: 1 })(async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + }) + const b_handler_2 = retry({ semaphore_scope: 'global', semaphore_name: 'qj2gs_handler', semaphore_limit: 1 })(async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + }) + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.emit(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.emit(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // With a global retry semaphore, no two handlers should overlap anywhere. 
+ // _processEventImmediately processes buses sequentially (bus_a first, + // then bus_b), so the expected order is strictly serial: + // a1_start, a1_end, a2_start, a2_end, b1_start, b1_end, b2_start, b2_end + // + // With the bug (bypass), all handlers on a bus run in parallel: + // a1_start, a2_start, a2_end, a1_end, b1_start, b2_start, b2_end, b1_end + + // Check: within bus_a, handlers are serial + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end < a2_start, `global lock: a1 should finish before a2 starts. Got: [${log.join(', ')}]`) + + // Check: within bus_b, handlers are serial + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end < b2_start, `global lock: b1 should finish before b2 starts. Got: [${log.join(', ')}]`) + + // Check: bus_a handlers all finish before bus_b handlers start + // (_processEventImmediately processes sequentially and the retry + // semaphore enforces a global handler lock) + const a2_end = log.indexOf('a2_end') + const b1_start = log.indexOf('b1_start') + assert.ok(a2_end < b1_start, `global lock: bus_a should finish before bus_b starts. 
Got: [${log.join(', ')}]`) +}) + +test('BUG: queue-jump two-bus mixed: bus_a serial, bus_b parallel', async () => { + const TriggerEvent = BaseEvent.extend('QJ2Mix1_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2Mix1_Child', {}) + + const bus_a = new EventBus('QJ2Mix1_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('QJ2Mix1_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', // bus_b handlers should run in parallel + }) + + const log: string[] = [] + + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.emit(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.emit(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // Bus A (serial handlers): a1 must finish before a2 starts + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a1_end < a2_start, `bus_a (serial handlers): a1 should finish before a2 starts. Got: [${log.join(', ')}]`) + + // Bus B (parallel): both handlers should start before the slower one finishes. + // b2 (5ms) starts and finishes before b1 (15ms) finishes. 
+ const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b2_start < b1_end, `bus_b (parallel): b2 should start before b1 finishes. Got: [${log.join(', ')}]`) +}) + +test('BUG: queue-jump two-bus mixed: bus_a parallel, bus_b serial', async () => { + const TriggerEvent = BaseEvent.extend('QJ2Mix2_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJ2Mix2_Child', {}) + + const bus_a = new EventBus('QJ2Mix2_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', // bus_a handlers should run in parallel + }) + const bus_b = new EventBus('QJ2Mix2_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + const log: string[] = [] + + const a_handler_1 = async () => { + log.push('a1_start') + await delay(15) + log.push('a1_end') + } + const a_handler_2 = async () => { + log.push('a2_start') + await delay(5) + log.push('a2_end') + } + const b_handler_1 = async () => { + log.push('b1_start') + await delay(15) + log.push('b1_end') + } + const b_handler_2 = async () => { + log.push('b2_start') + await delay(5) + log.push('b2_end') + } + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.emit(child) + await child.done() + }) + bus_a.on(ChildEvent, a_handler_1) + bus_a.on(ChildEvent, a_handler_2) + bus_b.on(ChildEvent, b_handler_1) + bus_b.on(ChildEvent, b_handler_2) + + const top = bus_a.emit(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // Bus A (parallel): handlers should overlap + const a1_end = log.indexOf('a1_end') + const a2_start = log.indexOf('a2_start') + assert.ok(a2_start < a1_end, `bus_a (parallel): a2 should start before a1 finishes. 
Got: [${log.join(', ')}]`) + + // Bus B (serial handlers): b1 must finish before b2 starts + const b1_end = log.indexOf('b1_end') + const b2_start = log.indexOf('b2_start') + assert.ok(b1_end < b2_start, `bus_b (serial handlers): b1 should finish before b2 starts. Got: [${log.join(', ')}]`) +}) + +test('forwarded event uses processing-bus defaults unless explicit overrides are set', async () => { + const TriggerEvent = BaseEvent.extend('QJDefaults_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJDefaults_Child', { + mode: z.enum(['inherited', 'override']), + }) + + const bus_a = new EventBus('QJDefaults_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('QJDefaults_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', + }) + + const log: string[] = [] + + bus_b.on(ChildEvent, async (event: InstanceType) => { + log.push(`${event.mode}:b1_start`) + await delay(15) + log.push(`${event.mode}:b1_end`) + }) + bus_b.on(ChildEvent, async (event: InstanceType) => { + log.push(`${event.mode}:b2_start`) + await delay(5) + log.push(`${event.mode}:b2_end`) + }) + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const inherited = event.bus?.emit(ChildEvent({ mode: 'inherited', event_timeout: null }))! + bus_b.emit(inherited) + await inherited.done() + + const override = event.bus?.emit( + ChildEvent({ + mode: 'override', + event_timeout: null, + event_handler_concurrency: 'serial', + }) + )! + bus_b.emit(override) + await override.done() + }) + + const top = bus_a.emit(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + const inherited_b1_end = log.indexOf('inherited:b1_end') + const inherited_b2_start = log.indexOf('inherited:b2_start') + assert.ok(inherited_b2_start < inherited_b1_end, `inherited defaults should use bus_b parallel concurrency. 
Got: [${log.join(', ')}]`) + + const override_b1_end = log.indexOf('override:b1_end') + const override_b2_start = log.indexOf('override:b2_start') + assert.ok(override_b1_end < override_b2_start, `explicit override should force serial handler concurrency. Got: [${log.join(', ')}]`) +}) + +test('forwarded first-mode uses processing-bus handler concurrency defaults', async () => { + const ForwardedFirstEvent = BaseEvent.extend('ForwardedFirstDefaultsEvent', { + event_result_type: z.string(), + }) + + const bus_a = new EventBus('ForwardedFirstDefaults_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + event_handler_completion: 'all', + }) + const bus_b = new EventBus('ForwardedFirstDefaults_B', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', + event_handler_completion: 'first', + }) + + const log: string[] = [] + bus_a.on('*', bus_b.emit) + + bus_b.on(ForwardedFirstEvent, async () => { + log.push('slow_start') + await delay(20) + log.push('slow_end') + return 'slow' + }) + + bus_b.on(ForwardedFirstEvent, async () => { + log.push('fast_start') + await delay(1) + log.push('fast_end') + return 'fast' + }) + + const result = await bus_a.emit(ForwardedFirstEvent({ event_timeout: null })).first() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + assert.equal(result, 'fast', `first-mode on processing bus should pick fast handler. Got: ${String(result)}. log=[${log.join(', ')}]`) + const slow_start = log.indexOf('slow_start') + const fast_start = log.indexOf('fast_start') + assert.ok(slow_start >= 0, `slow handler should have started. log=[${log.join(', ')}]`) + assert.ok(fast_start >= 0, `fast handler should have started. log=[${log.join(', ')}]`) + assert.ok(slow_start < fast_start, `both handlers should start before first-mode completion resolves. 
log=[${log.join(', ')}]`) +}) + +// ============================================================================= +// Event-level concurrency on the forward bus. +// +// When the forward bus (bus_b) has bus-serial event concurrency and is already +// processing an event, a queue-jumped child should WAIT for bus_b's in-flight +// event to finish. The current code bypasses event locks for ALL buses, +// causing the child to cut in front of the in-flight event. +// +// The fix should only bypass event locks on the INITIATING bus (where the +// parent event holds the lock). On other buses, bypass only if they resolve +// to the SAME lock instance (global-serial shares one global lock). +// ============================================================================= + +test('BUG: queue-jump should respect bus-serial event concurrency on forward bus', async () => { + const TriggerEvent = BaseEvent.extend('QJEvt_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJEvt_Child', {}) + const SlowEvent = BaseEvent.extend('QJEvt_Slow', {}) + + const bus_a = new EventBus('QJEvt_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('QJEvt_B', { + event_concurrency: 'bus-serial', // only one event at a time on bus_b + event_handler_concurrency: 'serial', + }) + + const log: string[] = [] + + // SlowEvent handler: occupies bus_b's event lock for 40ms + bus_b.on(SlowEvent, async () => { + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) + + // ChildEvent handler on bus_b: should only run after SlowEvent finishes + bus_b.on(ChildEvent, async () => { + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) + + // ChildEvent handler on bus_a (so bus_a also processes the child) + bus_a.on(ChildEvent, async () => { + log.push('child_a_start') + await delay(5) + log.push('child_a_end') + }) + + // TriggerEvent handler: dispatches child to both buses, awaits completion + 
bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.emit(child) + await child.done() + }) + + // Step 1: Start a slow event on bus_b so it's busy + bus_b.emit(SlowEvent({ event_timeout: null })) + await delay(5) // let slow_handler start + + // Step 2: Trigger the queue-jump on bus_a + const top = bus_a.emit(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // The child on bus_b should start AFTER the slow event finishes, + // because bus_b has bus-serial event concurrency. + const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(slow_end >= 0, 'slow event should have completed') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') + assert.ok( + slow_end < child_b_start, + `bus_b (bus-serial events): child should wait for slow event to finish. ` + `Got: [${log.join(', ')}]` + ) + + // The child on bus_a should have processed (queue-jumped, bypasses bus_a's event lock) + assert.ok(log.includes('child_a_start'), 'child on bus_a should have run') + assert.ok(log.includes('child_a_end'), 'child on bus_a should have completed') +}) + +test('queue-jump with fully-parallel forward bus starts immediately', async () => { + // When bus_b uses parallel event AND handler concurrency, the queue-jumped + // child should start immediately even while another event's handler is running. 
+ + const TriggerEvent = BaseEvent.extend('QJFullPar_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJFullPar_Child', {}) + const SlowEvent = BaseEvent.extend('QJFullPar_Slow', {}) + + const bus_a = new EventBus('QJFullPar_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('QJFullPar_B', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + const log: string[] = [] + + bus_b.on(SlowEvent, async () => { + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) + + bus_b.on(ChildEvent, async () => { + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.emit(child) + await child.done() + }) + + bus_b.emit(SlowEvent({ event_timeout: null })) + await delay(5) + + const top = bus_a.emit(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') + assert.ok(child_b_start < slow_end, `bus_b (fully parallel): child should start before slow finishes. ` + `Got: [${log.join(', ')}]`) +}) + +test('queue-jump with parallel events and serial handlers on forward bus still overlaps across events', async () => { + // When bus_b has parallel event concurrency but serial handler concurrency, + // the child event can start processing immediately (event lock is parallel), + // but its handler must wait for the slow handler to release the handler lock. 
+ + const TriggerEvent = BaseEvent.extend('QJEvtParHSer_Trigger', {}) + const ChildEvent = BaseEvent.extend('QJEvtParHSer_Child', {}) + const SlowEvent = BaseEvent.extend('QJEvtParHSer_Slow', {}) + + const bus_a = new EventBus('QJEvtParHSer_A', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('QJEvtParHSer_B', { + event_concurrency: 'parallel', // events can start concurrently + event_handler_concurrency: 'serial', // but handlers serialize per event + }) + + const log: string[] = [] + + bus_b.on(SlowEvent, async () => { + log.push('slow_start') + await delay(40) + log.push('slow_end') + }) + + bus_b.on(ChildEvent, async () => { + log.push('child_b_start') + await delay(5) + log.push('child_b_end') + }) + + bus_a.on(TriggerEvent, async (event: InstanceType) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: null }))! + bus_b.emit(child) + await child.done() + }) + + bus_b.emit(SlowEvent({ event_timeout: null })) + await delay(5) + + const top = bus_a.emit(TriggerEvent({ event_timeout: null })) + await top.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + // With per-event serial handler concurrency, different events can overlap + const slow_end = log.indexOf('slow_end') + const child_b_start = log.indexOf('child_b_start') + assert.ok(child_b_start >= 0, 'child on bus_b should have run') + assert.ok(child_b_start < slow_end, `bus_b (per-event serial): child handler should overlap slow handler. 
` + `Got: [${log.join(', ')}]`) +}) diff --git a/bubus-ts/tests/cross_runtime_roundtrip.test.ts b/bubus-ts/tests/cross_runtime_roundtrip.test.ts new file mode 100644 index 0000000..1f48c75 --- /dev/null +++ b/bubus-ts/tests/cross_runtime_roundtrip.test.ts @@ -0,0 +1,707 @@ +import assert from 'node:assert/strict' +import { spawnSync } from 'node:child_process' +import { existsSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from 'node:fs' +import { tmpdir } from 'node:os' +import { dirname, join, resolve } from 'node:path' +import { fileURLToPath } from 'node:url' +import { test } from 'node:test' +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' +import { fromJsonSchema } from '../src/types.js' + +const tests_dir = dirname(fileURLToPath(import.meta.url)) +const ts_root = resolve(tests_dir, '..') +const repo_root = resolve(ts_root, '..') +const PROCESS_TIMEOUT_MS = 30_000 +const EVENT_WAIT_TIMEOUT_MS = 15_000 + +const jsonSafe = (value: T): T => JSON.parse(JSON.stringify(value)) as T + +type ResultSemanticsCase = { + event: BaseEvent + valid_results: unknown[] + invalid_results: unknown[] +} + +const assertFieldEqual = (key: string, actual: unknown, expected: unknown, context: string): void => { + if (key.endsWith('_at') && typeof actual === 'string' && typeof expected === 'string') { + assert.equal(actual, expected, `${context}: ${key}`) + return + } + assert.deepEqual(actual, expected, `${context}: ${key}`) +} + +const stableValue = (value: unknown): string => { + if (value === undefined) { + return 'undefined' + } + try { + return JSON.stringify(value) + } catch { + return String(value) + } +} + +const assertSchemaSemanticsEqual = ( + original_schema_json: unknown, + candidate_schema_json: unknown, + valid_results: unknown[], + invalid_results: unknown[], + context: string +): void => { + const original_schema = fromJsonSchema(original_schema_json) + const candidate_schema = fromJsonSchema(candidate_schema_json) + + for 
(const result of valid_results) { + const original_ok = original_schema.safeParse(result).success + const candidate_ok = candidate_schema.safeParse(result).success + assert.equal(original_ok, true, `${context}: original schema should accept ${stableValue(result)}`) + assert.equal(candidate_ok, true, `${context}: candidate schema should accept ${stableValue(result)}`) + } + + for (const result of invalid_results) { + const original_ok = original_schema.safeParse(result).success + const candidate_ok = candidate_schema.safeParse(result).success + assert.equal(original_ok, false, `${context}: original schema should reject ${stableValue(result)}`) + assert.equal(candidate_ok, false, `${context}: candidate schema should reject ${stableValue(result)}`) + } + + for (const result of [...valid_results, ...invalid_results]) { + const original_ok = original_schema.safeParse(result).success + const candidate_ok = candidate_schema.safeParse(result).success + assert.equal( + candidate_ok, + original_ok, + `${context}: schema decision mismatch for ${stableValue(result)} (expected ${original_ok}, got ${candidate_ok})` + ) + } +} + +const buildRoundtripCases = (): ResultSemanticsCase[] => { + const NumberResultEvent = BaseEvent.extend('TsPy_NumberResultEvent', { + value: z.number(), + label: z.string(), + event_result_type: z.number(), + }) + const StringResultEvent = BaseEvent.extend('TsPy_StringResultEvent', { + id: z.string(), + event_result_type: z.string(), + }) + const BooleanResultEvent = BaseEvent.extend('TsPy_BooleanResultEvent', { + id: z.string(), + event_result_type: z.boolean(), + }) + const NullResultEvent = BaseEvent.extend('TsPy_NullResultEvent', { + id: z.string(), + event_result_type: z.null(), + }) + const StringCtorResultEvent = BaseEvent.extend('TsPy_StringCtorResultEvent', { + id: z.string(), + event_result_type: String, + }) + const NumberCtorResultEvent = BaseEvent.extend('TsPy_NumberCtorResultEvent', { + id: z.string(), + event_result_type: Number, + }) + 
const BooleanCtorResultEvent = BaseEvent.extend('TsPy_BooleanCtorResultEvent', { + id: z.string(), + event_result_type: Boolean, + }) + const ArrayResultEvent = BaseEvent.extend('TsPy_ArrayResultEvent', { + id: z.string(), + event_result_type: z.array(z.string()), + }) + const ArrayCtorResultEvent = BaseEvent.extend('TsPy_ArrayCtorResultEvent', { + id: z.string(), + event_result_type: Array, + }) + const RecordResultEvent = BaseEvent.extend('TsPy_RecordResultEvent', { + id: z.string(), + event_result_type: z.record(z.string(), z.array(z.number())), + }) + const ObjectCtorResultEvent = BaseEvent.extend('TsPy_ObjectCtorResultEvent', { + id: z.string(), + event_result_type: Object, + }) + const ScreenshotResultEvent = BaseEvent.extend('TsPy_ScreenshotResultEvent', { + target_id: z.string(), + quality: z.string(), + event_result_type: z.object({ + image_url: z.string(), + width: z.number(), + height: z.number(), + tags: z.array(z.string()), + is_animated: z.boolean(), + confidence_scores: z.array(z.number()), + metadata: z.record(z.string(), z.number()), + regions: z.array( + z.object({ + id: z.string(), + label: z.string(), + score: z.number(), + visible: z.boolean(), + }) + ), + }), + }) + + const number_event = NumberResultEvent({ + value: 7, + label: 'parent', + event_path: ['TsBus#aaaa'], + event_timeout: 12.5, + }) + + const screenshot_event = ScreenshotResultEvent({ + target_id: '0c1ccf21-65c0-7390-8b64-9182e985740e', + quality: 'high', + event_parent_id: number_event.event_id, + event_path: ['TsBus#aaaa', 'PyBridge#bbbb'], + event_timeout: 33.0, + }) + + const string_event = StringResultEvent({ + id: 'ecea6334-c939-7540-89b9-29b439c9a1f4', + event_parent_id: number_event.event_id, + event_path: ['TsBus#aaaa'], + }) + const bool_event = BooleanResultEvent({ + id: '87dc4d01-be2d-7057-834e-5faf35705400', + event_path: ['TsBus#aaaa'], + }) + const null_event = NullResultEvent({ + id: '5fc19a35-064c-7ec1-8d1a-4fb33f119abc', + event_path: ['TsBus#aaaa'], + }) + const 
string_ctor_event = StringCtorResultEvent({ + id: 'df54dc78-e988-75bc-8457-87d5bd2d7c4c', + event_path: ['TsBus#aaaa'], + }) + const number_ctor_event = NumberCtorResultEvent({ + id: 'bfe9459c-c1a4-7906-8a13-c9855aac0001', + event_path: ['TsBus#aaaa'], + }) + const boolean_ctor_event = BooleanCtorResultEvent({ + id: 'f472d2e0-5815-7dad-8fb1-a9ce4315cd6e', + event_path: ['TsBus#aaaa'], + }) + const array_event = ArrayResultEvent({ + id: 'e35d91b5-1ca9-7833-8b3d-1516e2896f1e', + event_path: ['TsBus#aaaa'], + }) + const array_ctor_event = ArrayCtorResultEvent({ + id: 'f21399dd-6162-7ac2-832d-a3870373278a', + event_path: ['TsBus#aaaa'], + }) + const record_event = RecordResultEvent({ + id: 'ba1a8735-0955-737f-8b4d-7337d2169a3c', + event_path: ['TsBus#aaaa'], + }) + const object_ctor_event = ObjectCtorResultEvent({ + id: '2aa37066-45e8-7f65-8ada-7c30ac8982d5', + event_path: ['TsBus#aaaa'], + }) + + return [ + { + event: number_event, + valid_results: [0, -1, 1.5], + invalid_results: ['1', true, { value: 1 }], + }, + { + event: string_event, + valid_results: ['ok', ''], + invalid_results: [123, false, ['x']], + }, + { + event: bool_event, + valid_results: [true, false], + invalid_results: ['false', 0, {}], + }, + { + event: null_event, + valid_results: [null], + invalid_results: [0, false, 'not-null', {}, []], + }, + { + event: string_ctor_event, + valid_results: ['ok', ''], + invalid_results: [123, false, ['x']], + }, + { + event: number_ctor_event, + valid_results: [3.14, 42], + invalid_results: ['42', false, {}], + }, + { + event: boolean_ctor_event, + valid_results: [true, false], + invalid_results: ['true', 1, []], + }, + { + event: array_event, + valid_results: [['a', 'b'], []], + invalid_results: [['a', 1], {}, 'not-array'], + }, + { + event: array_ctor_event, + valid_results: [[1, 'two', false], []], + invalid_results: ['not-array', { 0: 'x' }, true], + }, + { + event: record_event, + valid_results: [{ a: [1, 2], b: [] }, {}], + invalid_results: [{ a: ['1'] }, 
['not-object'], 12], + }, + { + event: object_ctor_event, + valid_results: [{ any: 'shape', count: 2 }, {}], + invalid_results: ['not-object', [1, 2], true], + }, + { + event: screenshot_event, + valid_results: [ + { + image_url: 'https://img.local/1.png', + width: 1920, + height: 1080, + tags: ['hero', 'dashboard'], + is_animated: false, + confidence_scores: [0.95, 0.89], + metadata: { score: 0.99, variance: 0.01 }, + regions: [ + { id: '98f51f1d-b10a-7cd9-8ee6-cb706153f717', label: 'face', score: 0.9, visible: true }, + { id: '5f234e9d-29e9-7921-8cf2-2a65f6ba3bdd', label: 'button', score: 0.7, visible: false }, + ], + }, + ], + invalid_results: [ + { + image_url: 123, + width: '1920', + height: 1080, + tags: ['hero'], + is_animated: false, + confidence_scores: [0.95], + metadata: { score: 0.99 }, + regions: [{ id: '98f51f1d-b10a-7cd9-8ee6-cb706153f717', label: 'face', score: 0.9, visible: true }], + }, + { + image_url: 'https://img.local/1.png', + width: 1920, + height: 1080, + tags: ['hero'], + is_animated: false, + confidence_scores: [0.95], + metadata: { score: 0.99 }, + regions: [{ id: 123, label: 'face', score: 0.9, visible: true }], + }, + ], + }, + ] +} + +const runCommand = (cmd: string, args: string[], cwd = repo_root): ReturnType => + spawnSync(cmd, args, { + cwd, + env: process.env, + encoding: 'utf8', + timeout: PROCESS_TIMEOUT_MS, + maxBuffer: 10 * 1024 * 1024, + }) + +const assertProcessSucceeded = (proc: ReturnType, label: string): void => { + if (proc.error) { + throw new Error(`${label} failed: ${proc.error.message}\nstdout:\n${proc.stdout ?? ''}\nstderr:\n${proc.stderr ?? ''}`) + } + if (proc.signal) { + throw new Error(`${label} terminated by signal ${proc.signal}\nstdout:\n${proc.stdout ?? ''}\nstderr:\n${proc.stderr ?? ''}`) + } + assert.equal(proc.status, 0, `${label} failed:\nstdout:\n${proc.stdout ?? ''}\nstderr:\n${proc.stderr ?? 
''}`) +} + +const runWithTimeout = async (promise: Promise, timeout_ms: number, label: string): Promise => + new Promise((resolve, reject) => { + const timeout_id = setTimeout(() => { + reject(new Error(`${label} timed out after ${timeout_ms}ms`)) + }, timeout_ms) + promise.then( + (value) => { + clearTimeout(timeout_id) + resolve(value) + }, + (error) => { + clearTimeout(timeout_id) + reject(error) + } + ) + }) + +type PythonRunner = { + command: string + args_prefix: string[] + label: string +} + +const resolvePython = (): PythonRunner | null => { + const candidates = [ + process.env.BUBUS_PYTHON_BIN, + resolve(repo_root, '.venv', 'bin', 'python'), + resolve(repo_root, '.venv', 'Scripts', 'python.exe'), + 'python3', + 'python', + ].filter((candidate): candidate is string => typeof candidate === 'string' && candidate.length > 0) + + for (const candidate of candidates) { + if ((candidate.includes('/') || candidate.includes('\\')) && !existsSync(candidate)) { + continue + } + const probe = runCommand(candidate, ['--version']) + if (probe.status === 0) { + return { command: candidate, args_prefix: [], label: candidate } + } + } + + const uv_probe = runCommand('uv', ['--version']) + if (uv_probe.status === 0) { + const uv_python_probe = runCommand('uv', ['run', 'python', '--version']) + if (uv_python_probe.status === 0) { + return { command: 'uv', args_prefix: ['run', 'python'], label: 'uv run python' } + } + } + + return null +} + +const runPythonCommand = ( + python_runner: PythonRunner, + args: string[], + extra_env: Record = {} +): ReturnType => + spawnSync(python_runner.command, [...python_runner.args_prefix, ...args], { + cwd: repo_root, + env: { + ...process.env, + ...extra_env, + }, + encoding: 'utf8', + timeout: PROCESS_TIMEOUT_MS, + maxBuffer: 10 * 1024 * 1024, + }) + +const assertPythonCanImportBubus = (python_runner: PythonRunner): void => { + const probe = runPythonCommand(python_runner, ['-c', 'import pydantic; import bubus']) + if (probe.status !== 0) { 
+ throw new Error( + `python environment (${python_runner.label}) cannot import bubus/pydantic:\nstdout:\n${probe.stdout ?? ''}\nstderr:\n${probe.stderr ?? ''}` + ) + } +} + +const runPythonRoundtrip = (python_runner: PythonRunner, payload: Array>): Array> => { + const temp_dir = mkdtempSync(join(tmpdir(), 'bubus-ts-to-python-')) + const input_path = join(temp_dir, 'ts_events.json') + const output_path = join(temp_dir, 'python_events.json') + + const python_script = ` +import json +import os +from typing import Any +from bubus import BaseEvent + +input_path = os.environ.get('BUBUS_TS_PY_INPUT_PATH') +output_path = os.environ.get('BUBUS_TS_PY_OUTPUT_PATH') +if not input_path or not output_path: + raise RuntimeError('missing BUBUS_TS_PY_INPUT_PATH or BUBUS_TS_PY_OUTPUT_PATH') + +with open(input_path, 'r', encoding='utf-8') as f: + raw = json.load(f) + +if not isinstance(raw, list): + raise TypeError('expected array payload') + +roundtripped: list[dict[str, Any]] = [] +for item in raw: + event = BaseEvent[Any].model_validate(item) + roundtripped.append(event.model_dump(mode='json')) + +with open(output_path, 'w', encoding='utf-8') as f: + json.dump(roundtripped, f, indent=2) +` + + try { + writeFileSync(input_path, JSON.stringify(payload, null, 2), 'utf8') + const proc = runPythonCommand(python_runner, ['-c', python_script], { + BUBUS_TS_PY_INPUT_PATH: input_path, + BUBUS_TS_PY_OUTPUT_PATH: output_path, + }) + + assertProcessSucceeded(proc, 'python roundtrip') + assert.ok(existsSync(output_path), 'python roundtrip did not produce output payload') + + return JSON.parse(readFileSync(output_path, 'utf8')) as Array> + } finally { + rmSync(temp_dir, { recursive: true, force: true }) + } +} + +const runPythonBusRoundtrip = (python_runner: PythonRunner, payload: Record): Record => { + const temp_dir = mkdtempSync(join(tmpdir(), 'bubus-ts-bus-to-python-')) + const input_path = join(temp_dir, 'ts_bus.json') + const output_path = join(temp_dir, 'python_bus.json') + + const 
python_script = ` +import json +import os +from bubus import EventBus + +input_path = os.environ.get('BUBUS_TS_PY_BUS_INPUT_PATH') +output_path = os.environ.get('BUBUS_TS_PY_BUS_OUTPUT_PATH') +if not input_path or not output_path: + raise RuntimeError('missing BUBUS_TS_PY_BUS_INPUT_PATH or BUBUS_TS_PY_BUS_OUTPUT_PATH') + +with open(input_path, 'r', encoding='utf-8') as f: + raw = json.load(f) + +if not isinstance(raw, dict): + raise TypeError('expected object payload') + +bus = EventBus.validate(raw) +roundtripped = bus.model_dump() + +with open(output_path, 'w', encoding='utf-8') as f: + json.dump(roundtripped, f, indent=2) +` + + try { + writeFileSync(input_path, JSON.stringify(payload, null, 2), 'utf8') + const proc = runPythonCommand(python_runner, ['-c', python_script], { + BUBUS_TS_PY_BUS_INPUT_PATH: input_path, + BUBUS_TS_PY_BUS_OUTPUT_PATH: output_path, + }) + + assertProcessSucceeded(proc, 'python bus roundtrip') + assert.ok(existsSync(output_path), 'python bus roundtrip did not produce output payload') + return JSON.parse(readFileSync(output_path, 'utf8')) as Record + } finally { + rmSync(temp_dir, { recursive: true, force: true }) + } +} + +test('ts_to_python_roundtrip preserves event fields and result type semantics', async () => { + const python_runner = resolvePython() + assert.ok(python_runner, 'python is required for ts<->python roundtrip tests') + assertPythonCanImportBubus(python_runner) + + const roundtrip_cases = buildRoundtripCases() + const events = roundtrip_cases.map((entry) => entry.event) + const roundtrip_cases_by_type = new Map(roundtrip_cases.map((entry) => [entry.event.event_type, entry])) + const ts_dumped = events.map((event) => jsonSafe(event.toJSON())) + + for (const event_dump of ts_dumped) { + assert.ok('event_result_type' in event_dump) + assert.equal(typeof event_dump.event_result_type, 'object') + } + + const python_roundtripped = runPythonRoundtrip(python_runner, ts_dumped) + assert.equal(python_roundtripped.length, 
ts_dumped.length) + + for (let i = 0; i < ts_dumped.length; i += 1) { + const original = ts_dumped[i] + const python_event = python_roundtripped[i] + + const event_type = String(original.event_type) + const semantics_case = roundtrip_cases_by_type.get(event_type) + assert.ok(semantics_case, `missing semantics case for event_type=${event_type}`) + + for (const [key, value] of Object.entries(original)) { + assert.ok(key in python_event, `missing key after python roundtrip: ${key}`) + if (key === 'event_result_type') { + assert.equal(typeof python_event[key], 'object') + assertSchemaSemanticsEqual( + value, + python_event[key], + semantics_case.valid_results, + semantics_case.invalid_results, + `python roundtrip ${event_type}` + ) + continue + } + assertFieldEqual(key, python_event[key], value, 'field changed after python roundtrip') + } + + const restored = BaseEvent.fromJSON(python_event) + const restored_dump = jsonSafe(restored.toJSON()) + + for (const [key, value] of Object.entries(original)) { + assert.ok(key in restored_dump, `missing key after ts reload: ${key}`) + if (key === 'event_result_type') { + assert.equal(typeof restored_dump[key], 'object') + assertSchemaSemanticsEqual( + value, + restored_dump[key], + semantics_case.valid_results, + semantics_case.invalid_results, + `ts reload ${event_type}` + ) + continue + } + assertFieldEqual(key, restored_dump[key], value, 'field changed after ts reload') + } + } + + const screenshot_payload = python_roundtripped.find((event) => event.event_type === 'TsPy_ScreenshotResultEvent') + assert.ok(screenshot_payload, 'missing TsPy_ScreenshotResultEvent in roundtrip payload') + assert.equal(typeof screenshot_payload.event_result_type, 'object') + + const wrong_bus = new EventBus('TsPyTsWrongShape') + wrong_bus.on('TsPy_ScreenshotResultEvent', () => ({ + image_url: 123, + width: '1920', + height: 1080, + tags: ['hero', 'dashboard'], + is_animated: 'false', + confidence_scores: [0.95, 0.89], + metadata: { score: 0.99 }, + 
regions: [{ id: '98f51f1d-b10a-7cd9-8ee6-cb706153f717', label: 'face', score: 0.9, visible: true }], + })) + const wrong_event = BaseEvent.fromJSON(screenshot_payload) + assert.equal(typeof (wrong_event.event_result_type as { safeParse?: unknown } | undefined)?.safeParse, 'function') + const wrong_dispatched = wrong_bus.emit(wrong_event) + await runWithTimeout(wrong_dispatched.done(), EVENT_WAIT_TIMEOUT_MS, 'wrong-shape event completion') + const wrong_result = Array.from(wrong_dispatched.event_results.values())[0] + assert.equal(wrong_result.status, 'error') + wrong_bus.destroy() + + const right_bus = new EventBus('TsPyTsRightShape') + right_bus.on('TsPy_ScreenshotResultEvent', () => ({ + image_url: 'https://img.local/1.png', + width: 1920, + height: 1080, + tags: ['hero', 'dashboard'], + is_animated: false, + confidence_scores: [0.95, 0.89], + metadata: { score: 0.99, variance: 0.01 }, + regions: [ + { id: '98f51f1d-b10a-7cd9-8ee6-cb706153f717', label: 'face', score: 0.9, visible: true }, + { id: '5f234e9d-29e9-7921-8cf2-2a65f6ba3bdd', label: 'button', score: 0.7, visible: false }, + ], + })) + const right_event = BaseEvent.fromJSON(screenshot_payload) + assert.equal(typeof (right_event.event_result_type as { safeParse?: unknown } | undefined)?.safeParse, 'function') + const right_dispatched = right_bus.emit(right_event) + await runWithTimeout(right_dispatched.done(), EVENT_WAIT_TIMEOUT_MS, 'right-shape event completion') + const right_result = Array.from(right_dispatched.event_results.values())[0] + assert.equal(right_result.status, 'completed') + assert.deepEqual(right_result.result, { + image_url: 'https://img.local/1.png', + width: 1920, + height: 1080, + tags: ['hero', 'dashboard'], + is_animated: false, + confidence_scores: [0.95, 0.89], + metadata: { score: 0.99, variance: 0.01 }, + regions: [ + { id: '98f51f1d-b10a-7cd9-8ee6-cb706153f717', label: 'face', score: 0.9, visible: true }, + { id: '5f234e9d-29e9-7921-8cf2-2a65f6ba3bdd', label: 'button', score: 
0.7, visible: false }, + ], + }) + right_bus.destroy() +}) + +test('ts -> python -> ts bus roundtrip rehydrates and resumes pending queue', async () => { + const python_runner = resolvePython() + assert.ok(python_runner, 'python is required for ts<->python roundtrip tests') + assertPythonCanImportBubus(python_runner) + + const ResumeEvent = BaseEvent.extend('TsPyBusResumeEvent', { + label: z.string(), + event_result_type: z.string(), + }) + + const source_bus = new EventBus('TsPyBusSource', { + id: '018f8e40-1234-7000-8000-00000000aa11', + event_handler_detect_file_paths: false, + event_handler_concurrency: 'serial', + event_handler_completion: 'all', + }) + + const handler_one = source_bus.on(ResumeEvent, (event) => `h1:${(event as unknown as { label: string }).label}`) + const handler_two = source_bus.on(ResumeEvent, (event) => `h2:${(event as unknown as { label: string }).label}`) + + const event_one = ResumeEvent({ label: 'e1' }) + const event_two = ResumeEvent({ label: 'e2' }) + + const seeded = event_one.eventResultUpdate(handler_one, { eventbus: source_bus, status: 'pending' }) + event_one.eventResultUpdate(handler_two, { eventbus: source_bus, status: 'pending' }) + seeded.update({ status: 'completed', result: 'seeded' }) + + source_bus.event_history.set(event_one.event_id, event_one) + source_bus.event_history.set(event_two.event_id, event_two) + source_bus.pending_event_queue = [event_one, event_two] + + const source_dump = source_bus.toJSON() + const py_roundtripped = runPythonBusRoundtrip(python_runner, source_dump) + const restored = EventBus.fromJSON(py_roundtripped) + const restored_dump = restored.toJSON() + + assert.deepEqual(Object.keys(restored_dump.handlers), Object.keys(source_dump.handlers)) + for (const [handler_id, handler_payload] of Object.entries(source_dump.handlers as Record>)) { + const restored_handler = (restored_dump.handlers as Record>)[handler_id] + assert.ok(restored_handler, `missing handler ${handler_id}`) + 
assert.equal(restored_handler.eventbus_id, handler_payload.eventbus_id) + assert.equal(restored_handler.eventbus_name, handler_payload.eventbus_name) + assert.equal(restored_handler.event_pattern, handler_payload.event_pattern) + } + assert.deepEqual(restored_dump.handlers_by_key, source_dump.handlers_by_key) + assert.deepEqual(restored_dump.pending_event_queue, source_dump.pending_event_queue) + assert.deepEqual(Object.keys(restored_dump.event_history), Object.keys(source_dump.event_history)) + + const restored_event_one = restored.event_history.get(event_one.event_id) + assert.ok(restored_event_one) + const preseeded = Array.from(restored_event_one!.event_results.values()).find((result) => result.result === 'seeded') + assert.ok(preseeded) + assert.equal(preseeded!.status, 'completed') + assert.equal(preseeded!.result, 'seeded') + assert.equal(preseeded!.handler, restored.handlers.get(preseeded!.handler_id)) + + const run_order: string[] = [] + const restored_handler_one = restored.handlers.get(handler_one.id) + const restored_handler_two = restored.handlers.get(handler_two.id) + assert.ok(restored_handler_one) + assert.ok(restored_handler_two) + const restored_handler_one_fn = (event: BaseEvent): string => { + const label = Reflect.get(event, 'label') + run_order.push(`h1:${String(label)}`) + return `h1:${String(label)}` + } + const restored_handler_two_fn = (event: BaseEvent): string => { + const label = Reflect.get(event, 'label') + run_order.push(`h2:${String(label)}`) + return `h2:${String(label)}` + } + restored_handler_one.handler = restored_handler_one_fn + restored_handler_two.handler = restored_handler_two_fn + + const trigger = restored.emit(ResumeEvent({ label: 'e3' })) + await runWithTimeout(trigger.done(), EVENT_WAIT_TIMEOUT_MS, 'bus resume completion') + + const done_one = restored.event_history.get(event_one.event_id) + const done_two = restored.event_history.get(event_two.event_id) + const done_three = restored.event_history.get(trigger.event_id) 
+ assert.equal(done_three?.event_status, 'completed') + assert.equal(restored.pending_event_queue.length, 0) + assert.ok(Array.from(done_one?.event_results.values() ?? []).every((result) => result.status === 'completed')) + assert.ok(Array.from(done_two?.event_results.values() ?? []).every((result) => result.status === 'completed')) + assert.equal(done_one?.event_results.get(handler_one.id)?.result, 'seeded') + assert.equal(done_one?.event_results.get(handler_two.id)?.result, 'h2:e1') + assert.equal(done_two?.event_results.get(handler_one.id)?.result, 'h1:e2') + assert.equal(done_two?.event_results.get(handler_two.id)?.result, 'h2:e2') + assert.equal(done_three?.event_results.get(handler_one.id)?.result, 'h1:e3') + assert.equal(done_three?.event_results.get(handler_two.id)?.result, 'h2:e3') + assert.deepEqual(run_order, ['h2:e1', 'h1:e2', 'h2:e2', 'h1:e3', 'h2:e3']) + + source_bus.destroy() + restored.destroy() +}) diff --git a/bubus-ts/tests/event_handler.test.ts b/bubus-ts/tests/event_handler.test.ts new file mode 100644 index 0000000..dc03f2f --- /dev/null +++ b/bubus-ts/tests/event_handler.test.ts @@ -0,0 +1,170 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const UserActionEvent = BaseEvent.extend('UserActionEvent', { + action: z.string(), + user_id: z.string(), +}) + +const SystemEventModel = BaseEvent.extend('SystemEventModel', { + name: z.string(), +}) + +test('handler registration via string, class, and wildcard', async () => { + const bus = new EventBus('HandlerRegistrationBus') + const results: Record = { + specific: [], + model: [], + universal: [], + } + + const user_handler = async (event: InstanceType): Promise => { + results.specific.push(event.action) + return 'user_handled' + } + + const system_handler = async (event: InstanceType): Promise => { + results.model.push(event.name) + return 'system_handled' + } + + const 
universal_handler = async (event: BaseEvent): Promise => { + results.universal.push(event.event_type) + return 'universal' + } + + bus.on('UserActionEvent', user_handler) + bus.on(SystemEventModel, system_handler) + bus.on('*', universal_handler) + + bus.emit(UserActionEvent({ action: 'login', user_id: 'e692b6cb-ae63-773b-8557-3218f7ce5ced' })) + bus.emit(SystemEventModel({ name: 'startup' })) + await bus.waitUntilIdle() + + assert.deepEqual(results.specific, ['login']) + assert.deepEqual(results.model, ['startup']) + assert.deepEqual(new Set(results.universal), new Set(['UserActionEvent', 'SystemEventModel'])) +}) + +test('handlers can be sync or async', async () => { + const bus = new EventBus('SyncAsyncHandlersBus') + + const sync_handler = (_event: BaseEvent): string => 'sync' + const async_handler = async (_event: BaseEvent): Promise => 'async' + + bus.on('TestEvent', sync_handler) + bus.on('TestEvent', async_handler) + + const handler_count = Array.from(bus.handlers.values()).filter((entry) => entry.event_pattern === 'TestEvent').length + assert.equal(handler_count, 2) + + const event = bus.emit(BaseEvent.extend('TestEvent', {})({})) + await event.done() + + const results = Array.from(event.event_results.values()).map((result) => result.result) + assert.ok(results.includes('sync')) + assert.ok(results.includes('async')) +}) + +test('class matcher falls back to class name and matches generic BaseEvent event_type', async () => { + const bus = new EventBus('ClassNameFallbackBus') + + class DifferentNameFromClass extends BaseEvent {} + + const seen: string[] = [] + bus.on(DifferentNameFromClass, (event: BaseEvent) => { + seen.push(`class:${event.event_type}`) + }) + bus.on('DifferentNameFromClass', (event: BaseEvent) => { + seen.push(`string:${event.event_type}`) + }) + bus.on('*', (event: BaseEvent) => { + seen.push(`wildcard:${event.event_type}`) + }) + + await bus.emit(new BaseEvent({ event_type: 'DifferentNameFromClass' })).done() + + assert.deepEqual(seen, 
['class:DifferentNameFromClass', 'string:DifferentNameFromClass', 'wildcard:DifferentNameFromClass']) + assert.equal(bus.handlers_by_key.get('DifferentNameFromClass')?.length, 2) +}) + +test('instance, class, and static method handlers', async () => { + const bus = new EventBus('MethodHandlersBus') + const results: string[] = [] + + class EventProcessor { + name: string + value: number + + constructor(name: string, value: number) { + this.name = name + this.value = value + } + + sync_method_handler = (event: InstanceType): Record => { + results.push(`${this.name}_sync`) + return { processor: this.name, value: this.value, action: event.action } + } + + async async_method_handler(event: InstanceType): Promise> { + await new Promise((resolve) => setTimeout(resolve, 10)) + results.push(`${this.name}_async`) + return { processor: this.name, value: this.value * 2, action: event.action } + } + + static class_method_handler(event: InstanceType): string { + results.push('classmethod') + return `Handled by ${event.event_type}` + } + + static static_method_handler(_event: InstanceType): string { + results.push('staticmethod') + return 'Handled by static method' + } + } + + const processor1 = new EventProcessor('Processor1', 10) + const processor2 = new EventProcessor('Processor2', 20) + + bus.on(UserActionEvent, processor1.sync_method_handler) + bus.on(UserActionEvent, processor1.async_method_handler.bind(processor1)) + bus.on(UserActionEvent, processor2.sync_method_handler) + bus.on('UserActionEvent', EventProcessor.class_method_handler) + bus.on('UserActionEvent', EventProcessor.static_method_handler) + + const event = UserActionEvent({ action: 'test_methods', user_id: 'dab45f48-9e3a-7042-80f8-ac8f07b6cfe3' }) + const completed_event = bus.emit(event) + await completed_event.done() + + assert.equal(results.length, 5) + assert.ok(results.includes('Processor1_sync')) + assert.ok(results.includes('Processor1_async')) + assert.ok(results.includes('Processor2_sync')) + 
assert.ok(results.includes('classmethod')) + assert.ok(results.includes('staticmethod')) + + const result_values = Array.from(completed_event.event_results.values()).map((result) => result.result) + + const p1_sync = result_values.find( + (result) => + typeof result === 'object' && + result !== null && + (result as { processor?: string; value?: number }).processor === 'Processor1' && + (result as { value?: number }).value === 10 + ) as { action?: string } | undefined + + const p1_async = result_values.find( + (result) => + typeof result === 'object' && + result !== null && + (result as { processor?: string; value?: number }).processor === 'Processor1' && + (result as { value?: number }).value === 20 + ) as { action?: string } | undefined + + assert.equal(p1_sync?.action, 'test_methods') + assert.equal(p1_async?.action, 'test_methods') +}) diff --git a/bubus-ts/tests/event_handler_first.test.ts b/bubus-ts/tests/event_handler_first.test.ts new file mode 100644 index 0000000..fa8e5de --- /dev/null +++ b/bubus-ts/tests/event_handler_first.test.ts @@ -0,0 +1,550 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' +import { z } from 'zod' + +import { BaseEvent, EventBus, retry, clearSemaphoreRegistry } from '../src/index.js' + +const delay = (ms: number): Promise => new Promise((resolve) => setTimeout(resolve, ms)) + +// ─── first() with parallel handlers ───────────────────────────────────────── + +test('first: returns the first non-undefined result from parallel handlers', async () => { + const bus = new EventBus('FirstParallelBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const TestEvent = BaseEvent.extend('FirstParallelEvent', { event_result_type: z.string() }) + + bus.on(TestEvent, async (_event) => { + await delay(100) + return 'slow handler' + }) + + await delay(2) + + bus.on(TestEvent, async (_event) => { + await delay(10) + return 'fast handler' + }) + + const result = await bus.emit(TestEvent({})).first() + + 
assert.equal(result, 'fast handler', 'should return the temporally first non-undefined result') +}) + +test('first: cancels remaining parallel handlers after first result', async () => { + const bus = new EventBus('FirstCancelBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const TestEvent = BaseEvent.extend('FirstCancelEvent', { event_result_type: z.string() }) + + let slow_handler_completed = false + + bus.on(TestEvent, async (_event) => { + await delay(10) + return 'fast result' + }) + + await delay(2) + + bus.on(TestEvent, async (_event) => { + await delay(500) + slow_handler_completed = true + return 'slow result' + }) + + const event = bus.emit(TestEvent({})) + const result = await event.first() + + assert.equal(result, 'fast result') + assert.equal(slow_handler_completed, false, 'slow handler should have been aborted') + + // Verify the slow handler was aborted + const results = Array.from(event.event_results.values()) + const aborted = results.filter((r) => r.status === 'error') + assert.equal(aborted.length, 1, 'one handler should be aborted') +}) + +// ─── first() with serial handlers ─────────────────────────────────────────── + +test('first: returns the first non-undefined result from serial handlers', async () => { + const bus = new EventBus('FirstSerialBus', { event_timeout: null, event_handler_concurrency: 'serial' }) + const TestEvent = BaseEvent.extend('FirstSerialEvent', { event_result_type: z.string() }) + + let second_handler_called = false + + bus.on(TestEvent, async (_event) => { + return 'first handler result' + }) + + await delay(2) + + bus.on(TestEvent, async (_event) => { + second_handler_called = true + return 'second handler result' + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, 'first handler result') + assert.equal(second_handler_called, false, 'second handler should not have run') +}) + +test('first: serial mode skips first handler returning undefined, takes second', async 
() => { + const bus = new EventBus('FirstSerialSkipBus', { event_timeout: null, event_handler_concurrency: 'serial' }) + const TestEvent = BaseEvent.extend('FirstSerialSkipEvent', { event_result_type: z.string() }) + + bus.on(TestEvent, async (_event) => { + return undefined // no result + }) + + await delay(2) + + bus.on(TestEvent, async (_event) => { + return 'second handler has it' + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, 'second handler has it') +}) + +// ─── first() edge cases ───────────────────────────────────────────────────── + +test('first: returns undefined when all handlers return undefined', async () => { + const bus = new EventBus('FirstUndefinedBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const TestEvent = BaseEvent.extend('FirstUndefinedEvent', {}) + + bus.on(TestEvent, async (_event) => { + return undefined + }) + + await delay(2) + + bus.on(TestEvent, async (_event) => { + // no return (void) + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, undefined) +}) + +test('first: returns undefined when all handlers throw errors', async () => { + const bus = new EventBus('FirstErrorBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const TestEvent = BaseEvent.extend('FirstErrorEvent', { event_result_type: z.string() }) + + bus.on(TestEvent, async (_event) => { + throw new Error('handler 1 error') + }) + + await delay(2) + + bus.on(TestEvent, async (_event) => { + throw new Error('handler 2 error') + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, undefined, 'should return undefined when no handler succeeds') +}) + +test('first: skips error handlers and returns the successful one', async () => { + const bus = new EventBus('FirstMixBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const TestEvent = BaseEvent.extend('FirstMixEvent', { event_result_type: z.string() }) + + 
bus.on(TestEvent, async (_event) => { + throw new Error('fast but fails') + }) + + await delay(2) + + bus.on(TestEvent, async (_event) => { + await delay(20) + return 'slow but succeeds' + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, 'slow but succeeds') +}) + +test('first: returns undefined when no handlers are registered', async () => { + const bus = new EventBus('FirstNoHandlerBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('FirstNoHandlerEvent', {}) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, undefined) +}) + +test('first: rejects when event has no bus attached', async () => { + const TestEvent = BaseEvent.extend('FirstNoBusEvent', {}) + const event = TestEvent({}) + + await assert.rejects(event.first(), { message: 'event has no bus attached' }) +}) + +// ─── first() with @retry() decorated handlers ────────────────────────────── + +test('first: @retry decorated handler retries before first() resolves', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('FirstRetryBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const TestEvent = BaseEvent.extend('FirstRetryEvent', { event_result_type: z.string() }) + + let fast_attempts = 0 + + class Service { + constructor(b: InstanceType) { + b.on(TestEvent, this.on_fast.bind(this)) + } + + @retry({ max_attempts: 3 }) + async on_fast(_event: InstanceType): Promise { + fast_attempts++ + if (fast_attempts < 3) throw new Error(`attempt ${fast_attempts} failed`) + return 'succeeded after retries' + } + } + + new Service(bus) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, 'succeeded after retries') + assert.equal(fast_attempts, 3) +}) + +test('first: fast handler wins and slow @retry handler gets cancelled', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('FirstRetryRaceBus', { event_timeout: null, event_handler_concurrency: 'parallel' 
}) + const TestEvent = BaseEvent.extend('FirstRetryRaceEvent', { event_result_type: z.string() }) + + let slow_attempts = 0 + + // fast handler returns immediately + bus.on(TestEvent, async (_event) => { + return 'fast path' + }) + + await delay(2) + + class SlowService { + constructor(b: InstanceType) { + b.on(TestEvent, this.on_slow.bind(this)) + } + + @retry({ max_attempts: 5, retry_after: 0.1 }) + async on_slow(_event: InstanceType): Promise { + slow_attempts++ + await delay(200) + return 'slow path' + } + } + + new SlowService(bus) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, 'fast path') + assert.equal(slow_attempts <= 1, true, 'slow handler should have been aborted after at most 1 attempt') +}) + +// ─── first() with the recommended @retry decorator pattern ────────────────── + +test('first: screenshot-service pattern β€” fast path wins, slow path with retry cancelled', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('ScreenshotBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', { + page_id: z.string(), + event_result_type: z.string(), + }) + + let fast_called = false + + class ScreenshotService { + constructor(b: InstanceType) { + b.on(ScreenshotEvent, this.on_ScreenshotEvent_fast.bind(this)) + // small delay so handler IDs don't collide + } + + async on_ScreenshotEvent_fast(_event: InstanceType): Promise { + fast_called = true + return 'fast_screenshot_data' + } + } + + class SlowScreenshotService { + constructor(b: InstanceType) { + b.on(ScreenshotEvent, this.on_ScreenshotEvent_slow.bind(this)) + } + + @retry({ max_attempts: 3, timeout: 15, semaphore_scope: 'global', semaphore_limit: 1, semaphore_name: 'Screenshots' }) + async on_ScreenshotEvent_slow(_event: InstanceType): Promise { + await delay(500) + return 'slow_screenshot_data' + } + } + + new ScreenshotService(bus) + await delay(2) + new 
SlowScreenshotService(bus) + + const screenshot = await bus.emit(ScreenshotEvent({ page_id: '2e0736d6-e947-74be-8d1c-fa8040515f2c' })).first() + + assert.equal(screenshot, 'fast_screenshot_data') + assert.equal(fast_called, true) + // slow handler may or may not have started, but should be aborted before completing +}) + +test('first: screenshot-service pattern β€” fast path fails, slow path with retry succeeds', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('ScreenshotFallbackBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const ScreenshotEvent = BaseEvent.extend('ScreenshotFallbackEvent', { + page_id: z.string(), + event_result_type: z.string(), + }) + + let slow_attempts = 0 + + class ScreenshotService { + constructor(b: InstanceType) { + b.on(ScreenshotEvent, this.on_fast.bind(this)) + } + + async on_fast(_event: InstanceType): Promise { + // fast path fails, returns undefined to signal "I can't handle this" + return undefined + } + } + + class SlowScreenshotService { + constructor(b: InstanceType) { + b.on(ScreenshotEvent, this.on_slow.bind(this)) + } + + @retry({ max_attempts: 3 }) + async on_slow(_event: InstanceType): Promise { + slow_attempts++ + if (slow_attempts < 2) throw new Error('screenshot timeout') + return 'slow_screenshot_data' + } + } + + new ScreenshotService(bus) + await delay(2) + new SlowScreenshotService(bus) + + const screenshot = await bus.emit(ScreenshotEvent({ page_id: 'd8942b12-5198-70da-8914-e0a0c00ca14a' })).first() + + assert.equal(screenshot, 'slow_screenshot_data') + assert.equal(slow_attempts, 2, 'slow handler needed 2 attempts') +}) + +// ─── first() with single handler ──────────────────────────────────────────── + +test('first: works with a single handler', async () => { + const bus = new EventBus('FirstSingleBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('FirstSingleEvent', { event_result_type: z.number() }) + + bus.on(TestEvent, async (_event) => { + return 
42 + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, 42) +}) + +// ─── first() preserves non-undefined falsy values ─────────────────────────── + +test('first: skips null result and uses the next handler winner', async () => { + const bus = new EventBus('FirstNullSkipBus', { event_timeout: null, event_handler_concurrency: 'serial' }) + const TestEvent = BaseEvent.extend('FirstNullSkipEvent', {}) + let third_handler_called = false + + bus.on(TestEvent, async (_event) => { + return null + }) + bus.on(TestEvent, async (_event) => { + return 'winner' + }) + bus.on(TestEvent, async (_event) => { + third_handler_called = true + return 'third' + }) + + const event = bus.emit(TestEvent({})) + const result = await event.first() + + assert.equal(result, 'winner') + assert.equal(third_handler_called, false) + + const null_result = Array.from(event.event_results.values()).find((entry) => entry.result === null) + const winner_result = Array.from(event.event_results.values()).find((entry) => entry.result === 'winner') + assert.ok(null_result, 'expected null-producing handler result metadata') + assert.equal(null_result.status, 'completed') + assert.equal(null_result.error, undefined) + assert.equal(null_result.result, null) + assert.ok(winner_result, 'expected winner handler result metadata') + assert.equal(winner_result.status, 'completed') + assert.equal(winner_result.error, undefined) + assert.equal(winner_result.result, 'winner') +}) + +test('first: returns 0 as a valid first result', async () => { + const bus = new EventBus('FirstZeroBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('FirstZeroEvent', { event_result_type: z.number() }) + + bus.on(TestEvent, async (_event) => { + return 0 + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, 0, '0 is a valid non-undefined result') +}) + +test('first: returns empty string as a valid first result', async () => { + const bus = new 
EventBus('FirstEmptyBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('FirstEmptyEvent', { event_result_type: z.string() }) + + bus.on(TestEvent, async (_event) => { + return '' + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, '', 'empty string is a valid non-undefined result') +}) + +test('first: returns false as a valid first result', async () => { + const bus = new EventBus('FirstFalseBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('FirstFalseEvent', { event_result_type: z.boolean() }) + + bus.on(TestEvent, async (_event) => { + return false + }) + + const result = await bus.emit(TestEvent({})).first() + + assert.equal(result, false, 'false is a valid non-undefined result') +}) + +test('first: skips BaseEvent return values and uses the next scalar winner', async () => { + const bus = new EventBus('FirstBaseEventSkipBus', { event_timeout: null, event_handler_concurrency: 'serial' }) + const ParentEvent = BaseEvent.extend('FirstBaseEventSkipParent', {}) + const ChildEvent = BaseEvent.extend('FirstBaseEventSkipChild', {}) + let third_handler_called = false + + bus.on(ParentEvent, async (_event) => { + return ChildEvent({}) + }) + bus.on(ParentEvent, async (_event) => { + return 'winner' + }) + bus.on(ParentEvent, async (_event) => { + third_handler_called = true + return 'third' + }) + + const result = await bus.emit(ParentEvent({})).first() + assert.equal(result, 'winner') + assert.equal(third_handler_called, false) +}) + +// ─── first() cancels child events of losing handlers ──────────────────────── + +test('first: cancels child events emitted by losing handlers', async () => { + const bus = new EventBus('FirstChildBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const ParentEvent = BaseEvent.extend('FirstChildParent', { event_result_type: z.string() }) + const ChildEvent = BaseEvent.extend('FirstChildChild', {}) + + bus.on(ChildEvent, async (_event) => { + await 
delay(500) // very slow + return 'child result' + }) + + // Fast handler: returns immediately + bus.on(ParentEvent, async (_event) => { + return 'fast parent' + }) + + await delay(2) + + // Slow handler: emits a child event, then waits + bus.on(ParentEvent, async (event) => { + const child = event.bus!.emit(ChildEvent({})) + await child.done() + return 'slow parent with child' + }) + + const result = await bus.emit(ParentEvent({})).first() + + assert.equal(result, 'fast parent') + // Give a moment for any async cleanup + await delay(50) + // The child event emitted by the slow handler should have been cancelled +}) + +// ─── event_handler_completion field visibility ────────────────────────────── + +test('first: event_handler_completion is set to "first" after calling first()', async () => { + const bus = new EventBus('FirstFieldBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('FirstFieldEvent', { event_result_type: z.string() }) + + bus.on(TestEvent, async (_event) => { + return 'result' + }) + + const event = bus.emit(TestEvent({})) + const original = (event as any)._event_original ?? event + + // before first(), completion mode remains unset on the event object + assert.equal(original.event_handler_completion ?? null, null) + + const result = await event.first() + + // after first(), completion mode is 'first' + assert.equal(original.event_handler_completion, 'first') + assert.equal(result, 'result') +}) + +test('first: event_handler_completion appears in toJSON output', async () => { + const bus = new EventBus('FirstJsonBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('FirstJsonEvent', { event_result_type: z.string() }) + + bus.on(TestEvent, async (_event) => { + return 'json result' + }) + + const event = bus.emit(TestEvent({})) + await event.first() + + const original = (event as any)._event_original ?? 
event + const json = original.toJSON() + + assert.equal(json.event_handler_completion, 'first', 'toJSON should include event_handler_completion') +}) + +test('first: event_handler_completion can be set via event constructor', async () => { + const bus = new EventBus('FirstCtorBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const TestEvent = BaseEvent.extend('FirstCtorEvent', { event_result_type: z.string() }) + + bus.on(TestEvent, async (_event) => { + await delay(100) + return 'slow handler' + }) + + await delay(2) + + bus.on(TestEvent, async (_event) => { + await delay(10) + return 'fast handler' + }) + + // Set event_handler_completion directly on the event data + const event = bus.emit(TestEvent({ event_handler_completion: 'first' } as any)) + const result = await event.first() + + assert.equal(result, 'fast handler', 'should still use first-mode when set via constructor') +}) diff --git a/bubus-ts/tests/event_result.test.ts b/bubus-ts/tests/event_result.test.ts new file mode 100644 index 0000000..8455281 --- /dev/null +++ b/bubus-ts/tests/event_result.test.ts @@ -0,0 +1,260 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { v5 as uuidv5 } from 'uuid' +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' +import { EventHandler } from '../src/event_handler.js' +import { EventResult } from '../src/event_result.js' + +const StringResultEvent = BaseEvent.extend('StringResultEvent', { + event_result_type: z.string(), +}) + +const ObjectResultEvent = BaseEvent.extend('ObjectResultEvent', { + event_result_type: z.object({ value: z.string(), count: z.number() }), +}) + +const NoResultSchemaEvent = BaseEvent.extend('NoResultSchemaEvent', {}) + +test('event results capture handler return values', async () => { + const bus = new EventBus('ResultCaptureBus') + + bus.on(StringResultEvent, () => 'ok') + + const event = bus.emit(StringResultEvent({})) + await event.done() + + 
assert.equal(event.event_results.size, 1) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'ok') +}) + +test('event_result_type validates handler results', async () => { + const bus = new EventBus('ResultSchemaBus') + + bus.on(ObjectResultEvent, () => ({ value: 'hello', count: 2 })) + + const event = bus.emit(ObjectResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'hello', count: 2 }) +}) + +test('event_result_type allows undefined handler return values', async () => { + const bus = new EventBus('ResultSchemaUndefinedBus') + + bus.on(ObjectResultEvent, () => {}) + + const event = bus.emit(ObjectResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, undefined) +}) + +test('invalid result marks handler error', async () => { + const bus = new EventBus('ResultSchemaErrorBus') + + bus.on(ObjectResultEvent, () => JSON.parse('{"value":"bad","count":"nope"}')) + + const event = bus.emit(ObjectResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof Error) +}) + +test('event with no result schema stores raw values', async () => { + const bus = new EventBus('NoSchemaBus') + + bus.on(NoResultSchemaEvent, () => ({ raw: true })) + + const event = bus.emit(NoResultSchemaEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { raw: true }) +}) + +test('event result JSON omits result_type and derives from parent event', async () => { + const bus = new EventBus('ResultTypeDeriveBus') + + bus.on(StringResultEvent, () 
=> 'ok') + + const event = bus.emit(StringResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + const json = result.toJSON() as Record + + assert.equal('result_type' in json, false) + assert.equal('handler' in json, false) + assert.equal(typeof json.handler_id, 'string') + assert.equal(typeof json.handler_name, 'string') + assert.equal(typeof json.handler_event_pattern, 'string') + assert.equal(typeof json.eventbus_name, 'string') + assert.equal(typeof json.eventbus_id, 'string') + assert.equal(typeof json.handler_registered_at, 'string') + assert.equal(result.result_type, event.event_result_type) +}) + +test('EventHandler JSON roundtrips handler metadata', () => { + const handler = (event: BaseEvent): string => event.event_type + const entry = new EventHandler({ + handler, + handler_name: 'pkg.module.handler', + handler_file_path: '~/project/app.ts:123', + handler_registered_at: '2025-01-02T03:04:05.678Z', + event_pattern: 'StandaloneEvent', + eventbus_name: 'StandaloneBus', + eventbus_id: '018f8e40-1234-7000-8000-000000001234', + }) + + const dumped = entry.toJSON() + const loaded = EventHandler.fromJSON(dumped) + + assert.equal(loaded.id, entry.id) + assert.equal(loaded.event_pattern, 'StandaloneEvent') + assert.equal(loaded.eventbus_name, 'StandaloneBus') + assert.equal(loaded.eventbus_id, '018f8e40-1234-7000-8000-000000001234') + assert.equal(loaded.handler_name, 'pkg.module.handler') + assert.equal(loaded.handler_file_path, '~/project/app.ts:123') +}) + +test('EventHandler.computeHandlerId matches uuidv5 seed algorithm', () => { + const namespace = uuidv5('bubus-handler', uuidv5.DNS) + const expected_seed = + '018f8e40-1234-7000-8000-000000001234|pkg.module.handler|~/project/app.ts:123|2025-01-02T03:04:05.678Z|StandaloneEvent' + const expected_id = uuidv5(expected_seed, namespace) + + const computed_id = EventHandler.computeHandlerId({ + eventbus_id: '018f8e40-1234-7000-8000-000000001234', + handler_name: 
'pkg.module.handler', + handler_file_path: '~/project/app.ts:123', + handler_registered_at: '2025-01-02T03:04:05.678Z', + event_pattern: 'StandaloneEvent', + }) + + assert.equal(computed_id, expected_id) +}) + +test('runHandler is a no-op for already-settled results', async () => { + const SettledEvent = BaseEvent.extend('RunHandlerSettledEvent', {}) + const bus = new EventBus('RunHandlerSettledBus') + let handler_calls = 0 + const handler = bus.on(SettledEvent, () => { + handler_calls += 1 + return 'ok' + }) + + const event = SettledEvent({}) + event.bus = bus + + const result = new EventResult({ event, handler }) + result.status = 'completed' + + await result.runHandler(null) + + assert.equal(handler_calls, 0) + assert.equal(result.status, 'completed') + bus.destroy() +}) + +test('handler result stays pending while waiting for handler lock entry', async () => { + const LockWaitEvent = BaseEvent.extend('RunHandlerLockWaitEvent', {}) + const bus = new EventBus('RunHandlerLockWaitBus', { event_handler_concurrency: 'serial' }) + + bus.on(LockWaitEvent, async function first_handler() { + await new Promise((resolve) => setTimeout(resolve, 40)) + return 'first' + }) + bus.on(LockWaitEvent, async function second_handler() { + await new Promise((resolve) => setTimeout(resolve, 1)) + return 'second' + }) + + const event = bus.emit(LockWaitEvent({})) + const start = Date.now() + while (event.event_results.size < 2) { + if (Date.now() - start > 1_000) { + throw new Error('Timed out waiting for pending handler result') + } + await new Promise((resolve) => setTimeout(resolve, 0)) + } + + const second_result = Array.from(event.event_results.values()).find((result) => result.handler_name === 'second_handler') + assert.ok(second_result) + assert.equal(second_result.status, 'pending') + + await new Promise((resolve) => setTimeout(resolve, 20)) + assert.equal(second_result.status, 'pending') + await event.done() + assert.equal(second_result.status, 'completed') + bus.destroy() +}) 
+ +test('slow handler warning is based on handler runtime after lock wait', async () => { + const SlowAfterLockWaitEvent = BaseEvent.extend('RunHandlerSlowAfterLockWaitEvent', {}) + const bus = new EventBus('RunHandlerSlowAfterLockWaitBus', { + event_handler_concurrency: 'serial', + event_handler_slow_timeout: 0.01, + }) + const warnings: string[] = [] + const original_warn = console.warn + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)) + if (args.length > 0) { + warnings.push(args.map(String).join(' ')) + } + } + try { + bus.on(SlowAfterLockWaitEvent, async function first_handler() { + await new Promise((resolve) => setTimeout(resolve, 40)) + return 'first' + }) + bus.on(SlowAfterLockWaitEvent, async function second_handler() { + await new Promise((resolve) => setTimeout(resolve, 30)) + return 'second' + }) + + const event = bus.emit(SlowAfterLockWaitEvent({})) + const start = Date.now() + while (event.event_results.size < 2) { + if (Date.now() - start > 1_000) { + throw new Error('Timed out waiting for pending handler result') + } + await new Promise((resolve) => setTimeout(resolve, 0)) + } + + const second_result = Array.from(event.event_results.values()).find((result) => result.handler_name === 'second_handler') + assert.ok(second_result) + assert.equal(second_result.status, 'pending') + await new Promise((resolve) => setTimeout(resolve, 20)) + assert.equal(second_result.status, 'pending') + await event.done() + + assert.equal( + warnings.some((message) => message.toLowerCase().includes('slow event handler')), + true + ) + assert.equal( + warnings.some((message) => message.includes('first_handler')), + true + ) + assert.equal( + warnings.some((message) => message.includes('second_handler')), + true + ) + } finally { + console.warn = original_warn + bus.destroy() + } +}) diff --git a/bubus-ts/tests/event_result_typed_results.test.ts b/bubus-ts/tests/event_result_typed_results.test.ts new file mode 100644 index 
0000000..f9c9050 --- /dev/null +++ b/bubus-ts/tests/event_result_typed_results.test.ts @@ -0,0 +1,266 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const typed_result_type = z.object({ + value: z.string(), + count: z.number(), +}) + +const TypedResultEvent = BaseEvent.extend('TypedResultEvent', { + event_result_type: typed_result_type, +}) + +const StringResultEvent = BaseEvent.extend('StringResultEvent', { + event_result_type: z.string(), +}) + +const NumberResultEvent = BaseEvent.extend('NumberResultEvent', { + event_result_type: z.number(), +}) + +const ConstructorStringResultEvent = BaseEvent.extend('ConstructorStringResultEvent', { + event_result_type: String, +}) + +const ConstructorNumberResultEvent = BaseEvent.extend('ConstructorNumberResultEvent', { + event_result_type: Number, +}) + +const ConstructorBooleanResultEvent = BaseEvent.extend('ConstructorBooleanResultEvent', { + event_result_type: Boolean, +}) + +const ConstructorArrayResultEvent = BaseEvent.extend('ConstructorArrayResultEvent', { + event_result_type: Array, +}) + +const ConstructorObjectResultEvent = BaseEvent.extend('ConstructorObjectResultEvent', { + event_result_type: Object, +}) + +const ComplexResultEvent = BaseEvent.extend('ComplexResultEvent', { + event_result_type: z.object({ + items: z.array(z.string()), + metadata: z.record(z.string(), z.number()), + }), +}) + +const NoSchemaEvent = BaseEvent.extend('NoSchemaEvent', {}) + +test('typed result schema validates and parses handler result', async () => { + const bus = new EventBus('TypedResultBus') + + bus.on(TypedResultEvent, () => ({ value: 'hello', count: 42 })) + + const event = bus.emit(TypedResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'hello', count: 42 }) +}) + 
+test('built-in result schemas validate handler results', async () => { + const bus = new EventBus('BuiltinResultBus') + + bus.on(StringResultEvent, () => '42') + bus.on(NumberResultEvent, () => 123) + + const string_event = bus.emit(StringResultEvent({})) + const number_event = bus.emit(NumberResultEvent({})) + await string_event.done() + await number_event.done() + + const string_result = Array.from(string_event.event_results.values())[0] + const number_result = Array.from(number_event.event_results.values())[0] + + assert.equal(string_result.status, 'completed') + assert.equal(string_result.result, '42') + assert.equal(number_result.status, 'completed') + assert.equal(number_result.result, 123) +}) + +test('event_result_type supports constructor shorthands and enforces them', async () => { + const bus = new EventBus('ConstructorResultTypeBus') + + bus.on(ConstructorStringResultEvent, () => 'ok') + bus.on(ConstructorNumberResultEvent, () => 123) + bus.on(ConstructorBooleanResultEvent, () => true) + bus.on(ConstructorArrayResultEvent, () => [1, 'two', false]) + bus.on(ConstructorObjectResultEvent, () => ({ id: 1, ok: true })) + + const string_event = bus.emit(ConstructorStringResultEvent({})) + const number_event = bus.emit(ConstructorNumberResultEvent({})) + const boolean_event = bus.emit(ConstructorBooleanResultEvent({})) + const array_event = bus.emit(ConstructorArrayResultEvent({})) + const object_event = bus.emit(ConstructorObjectResultEvent({})) + + await Promise.all([string_event.done(), number_event.done(), boolean_event.done(), array_event.done(), object_event.done()]) + + assert.equal(typeof (string_event.event_result_type as { safeParse?: unknown } | undefined)?.safeParse, 'function') + assert.equal(typeof (number_event.event_result_type as { safeParse?: unknown } | undefined)?.safeParse, 'function') + assert.equal(typeof (boolean_event.event_result_type as { safeParse?: unknown } | undefined)?.safeParse, 'function') + assert.equal(typeof 
(array_event.event_result_type as { safeParse?: unknown } | undefined)?.safeParse, 'function') + assert.equal(typeof (object_event.event_result_type as { safeParse?: unknown } | undefined)?.safeParse, 'function') + + assert.equal(Array.from(string_event.event_results.values())[0]?.status, 'completed') + assert.equal(Array.from(number_event.event_results.values())[0]?.status, 'completed') + assert.equal(Array.from(boolean_event.event_results.values())[0]?.status, 'completed') + assert.equal(Array.from(array_event.event_results.values())[0]?.status, 'completed') + assert.equal(Array.from(object_event.event_results.values())[0]?.status, 'completed') + + const invalid_number_event = BaseEvent.extend('ConstructorNumberResultEventInvalid', { + event_result_type: Number, + }) + bus.on(invalid_number_event, () => JSON.parse('"not-a-number"')) + const invalid = bus.emit(invalid_number_event({})) + await invalid.done() + assert.equal(Array.from(invalid.event_results.values())[0]?.status, 'error') +}) + +test('invalid handler result marks error when schema is defined', async () => { + const bus = new EventBus('ResultValidationErrorBus') + + bus.on(NumberResultEvent, () => JSON.parse('"not-a-number"')) + + const event = bus.emit(NumberResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof Error) + assert.ok(event.event_errors.length > 0) +}) + +test('no schema leaves raw handler result untouched', async () => { + const bus = new EventBus('NoSchemaResultBus') + + bus.on(NoSchemaEvent, () => ({ raw: true })) + + const event = bus.emit(NoSchemaEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { raw: true }) +}) + +test('complex result schema validates nested data', async () => { + const bus = new EventBus('ComplexResultBus') + + 
bus.on(ComplexResultEvent, () => ({ + items: ['a', 'b'], + metadata: { a: 1, b: 2 }, + })) + + const event = bus.emit(ComplexResultEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { items: ['a', 'b'], metadata: { a: 1, b: 2 } }) +}) + +test('fromJSON converts event_result_type into zod schema', async () => { + const bus = new EventBus('FromJsonResultBus') + + const original = TypedResultEvent({ + event_result_type: typed_result_type, + }) + const json = original.toJSON() + + const restored = TypedResultEvent.fromJSON?.(json) ?? TypedResultEvent(json as never) + + assert.ok(restored.event_result_type) + assert.equal(typeof (restored.event_result_type as { safeParse?: unknown }).safeParse, 'function') + + bus.on(TypedResultEvent, () => ({ value: 'from-json', count: 7 })) + + const dispatched = bus.emit(restored) + await dispatched.done() + + const result = Array.from(dispatched.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { value: 'from-json', count: 7 }) +}) + +test('fromJSON reconstructs primitive JSON schema', async () => { + const bus = new EventBus('PrimitiveFromJsonBus') + + const source = new BaseEvent({ + event_type: 'PrimitiveResultEvent', + event_result_type: z.boolean(), + }).toJSON() as Record + + const restored = BaseEvent.fromJSON(source) + + assert.ok(restored.event_result_type) + assert.equal(typeof (restored.event_result_type as { safeParse?: unknown }).safeParse, 'function') + + bus.on('PrimitiveResultEvent', () => true) + const dispatched = bus.emit(restored) + await dispatched.done() + + const result = Array.from(dispatched.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, true) +}) + +test('roundtrip preserves complex result schema types', async () => { + const bus = new EventBus('RoundtripSchemaBus') + + const 
complex_schema = z.object({ + title: z.string(), + count: z.number(), + flags: z.array(z.boolean()), + active: z.boolean(), + meta: z.object({ + tags: z.array(z.string()), + rating: z.number(), + }), + }) + + const ComplexRoundtripEvent = BaseEvent.extend('ComplexRoundtripEvent', { + event_result_type: complex_schema, + }) + + const original = ComplexRoundtripEvent({ + event_result_type: complex_schema, + }) + + const roundtripped = ComplexRoundtripEvent.fromJSON?.(original.toJSON()) ?? ComplexRoundtripEvent(original.toJSON() as never) + + const zod_any = z as unknown as { + toJSONSchema?: (schema: unknown) => unknown + } + if (typeof zod_any.toJSONSchema === 'function') { + const original_schema_json = zod_any.toJSONSchema(complex_schema) + const roundtrip_schema_json = zod_any.toJSONSchema(roundtripped.event_result_type) + assert.deepEqual(roundtrip_schema_json, original_schema_json) + } + + bus.on(ComplexRoundtripEvent, () => ({ + title: 'ok', + count: 3, + flags: [true, false, true], + active: false, + meta: { tags: ['a', 'b'], rating: 4 }, + })) + + const dispatched = bus.emit(roundtripped) + await dispatched.done() + + const result = Array.from(dispatched.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.deepEqual(result.result, { + title: 'ok', + count: 3, + flags: [true, false, true], + active: false, + meta: { tags: ['a', 'b'], rating: 4 }, + }) +}) diff --git a/bubus-ts/tests/eventbus.test.ts b/bubus-ts/tests/eventbus.test.ts new file mode 100644 index 0000000..2dabbda --- /dev/null +++ b/bubus-ts/tests/eventbus.test.ts @@ -0,0 +1,1075 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' +import { GlobalEventBusRegistry } from '../src/event_bus.js' +import { AsyncLock } from '../src/lock_manager.js' +import { z } from 'zod' + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +// ─── Constructor 
defaults ──────────────────────────────────────────────────── + +test('EventBus initializes with correct defaults', async () => { + const bus = new EventBus('DefaultsBus') + + assert.equal(bus.name, 'DefaultsBus') + assert.equal(bus.event_history.max_history_size, 100) + assert.equal(bus.event_history.max_history_drop, false) + assert.equal(bus.event_concurrency, 'bus-serial') + assert.equal(bus.event_handler_concurrency, 'serial') + assert.equal(bus.event_handler_completion, 'all') + assert.equal(bus.event_timeout, 60) + assert.equal(bus.event_history.size, 0) + assert.ok(EventBus.all_instances.has(bus)) + await bus.waitUntilIdle() +}) + +test('waitUntilIdle(timeout) returns after timeout when work is still in-flight', async () => { + const WaitForIdleTimeoutEvent = BaseEvent.extend('WaitForIdleTimeoutEvent', {}) + const bus = new EventBus('WaitForIdleTimeoutBus') + + let release_handler!: () => void + const handler_gate = new Promise((resolve) => { + release_handler = resolve + }) + + bus.on(WaitForIdleTimeoutEvent, async () => { + await handler_gate + }) + + bus.emit(WaitForIdleTimeoutEvent({})) + + const start_ms = performance.now() + const became_idle = await bus.waitUntilIdle(0.05) + const elapsed_ms = performance.now() - start_ms + + try { + assert.ok(elapsed_ms >= 30, `expected timeout wait to be >=30ms, got ${elapsed_ms}ms`) + assert.ok(elapsed_ms < 1000, `expected timeout wait to be <1000ms, got ${elapsed_ms}ms`) + assert.equal(became_idle, false) + assert.equal(bus.isIdleAndQueueEmpty(), false) + } finally { + release_handler() + assert.equal(await bus.waitUntilIdle(), true) + } +}) + +test('EventBus applies custom options', () => { + const bus = new EventBus('CustomBus', { + max_history_size: 500, + max_history_drop: false, + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + event_handler_completion: 'first', + event_timeout: 30, + }) + + assert.equal(bus.event_history.max_history_size, 500) + 
assert.equal(bus.event_history.max_history_drop, false) + assert.equal(bus.event_concurrency, 'parallel') + assert.equal(bus.event_handler_concurrency, 'serial') + assert.equal(bus.event_handler_completion, 'first') + assert.equal(bus.event_timeout, 30) +}) + +test('EventBus with null max_history_size means unlimited', () => { + const bus = new EventBus('UnlimitedBus', { max_history_size: null }) + assert.equal(bus.event_history.max_history_size, null) +}) + +test('EventBus with null event_timeout disables timeouts', () => { + const bus = new EventBus('NoTimeoutBus', { event_timeout: null }) + assert.equal(bus.event_timeout, null) +}) + +test('EventBus auto-generates name when not provided', () => { + const bus = new EventBus() + assert.equal(bus.name, 'EventBus') +}) + +test('EventBus exposes locks API surface', () => { + const bus = new EventBus('GateSurfaceBus') + const locks = bus.locks as unknown as Record + + assert.equal(typeof locks._requestRunloopPause, 'function') + assert.equal(typeof locks._waitUntilRunloopResumed, 'function') + assert.equal(typeof locks._isPaused, 'function') + assert.equal(typeof locks.waitForIdle, 'function') + assert.equal(typeof locks._notifyIdleListeners, 'function') + assert.equal(typeof locks.getLockForEvent, 'function') +}) + +test('EventBus locks methods are callable and preserve lock resolution behavior', async () => { + const bus = new EventBus('GateInvocationBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const GateEvent = BaseEvent.extend('GateInvocationEvent', {}) + + const release_pause = bus.locks._requestRunloopPause() + assert.equal(bus.locks._isPaused(), true) + + let resumed = false + const resumed_promise = bus.locks._waitUntilRunloopResumed().then(() => { + resumed = true + }) + await Promise.resolve() + assert.equal(resumed, false) + + release_pause() + await resumed_promise + assert.equal(bus.locks._isPaused(), false) + + const event_with_global = GateEvent({ + 
event_concurrency: 'global-serial', + event_handler_concurrency: 'serial', + }) + assert.equal(bus.locks.getLockForEvent(event_with_global), bus._lock_for_event_global_serial) + const handler_lock = event_with_global._getHandlerLock(bus.event_handler_concurrency) + assert.ok(handler_lock) + + const event_with_parallel = GateEvent({ + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + assert.equal(bus.locks.getLockForEvent(event_with_parallel), null) + assert.equal(event_with_parallel._getHandlerLock(bus.event_handler_concurrency), null) + + const another_serial_event = GateEvent({ event_handler_concurrency: 'serial' }) + const another_lock = another_serial_event._getHandlerLock(bus.event_handler_concurrency) + assert.notEqual(handler_lock, another_lock) + + bus.emit(GateEvent({})) + bus.locks._notifyIdleListeners() + await bus.locks.waitForIdle() +}) + +test('BaseEvent lifecycle methods are callable and preserve lifecycle behavior', async () => { + const LifecycleEvent = BaseEvent.extend('LifecycleMethodInvocationEvent', {}) + + const standalone = LifecycleEvent({}) + standalone._markStarted() + assert.equal(standalone.event_status, 'started') + standalone._markCompleted(false) + assert.equal(standalone.event_status, 'completed') + await standalone.eventCompleted() + + const bus = new EventBus('LifecycleMethodInvocationBus') + const dispatched = bus.emit(LifecycleEvent({})) + await dispatched.eventCompleted() + assert.equal(dispatched.event_status, 'completed') +}) + +test('BaseEvent toJSON/fromJSON roundtrips runtime fields and event_results', async () => { + const RuntimeEvent = BaseEvent.extend('RuntimeSerializationEvent', { + event_result_type: z.string(), + }) + const bus = new EventBus('RuntimeSerializationBus') + + bus.on(RuntimeEvent, () => 'ok') + + const event = bus.emit(RuntimeEvent({})) + await event.done() + + const json = event.toJSON() as Record + assert.equal(json.event_status, 'completed') + assert.equal(typeof 
json.event_created_at, 'string') + assert.equal(typeof json.event_started_at, 'string') + assert.equal(typeof json.event_completed_at, 'string') + assert.equal(json.event_pending_bus_count, 0) + assert.ok(Array.isArray(json.event_results)) + const json_results = json.event_results as Array> + assert.equal(json_results.length, 1) + assert.equal(json_results[0].status, 'completed') + assert.equal(json_results[0].result, 'ok') + assert.equal(json_results[0].handler_id, Array.from(event.event_results.values())[0].handler_id) + + const restored = RuntimeEvent.fromJSON?.(json) ?? RuntimeEvent(json as never) + assert.equal(restored.event_status, 'completed') + assert.equal(restored.event_created_at, event.event_created_at) + assert.equal(restored.event_pending_bus_count, 0) + assert.equal(restored.event_results.size, 1) + const restored_result = Array.from(restored.event_results.values())[0] + assert.equal(restored_result.status, 'completed') + assert.equal(restored_result.result, 'ok') +}) + +test('event_version supports defaults, extend-time defaults, runtime override, and JSON roundtrip', () => { + const DefaultEvent = BaseEvent.extend('DefaultVersionEvent', {}) + const ExtendVersionEvent = BaseEvent.extend('ExtendVersionEvent', { event_version: '1.2.3' }) + + class StaticVersionEvent extends BaseEvent { + static event_type = 'StaticVersionEvent' + static event_version = '4.5.6' + } + + const default_event = DefaultEvent({}) + assert.equal(default_event.event_version, '0.0.1') + + const extended_default = ExtendVersionEvent({}) + assert.equal(extended_default.event_version, '1.2.3') + + const static_default = new StaticVersionEvent({}) + assert.equal(static_default.event_version, '4.5.6') + + const runtime_override = ExtendVersionEvent({ event_version: '9.9.9' }) + assert.equal(runtime_override.event_version, '9.9.9') + + const restored = BaseEvent.fromJSON(runtime_override.toJSON()) + assert.equal(restored.event_version, '9.9.9') +}) + +test('fromJSON accepts 
event_parent_id: null and preserves it in toJSON output', () => { + const missing_field_event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001233', + event_created_at: new Date('2025-01-01T00:00:00.000Z').toISOString(), + event_type: 'MissingParentIdEvent', + event_timeout: null, + }) + assert.equal(missing_field_event.event_parent_id, null) + + const event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001234', + event_created_at: new Date('2025-01-01T00:00:00.000Z').toISOString(), + event_type: 'NullParentIdEvent', + event_parent_id: null, + event_timeout: null, + }) + + assert.equal(event.event_parent_id, null) + assert.equal((event.toJSON() as Record).event_parent_id, null) +}) + +test('event_emitted_by_handler_id defaults to null and accepts null in fromJSON', () => { + const fresh_event = BaseEvent.extend('NullEmittedByDefaultEvent')({}) + assert.equal(fresh_event.event_emitted_by_handler_id, null) + + const missing_field_event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001239', + event_created_at: new Date('2025-01-01T00:00:00.000Z').toISOString(), + event_type: 'MissingEmittedByIdEvent', + event_timeout: null, + }) + assert.equal(missing_field_event.event_emitted_by_handler_id, null) + + const json_event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-00000000123a', + event_created_at: new Date('2025-01-01T00:00:00.000Z').toISOString(), + event_type: 'NullEmittedByIdEvent', + event_emitted_by_handler_id: null, + event_timeout: null, + }) + + assert.equal(json_event.event_emitted_by_handler_id, null) + assert.equal((json_event.toJSON() as Record).event_emitted_by_handler_id, null) +}) + +test('fromJSON deserializes event_result_type and toJSON reserializes schema', () => { + const raw_schema = { type: 'integer' } + const event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001235', + event_created_at: new Date('2025-01-01T00:00:01.000Z').toISOString(), + event_type: 
'RawSchemaEvent', + event_timeout: null, + event_result_type: raw_schema, + }) + const json = event.toJSON() as Record + assert.equal(typeof (event.event_result_type as { safeParse?: unknown } | undefined)?.safeParse, 'function') + assert.equal(typeof json.event_result_type, 'object') + assert.ok(['integer', 'number'].includes(String((json.event_result_type as { type?: unknown }).type))) +}) + +test('fromJSON reconstructs integer and null schemas for runtime validation', async () => { + const bus = new EventBus('SchemaPrimitiveRuntimeBus') + + const int_event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001236', + event_created_at: new Date('2025-01-01T00:00:02.000Z').toISOString(), + event_type: 'RawIntegerEvent', + event_timeout: null, + event_result_type: { type: 'integer' }, + }) + bus.on('RawIntegerEvent', () => 123) + await bus.emit(int_event).done() + const int_result = Array.from(int_event.event_results.values())[0] + assert.equal(int_result.status, 'completed') + + const int_bad_event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001237', + event_created_at: new Date('2025-01-01T00:00:03.000Z').toISOString(), + event_type: 'RawIntegerEventBad', + event_timeout: null, + event_result_type: { type: 'integer' }, + }) + bus.on('RawIntegerEventBad', () => 1.5) + await bus.emit(int_bad_event).done() + const int_bad_result = Array.from(int_bad_event.event_results.values())[0] + assert.equal(int_bad_result.status, 'error') + + const null_event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001238', + event_created_at: new Date('2025-01-01T00:00:04.000Z').toISOString(), + event_type: 'RawNullEvent', + event_timeout: null, + event_result_type: { type: 'null' }, + }) + bus.on('RawNullEvent', () => null) + await bus.emit(null_event).done() + const null_result = Array.from(null_event.event_results.values())[0] + assert.equal(null_result.status, 'completed') + + await bus.waitUntilIdle() +}) + +test('fromJSON 
reconstructs nested object/array result schemas', async () => { + const bus = new EventBus('SchemaNestedRuntimeBus') + const raw_nested_schema = { + type: 'object', + properties: { + items: { type: 'array', items: { type: 'integer' } }, + meta: { type: 'object', additionalProperties: { type: 'boolean' } }, + }, + required: ['items', 'meta'], + } + + const valid_event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001239', + event_created_at: new Date('2025-01-01T00:00:05.000Z').toISOString(), + event_type: 'RawNestedSchemaEvent', + event_timeout: null, + event_result_type: raw_nested_schema, + }) + bus.on('RawNestedSchemaEvent', () => ({ items: [1, 2, 3], meta: { ok: true } })) + await bus.emit(valid_event).done() + const valid_result = Array.from(valid_event.event_results.values())[0] + assert.equal(valid_result.status, 'completed') + + const invalid_event = BaseEvent.fromJSON({ + event_id: '018f8e40-1234-7000-8000-000000001240', + event_created_at: new Date('2025-01-01T00:00:06.000Z').toISOString(), + event_type: 'RawNestedSchemaEventBad', + event_timeout: null, + event_result_type: raw_nested_schema, + }) + bus.on('RawNestedSchemaEventBad', () => ({ items: ['bad'], meta: { ok: 'yes' } })) + await bus.emit(invalid_event).done() + const invalid_result = Array.from(invalid_event.event_results.values())[0] + assert.equal(invalid_result.status, 'error') + + await bus.waitUntilIdle() +}) + +// ─── Event dispatch and status lifecycle ───────────────────────────────────── + +test('dispatch returns pending event with correct initial state', async () => { + const bus = new EventBus('LifecycleBus', { max_history_size: 100 }) + const TestEvent = BaseEvent.extend('TestEvent', { data: z.string() }) + + const event = bus.emit(TestEvent({ data: 'hello' })) + + // Immediate state after dispatch (before any microtask runs) + assert.equal(event.event_type, 'TestEvent') + assert.ok(event.event_id) + assert.ok(event.event_created_at) + assert.equal((event as 
any).data, 'hello') + + // event_path should include the bus label + const original = event._event_original ?? event + assert.ok(original.event_path.includes(bus.label)) + + await bus.waitUntilIdle() +}) + +test('event transitions through pending -> started -> completed', async () => { + const bus = new EventBus('StatusBus', { max_history_size: 100 }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + let status_during_handler: string | undefined + + bus.on(TestEvent, (event: BaseEvent) => { + status_during_handler = event.event_status + return 'done' + }) + + const event = bus.emit(TestEvent({})) + const original = event._event_original ?? event + + await event.done() + + assert.equal(status_during_handler, 'started') + assert.equal(original.event_status, 'completed') + assert.ok(original.event_started_at, 'event_started_at should be set') + assert.ok(original.event_completed_at, 'event_completed_at should be set') +}) + +test('event with no handlers completes immediately', async () => { + const bus = new EventBus('NoHandlerBus', { max_history_size: 100 }) + const OrphanEvent = BaseEvent.extend('OrphanEvent', {}) + + const event = bus.emit(OrphanEvent({})) + await event.done() + + const original = event._event_original ?? 
event + assert.equal(original.event_status, 'completed') + assert.equal(original.event_results.size, 0) +}) + +// ─── Event history tracking ────────────────────────────────────────────────── + +test('dispatched events appear in event_history', async () => { + const bus = new EventBus('HistoryBus', { max_history_size: 100 }) + const EventA = BaseEvent.extend('EventA', {}) + const EventB = BaseEvent.extend('EventB', {}) + + bus.emit(EventA({})) + bus.emit(EventB({})) + await bus.waitUntilIdle() + + assert.equal(bus.event_history.size, 2) + const history = Array.from(bus.event_history.values()) + assert.equal(history[0].event_type, 'EventA') + assert.equal(history[1].event_type, 'EventB') + + // All events are accessible by id + for (const event of bus.event_history.values()) { + assert.ok(bus.event_history.has(event.event_id)) + } +}) + +// ─── History trimming (max_history_size) ───────────────────────────────────── + +test('history is trimmed to max_history_size, completed events removed first', async () => { + const bus = new EventBus('TrimBus', { max_history_size: 5, max_history_drop: true }) + const TrimEvent = BaseEvent.extend('TrimEvent', { seq: z.number() }) + + bus.on(TrimEvent, () => 'ok') + + // Dispatch 10 events; they'll process and complete in FIFO order + for (let i = 0; i < 10; i++) { + bus.emit(TrimEvent({ seq: i })) + } + await bus.waitUntilIdle() + + // History should be trimmed to at most max_history_size + assert.ok(bus.event_history.size <= 5, `expected <= 5, got ${bus.event_history.size}`) + + // The remaining events should be the MOST RECENT ones (oldest completed removed first) + const seqs = Array.from(bus.event_history.values()).map((e) => (e as any).seq as number) + for (let i = 1; i < seqs.length; i++) { + assert.ok(seqs[i] > seqs[i - 1], 'remaining history should be in order') + } +}) + +test('unlimited history (max_history_size: null) keeps all events', async () => { + const bus = new EventBus('UnlimitedHistBus', { max_history_size: 
null }) + const PingEvent = BaseEvent.extend('PingEvent', {}) + + bus.on(PingEvent, () => 'pong') + + for (let i = 0; i < 150; i++) { + bus.emit(PingEvent({})) + } + await bus.waitUntilIdle() + + assert.equal(bus.event_history.size, 150) + + // All completed + for (const event of bus.event_history.values()) { + assert.equal(event.event_status, 'completed') + } +}) + +test('max_history_drop=false rejects new dispatch when history is full', async () => { + const bus = new EventBus('NoDropHistBus', { max_history_size: 2, max_history_drop: false }) + const NoDropEvent = BaseEvent.extend('NoDropEvent', { seq: z.number() }) + + bus.on(NoDropEvent, () => 'ok') + + await bus.emit(NoDropEvent({ seq: 1 })).done() + await bus.emit(NoDropEvent({ seq: 2 })).done() + + assert.equal(bus.event_history.size, 2) + assert.throws(() => bus.emit(NoDropEvent({ seq: 3 })), /history limit reached \(2\/2\); set event_history\.max_history_drop=true/) + assert.equal(bus.event_history.size, 2) + assert.equal(bus.pending_event_queue.length, 0) +}) + +test('max_history_size=0 with max_history_drop=false still allows unbounded queueing and drops completed events', async () => { + const bus = new EventBus('ZeroHistNoDropBus', { max_history_size: 0, max_history_drop: false }) + const BurstEvent = BaseEvent.extend('BurstEvent', {}) + + let release!: () => void + const unblock = new Promise((resolve) => { + release = resolve + }) + + bus.on(BurstEvent, async () => { + await unblock + }) + + const events: BaseEvent[] = [] + for (let i = 0; i < 25; i++) { + events.push(bus.emit(BurstEvent({}))) + } + + await delay(10) + assert.ok(bus.pending_event_queue.length > 1) + assert.ok(bus.event_history.size >= 1) + + release() + await Promise.all(events.map((event) => event.done())) + await bus.waitUntilIdle() + + assert.equal(bus.event_history.size, 0) + assert.equal(bus.pending_event_queue.length, 0) +}) + +test('max_history_size=0 keeps in-flight events and drops them on completion', async () => { + const 
bus = new EventBus('ZeroHistBus', { max_history_size: 0 }) + const SlowEvent = BaseEvent.extend('SlowEvent', {}) + + let release!: () => void + const unblock = new Promise((resolve) => { + release = resolve + }) + + bus.on(SlowEvent, async () => { + await unblock + }) + + const first = bus.emit(SlowEvent({})) + const second = bus.emit(SlowEvent({})) + + await delay(10) + assert.ok(bus.event_history.has(first.event_id)) + assert.ok(bus.event_history.has(second.event_id)) + + release() + await Promise.all([first.done(), second.done()]) + await bus.waitUntilIdle() + + assert.equal(bus.event_history.size, 0) +}) + +// ─── Event type derivation ─────────────────────────────────────────────────── + +test('event_type is derived from extend() name argument', () => { + const MyCustomEvent = BaseEvent.extend('MyCustomEvent', { val: z.number() }) + const event = MyCustomEvent({ val: 42 }) + assert.equal(event.event_type, 'MyCustomEvent') +}) + +test('event_type can be overridden at instantiation', () => { + const FlexEvent = BaseEvent.extend('FlexEvent', {}) + const event = FlexEvent({ event_type: 'OverriddenType' }) + assert.equal(event.event_type, 'OverriddenType') +}) + +test('handler registration by string matches extend() name', async () => { + const bus = new EventBus('StringMatchBus', { max_history_size: 100 }) + const NamedEvent = BaseEvent.extend('NamedEvent', {}) + const received: string[] = [] + + bus.on('NamedEvent', () => { + received.push('string_handler') + }) + + bus.emit(NamedEvent({})) + await bus.waitUntilIdle() + + assert.equal(received.length, 1) + assert.equal(received[0], 'string_handler') +}) + +test('wildcard handler receives all events', async () => { + const bus = new EventBus('WildcardBus', { max_history_size: 100 }) + const EventA = BaseEvent.extend('EventA', {}) + const EventB = BaseEvent.extend('EventB', {}) + const types: string[] = [] + + bus.on('*', (event: BaseEvent) => { + types.push(event.event_type) + }) + + bus.emit(EventA({})) + 
bus.emit(EventB({})) + await bus.waitUntilIdle() + + assert.deepEqual(types, ['EventA', 'EventB']) +}) + +// ─── Error handling and isolation ──────────────────────────────────────────── + +test('handler error is captured without crashing the bus', async () => { + const bus = new EventBus('ErrorBus', { max_history_size: 100 }) + const ErrorEvent = BaseEvent.extend('ErrorEvent', {}) + + bus.on(ErrorEvent, () => { + throw new Error('handler blew up') + }) + + const event = bus.emit(ErrorEvent({})) + await event.done() + + const original = event._event_original ?? event + assert.equal(original.event_status, 'completed') + assert.ok(original.event_errors.length > 0, 'event should record the error') + + // The handler result should have error status + const results = Array.from(original.event_results.values()) + assert.equal(results.length, 1) + assert.equal(results[0].status, 'error') + assert.ok(results[0].error instanceof Error) + assert.equal((results[0].error as Error).message, 'handler blew up') +}) + +test('one handler error does not prevent other handlers from running', async () => { + const bus = new EventBus('IsolationBus', { + max_history_size: 100, + event_handler_concurrency: 'parallel', + }) + const MultiEvent = BaseEvent.extend('MultiEvent', {}) + + const results_seen: string[] = [] + + bus.on(MultiEvent, () => { + results_seen.push('handler_1_ok') + return 'result_1' + }) + bus.on(MultiEvent, () => { + throw new Error('handler_2_fails') + }) + bus.on(MultiEvent, () => { + results_seen.push('handler_3_ok') + return 'result_3' + }) + + const event = bus.emit(MultiEvent({})) + await event.done() + + const original = event._event_original ?? 
event + assert.equal(original.event_status, 'completed') + + // Both non-erroring handlers should have run + assert.ok(results_seen.includes('handler_1_ok')) + assert.ok(results_seen.includes('handler_3_ok')) + + // Check individual results + const all_results = Array.from(original.event_results.values()) + const completed_results = all_results.filter((r) => r.status === 'completed') + const error_results = all_results.filter((r) => r.status === 'error') + assert.equal(completed_results.length, 2) + assert.equal(error_results.length, 1) +}) + +test('eventResultsList returns filtered values by default and can return raw values with include', async () => { + const bus = new EventBus('EventResultsListBus', { event_handler_concurrency: 'serial' }) + const ResultListEvent = BaseEvent.extend('ResultListEvent', {}) + + bus.on(ResultListEvent, () => ({ one: 1 })) + bus.on(ResultListEvent, () => ['two']) + bus.on(ResultListEvent, () => undefined) + + const values = await bus.emit(ResultListEvent({})).eventResultsList() + assert.deepEqual(values, [{ one: 1 }, ['two']]) + + const raw_values = await bus.emit(ResultListEvent({})).eventResultsList(() => true, { + raise_if_any: false, + raise_if_none: false, + }) + assert.deepEqual(raw_values, [{ one: 1 }, ['two'], undefined]) +}) + +test('eventResultsList supports timeout/include/raise_if_any/raise_if_none arguments', async () => { + const bus = new EventBus('EventResultsListArgsBus', { event_handler_concurrency: 'serial' }) + const ArgsEvent = BaseEvent.extend('ArgsEvent', {}) + const EmptyEvent = BaseEvent.extend('EmptyEvent', {}) + const IncludeEvent = BaseEvent.extend('IncludeEvent', {}) + const MixedEvent = BaseEvent.extend('MixedEvent', {}) + const TimeoutEvent = BaseEvent.extend('TimeoutEvent', {}) + + bus.on(ArgsEvent, () => 'ok') + bus.on(ArgsEvent, () => { + throw new Error('boom') + }) + await assert.rejects(async () => bus.emit(ArgsEvent({})).eventResultsList(), /boom/) + + const values_without_errors = await 
bus.emit(ArgsEvent({})).eventResultsList({ raise_if_any: false, raise_if_none: true }) + assert.deepEqual(values_without_errors, ['ok']) + + bus.on(EmptyEvent, () => undefined) + await assert.rejects(async () => bus.emit(EmptyEvent({})).eventResultsList(), /Expected at least one handler/) + const empty_values = await bus.emit(EmptyEvent({})).eventResultsList({ raise_if_any: false, raise_if_none: false }) + assert.deepEqual(empty_values, []) + + bus.on(MixedEvent, () => undefined) + bus.on(MixedEvent, () => 'valid') + const mixed_values = await bus.emit(MixedEvent({})).eventResultsList({ raise_if_any: false, raise_if_none: true }) + assert.deepEqual(mixed_values, ['valid']) + + bus.on(IncludeEvent, () => 'keep') + bus.on(IncludeEvent, () => 'drop') + const filtered_values = await bus + .emit(IncludeEvent({})) + .eventResultsList((result) => result === 'keep', { raise_if_any: false, raise_if_none: true }) + assert.deepEqual(filtered_values, ['keep']) + + bus.on(TimeoutEvent, async () => { + await delay(50) + return 'late' + }) + await assert.rejects(async () => bus.emit(TimeoutEvent({})).eventResultsList({ timeout: 0.01 }), /Timed out waiting/) +}) + +// ─── Concurrent dispatch ───────────────────────────────────────────────────── + +test('many events dispatched concurrently all complete', async () => { + const bus = new EventBus('ConcurrentBus', { max_history_size: null }) + const BatchEvent = BaseEvent.extend('BatchEvent', { idx: z.number() }) + let processed = 0 + + bus.on(BatchEvent, () => { + processed += 1 + return 'ok' + }) + + const events: BaseEvent[] = [] + for (let i = 0; i < 100; i++) { + events.push(bus.emit(BatchEvent({ idx: i }))) + } + + // Wait for all to complete + await Promise.all(events.map((e) => e.done())) + await bus.waitUntilIdle() + + assert.equal(processed, 100) + assert.equal(bus.event_history.size, 100) + + for (const event of bus.event_history.values()) { + assert.equal(event.event_status, 'completed') + } +}) + +// ─── event_timeout 
default application ─────────────────────────────────────── + +test('dispatch leaves event_timeout unset and processing uses bus timeout default', async () => { + const bus = new EventBus('TimeoutDefaultBus', { + max_history_size: 100, + event_timeout: 0.01, + }) + const TEvent = BaseEvent.extend('TEvent', {}) + bus.on(TEvent, async () => { + await delay(30) + }) + + const event = bus.emit(TEvent({})) + const original = event._event_original ?? event + + assert.equal(original.event_timeout, null) + + await event.done() + assert.equal(original.event_errors.length, 1) +}) + +test('event with explicit timeout is not overridden by bus default', async () => { + const bus = new EventBus('TimeoutOverrideBus', { + max_history_size: 100, + event_timeout: 42, + }) + const TEvent = BaseEvent.extend('TEvent', {}) + + const event = bus.emit(TEvent({ event_timeout: 10 })) + const original = event._event_original ?? event + + assert.equal(original.event_timeout, 10) + + await bus.waitUntilIdle() +}) + +// ─── EventBus.all_instances tracking ───────────────────────────────────────────── + +test('EventBus.all_instances tracks all created buses', () => { + const initial_count = EventBus.all_instances.size + const bus_a = new EventBus('TrackA') + const bus_b = new EventBus('TrackB') + + assert.ok(EventBus.all_instances.has(bus_a)) + assert.ok(EventBus.all_instances.has(bus_b)) + assert.equal(EventBus.all_instances.size, initial_count + 2) +}) + +test('EventBus subclasses isolate registries and global-serial locks', () => { + class IsolatedBusA extends EventBus {} + class IsolatedBusB extends EventBus {} + + const bus_a1 = new IsolatedBusA('IsolatedBusA1', { event_concurrency: 'global-serial' }) + const bus_a2 = new IsolatedBusA('IsolatedBusA2', { event_concurrency: 'global-serial' }) + const bus_b1 = new IsolatedBusB('IsolatedBusB1', { event_concurrency: 'global-serial' }) + + assert.equal(IsolatedBusA.all_instances.has(bus_a1), true) + 
assert.equal(IsolatedBusA.all_instances.has(bus_a2), true) + assert.equal(IsolatedBusA.all_instances.has(bus_b1), false) + assert.equal(IsolatedBusB.all_instances.has(bus_b1), true) + assert.equal(IsolatedBusB.all_instances.has(bus_a1), false) + assert.equal(EventBus.all_instances.has(bus_a1), false) + assert.equal(EventBus.all_instances.has(bus_b1), false) + + const lock_a1 = bus_a1.locks.getLockForEvent(new BaseEvent()) + const lock_a2 = bus_a2.locks.getLockForEvent(new BaseEvent()) + const lock_b1 = bus_b1.locks.getLockForEvent(new BaseEvent()) + assert.notEqual(lock_a1, null) + assert.notEqual(lock_a2, null) + assert.notEqual(lock_b1, null) + assert.equal(lock_a1, lock_a2) + assert.notEqual(lock_a1, lock_b1) + + bus_a1.destroy() + bus_a2.destroy() + bus_b1.destroy() +}) + +// ─── Circular forwarding prevention ────────────────────────────────────────── + +test('circular forwarding does not cause infinite loop', async () => { + const bus_a = new EventBus('CircA', { max_history_size: 100 }) + const bus_b = new EventBus('CircB', { max_history_size: 100 }) + const bus_c = new EventBus('CircC', { max_history_size: 100 }) + + // A -> B -> C -> A (circular) + bus_a.on('*', bus_b.emit) + bus_b.on('*', bus_c.emit) + bus_c.on('*', bus_a.emit) + + const CircEvent = BaseEvent.extend('CircEvent', {}) + const handler_calls: string[] = [] + + // Register real handlers on each bus + bus_a.on(CircEvent, () => { + handler_calls.push('A') + return 'a' + }) + bus_b.on(CircEvent, () => { + handler_calls.push('B') + return 'b' + }) + bus_c.on(CircEvent, () => { + handler_calls.push('C') + return 'c' + }) + + const event = bus_a.emit(CircEvent({})) + await event.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() + + // Each bus should process the event exactly once (loop prevention via event_path) + assert.equal(handler_calls.filter((h) => h === 'A').length, 1) + assert.equal(handler_calls.filter((h) => h === 'B').length, 1) + 
assert.equal(handler_calls.filter((h) => h === 'C').length, 1) + + // event_path should contain all three buses + const original = event._event_original ?? event + assert.ok(original.event_path.includes(bus_a.label)) + assert.ok(original.event_path.includes(bus_b.label)) + assert.ok(original.event_path.includes(bus_c.label)) +}) + +// ─── EventBus GC / memory leak ─────────────────────────────────────────────── + +const flush_gc_cycles = async (gc: () => void, cycles: number): Promise => { + for (let i = 0; i < cycles; i += 1) { + gc() + await new Promise((resolve) => setImmediate(resolve)) + } +} + +test('unreferenced EventBus can be garbage collected (not retained by all_instances)', async () => { + const gc = globalThis.gc + if (typeof gc !== 'function') { + assert.fail('GC tests require --expose-gc') + } + + let weak_ref: WeakRef | null = null + + // Create a bus inside an IIFE so the only reference is the WeakRef + ;(() => { + const bus = new EventBus('GCTestBus') + weak_ref = new WeakRef(bus) + })() + + await flush_gc_cycles(gc, 20) + + // If EventBus.all_instances holds a strong reference (Set), + // the bus will NOT be collected β€” proving the memory leak. + // After the fix (WeakRef-based storage), the bus should be collected. 
+ assert.notEqual(weak_ref, null, 'WeakRef should be assigned by the test setup IIFE') + assert.equal( + weak_ref.deref(), + undefined, + 'bus should be garbage collected when no external references remain β€” ' + + 'EventBus.all_instances is holding a strong reference (memory leak)' + ) +}) + +test('subclass registry and global lock are collectable when subclass goes out of scope', async () => { + const gc = globalThis.gc + if (typeof gc !== 'function') { + assert.fail('GC tests require --expose-gc') + } + + let subclass_ref!: WeakRef + let registry_ref!: WeakRef + let lock_ref!: WeakRef + let bus_ref!: WeakRef + ;(() => { + class ScopedSubclassBus extends EventBus {} + const bus = new ScopedSubclassBus('ScopedSubclassBus', { event_concurrency: 'global-serial' }) + subclass_ref = new WeakRef(ScopedSubclassBus) + registry_ref = new WeakRef(ScopedSubclassBus.all_instances) + lock_ref = new WeakRef(bus._lock_for_event_global_serial) + bus_ref = new WeakRef(bus) + })() + + await flush_gc_cycles(gc, 300) + + assert.equal(bus_ref.deref(), undefined, 'subclass bus instance should be collectable') + assert.equal(subclass_ref.deref(), undefined, 'subclass type should be collectable') + assert.equal(registry_ref.deref(), undefined, 'subclass all_instances registry should be collectable') + assert.equal(lock_ref.deref(), undefined, 'subclass global lock should be collectable') +}) + +test('unreferenced buses with event history are garbage collected without destroy()', async () => { + const gc = globalThis.gc + if (typeof gc !== 'function') { + assert.fail('GC tests require --expose-gc') + } + + class IsolatedRegistryBus extends EventBus {} + + const GcEvent = BaseEvent.extend('GcNoDestroyEvent', {}) + const weak_refs: Array> = [] + const created_bus_ids: string[] = [] + + await flush_gc_cycles(gc, 10) + const heap_before = process.memoryUsage().heapUsed + + const create_and_run_bus = async (index: number): Promise<{ ref: WeakRef; id: string }> => { + const bus = new 
IsolatedRegistryBus(`GC-NoDestroy-${index}`, { max_history_size: 200 }) + bus.on(GcEvent, () => {}) + for (let i = 0; i < 200; i += 1) { + const event = bus.emit(GcEvent({})) + await event.done() + } + await bus.waitUntilIdle() + return { ref: new WeakRef(bus), id: bus.id } + } + + for (let i = 0; i < 120; i += 1) { + const { ref, id } = await create_and_run_bus(i) + weak_refs.push(ref) + created_bus_ids.push(id) + } + + await flush_gc_cycles(gc, 30) + + const alive_count = weak_refs.reduce((count, ref) => count + (ref.deref() ? 1 : 0), 0) + const remaining_ids = new Set(Array.from(IsolatedRegistryBus.all_instances).map((bus) => bus.id)) + const heap_after = process.memoryUsage().heapUsed + + assert.equal(alive_count, 0, 'all unreferenced buses should be garbage collected without explicit destroy()') + for (const id of created_bus_ids) { + assert.equal(remaining_ids.has(id), false, `all_instances should not retain unreferenced bus ${id}`) + } + assert.ok( + heap_after <= heap_before + 20 * 1024 * 1024, + `heap should return near baseline after GC, before=${(heap_before / 1024 / 1024).toFixed(1)}MB after=${(heap_after / 1024 / 1024).toFixed(1)}MB` + ) +}) + +// Consolidated from tests/coverage_gaps.test.ts + +test('reset creates a fresh pending event for cross-bus dispatch', async () => { + const ResetEvent = BaseEvent.extend('ResetCoverageEvent', { + label: z.string(), + }) + + const bus_a = new EventBus('ResetCoverageBusA') + const bus_b = new EventBus('ResetCoverageBusB') + + bus_a.on(ResetEvent, (event) => `a:${event.label}`) + bus_b.on(ResetEvent, (event) => `b:${event.label}`) + + const completed = await bus_a.emit(ResetEvent({ label: 'hello' })).done() + const fresh = completed.eventReset() + + assert.notEqual(fresh.event_id, completed.event_id) + assert.equal(fresh.event_status, 'pending') + assert.equal(fresh.event_results.size, 0) + assert.equal(fresh.event_started_at, null) + assert.equal(fresh.event_completed_at, null) + + const forwarded = await 
bus_b.emit(fresh).done() + assert.equal(forwarded.event_status, 'completed') + assert.equal( + Array.from(forwarded.event_results.values()).some((result) => result.result === 'b:hello'), + true + ) + assert.equal( + forwarded.event_path.some((entry) => entry.startsWith('ResetCoverageBusA#')), + true + ) + assert.equal( + forwarded.event_path.some((entry) => entry.startsWith('ResetCoverageBusB#')), + true + ) + + bus_a.destroy() + bus_b.destroy() +}) + +test('scoped handler event reports bus and _event_original via in-operator', async () => { + const ProxyEvent = BaseEvent.extend('ProxyHasCoverageEvent', {}) + const bus = new EventBus('ProxyHasCoverageBus') + let has_bus = false + let has_original = false + + bus.on(ProxyEvent, (event) => { + has_bus = 'bus' in event + has_original = '_event_original' in event + }) + + await bus.emit(ProxyEvent({})).done() + + assert.equal(has_bus, true) + assert.equal(has_original, true) + bus.destroy() +}) + +test('on() rejects BaseEvent matcher without a concrete event type', () => { + const bus = new EventBus('InvalidMatcherCoverageBus') + assert.throws(() => bus.on(BaseEvent as unknown as any, () => undefined), /must be a string event type/) + bus.destroy() +}) + +test('max_history_size=0 prunes previously completed events on later dispatch', async () => { + const HistEvent = BaseEvent.extend('ZeroHistoryCoverageEvent', { + label: z.string(), + }) + const bus = new EventBus('ZeroHistoryCoverageBus', { max_history_size: 1 }) + bus.on(HistEvent, () => undefined) + + const first = await bus.emit(HistEvent({ label: 'first' })).done() + assert.equal(bus.event_history.has(first.event_id), true) + + bus.event_history.max_history_size = 0 + const second = await bus.emit(HistEvent({ label: 'second' })).done() + assert.equal(bus.event_history.has(first.event_id), false) + assert.equal(bus.event_history.has(second.event_id), false) + assert.equal(bus.event_history.size, 0) + + bus.destroy() +}) diff --git 
a/bubus-ts/tests/eventbus_debounce.test.ts b/bubus-ts/tests/eventbus_debounce.test.ts new file mode 100644 index 0000000..e36d312 --- /dev/null +++ b/bubus-ts/tests/eventbus_debounce.test.ts @@ -0,0 +1,136 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) + +const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', { target_id: z.string() }) + +const SyncEvent = BaseEvent.extend('SyncEvent', {}) +const TARGET_ID_1 = '9b447756-908c-7b75-8a51-4a2c2b4d9b14' +const TARGET_ID_2 = '194870e1-fa02-70a4-8101-d10d57c3449c' + +test('simple debounce uses recent history or dispatches new', async () => { + const bus = new EventBus('DebounceBus') + + const parent_event = bus.emit(ParentEvent({})) + await parent_event.done() + + const child_event = parent_event.bus?.emit(ScreenshotEvent({ target_id: TARGET_ID_1 })) + assert.ok(child_event) + await child_event.done() + + const reused_event = + (await bus.find(ScreenshotEvent, { + past: 10, + future: false, + child_of: parent_event, + })) ?? bus.emit(ScreenshotEvent({ target_id: TARGET_ID_2 })) + await reused_event.done() + + assert.equal(reused_event.event_id, child_event.event_id) + assert.equal(reused_event.event_parent_id, parent_event.event_id) +}) + +test('advanced debounce prefers history, then waits for future, then dispatches', async () => { + const bus = new EventBus('AdvancedDebounceBus') + + const pending_event = bus.find(SyncEvent, { past: false, future: 0.5 }) + + setTimeout(() => { + bus.emit(SyncEvent({})) + }, 50) + + const resolved_event = (await bus.find(SyncEvent, { past: true, future: false })) ?? (await pending_event) ?? 
bus.emit(SyncEvent({})) + await resolved_event.done() + + assert.ok(resolved_event) + assert.equal(resolved_event.event_type, 'SyncEvent') +}) + +test('debounce returns existing fresh event', async () => { + const bus = new EventBus('DebounceFreshBus') + + const original = await bus.emit(ScreenshotEvent({ target_id: TARGET_ID_1 })).done() + + const is_fresh = (event: typeof original): boolean => { + const completed_at = event.event_completed_at ? Date.parse(event.event_completed_at) : 0 + return Date.now() - completed_at < 5000 + } + + const result = + (await bus.find(ScreenshotEvent, (event) => event.target_id === TARGET_ID_1 && is_fresh(event), { past: true, future: false })) ?? + bus.emit(ScreenshotEvent({ target_id: TARGET_ID_1 })) + await result.done() + + assert.equal(result.event_id, original.event_id) +}) + +test('debounce dispatches new when no match', async () => { + const bus = new EventBus('DebounceNoMatchBus') + + const result = + (await bus.find(ScreenshotEvent, (event) => event.target_id === TARGET_ID_1, { past: true, future: false })) ?? + bus.emit(ScreenshotEvent({ target_id: TARGET_ID_1 })) + await result.done() + + assert.ok(result) + assert.equal(result.target_id, TARGET_ID_1) + assert.equal(result.event_status, 'completed') +}) + +test('debounce dispatches new when existing is stale', async () => { + const bus = new EventBus('DebounceStaleBus') + + await bus.emit(ScreenshotEvent({ target_id: TARGET_ID_1 })).done() + + const result = + (await bus.find(ScreenshotEvent, (event) => event.target_id === TARGET_ID_1 && false, { past: true, future: false })) ?? 
+ bus.emit(ScreenshotEvent({ target_id: TARGET_ID_1 })) + await result.done() + + assert.ok(result) + const screenshots = Array.from(bus.event_history.values()).filter((event) => event.event_type === 'ScreenshotEvent') + assert.equal(screenshots.length, 2) +}) + +test('debounce or-chain handles sequential lookups without blocking', async () => { + const bus = new EventBus('DebounceSequentialBus') + + const result1 = + (await bus.find(ScreenshotEvent, (event) => event.target_id === TARGET_ID_1, { past: true, future: false })) ?? + bus.emit(ScreenshotEvent({ target_id: TARGET_ID_1 })) + + const result2 = + (await bus.find(ScreenshotEvent, (event) => event.target_id === TARGET_ID_1, { past: true, future: false })) ?? + bus.emit(ScreenshotEvent({ target_id: TARGET_ID_1 })) + + const result3 = + (await bus.find(ScreenshotEvent, (event) => event.target_id === TARGET_ID_2, { past: true, future: false })) ?? + bus.emit(ScreenshotEvent({ target_id: TARGET_ID_2 })) + await Promise.all([result1.done(), result2.done(), result3.done()]) + + assert.equal(result1.event_id, result2.event_id) + assert.notEqual(result1.event_id, result3.event_id) + assert.equal(result3.target_id, TARGET_ID_2) +}) + +test('debounce past-only and past-window lookups return immediately when empty', async () => { + const bus = new EventBus('DebounceImmediateLookupBus') + + const past_start = Date.now() + const found_past = await bus.find(ParentEvent, { past: true, future: false }) + const past_elapsed_ms = Date.now() - past_start + + const window_start = Date.now() + const found_window = await bus.find(ParentEvent, { past: 5, future: false }) + const window_elapsed_ms = Date.now() - window_start + + assert.equal(found_past, null) + assert.equal(found_window, null) + assert.ok(past_elapsed_ms < 100) + assert.ok(window_elapsed_ms < 100) +}) diff --git a/bubus-ts/tests/eventbus_dispatch_contextvars.test.ts b/bubus-ts/tests/eventbus_dispatch_contextvars.test.ts new file mode 100644 index 0000000..ebea4ae 
--- /dev/null +++ b/bubus-ts/tests/eventbus_dispatch_contextvars.test.ts @@ -0,0 +1,319 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' +import { async_local_storage, hasAsyncLocalStorage } from '../src/async_context.js' + +type ContextStore = { + request_id?: string + user_id?: string + trace_id?: string +} + +const SimpleEvent = BaseEvent.extend('SimpleEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) + +const require_async_local_storage = () => { + assert.ok(async_local_storage, 'AsyncLocalStorage not available') + return async_local_storage +} + +assert.ok(hasAsyncLocalStorage(), 'AsyncLocalStorage must be available for context propagation tests') + +const get_store = (store: ContextStore | undefined | null): ContextStore => store ?? {} + +test('context propagates to handler', async () => { + const bus = new EventBus('ContextTestBus') + const captured_values: ContextStore = {} + const storage = require_async_local_storage() + const request_id = 'a9e03792-9be2-700b-82c9-b46f260cb0cd' + const user_id = '930eaeb0-5ebe-7f3b-82b2-a824c3a57bae' + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.request_id = store?.request_id + captured_values.user_id = store?.user_id + }) + + await storage.run({ request_id, user_id }, async () => { + const event = bus.emit(SimpleEvent({})) + await event.done() + }) + + assert.equal(captured_values.request_id, request_id) + assert.equal(captured_values.user_id, user_id) +}) + +test('context propagates through nested handlers', async () => { + const bus = new EventBus('NestedContextBus') + const captured_parent: ContextStore = {} + const captured_child: ContextStore = {} + const storage = require_async_local_storage() + const request_id = '623d15fd-1410-7da3-8533-ae995961aa0a' + const trace_id = '4c000901-1ec0-780f-8b2e-fa740047ac91' + + bus.on(SimpleEvent, async (event) 
=> { + const store = storage.getStore() as ContextStore | undefined + captured_parent.request_id = store?.request_id + captured_parent.trace_id = store?.trace_id + + const child = event.bus?.emit(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_child.request_id = store?.request_id + captured_child.trace_id = store?.trace_id + }) + + await storage.run({ request_id, trace_id }, async () => { + const event = bus.emit(SimpleEvent({})) + await event.done() + }) + + assert.equal(captured_parent.request_id, request_id) + assert.equal(captured_parent.trace_id, trace_id) + assert.equal(captured_child.request_id, request_id) + assert.equal(captured_child.trace_id, trace_id) +}) + +test('context isolation between dispatches', async () => { + const bus = new EventBus('IsolationTestBus') + const captured_values: string[] = [] + const storage = require_async_local_storage() + const request_id_a = '27c90da2-7243-7272-8a0c-4e0b838da162' + const request_id_b = '0b30a5e6-2d24-7026-82b1-32eb3bee012a' + + bus.on(SimpleEvent, async () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(store?.request_id ?? 
'') + }) + + const event_a = storage.run({ request_id: request_id_a }, () => bus.emit(SimpleEvent({}))) + const event_b = storage.run({ request_id: request_id_b }, () => bus.emit(SimpleEvent({}))) + + await event_a.done() + await event_b.done() + + assert.ok(captured_values.includes(request_id_a)) + assert.ok(captured_values.includes(request_id_b)) +}) + +test('context propagates to multiple handlers', async () => { + const bus = new EventBus('ParallelContextBus') + const captured_values: string[] = [] + const storage = require_async_local_storage() + const request_id = '6e2d89e5-3274-7c5d-8d33-3969cb2cb90a' + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(`h1:${store?.request_id ?? ''}`) + }) + + bus.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_values.push(`h2:${store?.request_id ?? ''}`) + }) + + await storage.run({ request_id }, async () => { + const event = bus.emit(SimpleEvent({})) + await event.done() + }) + + assert.ok(captured_values.includes(`h1:${request_id}`)) + assert.ok(captured_values.includes(`h2:${request_id}`)) +}) + +test('context propagates through event forwarding', async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + const captured_bus_a: ContextStore = {} + const captured_bus_b: ContextStore = {} + const storage = require_async_local_storage() + const request_id = '0709aae5-f00f-72e5-8cbe-c08092ed3bb3' + + bus_a.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_bus_a.request_id = store?.request_id + }) + + bus_b.on(SimpleEvent, () => { + const store = storage.getStore() as ContextStore | undefined + captured_bus_b.request_id = store?.request_id + }) + + bus_a.on('*', bus_b.emit) + + await storage.run({ request_id }, async () => { + const event = bus_a.emit(SimpleEvent({})) + await event.done() + await bus_b.waitUntilIdle() + }) + + 
assert.equal(captured_bus_a.request_id, request_id) + assert.equal(captured_bus_b.request_id, request_id) +}) + +test('handler can modify context without affecting parent', async () => { + const bus = new EventBus('ModifyContextBus') + const storage = require_async_local_storage() + let parent_value_after_child = '' + const parent_request_id = 'bd1374df-0716-77a5-8846-8564a9e75abc' + const child_request_id = '51969726-2c10-7abc-875f-2607ff63f6e7' + + bus.on(SimpleEvent, async (event) => { + if (!storage.enterWith) { + throw new Error('AsyncLocalStorage.enterWith is required for this test') + } + storage.enterWith({ request_id: parent_request_id }) + const child = event.bus?.emit(ChildEvent({})) + if (child) { + await child.done() + } + const store = get_store(storage.getStore() as ContextStore | undefined) + parent_value_after_child = store.request_id ?? '' + }) + + bus.on(ChildEvent, () => { + if (!storage.enterWith) { + throw new Error('AsyncLocalStorage.enterWith is required for this test') + } + storage.enterWith({ request_id: child_request_id }) + }) + + await storage.run({}, async () => { + const event = bus.emit(SimpleEvent({})) + await event.done() + }) + + assert.equal(parent_value_after_child, parent_request_id) +}) + +test('event parent_id tracking still works with context propagation', async () => { + const bus = new EventBus('ParentIdTrackingBus') + const storage = require_async_local_storage() + let parent_event_id: string | undefined + let child_event_parent_id: string | null | undefined + + bus.on(SimpleEvent, async (event) => { + parent_event_id = event.event_id + const child = event.bus?.emit(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, (event) => { + child_event_parent_id = event.event_parent_id + }) + + await storage.run({ request_id: '36a584b5-40c5-7c8b-8627-f9a2e9ce6f82' }, async () => { + const event = bus.emit(SimpleEvent({})) + await event.done() + }) + + assert.ok(parent_event_id) + 
assert.ok(child_event_parent_id) + assert.equal(child_event_parent_id, parent_event_id) +}) + +test('dispatch context and parent_id both work together', async () => { + const bus = new EventBus('CombinedContextBus') + const storage = require_async_local_storage() + const results: Record = {} + const request_id = '3259fa1a-254b-7368-8f50-1ef37ce95f86' + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + results.parent_request_id = store?.request_id + results.parent_event_id = event.event_id + const child = event.bus?.emit(ChildEvent({})) + if (child) { + await child.done() + } + }) + + bus.on(ChildEvent, (event) => { + const store = storage.getStore() as ContextStore | undefined + results.child_request_id = store?.request_id + results.child_event_parent_id = event.event_parent_id + }) + + await storage.run({ request_id }, async () => { + const event = bus.emit(SimpleEvent({})) + await event.done() + }) + + assert.equal(results.parent_request_id, request_id) + assert.equal(results.child_request_id, request_id) + assert.equal(results.child_event_parent_id, results.parent_event_id) +}) + +test('deeply nested context and parent tracking', async () => { + const bus = new EventBus('DeepNestingBus') + const storage = require_async_local_storage() + const request_id = 'fbe90c6c-8193-79de-81d8-e732135fb217' + const results: Array<{ + level: number + request_id?: string + event_id: string + parent_id?: string | null + }> = [] + + const Level2Event = BaseEvent.extend('Level2Event', {}) + const Level3Event = BaseEvent.extend('Level3Event', {}) + + bus.on(SimpleEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + results.push({ + level: 1, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + const child = event.bus?.emit(Level2Event({})) + if (child) { + await child.done() + } + }) + + bus.on(Level2Event, async (event) => { + const store 
= storage.getStore() as ContextStore | undefined + results.push({ + level: 2, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + const child = event.bus?.emit(Level3Event({})) + if (child) { + await child.done() + } + }) + + bus.on(Level3Event, (event) => { + const store = storage.getStore() as ContextStore | undefined + results.push({ + level: 3, + request_id: store?.request_id, + event_id: event.event_id, + parent_id: event.event_parent_id, + }) + }) + + await storage.run({ request_id }, async () => { + const event = bus.emit(SimpleEvent({})) + await event.done() + }) + + assert.equal(results.length, 3) + for (const result of results) { + assert.equal(result.request_id, request_id) + } + assert.equal(results[0].parent_id, null) + assert.equal(results[1].parent_id, results[0].event_id) + assert.equal(results[2].parent_id, results[1].event_id) +}) diff --git a/bubus-ts/tests/eventbus_dispatch_defaults.test.ts b/bubus-ts/tests/eventbus_dispatch_defaults.test.ts new file mode 100644 index 0000000..cf312d2 --- /dev/null +++ b/bubus-ts/tests/eventbus_dispatch_defaults.test.ts @@ -0,0 +1,73 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' + +const PropagationEvent = BaseEvent.extend('PropagationEvent', {}) +const ConcurrencyOverrideEvent = BaseEvent.extend('ConcurrencyOverrideEvent', { + event_concurrency: 'global-serial', +}) +const HandlerOverrideEvent = BaseEvent.extend('HandlerOverrideEvent', { + event_handler_concurrency: 'serial', + event_handler_completion: 'all', +}) + +test('event_concurrency remains unset on dispatch and resolves during processing', async () => { + const bus = new EventBus('EventConcurrencyDefaultBus', { event_concurrency: 'parallel' }) + bus.on(PropagationEvent, async () => 'ok') + + const implicit = bus.emit(PropagationEvent({})) + const explicit_null = bus.emit(PropagationEvent({ event_concurrency: null 
})) + + assert.equal(implicit.event_concurrency ?? null, null) + assert.equal(explicit_null.event_concurrency ?? null, null) + + await implicit.done() + await explicit_null.done() +}) + +test('event_concurrency class override beats bus default', async () => { + const bus = new EventBus('EventConcurrencyOverrideBus', { event_concurrency: 'parallel' }) + bus.on(ConcurrencyOverrideEvent, async () => 'ok') + + const event = bus.emit(ConcurrencyOverrideEvent({})) + assert.equal(event.event_concurrency, 'global-serial') + await event.done() +}) + +test('handler defaults remain unset on dispatch and resolve during processing', async () => { + const bus = new EventBus('HandlerDefaultsBus', { + event_handler_concurrency: 'parallel', + event_handler_completion: 'first', + }) + bus.on(PropagationEvent, async () => 'ok') + + const implicit = bus.emit(PropagationEvent({})) + const explicit_null = bus.emit( + PropagationEvent({ + event_handler_concurrency: null, + event_handler_completion: null, + }) + ) + + assert.equal(implicit.event_handler_concurrency ?? null, null) + assert.equal(implicit.event_handler_completion ?? null, null) + assert.equal(explicit_null.event_handler_concurrency ?? null, null) + assert.equal(explicit_null.event_handler_completion ?? 
null, null) + + await implicit.done() + await explicit_null.done() +}) + +test('handler class override beats bus defaults', async () => { + const bus = new EventBus('HandlerDefaultsOverrideBus', { + event_handler_concurrency: 'parallel', + event_handler_completion: 'first', + }) + bus.on(HandlerOverrideEvent, async () => 'ok') + + const event = bus.emit(HandlerOverrideEvent({})) + assert.equal(event.event_handler_concurrency, 'serial') + assert.equal(event.event_handler_completion, 'all') + await event.done() +}) diff --git a/bubus-ts/tests/eventbus_dispatch_parent_tracking.test.ts b/bubus-ts/tests/eventbus_dispatch_parent_tracking.test.ts new file mode 100644 index 0000000..97ec5fc --- /dev/null +++ b/bubus-ts/tests/eventbus_dispatch_parent_tracking.test.ts @@ -0,0 +1,499 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ParentEvent = BaseEvent.extend('ParentEvent', { + message: z.string().optional(), +}) +const ChildEvent = BaseEvent.extend('ChildEvent', { + data: z.string().optional(), +}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', { + value: z.number().optional(), +}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('basic parent tracking: child events get event_parent_id', async () => { + const bus = new EventBus('TestBus') + const child_events: BaseEvent[] = [] + + bus.on(ParentEvent, (event) => { + const child = event.bus?.emit(ChildEvent({ data: `child_of_${event.message ?? 
'root'}` })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events.push(child) + return 'parent_handled' + }) + + const parent = bus.emit(ParentEvent({ message: 'test_parent' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(child_events.length, 1) + assert.equal(child_events[0].event_parent_id, parent.event_id) +}) + +test('multi-level parent tracking preserves lineage', async () => { + const bus = new EventBus('LineageBus') + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({ data: 'child_data' })) + return 'parent' + }) + + bus.on(ChildEvent, (event) => { + event.bus?.emit(GrandchildEvent({ value: 42 })) + return 'child' + }) + + bus.on(GrandchildEvent, () => 'grandchild') + + const parent = bus.emit(ParentEvent({ message: 'root' })) + await bus.waitUntilIdle() + await parent.done() + + const seen_parent = Array.from(bus.event_history.values()).find((event) => event.event_id === parent.event_id) + const seen_child = Array.from(bus.event_history.values()).find((event) => event.event_type === 'ChildEvent') + const seen_grandchild = Array.from(bus.event_history.values()).find((event) => event.event_type === 'GrandchildEvent') + assert.ok(seen_parent) + assert.ok(seen_child) + assert.ok(seen_grandchild) + assert.equal(seen_parent.event_parent_id, null) + assert.equal(seen_child.event_parent_id, parent.event_id) + assert.equal(seen_grandchild.event_parent_id, seen_child.event_id) +}) + +test('multiple children from same parent keep same event_parent_id', async () => { + const bus = new EventBus('MultiChildBus') + const child_events: BaseEvent[] = [] + + bus.on(ParentEvent, (event) => { + for (let i = 0; i < 3; i += 1) { + const child = event.bus?.emit(ChildEvent({ data: `child_${i}` })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events.push(child) + } + return 'spawned_children' + }) + + const parent = bus.emit(ParentEvent({ message: 
'multi_child_parent' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(child_events.length, 3) + for (const child of child_events) { + assert.equal(child.event_parent_id, parent.event_id) + } +}) + +test('bus.emit inside handler auto-links parent when not using event.bus', async () => { + const bus = new EventBus('ImplicitParentLinkBus') + const child_events: BaseEvent[] = [] + + bus.on(ParentEvent, (event) => { + const child = ChildEvent({ data: `implicit_for_${event.event_id.slice(-4)}` }) + child_events.push(bus.emit(child)) + return 'parent_done' + }) + bus.on(ChildEvent, () => 'child_done') + + const parent = bus.emit(ParentEvent({ message: 'implicit_parent' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(child_events.length, 1) + const child = child_events[0] + assert.equal(child.event_parent_id, parent.event_id) + assert.ok(child.event_emitted_by_handler_id) + assert.ok(parent.event_results.has(child.event_emitted_by_handler_id!)) +}) + +test('cross-bus bus.emit inside handler auto-links parent when exactly one handler is active', async () => { + const bus1 = new EventBus('ImplicitParentLinkBus1') + const bus2 = new EventBus('ImplicitParentLinkBus2') + const emitted_children: BaseEvent[] = [] + + bus1.on(ParentEvent, (event) => { + const child = ChildEvent({ data: `cross_implicit_for_${event.event_id.slice(-4)}` }) + emitted_children.push(bus2.emit(child)) + return 'parent_done' + }) + bus2.on(ChildEvent, () => 'child_done') + + const parent = bus1.emit(ParentEvent({ message: 'cross_implicit_parent' })) + await Promise.all([bus1.waitUntilIdle(), bus2.waitUntilIdle()]) + await parent.done() + + assert.equal(emitted_children.length, 1) + const child = emitted_children[0] + assert.equal(child.event_parent_id, parent.event_id) + assert.ok(child.event_emitted_by_handler_id) + assert.ok(parent.event_results.has(child.event_emitted_by_handler_id!)) +}) + +test('bus.emit outside handler does not guess a parent when 
multiple handlers are active', async () => { + const bus1 = new EventBus('ImplicitParentAmbiguousBus1') + const bus2 = new EventBus('ImplicitParentAmbiguousBus2') + const bus3 = new EventBus('ImplicitParentAmbiguousBus3') + + let release_a!: () => void + let release_b!: () => void + let mark_started_a!: () => void + let mark_started_b!: () => void + const hold_a = new Promise((resolve) => { + release_a = resolve + }) + const hold_b = new Promise((resolve) => { + release_b = resolve + }) + const started_a = new Promise((resolve) => { + mark_started_a = resolve + }) + const started_b = new Promise((resolve) => { + mark_started_b = resolve + }) + + bus1.on(ParentEvent, async () => { + mark_started_a() + await hold_a + return 'a_done' + }) + bus2.on(ParentEvent, async () => { + mark_started_b() + await hold_b + return 'b_done' + }) + bus3.on(ChildEvent, () => 'child_done') + + const parent_a = bus1.emit(ParentEvent({ message: 'a' })) + const parent_b = bus2.emit(ParentEvent({ message: 'b' })) + await Promise.all([started_a, started_b]) + + const unrelated_child = bus3.emit(ChildEvent({ data: 'outside_handler_emit' })) + + release_a() + release_b() + await Promise.all([parent_a.done(), parent_b.done(), unrelated_child.done()]) + await Promise.all([bus1.waitUntilIdle(), bus2.waitUntilIdle(), bus3.waitUntilIdle()]) + + assert.equal(unrelated_child.event_parent_id, null) + assert.equal(unrelated_child.event_emitted_by_handler_id, null) +}) + +test('parallel parent handlers preserve parent tracking', async () => { + const bus = new EventBus('ParallelParentTrackingBus', { event_handler_concurrency: 'parallel' }) + const child_events_h1: BaseEvent[] = [] + const child_events_h2: BaseEvent[] = [] + + bus.on(ParentEvent, async (event) => { + await delay(10) + const child = event.bus?.emit(ChildEvent({ data: 'from_h1' })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events_h1.push(child) + return 'h1' + }) + + bus.on(ParentEvent, 
async (event) => { + await delay(20) + const child = event.bus?.emit(ChildEvent({ data: 'from_h2' })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events_h2.push(child) + return 'h2' + }) + + const parent = bus.emit(ParentEvent({ message: 'parallel_test' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(child_events_h1.length, 1) + assert.equal(child_events_h2.length, 1) + assert.equal(child_events_h1[0].event_parent_id, parent.event_id) + assert.equal(child_events_h2[0].event_parent_id, parent.event_id) +}) + +test('explicit event_parent_id is not overridden', async () => { + const bus = new EventBus('ExplicitParentBus') + const explicit_parent_id = '018f8e40-1234-7000-8000-000000001234' + + bus.on(ParentEvent, () => { + const child = ChildEvent({ data: 'explicit', event_parent_id: explicit_parent_id }) + bus.emit(child) + return 'dispatched' + }) + + const parent = bus.emit(ParentEvent({ message: 'test' })) + await bus.waitUntilIdle() + await parent.done() + + const captured_child = Array.from(bus.event_history.values()).find((event) => event.event_type === 'ChildEvent') + assert.ok(captured_child) + assert.equal(captured_child.event_parent_id, explicit_parent_id) + assert.notEqual(captured_child.event_parent_id, parent.event_id) +}) + +test('cross-eventbus dispatch preserves parent tracking', async () => { + const bus1 = new EventBus('Bus1') + const bus2 = new EventBus('Bus2') + + bus1.on(ParentEvent, (event) => { + const child = ChildEvent({ data: 'cross_bus_child' }) + event.bus?.emit(child) + bus2.emit(child) + return 'bus1_handled' + }) + + bus2.on(ChildEvent, () => 'bus2_handled') + + const parent = bus1.emit(ParentEvent({ message: 'cross_bus_test' })) + await Promise.all([bus1.waitUntilIdle(), bus2.waitUntilIdle()]) + await parent.done() + + const received_child = Array.from(bus2.event_history.values()).find((event) => event.event_type === 'ChildEvent') + assert.ok(received_child) + 
assert.equal(received_child.event_parent_id, parent.event_id) +}) + +test('parent tracking works with sync handlers and handler errors', async () => { + const bus = new EventBus('SyncAndErrorParentTrackingBus') + const child_events: BaseEvent[] = [] + + const sync_handler = (event: BaseEvent): string => { + const child = event.bus?.emit(ChildEvent({ data: 'from_sync' })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events.push(child) + return 'sync_handled' + } + + const failing_handler = (event: BaseEvent): never => { + const child = event.bus?.emit(ChildEvent({ data: 'before_error' })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events.push(child) + throw new Error('expected parent-tracking error path') + } + + const success_handler = (event: BaseEvent): string => { + const child = event.bus?.emit(ChildEvent({ data: 'after_error' })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events.push(child) + return 'success' + } + + bus.on(ParentEvent, sync_handler) + bus.on(ParentEvent, failing_handler) + bus.on(ParentEvent, success_handler) + bus.on(ChildEvent, () => 'child_handled') + + const parent = bus.emit(ParentEvent({ message: 'mixed_test' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(child_events.length, 3) + for (const child of child_events) { + assert.equal(child.event_parent_id, parent.event_id) + } +}) + +test('erroring parent handlers still preserve child event_parent_id', async () => { + const bus = new EventBus('ErrorOnlyParentTrackingBus') + const child_events: BaseEvent[] = [] + + bus.on(ParentEvent, (event) => { + const child = event.bus?.emit(ChildEvent({ data: 'before_error' })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events.push(child) + throw new Error('expected parent handler failure') + }) + bus.on(ParentEvent, (event) => { + 
const child = event.bus?.emit(ChildEvent({ data: 'after_error' })) + if (!child) { + throw new Error('expected scoped bus on parent handler event') + } + child_events.push(child) + return 'recovered' + }) + bus.on(ChildEvent, () => 'child_handled') + + const parent = bus.emit(ParentEvent({ message: 'error_only' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(child_events.length, 2) + for (const child of child_events) { + assert.equal(child.event_parent_id, parent.event_id) + } +}) + +test('event_children tracks direct and nested descendants', async () => { + const bus = new EventBus('ChildrenTrackingBus') + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({ data: 'level1' })) + return 'parent' + }) + + bus.on(ChildEvent, (event) => { + event.bus?.emit(GrandchildEvent({ value: 42 })) + return 'child' + }) + + bus.on(GrandchildEvent, () => 'grandchild') + + const parent = bus.emit(ParentEvent({ message: 'nested_test' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(parent.event_children.length, 1) + const child = parent.event_children[0] + assert.equal(child.event_type, 'ChildEvent') + assert.equal(child.event_children.length, 1) + const grandchild = child.event_children[0] + assert.equal(grandchild.event_type, 'GrandchildEvent') +}) + +test('event_children tracks multiple children from a single handler', async () => { + const bus = new EventBus('EventChildrenSingleHandlerBus') + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({ data: 'child_0' })) + event.bus?.emit(ChildEvent({ data: 'child_1' })) + event.bus?.emit(ChildEvent({ data: 'child_2' })) + return 'parent_done' + }) + bus.on(ChildEvent, (event) => `handled_${event.data ?? 
'unknown'}`) + + const parent = bus.emit(ParentEvent({ message: 'children_tracking' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(parent.event_children.length, 3) + const child_data = parent.event_children.map((child) => Reflect.get(child, 'data') as string | undefined).sort() + assert.deepEqual(child_data, ['child_0', 'child_1', 'child_2']) + for (const child of parent.event_children) { + assert.equal(child.event_parent_id, parent.event_id) + } +}) + +test('multiple parent handlers contribute to one event_children list', async () => { + const bus = new EventBus('EventChildrenMultiHandlerBus') + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({ data: 'from_handler_1' })) + return 'h1' + }) + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({ data: 'from_handler_2a' })) + event.bus?.emit(ChildEvent({ data: 'from_handler_2b' })) + return 'h2' + }) + bus.on(ChildEvent, (event) => `handled_${event.data ?? 'unknown'}`) + + const parent = bus.emit(ParentEvent({ message: 'multi_handler_children' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(parent.event_children.length, 3) + const child_data = parent.event_children.map((child) => Reflect.get(child, 'data') as string | undefined).sort() + assert.deepEqual(child_data, ['from_handler_1', 'from_handler_2a', 'from_handler_2b']) +}) + +test('event_children is empty when handlers do not emit children', async () => { + const bus = new EventBus('EventChildrenEmptyBus') + + bus.on(ParentEvent, () => 'no_children') + + const parent = bus.emit(ParentEvent({ message: 'no_children' })) + await bus.waitUntilIdle() + await parent.done() + + assert.equal(parent.event_children.length, 0) +}) + +test('parent completion waits for all children', async () => { + const bus = new EventBus('EventChildrenCompletionBus') + const completion_order: string[] = [] + let child_started_resolve: (() => void) | null = null + const child_started = new Promise((resolve) => { + 
child_started_resolve = resolve + }) + let release_children!: () => void + const children_released = new Promise((resolve) => { + release_children = () => { + resolve() + } + }) + let child_started_signaled = false + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({ data: 'child_a' })) + event.bus?.emit(ChildEvent({ data: 'child_b' })) + completion_order.push('parent_handler') + return 'parent' + }) + bus.on(ChildEvent, async (event) => { + if (!child_started_signaled) { + child_started_signaled = true + child_started_resolve?.() + } + await children_released + completion_order.push(`child_handler_${event.data ?? 'child'}`) + return `done_${event.data ?? 'child'}` + }) + + const parent = bus.emit(ParentEvent({ message: 'completion' })) + try { + await child_started + assert.ok(parent.event_children.length >= 1) + assert.equal(parent.event_completed_at ?? null, null) + assert.notEqual(parent.event_status, 'completed') + + release_children() + await parent.done() + await bus.waitUntilIdle() + + assert.equal(parent.event_children.length, 2) + assert.equal(parent.event_status, 'completed') + assert.ok(parent.event_completed_at) + assert.ok(completion_order.includes('parent_handler')) + for (const child of parent.event_children) { + assert.equal(child.event_status, 'completed') + } + } finally { + release_children() + } +}) + +test('forwarded events are not counted as parent event_children', async () => { + const bus1 = new EventBus('ForwardBus1') + const bus2 = new EventBus('ForwardBus2') + + bus1.on('*', bus2.emit) + + const parent = bus1.emit(ParentEvent({ message: 'forward_test' })) + await Promise.all([bus1.waitUntilIdle(), bus2.waitUntilIdle()]) + await parent.done() + + assert.equal(parent.event_children.length, 0) + assert.equal(parent._areAllChildrenComplete(), true) +}) diff --git a/bubus-ts/tests/eventbus_error_handling.test.ts b/bubus-ts/tests/eventbus_error_handling.test.ts new file mode 100644 index 0000000..555e6aa --- /dev/null +++ 
b/bubus-ts/tests/eventbus_error_handling.test.ts @@ -0,0 +1,217 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' + +const TestEvent = BaseEvent.extend('TestEvent', {}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('handler error is captured and does not prevent other handlers from running', async () => { + const bus = new EventBus('ErrorIsolationBus') + const results: string[] = [] + + const failing_handler = (): string => { + throw new Error('Expected to fail - testing error handling') + } + + const working_handler = (): string => { + results.push('success') + return 'worked' + } + + bus.on(TestEvent, failing_handler) + bus.on(TestEvent, working_handler) + + const event = bus.emit(TestEvent({})) + await event.done() + + // Both handlers should have run and produced results + assert.equal(event.event_results.size, 2) + + const failing_result = Array.from(event.event_results.values()).find((r) => r.handler_name === 'failing_handler') + assert.ok(failing_result, 'failing_handler result should exist') + assert.equal(failing_result.status, 'error') + assert.ok(failing_result.error instanceof Error) + assert.ok((failing_result.error as Error).message.includes('Expected to fail'), 'error message should contain the thrown message') + + const working_result = Array.from(event.event_results.values()).find((r) => r.handler_name === 'working_handler') + assert.ok(working_result, 'working_handler result should exist') + assert.equal(working_result.status, 'completed') + assert.equal(working_result.result, 'worked') + + // The working handler actually ran + assert.deepEqual(results, ['success']) +}) + +test('event.event_errors collects handler errors', async () => { + const bus = new EventBus('ErrorCollectionBus') + + const handler_a = (): void => { + throw new Error('error_a') + } + + const handler_b = (): void => { + throw new 
TypeError('error_b') + } + + const handler_c = (): string => { + return 'ok' + } + + bus.on(TestEvent, handler_a) + bus.on(TestEvent, handler_b) + bus.on(TestEvent, handler_c) + + const event = bus.emit(TestEvent({})) + await event.done() + + // Two errors should be collected + assert.equal(event.event_errors.length, 2) + const error_messages = event.event_errors.map((e) => (e as Error).message) + assert.ok(error_messages.includes('error_a')) + assert.ok(error_messages.includes('error_b')) +}) + +test('handler error does not prevent event completion', async () => { + const bus = new EventBus('ErrorCompletionBus') + + bus.on(TestEvent, () => { + throw new Error('handler failed') + }) + + const event = bus.emit(TestEvent({})) + await event.done() + + // Event should still complete even though handler errored + assert.equal(event.event_status, 'completed') + assert.ok(event.event_completed_at, 'event_completed_at should be set') + assert.equal(event.event_errors.length, 1) +}) + +test('error in one event does not affect subsequent queued events', async () => { + const bus = new EventBus('ErrorQueueBus') + const Event1 = BaseEvent.extend('Event1', {}) + const Event2 = BaseEvent.extend('Event2', {}) + + bus.on(Event1, () => { + throw new Error('event1 handler failed') + }) + + bus.on(Event2, () => { + return 'event2 ok' + }) + + const event_1 = bus.emit(Event1({})) + const event_2 = bus.emit(Event2({})) + + await bus.waitUntilIdle() + + // Event1 completed with error + assert.equal(event_1.event_status, 'completed') + assert.equal(event_1.event_errors.length, 1) + + // Event2 completed successfully and was not affected by Event1's error + assert.equal(event_2.event_status, 'completed') + assert.equal(event_2.event_errors.length, 0) + const result = Array.from(event_2.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'event2 ok') +}) + +test('async handler rejection is captured as error', async () => { + const bus = new 
EventBus('AsyncErrorBus') + + const async_failing_handler = async (): Promise => { + await delay(1) + throw new Error('async rejection') + } + + bus.on(TestEvent, async_failing_handler) + + const event = bus.emit(TestEvent({})) + await event.done() + + assert.equal(event.event_status, 'completed') + assert.equal(event.event_errors.length, 1) + assert.ok((event.event_errors[0] as Error).message.includes('async rejection')) + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') +}) + +test('error in forwarded event handler does not block source bus', async () => { + const bus_a = new EventBus('ErrorForwardA') + const bus_b = new EventBus('ErrorForwardB') + + const ForwardEvent = BaseEvent.extend('ForwardEvent', {}) + + // Forward from A to B + bus_a.on('*', bus_b.emit) + + // Handler on bus_b throws + bus_b.on(ForwardEvent, () => { + throw new Error('bus_b handler failed') + }) + + // Handler on bus_a succeeds + bus_a.on(ForwardEvent, () => { + return 'bus_a ok' + }) + + const event = bus_a.emit(ForwardEvent({})) + await event.done() + + assert.equal(event.event_status, 'completed') + + // bus_a's handler succeeded + const bus_a_result = Array.from(event.event_results.values()).find((r) => r.eventbus_id === bus_a.id && r.handler_name !== 'emit') + assert.ok(bus_a_result) + assert.equal(bus_a_result.status, 'completed') + assert.equal(bus_a_result.result, 'bus_a ok') + + // bus_b's handler errored + const bus_b_result = Array.from(event.event_results.values()).find((r) => r.eventbus_id === bus_b.id && r.handler_name !== 'emit') + assert.ok(bus_b_result) + assert.equal(bus_b_result.status, 'error') + + // Both errors tracked + assert.ok(event.event_errors.length >= 1) +}) + +test('event with no handlers completes without errors', async () => { + const bus = new EventBus('NoHandlerBus') + const OrphanEvent = BaseEvent.extend('OrphanEvent', {}) + + const event = bus.emit(OrphanEvent({})) + await event.done() + + 
assert.equal(event.event_status, 'completed') + assert.equal(event.event_results.size, 0) + assert.equal(event.event_errors.length, 0) +}) + +test('error handler result fields are populated correctly', async () => { + const bus = new EventBus('ErrorFieldsBus') + + const my_handler = (): void => { + throw new RangeError('out of range') + } + + bus.on(TestEvent, my_handler) + + const event = bus.emit(TestEvent({})) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.equal(result.handler_name, 'my_handler') + assert.equal(result.eventbus_name, 'ErrorFieldsBus') + assert.ok(result.error instanceof RangeError) + assert.equal((result.error as RangeError).message, 'out of range') + assert.ok(result.started_at, 'started_at should be set') + assert.ok(result.completed_at, 'completed_at should be set even on error') +}) diff --git a/bubus-ts/tests/eventbus_find.test.ts b/bubus-ts/tests/eventbus_find.test.ts new file mode 100644 index 0000000..5338e68 --- /dev/null +++ b/bubus-ts/tests/eventbus_find.test.ts @@ -0,0 +1,767 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', {}) +const UnrelatedEvent = BaseEvent.extend('UnrelatedEvent', {}) +const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', { target_id: z.string() }) +const NavigateEvent = BaseEvent.extend('NavigateEvent', { url: z.string() }) +const TabCreatedEvent = BaseEvent.extend('TabCreatedEvent', { tab_id: z.string() }) +const SystemEvent = BaseEvent.extend('SystemEvent', {}) +const UserActionEvent = BaseEvent.extend('UserActionEvent', { + action: z.string(), + user_id: z.string(), +}) +const FIND_TARGET_A = '7d787f06-07fd-7406-8be7-0255fb41f459' +const 
FIND_TARGET_B = 'a2c7f40b-a8a7-78b2-84ef-9f8c60c40a24' +const FIND_USER_1 = 'b57fcb67-faeb-7a56-8907-116d8cbb1472' +const FIND_USER_2 = '28536f9b-4031-7f53-827f-98c24c1b3839' +const FIND_USER_3 = '50d357df-e68c-7111-8a6c-7018569514b0' +const FIND_USER_4 = 'eab58ec9-90ea-7758-893f-afed99518f43' +const FIND_TARGET_OLD = '9b447756-908c-7b75-8a51-4a2c2b4d9b14' +const FIND_TARGET_NEW = '194870e1-fa02-70a4-8101-d10d57c3449c' +const FIND_TARGET_CHILD = '12f38f3d-d8a7-7ae2-8778-bc27e285ea34' + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('find past returns most recent dispatched event', async () => { + const bus = new EventBus('FindPastBus') + + const first_event = bus.emit(ParentEvent({})) + await first_event.done() + await delay(20) + const second_event = bus.emit(ParentEvent({})) + await second_event.done() + + const found_event = await bus.find(ParentEvent, { past: true, future: false }) + assert.ok(found_event) + assert.equal(found_event.event_id, second_event.event_id) +}) + +test('find past returns null when no matching event exists', async () => { + const bus = new EventBus('FindPastNoneBus') + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: false }) + const elapsed_ms = Date.now() - start + + assert.equal(found_event, null) + assert.ok(elapsed_ms < 100) +}) + +test('find past history lookup is bus-scoped', async () => { + const bus_a = new EventBus('FindScopeA') + const bus_b = new EventBus('FindScopeB') + + bus_b.on(ParentEvent, () => 'done') + const event_on_b = bus_b.emit(ParentEvent({})) + await event_on_b.done() + + const found_on_a = await bus_a.find(ParentEvent, { past: true, future: false }) + const found_on_b = await bus_b.find(ParentEvent, { past: true, future: false }) + + assert.equal(found_on_a, null) + assert.ok(found_on_b) + assert.equal(found_on_b!.event_id, event_on_b.event_id) +}) + +test('find past result retains origin bus label in 
event_path', async () => { + const bus = new EventBus('FindOriginBus') + + const dispatched = bus.emit(ParentEvent({})) + await dispatched.done() + + const found_event = await bus.find(ParentEvent, { past: true, future: false }) + assert.ok(found_event) + assert.equal(found_event!.event_path[0], bus.label) +}) + +test('find past window filters by time', async () => { + const bus = new EventBus('FindWindowBus') + + const old_event = bus.emit(ParentEvent({})) + await old_event.done() + await delay(120) + const new_event = bus.emit(ParentEvent({})) + await new_event.done() + + const found_event = await bus.find(ParentEvent, { past: 0.1, future: false }) + assert.ok(found_event) + assert.equal(found_event.event_id, new_event.event_id) +}) + +test('find past returns null when all events are too old', async () => { + const bus = new EventBus('FindTooOldBus') + + const old_event = bus.emit(ParentEvent({})) + await old_event.done() + await delay(120) + + const found_event = await bus.find(ParentEvent, { past: 0.05, future: false }) + assert.equal(found_event, null) +}) + +test('find future waits for event', async () => { + const bus = new EventBus('FindFutureBus') + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) + + setTimeout(() => { + bus.emit(ParentEvent({})) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ParentEvent') +}) + +test('max_history_size=0 disables past history search but future find still resolves', async () => { + const bus = new EventBus('FindZeroHistoryBus', { max_history_size: 0 }) + bus.on(ParentEvent, () => 'ok') + + const find_future = bus.find(ParentEvent, { past: false, future: 0.5 }) + const dispatched = bus.emit(ParentEvent({})) + + const found_future = await find_future + assert.ok(found_future) + assert.equal(found_future.event_id, dispatched.event_id) + + await dispatched.done() + assert.equal(bus.event_history.has(dispatched.event_id), false) + + 
const found_past = await bus.find(ParentEvent, { past: true, future: false }) + assert.equal(found_past, null) +}) + +test('find future works with string event keys', async () => { + const bus = new EventBus('FindFutureStringBus') + + const find_promise = bus.find('ParentEvent', { past: false, future: 0.5 }) + + setTimeout(() => { + bus.emit(ParentEvent({})) + }, 30) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ParentEvent') +}) + +test('find class pattern matches generic BaseEvent event_type for future lookups', async () => { + const bus = new EventBus('FindFutureClassPatternBus') + + class DifferentNameFromClass extends BaseEvent {} + + bus.on('DifferentNameFromClass', () => 'done') + + const find_promise = bus.find(DifferentNameFromClass, { past: false, future: 1 }) + + setTimeout(() => { + void bus.emit(new BaseEvent({ event_type: 'DifferentNameFromClass' })) + }, 30) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event!.event_type, 'DifferentNameFromClass') +}) + +test('find future ignores past events', async () => { + const bus = new EventBus('FindFutureIgnoresPastBus') + + const prior = bus.emit(ParentEvent({})) + await prior.done() + + const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }) + assert.equal(found_event, null) +}) + +test('find future ignores already-dispatched in-flight events when past=false', async () => { + const bus = new EventBus('FindFutureIgnoresInflightBus') + + bus.on(ParentEvent, async () => { + await delay(80) + }) + + const inflight = bus.emit(ParentEvent({})) + await delay(5) + + const found_event = await bus.find(ParentEvent, { past: false, future: 0.05 }) + assert.equal(found_event, null) + + await inflight.done() +}) + +test('find future times out when no event arrives', async () => { + const bus = new EventBus('FindFutureTimeoutBus') + + const found_event = await bus.find(ParentEvent, { past: false, 
future: 0.05 }) + assert.equal(found_event, null) +}) + +test('find past=false future=false returns null immediately', async () => { + const bus = new EventBus('FindNeitherBus') + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: false, future: false }) + const elapsed_ms = Date.now() - start + + assert.equal(found_event, null) + assert.ok(elapsed_ms < 100) +}) + +test('find defaults to past=true future=false when both are undefined', async () => { + const bus = new EventBus('FindDefaultWindowBus') + + const start = Date.now() + const missing = await bus.find(ParentEvent) + const elapsed_ms = Date.now() - start + assert.equal(missing, null) + assert.ok(elapsed_ms < 100) + + const dispatched = bus.emit(ParentEvent({})) + const found = await bus.find(ParentEvent) + assert.ok(found) + assert.equal(found.event_id, dispatched.event_id) +}) + +test('find past+future returns past event immediately', async () => { + const bus = new EventBus('FindPastFutureBus') + + const dispatched = bus.emit(ParentEvent({})) + await dispatched.done() + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: 0.5 }) + const elapsed_ms = Date.now() - start + + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) + assert.ok(elapsed_ms < 100) +}) + +test('find past+future waits for future when no past match', async () => { + const bus = new EventBus('FindPastFutureWaitBus') + + const find_promise = bus.find(ChildEvent, { past: true, future: 0.3 }) + + setTimeout(() => { + bus.emit(ChildEvent({})) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_type, 'ChildEvent') +}) + +test('find past/future windows are independent', async () => { + const bus = new EventBus('FindWindowIndependentBus') + + const old_event = bus.emit(ParentEvent({})) + await old_event.done() + await delay(120) + + const start = Date.now() + const found_event = 
await bus.find(ParentEvent, { past: 0.05, future: 0.05 }) + const elapsed_ms = Date.now() - start + + assert.equal(found_event, null) + assert.ok(elapsed_ms > 30) +}) + +test('find past true future float returns old event immediately', async () => { + const bus = new EventBus('FindPastTrueFutureFloatBus') + + const dispatched = bus.emit(ParentEvent({})) + await dispatched.done() + await delay(120) + + const found_event = await bus.find(ParentEvent, { past: true, future: 0.1 }) + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) +}) + +test('find past float future waits for new event', async () => { + const bus = new EventBus('FindPastFloatFutureWaitBus') + + const old_event = bus.emit(ParentEvent({})) + await old_event.done() + await delay(120) + + const find_promise = bus.find(ParentEvent, { past: 0.05, future: 0.2 }) + + setTimeout(() => { + bus.emit(ParentEvent({})) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.notEqual(found_event.event_id, old_event.event_id) +}) + +test('find past true future true returns past event immediately', async () => { + const bus = new EventBus('FindPastTrueFutureTrueBus') + + const dispatched = bus.emit(ParentEvent({})) + await dispatched.done() + + const start = Date.now() + const found_event = await bus.find(ParentEvent, { past: true, future: true }) + const elapsed_ms = Date.now() - start + + assert.ok(found_event) + assert.equal(found_event.event_id, dispatched.event_id) + assert.ok(elapsed_ms < 100) +}) + +test('find respects where filter', async () => { + const bus = new EventBus('FindWhereBus') + + const event_a = bus.emit(ScreenshotEvent({ target_id: FIND_TARGET_A })) + const event_b = bus.emit(ScreenshotEvent({ target_id: FIND_TARGET_B })) + await event_a.done() + await event_b.done() + + const found_event = await bus.find(ScreenshotEvent, (event) => event.target_id === FIND_TARGET_B, { past: true, future: false }) + + assert.ok(found_event) + 
assert.equal(found_event.event_id, event_b.event_id) +}) + +test('find supports metadata filters like event_status', async () => { + const bus = new EventBus('FindEventStatusFilterBus') + const release_pause = bus.locks._requestRunloopPause() + + const pending_event = bus.emit(ParentEvent({})) + + const found_pending = await bus.find(ParentEvent, { past: true, future: false, event_status: 'pending' }) + assert.ok(found_pending) + assert.equal(found_pending.event_id, pending_event.event_id) + + release_pause() + await pending_event.done() + + const found_completed = await bus.find(ParentEvent, { past: true, future: false, event_status: 'completed' }) + assert.ok(found_completed) + assert.equal(found_completed.event_id, pending_event.event_id) +}) + +test('find supports metadata equality filters like event_id and event_timeout', async () => { + const bus = new EventBus('FindEventFieldFilterBus') + + const event_a = bus.emit(ParentEvent({ event_timeout: 11 })) + const event_b = bus.emit(ParentEvent({ event_timeout: 22 })) + await event_a.done() + await event_b.done() + + const found_a = await bus.find(ParentEvent, { + past: true, + future: false, + event_id: event_a.event_id, + event_timeout: 11, + }) + assert.ok(found_a) + assert.equal(found_a.event_id, event_a.event_id) + + const mismatch = await bus.find(ParentEvent, { + past: true, + future: false, + event_id: event_a.event_id, + event_timeout: 22, + }) + assert.equal(mismatch, null) +}) + +test('find supports non-event data field equality filters', async () => { + const bus = new EventBus('FindDataFieldFilterBus') + + const event_a = bus.emit(UserActionEvent({ action: 'logout', user_id: FIND_USER_2 })) + const event_b = bus.emit(UserActionEvent({ action: 'login', user_id: FIND_USER_1 })) + await event_a.done() + await event_b.done() + + const found = await bus.find(UserActionEvent, { + past: true, + future: false, + action: 'login', + user_id: FIND_USER_1, + }) + assert.ok(found) + assert.equal(found.event_id, 
event_b.event_id) + + const mismatch = await bus.find(UserActionEvent, { + past: true, + future: false, + action: 'signup', + }) + assert.equal(mismatch, null) +}) + +test('find where filter works with future waiting', async () => { + const bus = new EventBus('FindWhereFutureBus') + + const find_promise = bus.find(UserActionEvent, (event) => event.user_id === FIND_USER_3, { past: false, future: 0.3 }) + + setTimeout(() => { + bus.emit(UserActionEvent({ action: 'logout', user_id: FIND_USER_4 })) + bus.emit(UserActionEvent({ action: 'login', user_id: FIND_USER_3 })) + }, 50) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.user_id, FIND_USER_3) +}) + +test('find wildcard "*" with where filter matches across event types in history', async () => { + const bus = new EventBus('FindWildcardPastBus') + + const user_event = bus.emit(UserActionEvent({ action: 'login', user_id: FIND_USER_1 })) + const system_event = bus.emit(SystemEvent({})) + await user_event.done() + await system_event.done() + + const found_event = await bus.find( + '*', + (event) => event.event_type === 'UserActionEvent' && (event as InstanceType).user_id === FIND_USER_1, + { past: true, future: false } + ) + + assert.ok(found_event) + assert.equal(found_event.event_id, user_event.event_id) + assert.equal(found_event.event_type, 'UserActionEvent') +}) + +test('find wildcard "*" with where filter works for future waiting', async () => { + const bus = new EventBus('FindWildcardFutureBus') + + const find_promise = bus.find( + '*', + (event) => event.event_type === 'UserActionEvent' && (event as InstanceType).action === 'special', + { past: false, future: 0.3 } + ) + + setTimeout(() => { + bus.emit(SystemEvent({})) + bus.emit(UserActionEvent({ action: 'normal', user_id: '16ced2b3-de40-7d9b-85c8-c02241a00354' })) + bus.emit(UserActionEvent({ action: 'special', user_id: '391ce6ed-aa72-73d6-87c4-5e20f3c6fc63' })) + }, 40) + + const found_event = await find_promise + 
assert.ok(found_event) + assert.equal(found_event.event_type, 'UserActionEvent') + assert.equal((found_event as InstanceType).action, 'special') +}) + +test('find with multiple concurrent waiters resolves correct events', async () => { + const bus = new EventBus('FindConcurrentBus') + + const find_normal = bus.find(UserActionEvent, (event) => event.action === 'normal', { past: false, future: 0.5 }) + const find_special = bus.find(UserActionEvent, (event) => event.action === 'special', { past: false, future: 0.5 }) + const find_system = bus.find('SystemEvent', { past: false, future: 0.5 }) + + setTimeout(() => { + bus.emit(UserActionEvent({ action: 'normal', user_id: 'e692b6cb-ae63-773b-8557-3218f7ce5ced' })) + bus.emit(SystemEvent({})) + bus.emit(UserActionEvent({ action: 'special', user_id: '2a312e4d-3035-7883-86b9-578ce47046b2' })) + }, 50) + + const [normal, system, special] = await Promise.all([find_normal, find_system, find_special]) + + assert.ok(normal) + assert.equal(normal.action, 'normal') + assert.ok(system) + assert.equal(system.event_type, 'SystemEvent') + assert.ok(special) + assert.equal(special.action, 'special') +}) + +test('find child_of returns child event', async () => { + const bus = new EventBus('FindChildBus') + + bus.on(ParentEvent, (event) => { + event.bus?.emit(ChildEvent({})) + }) + + const parent_event = bus.emit(ParentEvent({})) + await bus.waitUntilIdle() + + const child_event = await bus.find(ChildEvent, { + past: true, + future: false, + child_of: parent_event, + }) + + assert.ok(child_event) + assert.equal(child_event.event_parent_id, parent_event.event_id) +}) + +test('find child_of returns null for non-child', async () => { + const bus = new EventBus('FindNonChildBus') + + const parent_event = bus.emit(ParentEvent({})) + const unrelated_event = bus.emit(UnrelatedEvent({})) + await parent_event.done() + await unrelated_event.done() + + const found_event = await bus.find(UnrelatedEvent, { + past: true, + future: false, + child_of: 
parent_event, + }) + + assert.equal(found_event, null) +}) + +test('find child_of returns grandchild event', async () => { + const bus = new EventBus('FindGrandchildBus') + + let child_event_id: string | null = null + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ChildEvent({})).done() + child_event_id = child?.event_id ?? null + }) + bus.on(ChildEvent, async (event) => { + await event.bus?.emit(GrandchildEvent({})).done() + }) + + const parent_event = bus.emit(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() + + const grandchild_event = await bus.find(GrandchildEvent, { + past: true, + future: false, + child_of: parent_event, + }) + + assert.ok(grandchild_event) + assert.equal(grandchild_event.event_parent_id, child_event_id) +}) + +test('find child_of works across forwarded buses', async () => { + const main_bus = new EventBus('MainBus') + const auth_bus = new EventBus('AuthBus') + + let child_event_id: string | null = null + + main_bus.on(ParentEvent, auth_bus.emit) + auth_bus.on(ParentEvent, async (event) => { + const event_bus = event.bus + assert.ok(event_bus) + const child_event = event_bus.emit(ChildEvent({})) + const child = await child_event.done() + assert.ok(child) + child_event_id = child.event_id + }) + + const parent_event = main_bus.emit(ParentEvent({})) + await parent_event.done() + await main_bus.waitUntilIdle() + await auth_bus.waitUntilIdle() + + const found_child = await auth_bus.find(ChildEvent, { + past: 5, + future: 5, + child_of: parent_event, + }) + + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) + +test('find child_of filters to correct parent among siblings', async () => { + const bus = new EventBus('FindCorrectParentBus') + + bus.on(NavigateEvent, async (event) => { + await event.bus?.emit(TabCreatedEvent({ tab_id: `tab_for_${event.url}` })).done() + }) + bus.on(TabCreatedEvent, () => {}) + + const nav_1 = bus.emit(NavigateEvent({ url: 'site1' })) + 
const nav_2 = bus.emit(NavigateEvent({ url: 'site2' })) + await nav_1.done() + await nav_2.done() + + const tab_1 = await bus.find(TabCreatedEvent, { + child_of: nav_1, + past: true, + future: false, + }) + const tab_2 = await bus.find(TabCreatedEvent, { + child_of: nav_2, + past: true, + future: false, + }) + + assert.ok(tab_1) + assert.ok(tab_2) + assert.equal(tab_1.tab_id, 'tab_for_site1') + assert.equal(tab_2.tab_id, 'tab_for_site2') +}) + +test('find future with child_of waits for matching child', async () => { + const bus = new EventBus('FindFutureChildBus') + + bus.on(ParentEvent, async (event) => { + await delay(30) + await event.bus?.emit(ChildEvent({})).done() + }) + + const parent_event = bus.emit(ParentEvent({})) + + const find_promise = bus.find(ChildEvent, { + child_of: parent_event, + past: false, + future: 0.3, + }) + + const child_event = await find_promise + assert.ok(child_event) + assert.equal(child_event.event_parent_id, parent_event.event_id) +}) + +test('find with past float and where filter', async () => { + const bus = new EventBus('FindWherePastFloatBus') + + const old_event = bus.emit(ScreenshotEvent({ target_id: FIND_TARGET_OLD })) + await old_event.done() + await delay(120) + const new_event = bus.emit(ScreenshotEvent({ target_id: FIND_TARGET_NEW })) + await new_event.done() + + const found_tab2 = await bus.find(ScreenshotEvent, (event) => event.target_id === FIND_TARGET_NEW, { past: 0.1, future: false }) + + assert.ok(found_tab2) + assert.equal(found_tab2.event_id, new_event.event_id) + + const found_tab1 = await bus.find(ScreenshotEvent, (event) => event.target_id === FIND_TARGET_OLD, { past: 0.1, future: false }) + assert.equal(found_tab1, null) +}) + +test('find with child_of and past float', async () => { + const bus = new EventBus('FindChildPastFloatBus') + + let child_event_id: string | null = null + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ChildEvent({})).done() + child_event_id = 
child?.event_id ?? null + }) + + const parent_event = bus.emit(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() + + const found_child = await bus.find(ChildEvent, { + child_of: parent_event, + past: 5, + future: false, + }) + + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) + +test('find with all parameters combined', async () => { + const bus = new EventBus('FindAllParamsBus') + + let child_event_id: string | null = null + bus.on(ParentEvent, async (event) => { + const child = await event.bus?.emit(ScreenshotEvent({ target_id: FIND_TARGET_CHILD })).done() + child_event_id = child?.event_id ?? null + }) + + const parent_event = bus.emit(ParentEvent({})) + await parent_event.done() + await bus.waitUntilIdle() + + const found_child = await bus.find(ScreenshotEvent, (event) => event.target_id === FIND_TARGET_CHILD, { + child_of: parent_event, + past: 5, + future: false, + }) + + assert.ok(found_child) + assert.equal(found_child.event_id, child_event_id) +}) + +test('find past includes in-progress dispatched events', async () => { + const bus = new EventBus('FindDispatchedPastBus') + + bus.on(ParentEvent, async () => { + await delay(80) + }) + + const dispatched = bus.emit(ParentEvent({})) + await delay(10) + + const found = await bus.find(ParentEvent, { past: true, future: false }) + assert.ok(found) + assert.equal(found.event_id, dispatched.event_id) + assert.notEqual(found.event_status, 'completed') + + await dispatched.done() +}) + +test('find future resolves on dispatch before completion', async () => { + const bus = new EventBus('FindOnDispatchBus') + const release_pause = bus.locks._requestRunloopPause() + + bus.on(ParentEvent, async () => { + await delay(80) + }) + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) + + setTimeout(() => { + bus.emit(ParentEvent({})) + }, 20) + + const found_event = await find_promise + assert.ok(found_event) + assert.equal(found_event.event_status, 
'pending') + + release_pause() + await found_event.done() + assert.equal(found_event.event_status, 'completed') +}) + +test('find catches child event that fired during parent handler', async () => { + const bus = new EventBus('FindRaceConditionBus') + + let tab_event_id: string | null = null + bus.on(NavigateEvent, async (event) => { + const tab_event = await event.bus?.emit(TabCreatedEvent({ tab_id: '06bee4cf-9f51-7e5d-82d3-65f35169329c' })).done() + tab_event_id = tab_event?.event_id ?? null + }) + bus.on(TabCreatedEvent, () => {}) + + const nav_event = bus.emit(NavigateEvent({ url: 'https://example.com' })) + await nav_event.done() + + const found_tab = await bus.find(TabCreatedEvent, { + child_of: nav_event, + past: true, + future: false, + }) + + assert.ok(found_tab) + assert.equal(found_tab.event_id, tab_event_id) +}) + +test('find returns promise that can be awaited later', async () => { + const bus = new EventBus('FindPromiseBus') + + const find_promise = bus.find(ParentEvent, { past: false, future: 0.5 }) + assert.ok(find_promise instanceof Promise) + + bus.emit(ParentEvent({})) + const found_event = await find_promise + assert.ok(found_event) +}) diff --git a/bubus-ts/tests/eventbus_forwarding.test.ts b/bubus-ts/tests/eventbus_forwarding.test.ts new file mode 100644 index 0000000..6187de6 --- /dev/null +++ b/bubus-ts/tests/eventbus_forwarding.test.ts @@ -0,0 +1,329 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const PingEvent = BaseEvent.extend('PingEvent', { value: z.number() }) +const ProxyDispatchRootEvent = BaseEvent.extend('ProxyDispatchRootEvent', {}) +const ProxyDispatchChildEvent = BaseEvent.extend('ProxyDispatchChildEvent', {}) +const ForwardedFirstDefaultsEvent = BaseEvent.extend('ForwardedFirstDefaultsEvent', { event_result_type: z.string() }) + +test('events forward between buses without duplication', async () => { + const 
bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + const bus_c = new EventBus('BusC') + + const seen_a: string[] = [] + const seen_b: string[] = [] + const seen_c: string[] = [] + + bus_a.on(PingEvent, (event) => { + seen_a.push(event.event_id) + }) + + bus_b.on(PingEvent, (event) => { + seen_b.push(event.event_id) + }) + + bus_c.on(PingEvent, (event) => { + seen_c.push(event.event_id) + }) + + bus_a.on('*', bus_b.emit) + bus_b.on('*', bus_c.emit) + + const event = bus_a.emit(PingEvent({ value: 1 })) + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() + + assert.equal(seen_a.length, 1) + assert.equal(seen_b.length, 1) + assert.equal(seen_c.length, 1) + + assert.equal(seen_a[0], event.event_id) + assert.equal(seen_b[0], event.event_id) + assert.equal(seen_c[0], event.event_id) + + assert.deepEqual(event.event_path, [bus_a.label, bus_b.label, bus_c.label]) +}) + +test('forwarding disambiguates buses that share the same name', async () => { + const bus_a = new EventBus('SharedName') + const bus_b = new EventBus('SharedName') + + const seen_a: string[] = [] + const seen_b: string[] = [] + + bus_a.on(PingEvent, (event) => { + seen_a.push(event.event_id) + }) + + bus_b.on(PingEvent, (event) => { + seen_b.push(event.event_id) + }) + + bus_a.on('*', bus_b.emit) + + const event = bus_a.emit(PingEvent({ value: 99 })) + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + assert.equal(seen_a.length, 1) + assert.equal(seen_b.length, 1) + assert.equal(seen_a[0], event.event_id) + assert.equal(seen_b[0], event.event_id) + assert.deepEqual(event.event_path, [bus_a.label, bus_b.label]) +}) + +test('await event.done waits for handlers on forwarded buses', async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + const bus_c = new EventBus('BusC') + + const completion_log: string[] = [] + + const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + 
}) + + bus_a.on(PingEvent, async () => { + await delay(10) + completion_log.push('A') + }) + + bus_b.on(PingEvent, async () => { + await delay(30) + completion_log.push('B') + }) + + bus_c.on(PingEvent, async () => { + await delay(50) + completion_log.push('C') + }) + + bus_a.on('*', bus_b.emit) + bus_b.on('*', bus_c.emit) + + const event = bus_a.emit(PingEvent({ value: 2 })) + + await event.done() + + assert.deepEqual(completion_log.sort(), ['A', 'B', 'C']) + assert.equal(event.event_pending_bus_count, 0) +}) + +test('circular forwarding A->B->C->A does not loop', async () => { + const peer1 = new EventBus('Peer1') + const peer2 = new EventBus('Peer2') + const peer3 = new EventBus('Peer3') + + const events_at_peer1: string[] = [] + const events_at_peer2: string[] = [] + const events_at_peer3: string[] = [] + + peer1.on(PingEvent, (event) => { + events_at_peer1.push(event.event_id) + }) + peer2.on(PingEvent, (event) => { + events_at_peer2.push(event.event_id) + }) + peer3.on(PingEvent, (event) => { + events_at_peer3.push(event.event_id) + }) + + // Create a full cycle: Peer1 -> Peer2 -> Peer3 -> Peer1 + peer1.on('*', peer2.emit) + peer2.on('*', peer3.emit) + peer3.on('*', peer1.emit) // completes the circle + + const event = peer1.emit(PingEvent({ value: 42 })) + + await peer1.waitUntilIdle() + await peer2.waitUntilIdle() + await peer3.waitUntilIdle() + + // Each peer must see the event exactly once (no infinite loop) + assert.equal(events_at_peer1.length, 1) + assert.equal(events_at_peer2.length, 1) + assert.equal(events_at_peer3.length, 1) + + // All saw the same event + assert.equal(events_at_peer1[0], event.event_id) + assert.equal(events_at_peer2[0], event.event_id) + assert.equal(events_at_peer3[0], event.event_id) + + // event_path shows propagation order without looping back + assert.deepEqual(event.event_path, [peer1.label, peer2.label, peer3.label]) + + // --- Start from a different peer in the same cycle --- + events_at_peer1.length = 0 + 
events_at_peer2.length = 0 + events_at_peer3.length = 0 + + const event2 = peer2.emit(PingEvent({ value: 99 })) + + await peer1.waitUntilIdle() + await peer2.waitUntilIdle() + await peer3.waitUntilIdle() + + // Each peer sees it exactly once + assert.equal(events_at_peer1.length, 1) + assert.equal(events_at_peer2.length, 1) + assert.equal(events_at_peer3.length, 1) + + // Path starts at Peer2, goes to Peer3, then Peer1 (stops before looping back to Peer2) + assert.deepEqual(event2.event_path, [peer2.label, peer3.label, peer1.label]) +}) + +test('await event.done waits when forwarding handler is async-delayed', async () => { + const bus_a = new EventBus('BusA') + const bus_b = new EventBus('BusB') + + const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + + let bus_a_done = false + let bus_b_done = false + + bus_a.on(PingEvent, async () => { + await delay(20) + bus_a_done = true + }) + + bus_b.on(PingEvent, async () => { + await delay(10) + bus_b_done = true + }) + + bus_a.on('*', async (event) => { + await delay(30) + bus_b.emit(event) + }) + + const event = bus_a.emit(PingEvent({ value: 3 })) + await event.done() + + assert.equal(bus_a_done, true) + assert.equal(bus_b_done, true) + assert.equal(event.event_pending_bus_count, 0) + assert.deepEqual(event.event_path, [bus_a.label, bus_b.label]) +}) + +test('forwarded first-mode uses processing-bus handler defaults', async () => { + const bus_a = new EventBus('ForwardedFirstDefaultsA', { + event_handler_concurrency: 'serial', + event_handler_completion: 'all', + }) + const bus_b = new EventBus('ForwardedFirstDefaultsB', { + event_handler_concurrency: 'parallel', + event_handler_completion: 'first', + }) + const log: string[] = [] + + const slow_handler = async (_event: InstanceType): Promise => { + log.push('slow_start') + await delay(20) + log.push('slow_end') + return 'slow' + } + + const fast_handler = async (_event: InstanceType): Promise => { + log.push('fast_start') 
+ await delay(1) + log.push('fast_end') + return 'fast' + } + + bus_a.on('*', bus_b.emit) + bus_b.on(ForwardedFirstDefaultsEvent, slow_handler) + bus_b.on(ForwardedFirstDefaultsEvent, fast_handler) + + const result = await bus_a.emit(ForwardedFirstDefaultsEvent({ event_timeout: null })).first() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + assert.equal(result, 'fast', `expected first-mode on processing bus to pick fast handler, got ${String(result)} log=${log}`) + assert.equal(log.includes('slow_start'), true, `slow handler should start under parallel first-mode, log=${log}`) + assert.equal(log.includes('fast_start'), true, `fast handler should start under parallel first-mode, log=${log}`) +}) + +test('proxy dispatch auto-links child events like emit', async () => { + const bus = new EventBus('ProxyDispatchAutoLinkBus') + + bus.on(ProxyDispatchRootEvent, (event) => { + event.bus?.emit(ProxyDispatchChildEvent({})) + return 'root' + }) + bus.on(ProxyDispatchChildEvent, () => 'child') + + const root = bus.emit(ProxyDispatchRootEvent({})) + await Promise.all([bus.waitUntilIdle(), root.done()]) + + const child = root.event_children[0] + assert.ok(child) + assert.equal(child.event_parent_id, root.event_id) + assert.equal(root.event_children.length, 1) + assert.equal(root.event_children[0]?.event_id, child.event_id) +}) + +test('proxy dispatch of same event does not self-parent or self-link child', async () => { + const bus = new EventBus('ProxyDispatchSameEventBus') + + bus.on(ProxyDispatchRootEvent, (event) => { + event.bus?.emit(event) + return 'root' + }) + + const root = bus.emit(ProxyDispatchRootEvent({})) + await Promise.all([bus.waitUntilIdle(), root.done()]) + + assert.equal(root.event_parent_id, null) + assert.equal(root.event_children.length, 0) +}) + +// Consolidated from tests/fifo.test.ts + +const OrderEvent = BaseEvent.extend('OrderEvent', { order: z.number() }) + +const delay = (ms: number): Promise => + new Promise((resolve) => { 
+ setTimeout(resolve, ms) + }) + +test('events are processed in FIFO order', async () => { + const bus = new EventBus('FifoBus') + + const processed_orders: number[] = [] + const handler_start_times: number[] = [] + + bus.on(OrderEvent, async (event) => { + handler_start_times.push(Date.now()) + if (event.order % 2 === 0) { + await delay(30) + } else { + await delay(5) + } + processed_orders.push(event.order) + }) + + for (let i = 0; i < 10; i += 1) { + bus.emit(OrderEvent({ order: i })) + } + + await bus.waitUntilIdle() + + assert.deepEqual( + processed_orders, + Array.from({ length: 10 }, (_, i) => i) + ) + for (let i = 1; i < handler_start_times.length; i += 1) { + assert.ok(handler_start_times[i] >= handler_start_times[i - 1]) + } +}) diff --git a/bubus-ts/tests/eventbus_locking.test.ts b/bubus-ts/tests/eventbus_locking.test.ts new file mode 100644 index 0000000..aef7469 --- /dev/null +++ b/bubus-ts/tests/eventbus_locking.test.ts @@ -0,0 +1,1095 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus, retry } from '../src/index.js' + +/* +Potential failure modes + +A) Event concurrency modes +- global-serial not enforcing strict FIFO across multiple buses (events interleave). +- bus-serial allows cross-bus interleaving but still must be FIFO within a bus; breaks under forwarding. +- parallel accidentally serializes (e.g., lock still used) or breaks queue-jump semantics. +- null not resolving correctly to bus defaults. + +B) Handler concurrency modes +- serial not enforcing strict handler order per event. +- parallel accidentally serializes or fails to enforce per-handler ordering. +- null not resolving correctly to bus defaults. + +C) Precedence resolution +- Event overrides not taking precedence over bus defaults. +- Conflicting settings (event says parallel, bus says serial) choose wrong winner. 
+ +D) Queue-jump / awaited events +- event.done() inside handler doesn’t jump the queue across buses. +- Queue-jump bypasses locks incorrectly in contexts where it shouldn’t. +- Queue-jump fails when event already in-flight. + +E) FIFO correctness +- FIFO order broken under bus-serial with interleaved emissions. +- FIFO order broken under global-serial across buses. +- FIFO order broken with forwarded events. + +F) Forwarding & bus context +- Forwarded event’s event.bus mutates current handler context (wrong bus). +- Child events emitted after forwarding are mis-parented. +- event.event_path diverges between buses. +- Handler attribution lost when forwarded across buses (tree/log issues). + +G) Parent/child tracking +- Child events not correctly linked to the parent handler when emitted via event.bus. +- event_children missing under concurrency due to async timing. +- event_pending_bus_count not decremented properly, leaving events stuck. + +H) Find semantics under concurrency +- find(past) returns event not yet completed. +- find(future) doesn’t resolve when event finishes in another bus. +- find with child_of returns mismatched events under concurrency. + +I) Timeouts + cancellation propagation +- Timeout doesn’t cancel pending child handlers. +- Cancelled results not marked or mis-attributed to the wrong handler. +- Timeout doesn’t propagate across forwarded buses (event still waits forever). + +J) Handler result validation +- event_result_type not enforced under parallel handler completion. +- Invalid result doesn’t mark handler error or event failure. +- Timeout + schema error ordering wrong (e.g., schema error overwrites timeout). + +K) Idle / completion +- waitUntilIdle() returns early with in-flight events. +- event.done() resolves before children complete. +- event.done() never resolves due to deadlock in runloop. + +L) Reentrancy / nested awaits +- Nested awaited child events starve sibling handlers. 
+- Awaited child events skip lock incorrectly (deadlocks or ordering regressions). + +M) Edge-cases +- Multiple handlers for same event type with different options collide. +- Handler throws synchronously before await (still counted, no leaks). +- Handler returns a rejected promise (properly surfaced). +- Event emitted with event_concurrency/event_handler_concurrency invalid value (schema rejects). +- Event emitted with no bus set (done should reject). +*/ + +const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)) +const withResolvers = () => { + let resolve!: (value: T | PromiseLike) => void + let reject!: (reason?: unknown) => void + const promise = new Promise((resolve_fn, reject_fn) => { + resolve = resolve_fn + reject = reject_fn + }) + return { promise, resolve, reject } +} + +test('global-serial: only one event processes at a time across buses', async () => { + const SerialEvent = BaseEvent.extend('SerialEvent', { + order: z.number(), + source: z.string(), + }) + + const bus_a = new EventBus('GlobalSerialA', { event_concurrency: 'global-serial' }) + const bus_b = new EventBus('GlobalSerialB', { event_concurrency: 'global-serial' }) + + let in_flight = 0 + let max_in_flight = 0 + const starts: string[] = [] + + const handler = async (event: InstanceType) => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + starts.push(`${event.source}:${event.order}`) + await sleep(10) + in_flight -= 1 + } + + bus_a.on(SerialEvent, handler) + bus_b.on(SerialEvent, handler) + + for (let i = 0; i < 3; i += 1) { + bus_a.emit(SerialEvent({ order: i, source: 'a' })) + bus_b.emit(SerialEvent({ order: i, source: 'b' })) + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + assert.equal(max_in_flight, 1) + + const starts_a = starts.filter((value) => value.startsWith('a:')).map((value) => Number(value.split(':')[1])) + const starts_b = starts.filter((value) => value.startsWith('b:')).map((value) => 
Number(value.split(':')[1])) + + assert.deepEqual(starts_a, [0, 1, 2]) + assert.deepEqual(starts_b, [0, 1, 2]) +}) + +test('global-serial: awaited child jumps ahead of queued events across buses', async () => { + const ParentEvent = BaseEvent.extend('ParentEvent', {}) + const ChildEvent = BaseEvent.extend('ChildEvent', {}) + const QueuedEvent = BaseEvent.extend('QueuedEvent', {}) + + const bus_a = new EventBus('GlobalSerialParent', { event_concurrency: 'global-serial' }) + const bus_b = new EventBus('GlobalSerialChild', { event_concurrency: 'global-serial' }) + + const order: string[] = [] + + bus_b.on(ChildEvent, async () => { + order.push('child_start') + await sleep(5) + order.push('child_end') + }) + + bus_b.on(QueuedEvent, async () => { + order.push('queued_start') + await sleep(1) + order.push('queued_end') + }) + + bus_a.on(ParentEvent, async (event) => { + order.push('parent_start') + bus_b.emit(QueuedEvent({})) + // Emit through the scoped proxy so parent tracking is set up, + // then also dispatch to bus_b for cross-bus processing. + const child = event.bus?.emit(ChildEvent({}))! 
+ bus_b.emit(child) + order.push('child_dispatched') + await child.done() + order.push('child_awaited') + order.push('parent_end') + }) + + const parent = bus_a.emit(ParentEvent({})) + await parent.done() + await bus_b.waitUntilIdle() + + const child_start_idx = order.indexOf('child_start') + const child_end_idx = order.indexOf('child_end') + const queued_start_idx = order.indexOf('queued_start') + + assert.ok(child_start_idx !== -1) + assert.ok(child_end_idx !== -1) + assert.ok(queued_start_idx !== -1) + assert.ok(child_start_idx < queued_start_idx) + assert.ok(child_end_idx < queued_start_idx) +}) + +test('global handler lock via retry serializes handlers across buses', async () => { + const HandlerEvent = BaseEvent.extend('HandlerEvent', { + order: z.number(), + source: z.string(), + }) + + const bus_a = new EventBus('GlobalHandlerA', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('GlobalHandlerB', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + + const handler = retry({ semaphore_scope: 'global', semaphore_name: 'handler_lock_global', semaphore_limit: 1 })(async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await sleep(5) + in_flight -= 1 + }) + + bus_a.on(HandlerEvent, handler) + bus_b.on(HandlerEvent, handler) + + for (let i = 0; i < 4; i += 1) { + bus_a.emit(HandlerEvent({ order: i, source: 'a' })) + bus_b.emit(HandlerEvent({ order: i, source: 'b' })) + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + assert.equal(max_in_flight, 1) +}) + +test('bus-serial: events serialize per bus but overlap across buses', async () => { + const SerialEvent = BaseEvent.extend('SerialPerBusEvent', { + order: z.number(), + source: z.string(), + }) + + const bus_a = new EventBus('BusSerialA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialB', { 
event_concurrency: 'bus-serial' }) + + let in_flight_global = 0 + let max_in_flight_global = 0 + let in_flight_a = 0 + let in_flight_b = 0 + let max_in_flight_a = 0 + let max_in_flight_b = 0 + + let resolve_b_started: (() => void) | null = null + const b_started = new Promise((resolve) => { + resolve_b_started = resolve + }) + + bus_a.on(SerialEvent, async () => { + in_flight_global += 1 + in_flight_a += 1 + max_in_flight_global = Math.max(max_in_flight_global, in_flight_global) + max_in_flight_a = Math.max(max_in_flight_a, in_flight_a) + await b_started + await sleep(10) + in_flight_global -= 1 + in_flight_a -= 1 + }) + + bus_b.on(SerialEvent, async () => { + in_flight_global += 1 + in_flight_b += 1 + max_in_flight_global = Math.max(max_in_flight_global, in_flight_global) + max_in_flight_b = Math.max(max_in_flight_b, in_flight_b) + if (resolve_b_started) { + resolve_b_started() + resolve_b_started = null + } + await sleep(10) + in_flight_global -= 1 + in_flight_b -= 1 + }) + + bus_a.emit(SerialEvent({ order: 0, source: 'a' })) + bus_b.emit(SerialEvent({ order: 0, source: 'b' })) + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + assert.equal(max_in_flight_a, 1) + assert.equal(max_in_flight_b, 1) + assert.ok(max_in_flight_global >= 2) +}) + +test('bus-serial: FIFO order preserved per bus with interleaving', async () => { + const SerialEvent = BaseEvent.extend('SerialInterleavedEvent', { + order: z.number(), + source: z.string(), + }) + + const bus_a = new EventBus('BusSerialOrderA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialOrderB', { event_concurrency: 'bus-serial' }) + + const starts_a: number[] = [] + const starts_b: number[] = [] + + bus_a.on(SerialEvent, async (event) => { + starts_a.push(event.order) + await sleep(2) + }) + + bus_b.on(SerialEvent, async (event) => { + starts_b.push(event.order) + await sleep(2) + }) + + for (let i = 0; i < 4; i += 1) { + bus_a.emit(SerialEvent({ order: i, source: 'a' })) + 
bus_b.emit(SerialEvent({ order: i, source: 'b' })) + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + assert.deepEqual(starts_a, [0, 1, 2, 3]) + assert.deepEqual(starts_b, [0, 1, 2, 3]) +}) + +test('bus-serial: awaiting child on one bus does not block other bus queue', async () => { + const ParentEvent = BaseEvent.extend('BusSerialParent', {}) + const ChildEvent = BaseEvent.extend('BusSerialChild', {}) + const OtherEvent = BaseEvent.extend('BusSerialOther', {}) + + const bus_a = new EventBus('BusSerialParentBus', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('BusSerialOtherBus', { event_concurrency: 'bus-serial' }) + + const order: string[] = [] + + bus_a.on(ChildEvent, async () => { + order.push('child_start') + await sleep(10) + order.push('child_end') + }) + + bus_a.on(ParentEvent, async (event) => { + order.push('parent_start') + const child = event.bus?.emit(ChildEvent({}))! + await child.done() + order.push('parent_end') + }) + + bus_b.on(OtherEvent, async () => { + order.push('other_start') + await sleep(2) + order.push('other_end') + }) + + const parent = bus_a.emit(ParentEvent({})) + await sleep(0) + bus_b.emit(OtherEvent({})) + + await parent.done() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + const other_start_idx = order.indexOf('other_start') + const parent_end_idx = order.indexOf('parent_end') + assert.ok(other_start_idx !== -1) + assert.ok(parent_end_idx !== -1) + assert.ok(other_start_idx < parent_end_idx) +}) + +test('parallel: events overlap on same bus when event_concurrency is parallel', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEvent', { order: z.number() }) + const bus = new EventBus('ParallelEventBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + setTimeout(() => resolve(), 20) + + bus.on(ParallelEvent, async (_event) => { + in_flight 
+= 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + await sleep(10) + in_flight -= 1 + }) + + bus.emit(ParallelEvent({ order: 0 })) + bus.emit(ParallelEvent({ order: 1 })) + + await bus.waitUntilIdle() + assert.ok(max_in_flight >= 2) +}) + +test('parallel: handlers overlap for same event when event_handler_concurrency is parallel', async () => { + const ParallelHandlerEvent = BaseEvent.extend('ParallelHandlerEvent', {}) + const bus = new EventBus('ParallelHandlerBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler_a = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + const handler_b = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(ParallelHandlerEvent, handler_a) + bus.on(ParallelHandlerEvent, handler_b) + + const event = bus.emit(ParallelHandlerEvent({})) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('parallel: global handler lock via retry still serializes across buses', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventGlobalHandler', { + source: z.string(), + }) + + const bus_a = new EventBus('ParallelHandlerGlobalA', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('ParallelHandlerGlobalB', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = retry({ + semaphore_scope: 'global', + semaphore_name: (event: BaseEvent) => `handler_lock_${event.event_type}`, + semaphore_limit: 1, + })(async (_event: BaseEvent) => { + in_flight += 1 + max_in_flight = 
Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus_a.on(ParallelEvent, handler) + bus_b.on(ParallelEvent, handler) + + bus_a.emit(ParallelEvent({ source: 'a' })) + bus_b.emit(ParallelEvent({ source: 'b' })) + + await sleep(0) + resolve() + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + + assert.equal(max_in_flight, 1) +}) + +test('retry: instance scope serializes selected handlers per event in parallel mode', async () => { + const SerializedEvent = BaseEvent.extend('RetryInstanceSerializedHandlers', {}) + const bus = new EventBus('RetryInstanceSerializedBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + const log: string[] = [] + + class HandlerSuite { + @retry({ semaphore_scope: 'instance', semaphore_limit: 1, semaphore_name: (event: BaseEvent) => `serial-${event.event_id}` }) + async step1(event: BaseEvent) { + log.push(`step1_start_${event.event_id}`) + await sleep(10) + log.push(`step1_end_${event.event_id}`) + } + + @retry({ semaphore_scope: 'instance', semaphore_limit: 1, semaphore_name: (event: BaseEvent) => `serial-${event.event_id}` }) + async step2(event: BaseEvent) { + log.push(`step2_start_${event.event_id}`) + await sleep(5) + log.push(`step2_end_${event.event_id}`) + } + + async parallel(_event: BaseEvent) { + log.push('parallel') + } + } + + const handlers = new HandlerSuite() + + bus.on(SerializedEvent, handlers.step1.bind(handlers)) + bus.on(SerializedEvent, handlers.step2.bind(handlers)) + bus.on(SerializedEvent, handlers.parallel.bind(handlers)) + + const event = bus.emit(SerializedEvent({})) + await event.done() + await bus.waitUntilIdle() + + const step1_end = log.findIndex((entry) => entry.startsWith('step1_end_')) + const step2_start = log.findIndex((entry) => entry.startsWith('step2_start_')) + assert.ok(step1_end !== -1 && step2_start !== -1, 'serialized handlers should have run') + assert.ok(step1_end < step2_start, `instance scope: step2 should start after 
step1 ends. Got: [${log.join(', ')}]`) +}) + +test('precedence: event event_concurrency overrides bus defaults to parallel', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEventParallelEvents', { + event_concurrency: z.literal('parallel'), + order: z.number(), + }) + const bus = new EventBus('OverrideParallelEventsBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + bus.on(OverrideEvent, async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus.emit(OverrideEvent({ order: 0, event_concurrency: 'parallel' })) + bus.emit(OverrideEvent({ order: 1, event_concurrency: 'parallel' })) + + await sleep(0) + resolve() + await bus.waitUntilIdle() + + assert.ok(max_in_flight >= 2) +}) + +test('precedence: event event_concurrency overrides bus defaults to bus-serial', async () => { + const OverrideEvent = BaseEvent.extend('OverrideEventBusSerial', { + event_concurrency: z.literal('bus-serial'), + order: z.number(), + }) + const bus = new EventBus('OverrideBusSerialEventsBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + bus.on(OverrideEvent, async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus.emit(OverrideEvent({ order: 0, event_concurrency: 'bus-serial' })) + bus.emit(OverrideEvent({ order: 1, event_concurrency: 'bus-serial' })) + + await sleep(0) + assert.equal(max_in_flight, 1) + resolve() + await bus.waitUntilIdle() +}) + +test('global-serial + handler parallel: handlers overlap but events do not across buses', async () => { + const SerialParallelEvent = BaseEvent.extend('GlobalSerialParallelHandlers', {}) + + const bus_a = new 
EventBus('GlobalSerialParallelA', { + event_concurrency: 'global-serial', + event_handler_concurrency: 'parallel', + }) + const bus_b = new EventBus('GlobalSerialParallelB', { + event_concurrency: 'global-serial', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus_a.on(SerialParallelEvent, handler) + bus_a.on(SerialParallelEvent, handler) + bus_b.on(SerialParallelEvent, handler) + bus_b.on(SerialParallelEvent, handler) + + bus_a.emit(SerialParallelEvent({})) + bus_b.emit(SerialParallelEvent({})) + + await sleep(0) + assert.equal(max_in_flight, 2) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) + +test('event parallel + handler serial: handlers serialize within each event', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventsSerialHandlers', { order: z.number() }) + const bus = new EventBus('ParallelEventsSerialHandlersBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + }) + + let global_in_flight = 0 + let global_max = 0 + const per_event_in_flight = new Map() + const per_event_max = new Map() + const { promise, resolve } = withResolvers() + const { promise: started_promise, resolve: resolve_started } = withResolvers() + let started_handlers = 0 + const started_timeout = setTimeout(resolve_started, 50) + + const handler = async (event: BaseEvent) => { + global_in_flight += 1 + global_max = Math.max(global_max, global_in_flight) + const event_count = (per_event_in_flight.get(event.event_id) ?? 0) + 1 + per_event_in_flight.set(event.event_id, event_count) + per_event_max.set(event.event_id, Math.max(per_event_max.get(event.event_id) ?? 
0, event_count)) + started_handlers += 1 + if (started_handlers === 2) { + clearTimeout(started_timeout) + resolve_started() + } + await promise + global_in_flight -= 1 + per_event_in_flight.set(event.event_id, Math.max(0, (per_event_in_flight.get(event.event_id) ?? 1) - 1)) + } + + bus.on(ParallelEvent, handler) + bus.on(ParallelEvent, handler) + + const event_a = bus.emit(ParallelEvent({ order: 0 })) + const event_b = bus.emit(ParallelEvent({ order: 1 })) + + await started_promise + assert.equal(per_event_max.get(event_a.event_id), 1) + assert.equal(per_event_max.get(event_b.event_id), 1) + assert.ok(global_max >= 2) + resolve() + await bus.waitUntilIdle() +}) + +test('event parallel + handler serial: handlers overlap across buses', async () => { + const ParallelEvent = BaseEvent.extend('ParallelEventsBusHandlers', { source: z.string() }) + + const bus_a = new EventBus('ParallelBusHandlersA', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + }) + const bus_b = new EventBus('ParallelBusHandlersB', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus_a.on(ParallelEvent, handler) + bus_b.on(ParallelEvent, handler) + + bus_a.emit(ParallelEvent({ source: 'a' })) + bus_b.emit(ParallelEvent({ source: 'b' })) + + await sleep(0) + assert.ok(max_in_flight >= 2) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) + +test('retry can enforce global lock even when bus defaults to parallel', async () => { + const HandlerEvent = BaseEvent.extend('HandlerOptionsGlobalSerial', { source: z.string() }) + + const bus_a = new EventBus('HandlerOptionsGlobalA', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + const bus_b = new 
EventBus('HandlerOptionsGlobalB', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = retry({ semaphore_scope: 'global', semaphore_name: 'handler_lock_options', semaphore_limit: 1 })(async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + }) + + bus_a.on(HandlerEvent, handler) + bus_b.on(HandlerEvent, handler) + + bus_a.emit(HandlerEvent({ source: 'a' })) + bus_b.emit(HandlerEvent({ source: 'b' })) + + await sleep(0) + assert.equal(max_in_flight, 1) + resolve() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) +}) + +test('null: event_concurrency null resolves to bus defaults', async () => { + const AutoEvent = BaseEvent.extend('AutoEvent', { + event_concurrency: z.null(), + }) + const bus = new EventBus('AutoBus', { event_concurrency: 'bus-serial' }) + + let in_flight = 0 + let max_in_flight = 0 + + bus.on(AutoEvent, async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await sleep(5) + in_flight -= 1 + }) + + bus.emit(AutoEvent({ event_concurrency: null })) + bus.emit(AutoEvent({ event_concurrency: null })) + + await bus.waitUntilIdle() + assert.equal(max_in_flight, 1) +}) + +test('null: event_handler_concurrency null resolves to bus defaults', async () => { + const AutoHandlerEvent = BaseEvent.extend('AutoHandlerEvent', { + event_handler_concurrency: z.null(), + }) + const bus = new EventBus('AutoHandlerBus', { event_handler_concurrency: 'serial' }) + + let in_flight = 0 + let max_in_flight = 0 + const { promise, resolve } = withResolvers() + + const handler = async () => { + in_flight += 1 + max_in_flight = Math.max(max_in_flight, in_flight) + await promise + in_flight -= 1 + } + + bus.on(AutoHandlerEvent, handler) + bus.on(AutoHandlerEvent, handler) + + const event = bus.emit(AutoHandlerEvent({ 
event_handler_concurrency: null })) + await sleep(0) + resolve() + await event.done() + await bus.waitUntilIdle() + + assert.equal(max_in_flight, 1) +}) + +test('queue-jump: awaited child preempts queued sibling on same bus', async () => { + const ParentEvent = BaseEvent.extend('QueueJumpParent', {}) + const ChildEvent = BaseEvent.extend('QueueJumpChild', {}) + const SiblingEvent = BaseEvent.extend('QueueJumpSibling', {}) + + const bus = new EventBus('QueueJumpBus', { event_concurrency: 'bus-serial' }) + const order: string[] = [] + + bus.on(ChildEvent, async () => { + order.push('child_start') + await sleep(5) + order.push('child_end') + }) + + bus.on(SiblingEvent, async () => { + order.push('sibling_start') + await sleep(1) + order.push('sibling_end') + }) + + bus.on(ParentEvent, async (event) => { + order.push('parent_start') + bus.emit(SiblingEvent({})) + const child = event.bus?.emit(ChildEvent({}))! + order.push('child_dispatched') + await child.done() + order.push('child_awaited') + order.push('parent_end') + }) + + const parent = bus.emit(ParentEvent({})) + await parent.done() + await bus.waitUntilIdle() + + const child_start_idx = order.indexOf('child_start') + const child_end_idx = order.indexOf('child_end') + const sibling_start_idx = order.indexOf('sibling_start') + + assert.ok(child_start_idx !== -1) + assert.ok(child_end_idx !== -1) + assert.ok(sibling_start_idx !== -1) + assert.ok(child_start_idx < sibling_start_idx) + assert.ok(child_end_idx < sibling_start_idx) +}) + +test('queue-jump: same event handlers on separate buses stay isolated without forwarding', async () => { + const ParentEvent = BaseEvent.extend('QueueJumpIsolatedParent', {}) + const SharedEvent = BaseEvent.extend('QueueJumpIsolatedShared', {}) + const SiblingEvent = BaseEvent.extend('QueueJumpIsolatedSibling', {}) + + const bus_a = new EventBus('QueueJumpIsolatedA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('QueueJumpIsolatedB', { event_concurrency: 
'bus-serial' }) + + const order: string[] = [] + let bus_a_shared_runs = 0 + let bus_b_shared_runs = 0 + + bus_a.on(SharedEvent, async () => { + bus_a_shared_runs += 1 + order.push('bus_a_shared_start') + await sleep(2) + order.push('bus_a_shared_end') + }) + + bus_b.on(SharedEvent, async () => { + bus_b_shared_runs += 1 + order.push('bus_b_shared_start') + await sleep(2) + order.push('bus_b_shared_end') + }) + + bus_a.on(SiblingEvent, async () => { + order.push('bus_a_sibling_start') + await sleep(1) + order.push('bus_a_sibling_end') + }) + + bus_a.on(ParentEvent, async (event) => { + order.push('parent_start') + bus_a.emit(SiblingEvent({})) + const shared = event.bus?.emit(SharedEvent({}))! + order.push('shared_dispatched') + await shared.done() + order.push('shared_awaited') + order.push('parent_end') + }) + + const parent = bus_a.emit(ParentEvent({})) + await parent.done() + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + assert.equal(bus_a_shared_runs, 1) + assert.equal(bus_b_shared_runs, 0) + assert.equal(order.includes('bus_b_shared_start'), false) + + const bus_a_shared_end_idx = order.indexOf('bus_a_shared_end') + const bus_a_sibling_start_idx = order.indexOf('bus_a_sibling_start') + assert.ok(bus_a_shared_end_idx !== -1) + assert.ok(bus_a_sibling_start_idx !== -1) + assert.ok(bus_a_shared_end_idx < bus_a_sibling_start_idx) +}) + +test('queue-jump: awaiting in-flight event does not double-run handlers', async () => { + const InFlightEvent = BaseEvent.extend('InFlightEvent', {}) + const bus = new EventBus('InFlightBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + let handler_runs = 0 + let resolve_started: (() => void) | null = null + const started = new Promise((resolve) => { + resolve_started = resolve + }) + const { promise: release_child, resolve: resolve_child } = withResolvers() + + bus.on(InFlightEvent, async () => { + handler_runs += 1 + if (resolve_started) { + resolve_started() + 
resolve_started = null + } + await release_child + }) + + const child = bus.emit(InFlightEvent({})) + await started + + let done_resolved = false + const done_promise = child.done().then(() => { + done_resolved = true + }) + + await sleep(0) + assert.equal(done_resolved, false) + + resolve_child() + await done_promise + await bus.waitUntilIdle() + + assert.equal(handler_runs, 1) +}) + +test('edge-case: event with no handlers completes immediately', async () => { + const NoHandlerEvent = BaseEvent.extend('NoHandlerEvent', {}) + const bus = new EventBus('NoHandlerBus') + + const event = bus.emit(NoHandlerEvent({})) + await event.done() + await bus.waitUntilIdle() + + assert.equal(event.event_status, 'completed') + assert.equal(event.event_pending_bus_count, 0) +}) + +test('fifo: forwarded events preserve order on target bus (bus-serial)', async () => { + const OrderedEvent = BaseEvent.extend('ForwardOrderEvent', { order: z.number() }) + + const bus_a = new EventBus('ForwardOrderA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('ForwardOrderB', { event_concurrency: 'bus-serial' }) + + const order_a: number[] = [] + const order_b: number[] = [] + + bus_a.on(OrderedEvent, async (event) => { + order_a.push(event.order) + bus_b.emit(event) + await sleep(2) + }) + + bus_b.on(OrderedEvent, async (event) => { + const bus_b_results = Array.from(event.event_results.values()).filter((result) => result.eventbus_id === bus_b.id) + const in_flight = bus_b_results.filter((result) => result.status === 'pending' || result.status === 'started') + assert.ok(in_flight.length <= 1) + order_b.push(event.order) + await sleep(1) + }) + + for (let i = 0; i < 5; i += 1) { + bus_a.emit(OrderedEvent({ order: i })) + } + + await Promise.all([bus_a.waitUntilIdle(), bus_b.waitUntilIdle()]) + + const history_orders = Array.from(bus_b.event_history.values()).map((event) => (event as { order?: number }).order) + const results_sizes = 
Array.from(bus_b.event_history.values()).map((event) => event.event_results.size) + const bus_b_result_counts = Array.from(bus_b.event_history.values()).map( + (event) => Array.from(event.event_results.values()).filter((result) => result.eventbus_id === bus_b.id).length + ) + const processed_flags = Array.from(bus_b.event_history.values()).map((event) => + Array.from(event.event_results.values()) + .filter((result) => result.eventbus_id === bus_b.id) + .every((result) => result.status === 'completed' || result.status === 'error') + ) + const pending_counts = Array.from(bus_b.event_history.values()).map( + (event) => Array.from(event.event_results.values()).filter((result) => result.status === 'pending').length + ) + assert.deepEqual(order_a, [0, 1, 2, 3, 4]) + assert.deepEqual(order_b, [0, 1, 2, 3, 4]) + assert.deepEqual(history_orders, [0, 1, 2, 3, 4]) + assert.deepEqual(results_sizes, [2, 2, 2, 2, 2]) + assert.deepEqual(bus_b_result_counts, [1, 1, 1, 1, 1]) + assert.deepEqual(processed_flags, [true, true, true, true, true]) + assert.deepEqual(pending_counts, [0, 0, 0, 0, 0]) +}) + +test('fifo: forwarded events preserve order across chained buses (bus-serial)', async () => { + const OrderedEvent = BaseEvent.extend('ForwardChainEvent', { order: z.number() }) + + const bus_a = new EventBus('ForwardChainA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('ForwardChainB', { event_concurrency: 'bus-serial' }) + const bus_c = new EventBus('ForwardChainC', { event_concurrency: 'bus-serial' }) + + const order_c: number[] = [] + + bus_b.on(OrderedEvent, async () => { + await sleep(2) + }) + + bus_c.on(OrderedEvent, async (event) => { + order_c.push(event.order) + await sleep(1) + }) + + bus_a.on('*', bus_b.emit) + bus_b.on('*', bus_c.emit) + + for (let i = 0; i < 6; i += 1) { + bus_a.emit(OrderedEvent({ order: i })) + } + + await bus_a.waitUntilIdle() + await bus_b.waitUntilIdle() + await bus_c.waitUntilIdle() + + assert.deepEqual(order_c, [0, 1, 2, 3, 4, 
5]) +}) + +test('find: past returns most recent completed event (bus-scoped)', async () => { + const DebounceEvent = BaseEvent.extend('FindPastEvent', { value: z.number() }) + const bus = new EventBus('FindPastBus') + + bus.on(DebounceEvent, async () => {}) + + bus.emit(DebounceEvent({ value: 1 })) + bus.emit(DebounceEvent({ value: 2 })) + + await bus.waitUntilIdle() + + const found = await bus.find(DebounceEvent, { past: true, future: false }) + assert.ok(found) + assert.equal(found.value, 2) + assert.equal(found.event_status, 'completed') + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindPastBus') + assert.equal(typeof found.bus.emit, 'function') +}) + +test('find: past returns in-flight dispatched event and done waits', async () => { + const DebounceEvent = BaseEvent.extend('FindFutureEvent', { value: z.number() }) + const bus = new EventBus('FindFutureBus') + const { promise, resolve } = withResolvers() + + bus.on(DebounceEvent, async () => { + await promise + }) + + bus.emit(DebounceEvent({ value: 1 })) + + const found = await bus.find(DebounceEvent, { past: true, future: false }) + assert.ok(found) + assert.equal(found.value, 1) + assert.ok(found.event_status !== 'completed') + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindFutureBus') + + resolve() + const completed = await found.done() + assert.equal(completed.event_status, 'completed') +}) + +test('find: future waits for next event when none in-flight', async () => { + const DebounceEvent = BaseEvent.extend('FindWaitEvent', { value: z.number() }) + const bus = new EventBus('FindWaitBus') + + bus.on(DebounceEvent, async () => {}) + + setTimeout(() => { + bus.emit(DebounceEvent({ value: 99 })) + }, 10) + + const found = await bus.find(DebounceEvent, { past: false, future: 0.2 }) + assert.ok(found) + assert.equal(found.value, 99) + assert.ok(found.bus) + assert.equal(found.bus.name, 'FindWaitBus') + await found.done() +}) + +test('find: most recent wins across completed and in-flight', 
async () => { + const DebounceEvent = BaseEvent.extend('FindMostRecentEvent', { value: z.number() }) + const bus = new EventBus('FindMostRecentBus') + const { promise, resolve } = withResolvers() + + bus.on(DebounceEvent, async (event) => { + if (event.value === 2) { + await promise + } + }) + + bus.emit(DebounceEvent({ value: 1 })) + await bus.waitUntilIdle() + + bus.emit(DebounceEvent({ value: 2 })) + + const found = await bus.find(DebounceEvent, { past: true, future: true }) + assert.ok(found) + assert.equal(found.value, 2) + assert.ok(found.event_status !== 'completed') + + resolve() + await found.done() +}) diff --git a/bubus-ts/tests/eventbus_log_tree.test.ts b/bubus-ts/tests/eventbus_log_tree.test.ts new file mode 100644 index 0000000..fcdadb7 --- /dev/null +++ b/bubus-ts/tests/eventbus_log_tree.test.ts @@ -0,0 +1,178 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +const RootEvent = BaseEvent.extend('RootEvent', { data: z.string().optional() }) +const ChildEvent = BaseEvent.extend('ChildEvent', { value: z.number().optional() }) +const GrandchildEvent = BaseEvent.extend('GrandchildEvent', { nested: z.record(z.string(), z.number()).optional() }) +const delay = (ms: number): Promise => new Promise((resolve) => setTimeout(resolve, ms)) + +class ValueError extends Error { + constructor(message: string) { + super(message) + this.name = 'ValueError' + } +} + +const waitForStartedResult = async (event: BaseEvent, timeout_ms = 1000): Promise => { + const started_by = Date.now() + timeout_ms + while (Date.now() < started_by) { + if (Array.from(event.event_results.values()).some((result) => result.status === 'started')) { + return + } + await delay(5) + } + throw new Error(`Timed out waiting for started handler result on ${event.event_type}#${event.event_id.slice(-4)}`) +} + +test('logTree: single event', async () => { + const bus = new 
EventBus('SingleBus') + try { + const event = bus.emit(RootEvent({ data: 'test' })) + await event.done() + const output = bus.logTree() + assert.ok(output.includes('└── βœ… RootEvent#')) + assert.ok(output.includes('[') && output.includes(']')) + } finally { + bus.destroy() + } +}) + +test('logTree: with handler results', async () => { + const bus = new EventBus('HandlerBus') + try { + async function test_handler(_event: InstanceType): Promise { + return 'status: success' + } + + bus.on(RootEvent, test_handler) + const event = bus.emit(RootEvent({ data: 'test' })) + await event.done() + const output = bus.logTree() + assert.ok(output.includes('└── βœ… RootEvent#')) + assert.ok(output.includes(`${bus.label}.test_handler#`)) + assert.ok(output.includes('"status: success"')) + } finally { + bus.destroy() + } +}) + +test('logTree: with handler errors', async () => { + const bus = new EventBus('ErrorBus') + try { + async function error_handler(_event: InstanceType): Promise { + throw new ValueError('Test error message') + } + + bus.on(RootEvent, error_handler) + const event = bus.emit(RootEvent({ data: 'test' })) + await event.done() + const output = bus.logTree() + assert.ok(output.includes(`${bus.label}.error_handler#`)) + assert.ok(output.includes('ValueError: Test error message')) + } finally { + bus.destroy() + } +}) + +test('logTree: complex nested', async () => { + const bus = new EventBus('ComplexBus') + try { + async function root_handler(_event: InstanceType): Promise { + const child = bus.emit(ChildEvent({ value: 100 })) + await child.done() + return 'Root processed' + } + + async function child_handler(_event: InstanceType): Promise { + const grandchild = bus.emit(GrandchildEvent({})) + await grandchild.done() + return [1, 2, 3] + } + + async function grandchild_handler(_event: InstanceType): Promise { + return null + } + + bus.on(RootEvent, root_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, grandchild_handler) + + const root = 
bus.emit(RootEvent({ data: 'root_data' })) + await root.done() + const output = bus.logTree() + assert.ok(output.includes('βœ… RootEvent#')) + assert.ok(output.includes(`βœ… ${bus.label}.root_handler#`)) + assert.ok(output.includes('βœ… ChildEvent#')) + assert.ok(output.includes(`βœ… ${bus.label}.child_handler#`)) + assert.ok(output.includes('βœ… GrandchildEvent#')) + assert.ok(output.includes(`βœ… ${bus.label}.grandchild_handler#`)) + assert.ok(output.includes('"Root processed"')) + assert.ok(output.includes('list(3 items)')) + assert.ok(output.includes('None')) + } finally { + bus.destroy() + } +}) + +test('logTree: multiple roots', async () => { + const bus = new EventBus('MultiBus') + try { + const root1 = bus.emit(RootEvent({ data: 'first' })) + const root2 = bus.emit(RootEvent({ data: 'second' })) + await Promise.all([root1.done(), root2.done()]) + const output = bus.logTree() + assert.equal(output.split('β”œβ”€β”€ βœ… RootEvent#').length - 1, 1) + assert.equal(output.split('└── βœ… RootEvent#').length - 1, 1) + } finally { + bus.destroy() + } +}) + +test('logTree: timing info', async () => { + const bus = new EventBus('TimingBus') + try { + async function timed_handler(_event: InstanceType): Promise { + await delay(5) + return 'done' + } + + bus.on(RootEvent, timed_handler) + const event = bus.emit(RootEvent({})) + await event.done() + const output = bus.logTree() + assert.ok(output.includes('(')) + assert.ok(output.includes('s)')) + } finally { + bus.destroy() + } +}) + +test('logTree: running handler', async () => { + const bus = new EventBus('RunningBus') + let release_handler!: () => void + const block_handler = new Promise((resolve) => { + release_handler = resolve + }) + try { + async function running_handler(_event: InstanceType): Promise { + await block_handler + return 'done' + } + + bus.on(RootEvent, running_handler) + const event = bus.emit(RootEvent({})) + await waitForStartedResult(event) + const output = bus.logTree() + 
assert.ok(output.includes(`${bus.label}.running_handler#`)) + assert.ok(output.includes('πŸƒ RootEvent#')) + release_handler() + await event.done() + } finally { + release_handler() + bus.destroy() + } +}) diff --git a/bubus-ts/tests/eventbus_on_off.test.ts b/bubus-ts/tests/eventbus_on_off.test.ts new file mode 100644 index 0000000..7439565 --- /dev/null +++ b/bubus-ts/tests/eventbus_on_off.test.ts @@ -0,0 +1,140 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { z } from 'zod' + +import { BaseEvent, EventBus } from '../src/index.js' + +test('on stores EventHandler entry and indexes it by event key', async () => { + const bus = new EventBus('RegistryBus') + const RegistryEvent = BaseEvent.extend('RegistryEvent', {}) + const handler = (event: BaseEvent): string => event.event_type + + const entry = bus.on('RegistryEvent', handler) + + assert.ok(entry.id) + assert.equal(bus.handlers.has(entry.id), true) + assert.equal(bus.handlers.get(entry.id), entry) + assert.equal((bus.handlers_by_key.get('RegistryEvent') ?? 
[]).includes(entry.id), true) + + const dispatched = bus.emit(RegistryEvent({})) + await dispatched.done() + + const result = dispatched.event_results.get(entry.id) + assert.ok(result) + assert.equal(result!.handler.id, entry.id) +}) + +const RegistryTypingEvent = BaseEvent.extend('RegistryTypingEvent', { + required_token: z.string(), + event_result_type: z.string(), +}) +type RegistryTypingEventInstance = InstanceType + +const typed_sync_handler = (event: RegistryTypingEventInstance): string => event.required_token +const typed_async_handler = async (event: RegistryTypingEventInstance): Promise => event.required_token + +test('off removes handlers by callable, handler id, entry object, or all', async () => { + const bus = new EventBus('RegistryOffBus') + const RegistryEvent = BaseEvent.extend('RegistryOffEvent', {}) + + const handler_a = (_event: BaseEvent): void => {} + const handler_b = (_event: BaseEvent): void => {} + const handler_c = (_event: BaseEvent): void => {} + + const entry_a = bus.on('RegistryOffEvent', handler_a) + const entry_b = bus.on('RegistryOffEvent', handler_b) + const entry_c = bus.on('RegistryOffEvent', handler_c) + + bus.off('RegistryOffEvent', handler_a) + assert.equal(bus.handlers.has(entry_a.id), false) + assert.equal((bus.handlers_by_key.get('RegistryOffEvent') ?? []).includes(entry_a.id), false) + assert.equal(bus.handlers.has(entry_b.id), true) + + bus.off('RegistryOffEvent', entry_b.id) + assert.equal(bus.handlers.has(entry_b.id), false) + assert.equal((bus.handlers_by_key.get('RegistryOffEvent') ?? 
[]).includes(entry_b.id), false) + assert.equal(bus.handlers.has(entry_c.id), true) + + bus.off('RegistryOffEvent', entry_c) + assert.equal(bus.handlers.has(entry_c.id), false) + assert.equal(bus.handlers_by_key.has('RegistryOffEvent'), false) + + bus.on('RegistryOffEvent', handler_a) + bus.on('RegistryOffEvent', handler_b) + bus.off('RegistryOffEvent') + assert.equal(bus.handlers_by_key.has('RegistryOffEvent'), false) + assert.equal( + Array.from(bus.handlers.values()).every((entry) => entry.event_pattern !== 'RegistryOffEvent'), + true + ) + + const dispatched = bus.emit(RegistryEvent({})) + await dispatched.done() + assert.equal(dispatched.event_results.size, 0) +}) + +test('on accepts sync handlers and dispatch captures their return values', async () => { + const bus = new EventBus('RegistryNormalizeBus') + const NormalizeEvent = BaseEvent.extend('RegistryNormalizeEvent', {}) + const calls: string[] = [] + + const sync_handler = (event: BaseEvent): string => { + calls.push(event.event_id) + return 'normalized' + } + + const entry = bus.on(NormalizeEvent, sync_handler) + assert.equal(entry.handler, sync_handler) + const normalized_result = await entry._handler_async(new NormalizeEvent({})) + assert.equal(normalized_result, 'normalized') + + const dispatched = bus.emit(NormalizeEvent({})) + await dispatched.done() + const result = dispatched.event_results.get(entry.id) + + assert.ok(result) + assert.equal(result!.status, 'completed') + assert.equal(result!.result, 'normalized') + assert.equal(calls.length, 2) +}) + +test('on keeps async handlers normalized through _handler_async', async () => { + const bus = new EventBus('RegistryAsyncNormalizeBus') + const NormalizeEvent = BaseEvent.extend('RegistryAsyncNormalizeEvent', {}) + const calls: string[] = [] + + const async_handler = async (event: BaseEvent): Promise => { + calls.push(event.event_id) + return 'async_normalized' + } + const entry = bus.on(NormalizeEvent, async_handler) + assert.equal(entry.handler, 
async_handler) + assert.equal(entry._handler_async, async_handler) + + const normalized_result = await entry._handler_async(new NormalizeEvent({})) + assert.equal(normalized_result, 'async_normalized') + + const dispatched = bus.emit(NormalizeEvent({})) + await dispatched.done() + const result = dispatched.event_results.get(entry.id) + + assert.ok(result) + assert.equal(result!.status, 'completed') + assert.equal(result!.result, 'async_normalized') + assert.equal(calls.length, 2) +}) + +test('_handler_async preserves typed arg/return contracts for sync handlers', async () => { + const bus = new EventBus('RegistryTypingSyncBus') + const entry = bus.on(RegistryTypingEvent, typed_sync_handler) + const result = await entry._handler_async(RegistryTypingEvent({ required_token: 'sync' })) + assert.equal(result, 'sync') +}) + +test('_handler_async preserves typed arg/return contracts for async handlers', async () => { + const bus = new EventBus('RegistryTypingAsyncBus') + const entry = bus.on(RegistryTypingEvent, typed_async_handler) + const result = await entry._handler_async(RegistryTypingEvent({ required_token: 'async' })) + assert.equal(result, 'async') +}) diff --git a/bubus-ts/tests/eventbus_performance.test.ts b/bubus-ts/tests/eventbus_performance.test.ts new file mode 100644 index 0000000..cd0f5d3 --- /dev/null +++ b/bubus-ts/tests/eventbus_performance.test.ts @@ -0,0 +1,56 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus, EventHandlerCancelledError, EventHandlerTimeoutError } from '../src/index.js' +import { + runCleanupEquivalence, + runPerf50kEvents, + runPerfEphemeralBuses, + runPerfSingleEventManyFixedHandlers, + runPerfOnOffChurn, + runPerfWorstCase, +} from './performance.scenarios.js' + +const nodePerfInput = { + runtimeName: 'node:test', + api: { BaseEvent, EventBus, EventHandlerTimeoutError, EventHandlerCancelledError }, + now: () => performance.now(), + sleep: (ms: number) => new 
Promise((resolve) => setTimeout(resolve, ms)), + log: (message: string) => console.log(message), + getMemoryUsage: () => process.memoryUsage(), + limits: { + singleRunMs: 30_000, + worstCaseMs: 60_000, + }, +} + +test('processes 50k events within reasonable time', { timeout: 30_000 }, async () => { + const result = await runPerf50kEvents(nodePerfInput) + assert.equal(result.scenario, '50k events') +}) + +test('500 ephemeral buses with 100 events each', { timeout: 30_000 }, async () => { + const result = await runPerfEphemeralBuses(nodePerfInput) + assert.equal(result.scenario, '500 buses x 100 events') +}) + +test('1 event with 50k parallel handlers', { timeout: 30_000 }, async () => { + const result = await runPerfSingleEventManyFixedHandlers(nodePerfInput) + assert.equal(result.scenario, '1 event x 50k parallel handlers') +}) + +test('50k events with 50k one-off handlers on a single bus', { timeout: 30_000 }, async () => { + const result = await runPerfOnOffChurn(nodePerfInput) + assert.equal(result.scenario, '50k one-off handlers over 50k events') +}) + +test('worst-case: forwarding + queue-jump + timeouts + cancellation at scale', { timeout: 60_000 }, async () => { + const result = await runPerfWorstCase(nodePerfInput) + assert.equal(result.scenario, 'worst-case forwarding + timeouts') +}) + +test('cleanup equivalence: destroy() vs out-of-scope collection', { timeout: 60_000 }, async () => { + const result = await runCleanupEquivalence(nodePerfInput) + assert.equal(result.scenario, 'cleanup destroy vs scope equivalence') + assert.equal(result.equivalent, true) +}) diff --git a/bubus-ts/tests/eventbus_retry_integration.test.ts b/bubus-ts/tests/eventbus_retry_integration.test.ts new file mode 100644 index 0000000..589a3ca --- /dev/null +++ b/bubus-ts/tests/eventbus_retry_integration.test.ts @@ -0,0 +1,281 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus, retry, clearSemaphoreRegistry } from 
'../src/index.js' + +const delay = (ms: number): Promise => new Promise((resolve) => setTimeout(resolve, ms)) + +class NetworkError extends Error { + constructor(message: string = 'network error') { + super(message) + this.name = 'NetworkError' + } +} + +class ValidationError extends Error { + constructor(message: string = 'validation error') { + super(message) + this.name = 'ValidationError' + } +} + +// ─── Integration with EventBus ─────────────────────────────────────────────── + +test('retry: works as event bus handler wrapper (inline HOF)', async () => { + const bus = new EventBus('RetryBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + let calls = 0 + bus.on( + TestEvent, + retry({ max_attempts: 3 })(async (_event) => { + calls++ + if (calls < 3) throw new Error(`handler fail ${calls}`) + return 'handler ok' + }) + ) + + const event = bus.emit(TestEvent({})) + await event.done() + + assert.equal(calls, 3) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'handler ok') +}) + +test('retry: bus handler with retry_on_errors only retries matching errors (inline HOF)', async () => { + const bus = new EventBus('RetryFilterBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + let calls = 0 + bus.on( + TestEvent, + retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async (_event) => { + calls++ + throw new ValidationError() + }) + ) + + const event = bus.emit(TestEvent({})) + await event.done() + + assert.equal(calls, 1) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') +}) + +test('retry: @retry() decorated method works with bus.on via bind', async () => { + const bus = new EventBus('DecoratorBus', { event_timeout: null }) + const TestEvent = BaseEvent.extend('TestEvent', {}) + + class Handler { + calls = 0 + + @retry({ max_attempts: 3 }) + async 
onTest(_event: InstanceType): Promise { + this.calls++ + if (this.calls < 3) throw new Error('handler fail') + return 'handler ok' + } + } + + const handler = new Handler() + bus.on(TestEvent, handler.onTest.bind(handler)) + + const event = bus.emit(TestEvent({})) + await event.done() + assert.equal(handler.calls, 3) + const result = Array.from(event.event_results.values())[0] + assert.equal(result.result, 'handler ok') +}) + +// ─── @retry() decorator + bus.on via .bind(this) β€” all three scopes ───────── + +test('retry: @retry(scope=class) + bus.on via .bind β€” serializes across instances', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('ScopeClassBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeClassEvent', {}) + + let active = 0 + let max_active = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'class', semaphore_limit: 1, semaphore_name: 'on_SomeEvent' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.emit(SomeEvent({})) + await event.done() + assert.equal(max_active, 1, 'class scope should serialize across instances') +}) + +test('retry: @retry(scope=instance) + bus.on via .bind β€” isolates per instance', async () => { + const bus = new EventBus('ScopeInstanceBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeInstanceEvent', {}) + + let active = 0 + let max_active = 0 + let total_calls = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'instance', semaphore_limit: 1, semaphore_name: 
'on_SomeEvent_inst' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + total_calls++ + await delay(200) + active-- + return 'ok' + } + } + + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.emit(SomeEvent({})) + await event.done() + + assert.equal(total_calls, 2, 'both handlers should have run') + assert.equal( + max_active, + 2, + `instance scope should allow different instances to run in parallel (got max_active=${max_active}, total_calls=${total_calls})` + ) +}) + +test('retry: @retry(scope=global) + bus.on via .bind β€” all calls share one semaphore', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('ScopeGlobalBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('ScopeGlobalEvent', {}) + + let active = 0 + let max_active = 0 + + class SomeService { + constructor(b: InstanceType) { + b.on(SomeEvent, this.on_SomeEvent.bind(this)) + } + + @retry({ max_attempts: 1, semaphore_scope: 'global', semaphore_limit: 1, semaphore_name: 'on_SomeEvent' }) + async on_SomeEvent(_event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + new SomeService(bus) + await delay(2) + new SomeService(bus) + + const event = bus.emit(SomeEvent({})) + await event.done() + assert.equal(max_active, 1, 'global scope should serialize all calls') +}) + +// ─── HOF pattern: retry({...})(fn).bind(instance) β€” alternative to decorator ─ + +test('retry: HOF retry()(fn).bind(instance) β€” instance scope works when bind is after wrap', async () => { + clearSemaphoreRegistry() + + const bus = new EventBus('HOFBindBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + const SomeEvent = BaseEvent.extend('HOFBindEvent', {}) + + let active = 0 + let max_active = 0 + + const some_instance_a = { name: 'a' } + const some_instance_b = { 
name: 'b' } + + const handler = retry({ + max_attempts: 1, + semaphore_scope: 'instance', + semaphore_limit: 1, + semaphore_name: 'handler', + })(async function (this: any, _event: InstanceType): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + bus.on(SomeEvent, handler.bind(some_instance_a)) + bus.on(SomeEvent, handler.bind(some_instance_b)) + + const event = bus.emit(SomeEvent({})) + await event.done() + assert.equal(max_active, 2, 'bind-after-wrap: different instances should run in parallel') +}) + +// ─── retry wrapping emitβ†’done (TECHNICALLY SUPPORTED, NOT RECOMMENDED) ────── + +test('retry: retry wrapping emitβ†’done retries the full dispatch cycle (discouraged pattern)', async () => { + const bus = new EventBus('RetryEmitBus', { event_timeout: null, event_handler_concurrency: 'parallel' }) + + const TabsEvent = BaseEvent.extend('TabsEvent', {}) + const DOMEvent = BaseEvent.extend('DOMEvent', {}) + const ScreenshotEvent = BaseEvent.extend('ScreenshotEvent', {}) + + let tabs_attempts = 0 + let dom_calls = 0 + let screenshot_calls = 0 + + bus.on(TabsEvent, async (_event) => { + tabs_attempts++ + if (tabs_attempts < 3) throw new Error(`tabs fail attempt ${tabs_attempts}`) + return 'tabs ok' + }) + + bus.on(DOMEvent, async (_event) => { + dom_calls++ + return 'dom ok' + }) + + bus.on(ScreenshotEvent, async (_event) => { + screenshot_calls++ + return 'screenshot ok' + }) + + const [tabs_event, dom_event, screenshot_event] = await Promise.all([ + retry({ max_attempts: 4 })(async () => { + const event = bus.emit(TabsEvent({})) + await event.done() + if (event.event_errors.length) throw event.event_errors[0] + return event + })(), + bus.emit(DOMEvent({})).done(), + bus.emit(ScreenshotEvent({})).done(), + ]) + + assert.equal(tabs_attempts, 3) + assert.equal(tabs_event.event_status, 'completed') + assert.equal(dom_calls, 1) + assert.equal(screenshot_calls, 1) + assert.equal(dom_event.event_status, 
'completed') + assert.equal(screenshot_event.event_status, 'completed') +}) diff --git a/bubus-ts/tests/eventbus_serialization.test.ts b/bubus-ts/tests/eventbus_serialization.test.ts new file mode 100644 index 0000000..56cf058 --- /dev/null +++ b/bubus-ts/tests/eventbus_serialization.test.ts @@ -0,0 +1,114 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../src/index.js' + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('EventBus toJSON/fromJSON roundtrip uses id-keyed structures', async () => { + const bus = new EventBus('SerializableBus', { + id: '018f8e40-1234-7000-8000-000000001234', + max_history_size: 500, + max_history_drop: false, + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + event_handler_completion: 'first', + event_timeout: null, + event_handler_slow_timeout: 12, + event_slow_timeout: 34, + event_handler_detect_file_paths: false, + }) + const SerializableEvent = BaseEvent.extend('SerializableEvent', {}) + + bus.on(SerializableEvent, async () => { + await delay(20) + return 'ok' + }) + + const release_pause = bus.locks._requestRunloopPause() + const pending_event = bus.emit(SerializableEvent({ event_timeout: 11 })) + await Promise.resolve() + + const json = bus.toJSON() + assert.equal(json.id, '018f8e40-1234-7000-8000-000000001234') + assert.equal(json.name, 'SerializableBus') + assert.equal(Object.keys(json.handlers).length, 1) + assert.equal(Object.keys(json.handlers_by_key).length, 1) + assert.equal(Array.isArray(json.handlers_by_key.SerializableEvent), true) + assert.equal(Object.keys(json.event_history).length, 1) + assert.equal((json.event_history[pending_event.event_id] as Record).event_id, pending_event.event_id) + assert.equal(json.pending_event_queue.length, 1) + assert.equal(json.pending_event_queue[0], pending_event.event_id) + + const restored = EventBus.fromJSON(json) + 
assert.equal(restored.id, '018f8e40-1234-7000-8000-000000001234') + assert.equal(restored.name, 'SerializableBus') + assert.equal(restored.event_history.max_history_size, 500) + assert.equal(restored.event_history.max_history_drop, false) + assert.equal(restored.event_concurrency, 'parallel') + assert.equal(restored.event_handler_concurrency, 'parallel') + assert.equal(restored.event_handler_completion, 'first') + assert.equal(restored.event_timeout, null) + assert.equal(restored.event_handler_slow_timeout, 12) + assert.equal(restored.event_slow_timeout, 34) + assert.equal(restored.event_handler_detect_file_paths, false) + assert.equal(restored.handlers.size, 1) + assert.equal(restored.handlers_by_key.get('SerializableEvent')?.length, 1) + assert.equal(restored.event_history.size, 1) + assert.equal(restored.pending_event_queue.length, 1) + assert.equal(restored.pending_event_queue[0].event_id, pending_event.event_id) + assert.equal(restored.runloop_running, false) + + release_pause() + await pending_event.done() +}) + +test('EventBus.fromJSON recreates missing handler entries from event_result metadata', async () => { + const bus = new EventBus('MissingHandlerHydrationBus', { + event_handler_detect_file_paths: false, + }) + const SerializableEvent = BaseEvent.extend('MissingHandlerHydrationEvent', {}) + + bus.on(SerializableEvent, () => 'ok') + const event = bus.emit(SerializableEvent({})) + await event.done() + + const handler_id = Array.from(event.event_results.values())[0].handler_id + const json = bus.toJSON() + json.handlers = {} + json.handlers_by_key = {} + + const restored = EventBus.fromJSON(json) + const restored_event = restored.event_history.get(event.event_id) + assert.ok(restored_event) + assert.ok(restored.handlers.has(handler_id)) + const restored_result = restored_event!.event_results.get(handler_id) + assert.ok(restored_result) + assert.equal(restored_result!.handler, restored.handlers.get(handler_id)) + assert.equal(typeof 
restored_result!.handler.handler, 'function') + assert.equal(await restored_result!.handler.handler(restored_event as BaseEvent), undefined) +}) + +test('EventBus toJSON promotes pending events into event_history snapshot', async () => { + const bus = new EventBus('ModelDumpPendingBus') + const PendingEvent = BaseEvent.extend('ModelDumpPendingEvent', {}) + + bus.on(PendingEvent, async () => { + await delay(10) + return 'ok' + }) + + const release_pause = bus.locks._requestRunloopPause() + const pending = bus.emit(PendingEvent({})) + await Promise.resolve() + + const json = bus.toJSON() + assert.equal(Boolean(json.event_history[pending.event_id]), true) + assert.equal(json.pending_event_queue.includes(pending.event_id), true) + + release_pause() + await pending.done() +}) diff --git a/bubus-ts/tests/eventbus_timeout.test.ts b/bubus-ts/tests/eventbus_timeout.test.ts new file mode 100644 index 0000000..67b5e32 --- /dev/null +++ b/bubus-ts/tests/eventbus_timeout.test.ts @@ -0,0 +1,1439 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { + BaseEvent, + EventBus, + EventHandlerCancelledError, + EventHandlerAbortedError, + EventHandlerTimeoutError, + RetryTimeoutError, + retry, +} from '../src/index.js' + +const TimeoutEvent = BaseEvent.extend('TimeoutEvent', {}) + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +test('event timeout aborts in-flight handler result', async () => { + const bus = new EventBus('TimeoutBus') + + bus.on(TimeoutEvent, async () => { + await delay(50) + return 'slow' + }) + + const event = bus.emit(TimeoutEvent({ event_timeout: 0.02 })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerAbortedError) +}) + +test('event timeout does not relabel pre-existing handler timeout errors', async () => { + const bus = new 
EventBus('TimeoutPreserveHandlerTimeoutBus', { + event_handler_concurrency: 'parallel', + }) + const MixedTimeoutEvent = BaseEvent.extend('MixedTimeoutEvent', {}) + + bus.on( + MixedTimeoutEvent, + retry({ max_attempts: 1, timeout: 0.01 })(async () => { + await delay(50) + return 'handler-timeout' + }) + ) + + bus.on(MixedTimeoutEvent, async () => { + await delay(200) + return 'event-timeout' + }) + + const event = bus.emit(MixedTimeoutEvent({ event_timeout: 0.05 })) + await event.done() + await bus.waitUntilIdle() + + const results = Array.from(event.event_results.values()) + assert.equal(results.length, 2) + assert.ok(results.some((result) => result.error instanceof EventHandlerTimeoutError)) + assert.ok(results.some((result) => result.error instanceof EventHandlerAbortedError)) +}) + +test('handler completes within timeout', async () => { + const bus = new EventBus('TimeoutOkBus') + + bus.on(TimeoutEvent, async () => { + await delay(5) + return 'fast' + }) + + const event = bus.emit(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'fast') +}) + +test('event handler errors expose event_result, cause, and timeout metadata', async () => { + const bus = new EventBus('ErrorMetadataBus') + + const ParentCancelEvent = BaseEvent.extend('ParentCancelEvent', {}) + const PendingChildEvent = BaseEvent.extend('PendingChildEvent', {}) + const ParentAbortEvent = BaseEvent.extend('ParentAbortEvent', {}) + const AbortChildEvent = BaseEvent.extend('AbortChildEvent', {}) + + bus.on(TimeoutEvent, async () => { + await delay(40) + return 'slow' + }) + + bus.on(PendingChildEvent, async () => { + await delay(5) + return 'pending_child' + }) + + let pending_child = null as BaseEvent | null + bus.on(ParentCancelEvent, async (event) => { + pending_child = event.bus?.emit(PendingChildEvent({ event_timeout: 0.5 })) ?? 
null + await delay(80) + }) + + bus.on(AbortChildEvent, async () => { + await delay(120) + return 'abort_child' + }) + + let aborted_child = null as BaseEvent | null + bus.on(ParentAbortEvent, async (event) => { + aborted_child = event.bus?.emit(AbortChildEvent({ event_timeout: 0.5 })) ?? null + await aborted_child?.done() + }) + + const timeout_event = bus.emit(TimeoutEvent({ event_timeout: 0.02 })) + await timeout_event.done() + + const timeout_result = Array.from(timeout_event.event_results.values())[0] + const timeout_error = timeout_result.error as EventHandlerAbortedError + assert.ok(timeout_error.cause instanceof Error) + assert.equal(timeout_error.cause.name, 'EventHandlerTimeoutError') + assert.equal(timeout_error.event_result, timeout_result) + assert.equal(timeout_error.timeout_seconds, timeout_event.event_timeout) + assert.equal(timeout_error.event.event_id, timeout_event.event_id) + assert.equal(timeout_error.event_type, timeout_event.event_type) + assert.equal(timeout_error.handler_name, timeout_result.handler_name) + assert.equal(timeout_error.handler_id, timeout_result.handler_id) + assert.equal(timeout_error.event_timeout, timeout_event.event_timeout) + + const cancel_parent = bus.emit(ParentCancelEvent({ event_timeout: 0.02 })) + await cancel_parent.done() + await bus.waitUntilIdle() + + assert.ok(pending_child, 'pending_child should have been emitted') + const pending_results = Array.from(pending_child!.event_results.values()) + const cancel_parent_result = Array.from(cancel_parent.event_results.values())[0] + const cancel_parent_error = cancel_parent_result.error as EventHandlerAbortedError + const cancel_parent_timeout = cancel_parent_error.cause + assert.ok(cancel_parent_timeout instanceof EventHandlerTimeoutError) + const pending_error_result = pending_results.find((result) => result.error !== undefined) + if ( + pending_error_result?.error instanceof EventHandlerCancelledError || + pending_error_result?.error instanceof 
EventHandlerAbortedError + ) { + const cancelled_error = pending_error_result.error + assert.equal(cancelled_error.cause, cancel_parent_timeout) + assert.equal(cancelled_error.event_result, pending_error_result) + assert.equal(cancelled_error.event.event_id, pending_child!.event_id) + assert.equal(cancelled_error.timeout_seconds, pending_child!.event_timeout) + assert.equal(cancelled_error.event_type, pending_child!.event_type) + assert.equal(cancelled_error.handler_name, pending_error_result.handler_name) + assert.equal(cancelled_error.handler_id, pending_error_result.handler_id) + } else if (pending_error_result?.error instanceof EventHandlerTimeoutError) { + assert.equal(pending_error_result.error, cancel_parent_timeout) + } else { + assert.equal(pending_child!.event_status, 'completed') + } + + const abort_parent = bus.emit(ParentAbortEvent({ event_timeout: 0.05 })) + await abort_parent.done() + await bus.waitUntilIdle() + + assert.ok(aborted_child, 'aborted_child should have been emitted') + const aborted_result = Array.from(aborted_child!.event_results.values()).find( + (result) => result.error instanceof EventHandlerAbortedError + ) + assert.ok(aborted_result) + const aborted_error = aborted_result.error as EventHandlerAbortedError + const abort_parent_result = Array.from(abort_parent.event_results.values())[0] + const abort_parent_error = abort_parent_result.error as EventHandlerAbortedError + const abort_parent_timeout = abort_parent_error.cause + assert.ok(abort_parent_timeout instanceof EventHandlerTimeoutError) + assert.equal(aborted_error.cause, abort_parent_timeout) + assert.equal(aborted_error.event_result, aborted_result) + assert.equal(aborted_error.event.event_id, aborted_child!.event_id) + assert.equal(aborted_error.timeout_seconds, aborted_child!.event_timeout) + assert.equal(aborted_error.event_type, aborted_child!.event_type) + assert.equal(aborted_error.handler_name, aborted_result.handler_name) + assert.equal(aborted_error.handler_id, 
aborted_result.handler_id) +}) + +test('event timeouts abort handlers across concurrency modes', async () => { + const event_modes = ['global-serial', 'bus-serial', 'parallel'] as const + const handler_modes = [ + { label: 'serial', concurrency: 'serial', global_lock: false }, + { label: 'parallel', concurrency: 'parallel', global_lock: false }, + { label: 'serial-global', concurrency: 'serial', global_lock: true }, + ] as const + + for (const event_mode of event_modes) { + for (const handler_mode of handler_modes) { + const bus = new EventBus(`Timeout-${event_mode}-${handler_mode.label}`, { + event_concurrency: event_mode, + event_handler_concurrency: handler_mode.concurrency, + }) + + const handler = handler_mode.global_lock + ? retry({ semaphore_scope: 'global', semaphore_name: 'timeout_handler', semaphore_limit: 1 })(async () => { + await delay(50) + return 'slow' + }) + : async () => { + await delay(50) + return 'slow' + } + + bus.on(TimeoutEvent, handler) + + const event = bus.emit(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error', `Expected timeout-driven error for event=${event_mode} handler=${handler_mode.label}`) + assert.ok( + result.error instanceof EventHandlerAbortedError, + `Expected EventHandlerAbortedError for event=${event_mode} handler=${handler_mode.label}` + ) + + await bus.waitUntilIdle() + } + } +}) + +test('timeout still marks event failed when other handlers finish', async () => { + const bus = new EventBus('TimeoutParallelHandlers', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + const results: string[] = [] + + bus.on(TimeoutEvent, async () => { + await delay(1) + results.push('fast') + return 'fast' + }) + + bus.on(TimeoutEvent, async () => { + await delay(50) + results.push('slow') + return 'slow' + }) + + const event = bus.emit(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + 
const statuses = Array.from(event.event_results.values()).map((result) => result.status) + assert.ok(statuses.includes('completed')) + assert.ok(statuses.includes('error')) + assert.equal(event.event_status, 'completed') + assert.ok(event.event_errors.length > 0) + assert.ok(results.includes('fast')) +}) + +test('event-level timeout marks started parallel handlers as aborted or timed out', async () => { + const bus = new EventBus('TimeoutParallelAbortedOnlyBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + const ParallelAbortOnlyEvent = BaseEvent.extend('ParallelAbortOnlyEvent', {}) + const deferred = () => { + let resolve!: (value: T | PromiseLike) => void + let reject!: (reason?: unknown) => void + const promise = new Promise((resolve_fn, reject_fn) => { + resolve = resolve_fn + reject = reject_fn + }) + return { promise, resolve, reject } + } + + const started_a = deferred() + const started_b = deferred() + const both_started = deferred() + let a_started = false + let b_started = false + + bus.on(ParallelAbortOnlyEvent, async () => { + a_started = true + started_a.resolve() + if (b_started) { + both_started.resolve() + } + await both_started.promise + await delay(200) + return 'a' + }) + + bus.on(ParallelAbortOnlyEvent, async () => { + b_started = true + started_b.resolve() + if (a_started) { + both_started.resolve() + } + await both_started.promise + await delay(200) + return 'b' + }) + + const event = bus.emit(ParallelAbortOnlyEvent({ event_timeout: 0.03 })) + await Promise.all([started_a.promise, started_b.promise]) + both_started.resolve() + await event.done() + + const results = Array.from(event.event_results.values()) + assert.equal(results.length, 2) + assert.ok(results.every((result) => result.status === 'error')) + assert.ok(results.every((result) => result.error instanceof EventHandlerAbortedError || result.error instanceof EventHandlerTimeoutError)) + assert.ok(!results.some((result) => result.error instanceof 
EventHandlerCancelledError)) +}) + +test('slow event warning fires when event exceeds event_slow_timeout', async () => { + const bus = new EventBus('SlowEventWarnBus', { + event_slow_timeout: 0.01, + event_handler_slow_timeout: null, + }) + const warnings: string[] = [] + const original_warn = console.warn + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)) + if (args.length > 0) { + warnings.push(args.map(String).join(' ')) + } + } + + try { + bus.on(TimeoutEvent, async () => { + await delay(25) + return 'ok' + }) + + const event = bus.emit(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + } finally { + console.warn = original_warn + } + + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event processing')), + 'Expected slow event warning' + ) +}) + +test('slow handler warning fires when handler runs long', async () => { + const bus = new EventBus('SlowHandlerWarnBus', { + event_handler_slow_timeout: 0.01, + event_slow_timeout: null, + }) + const warnings: string[] = [] + const original_warn = console.warn + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)) + if (args.length > 0) { + warnings.push(args.map(String).join(' ')) + } + } + + try { + bus.on(TimeoutEvent, async () => { + await delay(25) + return 'ok' + }) + + const event = bus.emit(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + } finally { + console.warn = original_warn + } + + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event handler')), + 'Expected slow handler warning' + ) +}) + +test('slow handler and slow event warnings can both fire', async () => { + const bus = new EventBus('SlowComboWarnBus', { + event_handler_slow_timeout: 0.01, + event_slow_timeout: 0.01, + }) + const warnings: string[] = [] + const original_warn = console.warn + console.warn = (message?: unknown, ...args: unknown[]) => { + warnings.push(String(message)) + if 
(args.length > 0) { + warnings.push(args.map(String).join(' ')) + } + } + + try { + bus.on(TimeoutEvent, async () => { + await delay(25) + return 'ok' + }) + + const event = bus.emit(TimeoutEvent({ event_timeout: 0.5 })) + await event.done() + } finally { + console.warn = original_warn + } + + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event handler')), + 'Expected slow handler warning' + ) + assert.ok( + warnings.some((message) => message.toLowerCase().includes('slow event processing')), + 'Expected slow event warning' + ) +}) + +test('event-level concurrency overrides do not bypass timeout aborts', async () => { + const bus = new EventBus('TimeoutEventOverrideBus', { + event_concurrency: 'global-serial', + event_handler_concurrency: 'serial', + }) + + bus.on( + TimeoutEvent, + retry({ semaphore_scope: 'global', semaphore_name: 'timeout_override_event', semaphore_limit: 1 })(async () => { + await delay(50) + return 'slow' + }) + ) + + const event = bus.emit( + TimeoutEvent({ + event_timeout: 0.01, + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + ) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerAbortedError) +}) + +test('retry-based handler locks do not bypass timeouts', async () => { + const bus = new EventBus('TimeoutHandlerOverrideBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', + }) + + bus.on( + TimeoutEvent, + retry({ semaphore_scope: 'global', semaphore_name: 'timeout_override_handler', semaphore_limit: 1 })(async () => { + await delay(50) + return 'slow' + }) + ) + + bus.on( + TimeoutEvent, + retry({ semaphore_scope: 'global', semaphore_name: 'timeout_override_handler', semaphore_limit: 1 })(async () => { + await delay(1) + return 'fast' + }) + ) + + const event = bus.emit(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + 
const statuses = Array.from(event.event_results.values()).map((result) => result.status) + assert.ok(statuses.includes('error')) +}) + +test('forwarded event timeout aborts apply across buses', async () => { + const bus_a = new EventBus('TimeoutForwardA', { event_concurrency: 'bus-serial' }) + const bus_b = new EventBus('TimeoutForwardB', { event_concurrency: 'bus-serial' }) + + bus_a.on(TimeoutEvent, async (event) => { + bus_b.emit(event) + }) + + bus_b.on(TimeoutEvent, async () => { + await delay(50) + return 'slow' + }) + + const event = bus_a.emit(TimeoutEvent({ event_timeout: 0.01 })) + await event.done() + + const results = Array.from(event.event_results.values()) + const bus_b_result = results.find((result) => result.eventbus_id === bus_b.id) + assert.ok(bus_b_result) + assert.equal(bus_b_result?.status, 'error') + assert.ok(bus_b_result?.error instanceof EventHandlerAbortedError) +}) + +test('queue-jump awaited child timeout aborts still fire across buses', async () => { + const ParentEvent = BaseEvent.extend('TimeoutParentEvent', {}) + const ChildEvent = BaseEvent.extend('TimeoutChildEvent', {}) + + const bus_a = new EventBus('TimeoutQueueJumpA', { event_concurrency: 'global-serial' }) + const bus_b = new EventBus('TimeoutQueueJumpB', { event_concurrency: 'global-serial' }) + + let child_ref = null as InstanceType | null + + bus_b.on(ChildEvent, async () => { + await delay(50) + return 'slow' + }) + + bus_a.on(ParentEvent, async (event) => { + // Use scoped bus emit to set parent tracking (event_parent_id, event_emitted_by_handler_id), + // then also dispatch on bus_b for cross-bus handler execution. + // Without parent tracking, _processEventImmediately can't detect the queue-jump context + // and falls back to eventCompleted(), which deadlocks with global-serial. + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.01 }))! 
+ bus_b.emit(child) + child_ref = child + await child.done() + }) + + const parent = bus_a.emit(ParentEvent({ event_timeout: 0.5 })) + await parent.done() + + assert.ok(child_ref) + const child_results = Array.from(child_ref!.event_results.values()) + const aborted_result = child_results.find((result) => result.error instanceof EventHandlerAbortedError) + assert.ok(aborted_result) +}) + +const STEP1_HANDLER_MODES = [ + { label: 'serial', global_lock: false }, + { label: 'serial-global', global_lock: true }, +] as const + +const get_handler_lock = (bus: EventBus, event: BaseEvent) => { + const lock = event._getHandlerLock(bus.event_handler_concurrency) + if (!lock) { + throw new Error('expected handler lock') + } + return lock +} + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: parent timeout while reacquire waits behind third serial handler is lock-safe [${handler_mode.label}]`, async () => { + const ParentEvent = BaseEvent.extend(`TimeoutContentionParent-${handler_mode.label}`, {}) + const ChildEvent = BaseEvent.extend(`TimeoutContentionChild-${handler_mode.label}`, {}) + + const bus = new EventBus(`TimeoutContentionBus-${handler_mode.label}`, { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + const parent = ParentEvent({ event_timeout: 0.01 }) + const lock = get_handler_lock(bus, parent) + const baseline_in_use = lock.in_use + const withGlobalLock = any>(fn: T): T => + handler_mode.global_lock + ? retry({ semaphore_scope: 'global', semaphore_name: `timeout_contention_${handler_mode.label}`, semaphore_limit: 1 })(fn) + : fn + + bus.on( + ChildEvent, + withGlobalLock(async () => { + await delay(2) + return 'child_done' + }) + ) + + bus.on( + ParentEvent, + withGlobalLock(async (event) => { + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.2, event_handler_concurrency: 'parallel' }))! 
+ await child.done() + return 'parent_main' + }) + ) + + // This handler queues behind parent_main, then holds the serial lock + // while parent_main is trying to reclaim after child.done() completes. + bus.on( + ParentEvent, + withGlobalLock(async () => { + await delay(40) + return 'parent_blocker' + }) + ) + + bus.emit(parent) + await parent.done() + await bus.waitUntilIdle() + + const parent_results = Array.from(parent.event_results.values()) + const aborted_results = parent_results.filter((result) => result.error instanceof EventHandlerAbortedError) + assert.ok(aborted_results.length >= 1, `expected at least one aborted result in ${handler_mode.label}`) + assert.equal(lock.in_use, baseline_in_use) + }) +} + +for (const handler_mode of STEP1_HANDLER_MODES) { + test(`regression: nested queue-jump with timeout cancellation remains lock-safe [${handler_mode.label}]`, async () => { + const ParentEvent = BaseEvent.extend(`NestedPermitParent-${handler_mode.label}`, {}) + const ChildEvent = BaseEvent.extend(`NestedPermitChild-${handler_mode.label}`, {}) + const GrandchildEvent = BaseEvent.extend(`NestedPermitGrandchild-${handler_mode.label}`, {}) + const QueuedSiblingEvent = BaseEvent.extend(`NestedPermitQueuedSibling-${handler_mode.label}`, {}) + const TailEvent = BaseEvent.extend(`NestedPermitTail-${handler_mode.label}`, {}) + + const bus = new EventBus(`NestedPermitBus-${handler_mode.label}`, { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const parent = ParentEvent({ event_timeout: 0.01 }) + const lock = get_handler_lock(bus, parent) + const baseline_in_use = lock.in_use + const withGlobalLock = any>(fn: T): T => + handler_mode.global_lock + ? 
retry({ semaphore_scope: 'global', semaphore_name: `timeout_nested_${handler_mode.label}`, semaphore_limit: 1 })(fn) + : fn + + let queued_sibling_runs = 0 + let tail_runs = 0 + let queued_sibling_ref = null as InstanceType | null + + bus.on( + GrandchildEvent, + withGlobalLock(async () => { + await delay(1) + return 'grandchild_done' + }) + ) + + bus.on( + ChildEvent, + withGlobalLock(async (event) => { + const grandchild = event.bus?.emit(GrandchildEvent({ event_timeout: 0.2 }))! + await grandchild.done() + await delay(40) + return 'child_done' + }) + ) + + bus.on( + QueuedSiblingEvent, + withGlobalLock(async () => { + queued_sibling_runs += 1 + return 'queued_sibling_done' + }) + ) + + bus.on( + ParentEvent, + withGlobalLock(async (event) => { + queued_sibling_ref = event.bus?.emit(QueuedSiblingEvent({ event_timeout: 0.2 }))! + const child = event.bus?.emit(ChildEvent({ event_timeout: 0.02 }))! + await child.done() + await delay(40) + }) + ) + + bus.on( + TailEvent, + withGlobalLock(async () => { + tail_runs += 1 + return 'tail_done' + }) + ) + + const dispatched_parent = bus.emit(ParentEvent({ event_timeout: 0.03 })) + await dispatched_parent.done() + await bus.waitUntilIdle() + + const parent_result = Array.from(dispatched_parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerAbortedError) + + assert.ok(queued_sibling_ref) + assert.equal(queued_sibling_runs, 0) + const queued_sibling_results = Array.from(queued_sibling_ref!.event_results.values()) + assert.ok(queued_sibling_results.some((result) => result.error instanceof EventHandlerCancelledError)) + + assert.equal(lock.in_use, baseline_in_use) + + const tail = bus.emit(TailEvent({ event_timeout: 0.05 })) + const tail_completed = await Promise.race([tail.done().then(() => true), delay(100).then(() => false)]) + assert.equal(tail_completed, true) + assert.equal(tail_runs, 1) + assert.equal(lock.in_use, baseline_in_use) + }) +} + 
+test('parent timeout cancels pending child handler results under serial handler lock', async () => { + const ParentEvent = BaseEvent.extend('TimeoutCancelParentEvent', {}) + const ChildEvent = BaseEvent.extend('TimeoutCancelChildEvent', {}) + + const bus = new EventBus('TimeoutCancelBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + let child_runs = 0 + + bus.on(ChildEvent, async () => { + child_runs += 1 + await delay(30) + return 'first' + }) + + bus.on(ChildEvent, async () => { + child_runs += 1 + await delay(10) + return 'second' + }) + + bus.on(ParentEvent, async (event) => { + event.bus?.emit(ChildEvent({ event_timeout: 0.2 })) + await delay(50) + }) + + const parent = bus.emit(ParentEvent({ event_timeout: 0.01 })) + await parent.done() + await bus.waitUntilIdle() + + const child = parent.event_children[0] + assert.ok(child) + + assert.equal(child_runs, 0) + + const cancelled_results = Array.from(child.event_results.values()).filter((result) => result.error instanceof EventHandlerCancelledError) + assert.ok(cancelled_results.length > 0) +}) + +test('retry timeout cancels pending child handler results', async () => { + const ParentEvent = BaseEvent.extend('RetryTimeoutCancelParentEvent', {}) + const ChildEvent = BaseEvent.extend('RetryTimeoutCancelChildEvent', {}) + + const bus = new EventBus('RetryTimeoutCancelBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + bus.on(ChildEvent, async () => { + await delay(20) + return 'child_done' + }) + + bus.on( + ParentEvent, + retry({ max_attempts: 1, timeout: 0.01 })(async (event) => { + event.bus?.emit(ChildEvent({ event_timeout: 0.2 })) + await delay(50) + return 'parent_done' + }) + ) + + const parent = bus.emit(ParentEvent({ event_timeout: 0.5 })) + await parent.done() + await bus.waitUntilIdle() + + const parent_result = Array.from(parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + 
assert.ok(parent_result.error instanceof EventHandlerTimeoutError) + assert.ok(parent_result.error.cause instanceof RetryTimeoutError) + + const child = parent.event_children[0] + assert.ok(child) + const cancelled_results = Array.from(child.event_results.values()).filter((result) => result.error instanceof EventHandlerCancelledError) + assert.ok(cancelled_results.length > 0) +}) + +test('handler_timeout stops in-flight retries and cancels child events', async () => { + const ParentEvent = BaseEvent.extend('RetryTimeoutHandlerTimeoutParentEvent', {}) + const ChildEvent = BaseEvent.extend('RetryTimeoutHandlerTimeoutChildEvent', {}) + + const bus = new EventBus('RetryTimeoutHandlerTimeoutBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + }) + + let child_started = 0 + bus.on(ChildEvent, async () => { + child_started += 1 + await delay(500) + return 'child_done' + }) + + let child_ref = null as InstanceType | null + let emitted = false + let attempts_started = 0 + + const handler = retry({ max_attempts: 10, timeout: 0.1 })(async (event) => { + attempts_started += 1 + if (!emitted) { + emitted = true + child_ref = event.bus?.emit(ChildEvent({ event_timeout: 2 })) ?? 
null + await delay(10) + } + await delay(200) + return 'parent_attempt_done' + }) + + const handler_entry = bus.on(ParentEvent, handler, { handler_timeout: 0.35 }) + + const parent = bus.emit(ParentEvent({ event_timeout: 2 })) + await parent.done() + await bus.waitUntilIdle() + + const parent_result = parent.event_results.get(handler_entry.id) + assert.ok(parent_result) + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerTimeoutError) + assert.ok(attempts_started >= 2) + assert.ok(attempts_started < 10) + + assert.ok(child_ref) + assert.ok(child_started > 0) + const cancelled_results = Array.from(child_ref!.event_results.values()).filter( + (result) => result.error instanceof EventHandlerCancelledError || result.error instanceof EventHandlerAbortedError + ) + assert.ok(cancelled_results.length > 0) +}) + +test('event_timeout null falls back to bus default', async () => { + const bus = new EventBus('TimeoutDefaultBus', { event_timeout: 0.01 }) + + bus.on(TimeoutEvent, async (_event: BaseEvent) => { + await delay(50) + return 'slow' + }) + + const event = bus.emit(TimeoutEvent({ event_timeout: null })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerAbortedError) +}) + +test('bus default null disables timeouts when event_timeout is null', async () => { + const bus = new EventBus('TimeoutDisabledBus', { event_timeout: null }) + + bus.on(TimeoutEvent, async () => { + await delay(20) + return 'ok' + }) + + const event = bus.emit(TimeoutEvent({ event_timeout: null })) + await event.done() + + const result = Array.from(event.event_results.values())[0] + assert.equal(result.status, 'completed') + assert.equal(result.result, 'ok') +}) + +test('multi-level timeout cascade with mixed cancellations', async () => { + const TopEvent = BaseEvent.extend('TimeoutCascadeTop', {}) + const QueuedChildEvent = 
BaseEvent.extend('TimeoutCascadeQueuedChild', {}) + const AwaitedChildEvent = BaseEvent.extend('TimeoutCascadeAwaitedChild', {}) + const ImmediateGrandchildEvent = BaseEvent.extend('TimeoutCascadeImmediateGrandchild', {}) + const QueuedGrandchildEvent = BaseEvent.extend('TimeoutCascadeQueuedGrandchild', {}) + + const bus = new EventBus('TimeoutCascadeBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + let queued_child = null as InstanceType | null + let awaited_child = null as InstanceType | null + let immediate_grandchild = null as InstanceType | null + let queued_grandchild = null as InstanceType | null + + let queued_child_runs = 0 + let immediate_grandchild_runs = 0 + let queued_grandchild_runs = 0 + + const queued_child_fast = async () => { + queued_child_runs += 1 + await delay(5) + return 'queued_fast' + } + + const queued_child_slow = async () => { + queued_child_runs += 1 + await delay(50) + return 'queued_slow' + } + + const awaited_child_fast = async () => { + await delay(5) + return 'awaited_fast' + } + + const awaited_child_slow = async (event: BaseEvent) => { + queued_grandchild = event.bus?.emit(QueuedGrandchildEvent({ event_timeout: 0.2 }))! + immediate_grandchild = event.bus?.emit(ImmediateGrandchildEvent({ event_timeout: 0.2 }))! 
+ await immediate_grandchild.done() + await delay(100) + return 'awaited_slow' + } + + const immediate_grandchild_slow = async () => { + immediate_grandchild_runs += 1 + await delay(50) + return 'immediate_grandchild_slow' + } + + const immediate_grandchild_fast = async () => { + immediate_grandchild_runs += 1 + await delay(10) + return 'immediate_grandchild_fast' + } + + const queued_grandchild_slow = async () => { + queued_grandchild_runs += 1 + await delay(50) + return 'queued_grandchild_slow' + } + + const queued_grandchild_fast = async () => { + queued_grandchild_runs += 1 + await delay(10) + return 'queued_grandchild_fast' + } + + bus.on(QueuedChildEvent, queued_child_fast) + bus.on(QueuedChildEvent, queued_child_slow) + bus.on(AwaitedChildEvent, awaited_child_fast) + bus.on(AwaitedChildEvent, awaited_child_slow) + bus.on(ImmediateGrandchildEvent, immediate_grandchild_slow) + bus.on(ImmediateGrandchildEvent, immediate_grandchild_fast) + bus.on(QueuedGrandchildEvent, queued_grandchild_slow) + bus.on(QueuedGrandchildEvent, queued_grandchild_fast) + + bus.on(TopEvent, async (event) => { + queued_child = event.bus?.emit(QueuedChildEvent({ event_timeout: 0.2 }))! + awaited_child = event.bus?.emit(AwaitedChildEvent({ event_timeout: 0.03 }))! 
+ await awaited_child.done() + await delay(80) + }) + + const top = bus.emit(TopEvent({ event_timeout: 0.04 })) + await top.done() + await bus.waitUntilIdle() + + const top_result = Array.from(top.event_results.values())[0] + assert.equal(top_result.status, 'error') + assert.ok(top_result.error instanceof EventHandlerAbortedError) + + assert.ok(queued_child) + const queued_results = Array.from(queued_child!.event_results.values()) + assert.equal(queued_child_runs, 0) + assert.ok(queued_results.length >= 2) + for (const result of queued_results) { + assert.equal(result.status, 'error') + assert.ok(result.error instanceof EventHandlerCancelledError) + assert.ok((result.error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError) + } + + assert.ok(awaited_child) + const awaited_results = Array.from(awaited_child!.event_results.values()) + const awaited_completed = awaited_results.filter((result) => result.status === 'completed') + const awaited_aborted = awaited_results.filter((result) => result.error instanceof EventHandlerAbortedError) + assert.equal(awaited_completed.length, 1) + assert.equal(awaited_aborted.length, 1) + + assert.ok(immediate_grandchild) + const immediate_results = Array.from(immediate_grandchild!.event_results.values()) + // With serial handler concurrency (no longer bypassed during queue-jump), + // only the first grandchild handler starts before the awaited child's 30ms timeout fires. + // The second handler is still pending (waiting for lock) β†’ cancelled. + // The first handler was already started β†’ aborted (EventHandlerAbortedError). 
+ assert.equal(immediate_grandchild_runs, 1) + const immediate_aborted = immediate_results.filter((result) => result.error instanceof EventHandlerAbortedError) + assert.equal(immediate_aborted.length, 1) + const immediate_cancelled = immediate_results.filter((result) => result.error instanceof EventHandlerCancelledError) + assert.equal(immediate_cancelled.length, 1) + + assert.ok(queued_grandchild) + const queued_grandchild_results = Array.from(queued_grandchild!.event_results.values()) + assert.equal(queued_grandchild_runs, 0) + const queued_cancelled = queued_grandchild_results.filter((result) => result.error instanceof EventHandlerCancelledError) + assert.ok(queued_cancelled.length >= 2) +}) + +// ============================================================================= +// Three-level timeout cascade (mirrors Python test_eventbus_timeout.py) +// +// This test creates a deep event hierarchy: +// TopEvent (250ms timeout) +// β”œβ”€β”€ ChildEvent (80ms timeout) β€” awaited by top_handler_main +// β”‚ β”œβ”€β”€ GrandchildEvent (35ms timeout) β€” awaited by child_handler +// β”‚ β”‚ └── 5 handlers (parallel): 3 slow (timeout), 2 fast (complete) +// β”‚ └── QueuedGrandchildEvent β€” emitted but NOT awaited, stays in queue +// β”‚ └── 1 handler: never runs, CANCELLED when child_handler times out +// └── SiblingEvent β€” emitted but NOT awaited, stays in queue +// └── 1 handler: never runs, CANCELLED when top_handler_main times out +// +// KEY MECHANIC: When a child event is awaited via event.done() inside a handler, +// it triggers "queue-jumping" via _processEventImmediately (cross-bus). +// Queue-jumped events use yield-and-reacquire: the parent handler's lock is +// temporarily released so child handlers can acquire it normally. This means +// child handlers run SERIALLY on a serial handler bus (respecting concurrency limits). 
+// Non-awaited child events stay in the pending_event_queue and are blocked by +// immediate_processing_stack_depth > 0 (runloop is paused during queue-jump). +// +// TIMEOUT BEHAVIOR: Each handler gets its OWN timeout window starting from when +// that handler begins execution β€” NOT from when the event was dispatched. +// With serial handlers, each timeout starts when the handler acquires the lock. +// +// CANCELLATION CASCADE: When a handler times out, bus._cancelPendingChildProcessing() +// walks the event's children tree and marks any "pending" handler results as +// EventHandlerCancelledError. Only "pending" results are cancelled β€” handlers +// that already started ("started" status) continue running in the background. +// ============================================================================= + +test('three-level timeout cascade with per-level timeouts and cascading cancellation', async () => { + const TopEvent = BaseEvent.extend('Cascade3LTop', {}) + const ChildEvent = BaseEvent.extend('Cascade3LChild', {}) + const GrandchildEvent = BaseEvent.extend('Cascade3LGrandchild', {}) + const QueuedGrandchildEvent = BaseEvent.extend('Cascade3LQueuedGC', {}) + const SiblingEvent = BaseEvent.extend('Cascade3LSibling', {}) + + const bus = new EventBus('Cascade3LevelBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + const execution_log: string[] = [] + let child_ref = null as InstanceType | null + let grandchild_ref = null as InstanceType | null + let queued_grandchild_ref = null as InstanceType | null + let sibling_ref = null as InstanceType | null + + // ── GrandchildEvent handlers ────────────────────────────────────────── + // These run SERIALLY because queue-jumped events respect the serial + // handler lock (yield-and-reacquire). Each handler gets its own 35ms + // timeout window starting from when that handler acquires the lock. 
+ // + // Serial order: a(35ms timeout) β†’ b(sync) β†’ c(35ms timeout) β†’ d(10ms) β†’ e(35ms timeout) + // Total time for all 5: ~35+0+35+10+35 = ~115ms (within child's 150ms timeout) + + const gc_handler_a = async () => { + execution_log.push('gc_a_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_a_end') // should never reach here before assertions + return 'gc_a_done' + } + + const gc_handler_b = () => { + execution_log.push('gc_b_complete') + return 'gc_b_done' + } + + const gc_handler_c = async () => { + execution_log.push('gc_c_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_c_end') // should never reach here before assertions + return 'gc_c_done' + } + + const gc_handler_d = async () => { + execution_log.push('gc_d_start') + await delay(10) // fast enough to complete within 35ms + execution_log.push('gc_d_complete') + return 'gc_d_done' + } + + const gc_handler_e = async () => { + execution_log.push('gc_e_start') + await delay(500) // will be interrupted by 35ms timeout (500ms > total test time) + execution_log.push('gc_e_end') // should never reach here before assertions + return 'gc_e_done' + } + + // ── QueuedGrandchildEvent handler ───────────────────────────────────── + // This event is emitted by child_handler but NOT awaited, so it sits in + // pending_event_queue. When child_handler times out at 80ms, + // bus._cancelPendingChildProcessing walks ChildEvent.event_children and finds + // this event still pending β†’ its handler results are marked as cancelled. 
+ const queued_gc_handler = () => { + execution_log.push('queued_gc_start') // should never reach here + return 'queued_gc_done' + } + + // ── ChildEvent handler ──────────────────────────────────────────────── + // Emits GrandchildEvent (awaited β†’ queue-jump, ~35ms to complete) + // Emits QueuedGrandchildEvent (NOT awaited β†’ stays in queue) + // After grandchild completes, sleeps 300ms β†’ times out at 80ms total + const child_handler = async (event: InstanceType) => { + execution_log.push('child_start') + grandchild_ref = event.bus?.emit(GrandchildEvent({ event_timeout: 0.035 }))! + queued_grandchild_ref = event.bus?.emit(QueuedGrandchildEvent({ event_timeout: 0.5 }))! + // Queue-jump: processes GrandchildEvent immediately via yield-and-reacquire. + // All 5 GC handlers run serially. Completes in ~115ms (within 150ms child timeout). + await grandchild_ref.done() + execution_log.push('child_after_grandchild') + await delay(300) // will be interrupted: child started at ~t=0, timeout at 150ms + execution_log.push('child_end') // should never reach here + return 'child_done' + } + + // ── SiblingEvent handler ────────────────────────────────────────────── + // This event is emitted by top_handler_main but NOT awaited. Stays in + // pending_event_queue until top_handler_main times out at 250ms β†’ + // cancelled by bus._cancelPendingChildProcessing. + const sibling_handler = () => { + execution_log.push('sibling_start') // should never reach here + return 'sibling_done' + } + + // ── TopEvent handlers ───────────────────────────────────────────────── + // These run SERIALLY (per-event handler lock) because TopEvent is + // processed by the normal runloop (not queue-jumped). top_handler_fast + // goes first, completes quickly, then top_handler_main starts. 
+ + const top_handler_fast = async () => { + execution_log.push('top_fast_start') + await delay(2) + execution_log.push('top_fast_complete') + return 'top_fast_done' + } + + const top_handler_main = async (event: InstanceType) => { + execution_log.push('top_main_start') + child_ref = event.bus?.emit(ChildEvent({ event_timeout: 0.15 }))! + sibling_ref = event.bus?.emit(SiblingEvent({ event_timeout: 0.5 }))! + // Queue-jump: processes ChildEvent immediately (which in turn queue-jumps + // GrandchildEvent). This entire subtree resolves in ~80ms (child timeout). + await child_ref.done() + execution_log.push('top_main_after_child') + await delay(300) // will be interrupted: top_handler_main started at ~t=2, timeout at 250ms + execution_log.push('top_main_end') // should never reach here + return 'top_main_done' + } + + // Register handlers (registration order = execution order for serial) + bus.on(TopEvent, top_handler_fast) + bus.on(TopEvent, top_handler_main) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, gc_handler_a) + bus.on(GrandchildEvent, gc_handler_b) + bus.on(GrandchildEvent, gc_handler_c) + bus.on(GrandchildEvent, gc_handler_d) + bus.on(GrandchildEvent, gc_handler_e) + bus.on(QueuedGrandchildEvent, queued_gc_handler) + bus.on(SiblingEvent, sibling_handler) + + // ── Dispatch and wait ───────────────────────────────────────────────── + const top = bus.emit(TopEvent({ event_timeout: 0.25 })) + await top.done() + await bus.waitUntilIdle() + + // ═══════════════════════════════════════════════════════════════════════ + // ASSERTIONS + // ═══════════════════════════════════════════════════════════════════════ + + // ── TopEvent: 2 handler results (1 completed, 1 aborted by event timeout) ────────── + assert.equal(top.event_status, 'completed') + assert.ok(top.event_errors.length >= 1, 'TopEvent should have at least 1 error') + + const top_results = Array.from(top.event_results.values()) + assert.equal(top_results.length, 2, 'TopEvent should have 2 
handler results') + + const top_fast_result = top_results.find((r) => r.handler_name === 'top_handler_fast') + assert.ok(top_fast_result, 'top_handler_fast result should exist') + assert.equal(top_fast_result!.status, 'completed') + assert.equal(top_fast_result!.result, 'top_fast_done') + + const top_main_result = top_results.find((r) => r.handler_name === 'top_handler_main') + assert.ok(top_main_result, 'top_handler_main result should exist') + assert.equal(top_main_result!.status, 'error') + assert.ok(top_main_result!.error instanceof EventHandlerAbortedError, 'top_handler_main should have been aborted by event timeout') + + // ── ChildEvent: 1 handler result (aborted by event timeout at ~150ms) ──────────────── + assert.ok(child_ref, 'ChildEvent should have been emitted') + assert.equal(child_ref!.event_status, 'completed') + + const child_results = Array.from(child_ref!.event_results.values()) + assert.equal(child_results.length, 1, 'ChildEvent should have 1 handler result') + assert.equal(child_results[0].handler_name, 'child_handler') + assert.equal(child_results[0].status, 'error') + assert.ok(child_results[0].error instanceof EventHandlerAbortedError, 'child_handler should have been aborted by event timeout') + + // ── GrandchildEvent: 5 handler results (event hard-timeout after first handler starts) ── + assert.ok(grandchild_ref, 'GrandchildEvent should have been emitted') + assert.equal(grandchild_ref!.event_status, 'completed') + + const gc_results = Array.from(grandchild_ref!.event_results.values()) + assert.equal(gc_results.length, 5, 'GrandchildEvent should have 5 handler results') + + const gc_aborted = gc_results.filter((result) => result.error instanceof EventHandlerAbortedError) + assert.equal(gc_aborted.length, 1, 'GrandchildEvent should have exactly one started handler aborted by event timeout') + const gc_aborted_handler = gc_aborted[0]!.handler_name + assert.equal(gc_aborted_handler, 'gc_handler_a') + + const gc_cancelled_or_aborted = 
gc_results.filter( + (result) => result.error instanceof EventHandlerCancelledError || result.error instanceof EventHandlerAbortedError + ) + assert.equal(gc_cancelled_or_aborted.length, 5, 'Grandchild handlers should all be cancelled or aborted by hard event timeout') + + // ── QueuedGrandchildEvent: CANCELLED by child_handler timeout ─────── + // This event was emitted but never awaited. It sat in pending_event_queue + // until child_handler timed out, which triggered bus._cancelPendingChildProcessing + // to walk ChildEvent.event_children and cancel all pending handlers. + assert.ok(queued_grandchild_ref, 'QueuedGrandchildEvent should have been emitted') + assert.equal(queued_grandchild_ref!.event_status, 'completed') + + const queued_gc_results = Array.from(queued_grandchild_ref!.event_results.values()) + assert.equal(queued_gc_results.length, 1, 'QueuedGC should have 1 handler result') + assert.equal(queued_gc_results[0].status, 'error') + assert.ok( + queued_gc_results[0].error instanceof EventHandlerCancelledError, + 'QueuedGC handler should be EventHandlerCancelledError (not timeout β€” it never ran)' + ) + // Verify the cancellation error chain: CancelledError.cause β†’ TimeoutError + assert.ok( + (queued_gc_results[0].error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError, + "QueuedGC cancellation should reference the child_handler's timeout as cause" + ) + + // ── SiblingEvent: CANCELLED by top_handler_main timeout ───────────── + // Same pattern: emitted but never awaited, stays in queue, cancelled when + // top_handler_main times out and bus._cancelPendingChildProcessing runs. 
+ assert.ok(sibling_ref, 'SiblingEvent should have been emitted') + assert.equal(sibling_ref!.event_status, 'completed') + + const sibling_results = Array.from(sibling_ref!.event_results.values()) + assert.equal(sibling_results.length, 1, 'SiblingEvent should have 1 handler result') + assert.equal(sibling_results[0].status, 'error') + assert.ok(sibling_results[0].error instanceof EventHandlerCancelledError, 'SiblingEvent handler should be EventHandlerCancelledError') + assert.ok( + (sibling_results[0].error as EventHandlerCancelledError).cause instanceof EventHandlerTimeoutError, + "SiblingEvent cancellation should reference top_handler_main's timeout as cause" + ) + + // ── Execution log: verify what ran and what didn't ────────────────── + // These handlers started AND completed: + assert.ok(execution_log.includes('top_fast_start'), 'top_fast should have started') + assert.ok(execution_log.includes('top_fast_complete'), 'top_fast should have completed') + assert.ok(!execution_log.includes('gc_b_complete'), 'gc_b should not have started before hard event timeout') + assert.ok(!execution_log.includes('gc_d_start'), 'gc_d should not have started before hard event timeout') + assert.ok(!execution_log.includes('gc_d_complete'), 'gc_d should not have completed before hard event timeout') + + // These handlers started but were interrupted by their own timeout: + assert.ok(execution_log.includes('gc_a_start'), 'gc_a should have started') + assert.ok(!execution_log.includes('gc_a_end'), 'gc_a should NOT have finished (timed out)') + assert.ok(!execution_log.includes('gc_c_start'), 'gc_c should not have started before hard event timeout') + assert.ok(!execution_log.includes('gc_c_end'), 'gc_c should never have finished') + assert.ok(!execution_log.includes('gc_e_start'), 'gc_e should not have started before hard event timeout') + assert.ok(!execution_log.includes('gc_e_end'), 'gc_e should never have finished') + + // These handlers started and progressed, then parent 
timeout interrupted: + assert.ok(execution_log.includes('top_main_start'), 'top_main should have started') + assert.ok(execution_log.includes('child_start'), 'child should have started') + assert.ok(execution_log.includes('child_after_grandchild'), 'child should have continued after grandchild completed') + assert.ok(execution_log.includes('top_main_after_child'), 'top_main should have continued after child completed') + assert.ok(!execution_log.includes('child_end'), 'child should NOT have finished (timed out)') + assert.ok(!execution_log.includes('top_main_end'), 'top_main should NOT have finished (timed out)') + + // These handlers never ran at all (cancelled before starting): + assert.ok(!execution_log.includes('queued_gc_start'), 'queued_gc should never have started') + assert.ok(!execution_log.includes('sibling_start'), 'sibling should never have started') + + // ── Parent-child tree structure ───────────────────────────────────── + assert.ok( + top.event_children.some((c) => c.event_id === child_ref!.event_id), + 'ChildEvent should be in TopEvent.event_children' + ) + assert.ok( + top.event_children.some((c) => c.event_id === sibling_ref!.event_id), + 'SiblingEvent should be in TopEvent.event_children' + ) + assert.ok( + child_ref!.event_children.some((c) => c.event_id === grandchild_ref!.event_id), + 'GrandchildEvent should be in ChildEvent.event_children' + ) + assert.ok( + child_ref!.event_children.some((c) => c.event_id === queued_grandchild_ref!.event_id), + 'QueuedGrandchildEvent should be in ChildEvent.event_children' + ) + + // ── Timing invariants ────────────────────────────────────────────── + // All events should have completion timestamps + for (const evt of [top, child_ref!, grandchild_ref!, queued_grandchild_ref!, sibling_ref!]) { + assert.ok(evt.event_completed_at, `${evt.event_type} should have event_completed_at`) + } + // All handler results should have started_at and completed_at + for (const result of top_results) { + 
assert.ok(result.started_at, `${result.handler_name} should have started_at`) + assert.ok(result.completed_at, `${result.handler_name} should have completed_at`) + } + for (const result of gc_results) { + if (!(result.error instanceof EventHandlerCancelledError)) { + assert.ok(result.started_at, `${result.handler_name} should have started_at`) + } + assert.ok(result.completed_at, `${result.handler_name} should have completed_at`) + } +}) + +// ============================================================================= +// Verify the timeoutβ†’cancellation error chain is intact at every level. +// When a parent handler times out and cancels a child's pending handlers, +// the EventHandlerCancelledError.cause must reference the specific +// EventHandlerTimeoutError that caused the cascade. This test creates a +// 2-level chain where each level's cancellation error can be inspected. +// ============================================================================= + +test('cancellation error chain preserves cause references through hierarchy', async () => { + const OuterEvent = BaseEvent.extend('ErrorChainOuter', {}) + const InnerEvent = BaseEvent.extend('ErrorChainInner', {}) + const DeepEvent = BaseEvent.extend('ErrorChainDeep', {}) + + const bus = new EventBus('ErrorChainBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + + let inner_ref = null as InstanceType | null + let deep_ref = null as InstanceType | null + + // DeepEvent handler: sleeps long, will be still pending when inner times out + // Because DeepEvent is emitted but NOT awaited, it stays in the queue. + const deep_handler = async () => { + await delay(200) + return 'deep_done' + } + + // InnerEvent handler: emits DeepEvent (not awaited), then sleeps long β†’ times out + const inner_handler = async (event: InstanceType) => { + deep_ref = event.bus?.emit(DeepEvent({ event_timeout: 0.5 }))! 
+ await delay(200) // interrupted by inner timeout + return 'inner_done' + } + + // OuterEvent handler: emits InnerEvent (awaited), then sleeps long β†’ times out + const outer_handler = async (event: InstanceType) => { + inner_ref = event.bus?.emit(InnerEvent({ event_timeout: 0.04 }))! + await inner_ref.done() + await delay(200) // interrupted by outer timeout + return 'outer_done' + } + + bus.on(OuterEvent, outer_handler) + bus.on(InnerEvent, inner_handler) + bus.on(DeepEvent, deep_handler) + + const outer = bus.emit(OuterEvent({ event_timeout: 0.15 })) + await outer.done() + await bus.waitUntilIdle() + + // Outer handler was aborted by event-level timeout + const outer_result = Array.from(outer.event_results.values())[0] + assert.equal(outer_result.status, 'error') + assert.ok(outer_result.error instanceof EventHandlerAbortedError) + // Inner handler was aborted by its own event-level timeout (40ms), not outer's. + assert.ok(inner_ref) + const inner_result = Array.from(inner_ref!.event_results.values())[0] + assert.equal(inner_result.status, 'error') + assert.ok(inner_result.error instanceof EventHandlerAbortedError) + const inner_abort = inner_result.error as EventHandlerAbortedError + + // Inner's abort is from InnerEvent's own event_timeout (40ms), not inherited from outer. + assert.ok(inner_abort.message.includes('event timeout'), 'Inner abort should indicate event timeout') + + // DeepEvent was cancelled when inner_handler timed out. + // The cancellation error should reference inner_handler's timeout (not outer's). 
+ assert.ok(deep_ref) + const deep_result = Array.from(deep_ref!.event_results.values())[0] + assert.equal(deep_result.status, 'error') + assert.ok( + deep_result.error instanceof EventHandlerCancelledError, + 'DeepEvent handler should be cancelled, not timed out (it never started)' + ) + const deep_cancel = deep_result.error as EventHandlerCancelledError + assert.ok(deep_cancel.cause instanceof EventHandlerTimeoutError, 'Cancellation should reference parent timeout') + // The cause should be the INNER handler's timeout, because that's + // the handler whose bus._cancelPendingChildProcessing actually cancelled DeepEvent. + assert.ok( + deep_cancel.cause.message.includes('inner_handler') || deep_cancel.cause.message.includes('child_handler'), + 'cause should reference the handler that directly caused cancellation' + ) +}) + +// ============================================================================= +// When a parent has a timeout but a child has event_timeout: null (no timeout), +// the child's handlers run indefinitely on their own β€” but if the PARENT times +// out, bus._cancelPendingChildProcessing still cancels any pending child handlers. +// This tests that cancellation works across timeout/no-timeout boundaries. 
+// ============================================================================= + +test('parent timeout cancels children that have no timeout of their own', async () => { + const ParentEvent = BaseEvent.extend('TimeoutBoundaryParent', {}) + const NoTimeoutChild = BaseEvent.extend('TimeoutBoundaryChild', {}) + + const bus = new EventBus('TimeoutBoundaryBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + event_timeout: null, // no bus-level default + }) + + let child_ref = null as InstanceType | null + let child_handler_ran = false + + // Child handler: would run forever but should be cancelled + const child_slow_handler = async () => { + child_handler_ran = true + await delay(500) + return 'child_done' + } + + // Parent handler: emits child (not awaited), then sleeps β†’ parent times out + const parent_handler = async (event: InstanceType) => { + // event_timeout: null means the child has no timeout of its own. + // It would run forever if the parent didn't cancel it. + child_ref = event.bus?.emit(NoTimeoutChild({ event_timeout: null }))! 
+ await delay(200) + return 'parent_done' + } + + bus.on(ParentEvent, parent_handler) + bus.on(NoTimeoutChild, child_slow_handler) + + const parent = bus.emit(ParentEvent({ event_timeout: 0.03 })) + await parent.done() + await bus.waitUntilIdle() + + // Parent timed out + const parent_result = Array.from(parent.event_results.values())[0] + assert.equal(parent_result.status, 'error') + assert.ok(parent_result.error instanceof EventHandlerAbortedError) + + // Child should exist and be cancelled (it was in the queue, never started) + assert.ok(child_ref, 'Child event should have been emitted') + assert.equal(child_ref!.event_status, 'completed') + assert.equal(child_handler_ran, false, 'Child handler should never have started') + + const child_results = Array.from(child_ref!.event_results.values()) + assert.equal(child_results.length, 1) + assert.ok( + child_results[0].error instanceof EventHandlerCancelledError, + 'Child handler should be cancelled by parent timeout, even though it has no timeout' + ) +}) diff --git a/bubus-ts/tests/events_suck.test.ts b/bubus-ts/tests/events_suck.test.ts new file mode 100644 index 0000000..a5d4868 --- /dev/null +++ b/bubus-ts/tests/events_suck.test.ts @@ -0,0 +1,99 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' +import { z } from 'zod' + +import { BaseEvent, EventBus, events_suck } from '../src/index.js' + +test('events_suck.wrap builds imperative methods for emitting events', async () => { + const bus = new EventBus('EventsSuckBus') + const CreateEvent = BaseEvent.extend('EventsSuckCreateEvent', { + name: z.string(), + age: z.number(), + nickname: z.string().nullable().optional(), + event_result_type: z.string(), + }) + const UpdateEvent = BaseEvent.extend('EventsSuckUpdateEvent', { + id: z.string(), + age: z.number().nullable().optional(), + source: z.string().nullable().optional(), + event_result_type: z.boolean(), + }) + + bus.on(CreateEvent, async (event) => { + assert.equal(event.nickname, 
'bobby') + return `user-${event.age}` + }) + + bus.on(UpdateEvent, async (event) => { + assert.equal(event.source, 'sync') + return event.age === 46 + }) + + const SDKClient = events_suck.wrap('SDKClient', { + create: CreateEvent, + update: UpdateEvent, + }) + const client = new SDKClient(bus) + + const user_id = await client.create({ name: 'bob', age: 45 }, { nickname: 'bobby' }) + const updated = await client.update({ id: user_id ?? 'fallback-id', age: 46 }, { source: 'sync' }) + + assert.equal(user_id, 'user-45') + assert.equal(updated, true) +}) + +test('events_suck.make_events works with inline handlers', async () => { + class LegacyService { + calls: Array<[string, Record]> = [] + + create(id: string | null, name: string, age: number): string { + this.calls.push(['create', { id, name, age }]) + return `${name}-${age}` + } + + update(id: string, name?: string | null, age?: number | null, extra?: Record): boolean { + this.calls.push(['update', { id, name, age, ...(extra ?? {}) }]) + return true + } + } + + const ping_user = (user_id: string): string => `pong:${user_id}` + const service = new LegacyService() + + const create_from_payload = (payload: { id: string | null; name: string; age: number }): string => { + return service.create(payload.id, payload.name, payload.age) + } + + const update_from_payload = (payload: { id: string; name?: string | null; age?: number | null } & Record): boolean => { + const { id, name, age, ...extra } = payload + return service.update(id, name, age, extra) + } + + const ping_from_payload = (payload: { user_id: string }): string => ping_user(payload.user_id) + + const events = events_suck.make_events({ + FooBarAPICreateEvent: create_from_payload, + FooBarAPIUpdateEvent: update_from_payload, + FooBarAPIPingEvent: ping_from_payload, + }) + + const bus = new EventBus('LegacyBus') + bus.on(events.FooBarAPICreateEvent, (event) => create_from_payload(event)) + bus.on(events.FooBarAPIUpdateEvent, (event) => update_from_payload(event)) + 
bus.on(events.FooBarAPIPingEvent, (event) => ping_from_payload(event)) + + const created = await bus.emit(events.FooBarAPICreateEvent({ id: null, name: 'bob', age: 45 })).first() + assert.ok(created !== undefined) + const updated = await bus.emit(events.FooBarAPIUpdateEvent({ id: created, age: 46, source: 'sync' })).first() + const user_id = 'e692b6cb-ae63-773b-8557-3218f7ce5ced' + const pong = await bus.emit(events.FooBarAPIPingEvent({ user_id })).first() + + assert.equal(created, 'bob-45') + assert.equal(updated, true) + assert.equal(pong, `pong:${user_id}`) + assert.deepEqual(service.calls[0], ['create', { id: null, name: 'bob', age: 45 }]) + assert.equal(service.calls[1]?.[0], 'update') + assert.equal(service.calls[1]?.[1].id, 'bob-45') + assert.equal(service.calls[1]?.[1].age, 46) + assert.equal(service.calls[1]?.[1].source, 'sync') +}) diff --git a/bubus-ts/tests/lock_manager.test.ts b/bubus-ts/tests/lock_manager.test.ts new file mode 100644 index 0000000..fe02e25 --- /dev/null +++ b/bubus-ts/tests/lock_manager.test.ts @@ -0,0 +1,202 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { AsyncLock, HandlerLock, LockManager, runWithLock } from '../src/lock_manager.js' + +const delay = (ms: number): Promise => new Promise((resolve) => setTimeout(resolve, ms)) + +test('AsyncLock(1): releasing to a queued waiter does not allow a new acquire to slip in', async () => { + const lock = new AsyncLock(1) + + await lock.acquire() // Initial holder. + + const waiter = lock.acquire() + assert.equal(lock.waiters.length, 1) + + // Transfer the permit to the waiter. + lock.release() + + // A new acquire in the same tick must wait behind the queued waiter. 
+ let contender_acquired = false + const contender = lock.acquire().then(() => { + contender_acquired = true + }) + assert.equal(lock.waiters.length, 1) + + await waiter + await Promise.resolve() + assert.equal(contender_acquired, false) + lock.release() // waiter release + await contender + lock.release() // contender release + + assert.equal(lock.in_use, 0) +}) + +test('AsyncLock(Infinity): acquire/release is a no-op bypass', async () => { + const lock = new AsyncLock(Infinity) + await Promise.all([lock.acquire(), lock.acquire(), lock.acquire()]) + assert.equal(lock.in_use, 0) + assert.equal(lock.waiters.length, 0) + lock.release() + assert.equal(lock.in_use, 0) + assert.equal(lock.waiters.length, 0) +}) + +test('AsyncLock(size>1): enforces semaphore concurrency limit', async () => { + const lock = new AsyncLock(2) + let active = 0 + let max_active = 0 + + await Promise.all( + Array.from({ length: 6 }, async () => { + await lock.acquire() + active += 1 + max_active = Math.max(max_active, active) + await delay(5) + active -= 1 + lock.release() + }) + ) + + assert.equal(max_active, 2) + assert.equal(lock.in_use, 0) + assert.equal(lock.waiters.length, 0) +}) + +test('runWithLock(null): executes function directly and preserves errors', async () => { + let called = 0 + const value = await runWithLock(null, async () => { + called += 1 + return 'ok' + }) + assert.equal(value, 'ok') + assert.equal(called, 1) + + await assert.rejects( + runWithLock(null, async () => { + throw new Error('boom') + }), + /boom/ + ) +}) + +test('HandlerLock.reclaimHandlerLockIfRunning: releases reclaimed permit if handler exits while waiting', async () => { + const lock = new AsyncLock(1) + await lock.acquire() + const handler_lock = new HandlerLock(lock) + + assert.equal(handler_lock.yieldHandlerLockForChildRun(), true) + await lock.acquire() // Occupy lock so reclaim waits. 
+ + const reclaim_promise = handler_lock.reclaimHandlerLockIfRunning() + await Promise.resolve() + assert.equal(lock.waiters.length, 1) + + handler_lock.exitHandlerRun() // Handler exits while reclaim is pending. + lock.release() // Let pending reclaim continue. + + const reclaimed = await reclaim_promise + assert.equal(reclaimed, false) + assert.equal(lock.in_use, 0) + assert.equal(lock.waiters.length, 0) +}) + +test('HandlerLock.runQueueJump: yields permit during child run and reacquires before returning', async () => { + const lock = new AsyncLock(1) + await lock.acquire() + const handler_lock = new HandlerLock(lock) + + let contender_acquired = false + let release_contender: (() => void) | null = null + const contender_can_release = new Promise((resolve) => { + release_contender = resolve + }) + const contender = (async () => { + await lock.acquire() + contender_acquired = true + await contender_can_release + lock.release() + })() + await Promise.resolve() + assert.equal(lock.waiters.length, 1) + + const result = await handler_lock.runQueueJump(async () => { + while (!contender_acquired) { + await Promise.resolve() + } + release_contender?.() + return 'child-ok' + }) + + assert.equal(result, 'child-ok') + assert.equal(lock.in_use, 1, 'parent handler lock should be reacquired on return') + handler_lock.exitHandlerRun() + await contender + assert.equal(lock.in_use, 0) +}) + +test('LockManager pause is re-entrant and resumes waiters only at depth zero', async () => { + let idle = true + const bus = { + isIdleAndQueueEmpty: () => idle, + event_concurrency: 'bus-serial' as const, + _lock_for_event_global_serial: new AsyncLock(1), + } + const locks = new LockManager(bus) + + const release_a = locks._requestRunloopPause() + const release_b = locks._requestRunloopPause() + assert.equal(locks._isPaused(), true) + + let resumed = false + const resumed_promise = locks._waitUntilRunloopResumed().then(() => { + resumed = true + }) + await Promise.resolve() + 
assert.equal(resumed, false) + + release_a() + await Promise.resolve() + assert.equal(resumed, false) + assert.equal(locks._isPaused(), true) + + release_b() + await resumed_promise + assert.equal(resumed, true) + assert.equal(locks._isPaused(), false) + + release_a() + release_b() + idle = false +}) + +test('LockManager waitForIdle uses two-check stability and supports timeout', async () => { + let idle = false + const bus = { + isIdleAndQueueEmpty: () => idle, + event_concurrency: 'bus-serial' as const, + _lock_for_event_global_serial: new AsyncLock(1), + } + const timeout_locks = new LockManager(bus) + + // Busy bus should timeout. + const timeout_false = await timeout_locks.waitForIdle(0.01) + assert.equal(timeout_false, false) + + const locks = new LockManager(bus, { auto_schedule_idle_checks: false }) + + let resolved: boolean | null = null + const idle_promise = locks.waitForIdle(0.2).then((value) => { + resolved = value + }) + + idle = true + locks._notifyIdleListeners() // first stable-idle tick; should not resolve synchronously + assert.equal(resolved, null) + await delay(0) + assert.equal(resolved, null) + locks._notifyIdleListeners() // second stable-idle tick; now resolve + await idle_promise + assert.equal(resolved, true) +}) diff --git a/bubus-ts/tests/middleware.test.ts b/bubus-ts/tests/middleware.test.ts new file mode 100644 index 0000000..55b8d4d --- /dev/null +++ b/bubus-ts/tests/middleware.test.ts @@ -0,0 +1,574 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { + BaseEvent, + EventBus, + EventHandlerAbortedError, + EventHandlerCancelledError, + EventHandlerResultSchemaError, + EventHandlerTimeoutError, + type EventBusMiddleware, +} from '../src/index.js' + +const delay = (ms: number): Promise => + new Promise((resolve) => { + setTimeout(resolve, ms) + }) + +const flushHooks = async (ticks: number = 4): Promise => { + for (let i = 0; i < ticks; i += 1) { + await Promise.resolve() + } +} + +type HookRecord = 
{ + middleware: string + hook: 'event' | 'result' | 'handler' + bus_id: string + status?: 'pending' | 'started' | 'completed' + handler_id?: string + registered?: boolean +} + +type HandlerChangeRecord = { + handler_id: string + event_pattern: string + registered: boolean + eventbus_id: string +} + +class RecordingMiddleware implements EventBusMiddleware { + name: string + records: HookRecord[] + sequence: string[] | null + + constructor(name: string, sequence: string[] | null = null) { + this.name = name + this.records = [] + this.sequence = sequence + } + + async onEventChange(eventbus: EventBus, _event: BaseEvent, status: 'pending' | 'started' | 'completed'): Promise { + this.records.push({ middleware: this.name, hook: 'event', status, bus_id: eventbus.id }) + this.sequence?.push(`${this.name}:event:${status}`) + } + + async onEventResultChange( + eventbus: EventBus, + _event: BaseEvent, + event_result: { handler_id: string }, + status: 'pending' | 'started' | 'completed' + ): Promise { + this.records.push({ middleware: this.name, hook: 'result', status, handler_id: event_result.handler_id, bus_id: eventbus.id }) + this.sequence?.push(`${this.name}:result:${status}`) + } + + async onBusHandlersChange(eventbus: EventBus, handler: { id: string }, registered: boolean): Promise { + this.records.push({ middleware: this.name, hook: 'handler', registered, handler_id: handler.id, bus_id: eventbus.id }) + this.sequence?.push(`${this.name}:handler:${registered ? 
'registered' : 'unregistered'}`) + } +} + +test('middleware ctor+instance normalization and handler registration hooks', async () => { + class CtorMiddleware extends RecordingMiddleware { + static created = 0 + + constructor() { + super('ctor') + CtorMiddleware.created += 1 + } + } + + const instance = new RecordingMiddleware('instance') + const bus = new EventBus('MiddlewareCtorBus', { + middlewares: [CtorMiddleware, instance], + }) + const Event = BaseEvent.extend('MiddlewareCtorEvent', {}) + const handler = bus.on(Event, () => 'ok') + bus.off(Event, handler) + + await flushHooks() + + assert.equal(CtorMiddleware.created, 1) + assert.equal( + instance.records.some((record) => record.hook === 'handler' && record.registered === true), + true + ) + assert.equal( + instance.records.some((record) => record.hook === 'handler' && record.registered === false), + true + ) + + bus.destroy() +}) + +test('middleware hooks execute sequentially in registration order', async () => { + const sequence: string[] = [] + class FirstMiddleware extends RecordingMiddleware { + constructor() { + super('first', sequence) + } + } + class SecondMiddleware extends RecordingMiddleware { + constructor() { + super('second', sequence) + } + } + + const bus = new EventBus('MiddlewareOrderBus', { middlewares: [FirstMiddleware, SecondMiddleware] }) + const Event = BaseEvent.extend('MiddlewareOrderEvent', {}) + + bus.on(Event, () => 'ok') + await bus.emit(Event({ event_timeout: 0.2 })).done() + await flushHooks() + + const pairs: Array<[string, string]> = [ + ['first:event:pending', 'second:event:pending'], + ['first:event:started', 'second:event:started'], + ['first:result:pending', 'second:result:pending'], + ['first:result:started', 'second:result:started'], + ['first:result:completed', 'second:result:completed'], + ['first:event:completed', 'second:event:completed'], + ] + for (const [first, second] of pairs) { + assert.ok(sequence.indexOf(first) >= 0, `missing sequence marker: ${first}`) + 
assert.ok(sequence.indexOf(second) >= 0, `missing sequence marker: ${second}`) + assert.ok(sequence.indexOf(first) < sequence.indexOf(second), `expected ${first} before ${second}`) + } + + bus.destroy() +}) + +test('middleware hooks are per-bus on forwarded events', async () => { + const middleware_a = new RecordingMiddleware('a') + const middleware_b = new RecordingMiddleware('b') + const bus_a = new EventBus('MiddlewareForwardA', { middlewares: [middleware_a] }) + const bus_b = new EventBus('MiddlewareForwardB', { middlewares: [middleware_b] }) + const Event = BaseEvent.extend('MiddlewareForwardEvent', {}) + + const handler_a = bus_a.on(Event, async (event) => { + bus_b.emit(event) + }) + const handler_b = bus_b.on(Event, async () => 'ok') + + await bus_a.emit(Event({ event_timeout: 0.2 })).done() + await flushHooks() + + assert.equal( + middleware_a.records.some((record) => record.hook === 'result' && record.handler_id === handler_a.id), + true + ) + assert.equal( + middleware_a.records.some((record) => record.hook === 'result' && record.handler_id === handler_b.id), + false + ) + assert.equal( + middleware_b.records.some((record) => record.hook === 'result' && record.handler_id === handler_b.id), + true + ) + + bus_a.destroy() + bus_b.destroy() +}) + +test('middleware emits event lifecycle hooks for no-handler events', async () => { + const middleware = new RecordingMiddleware('single') + const bus = new EventBus('MiddlewareNoHandlerBus', { middlewares: [middleware] }) + const Event = BaseEvent.extend('MiddlewareNoHandlerEvent', {}) + + await bus.emit(Event({ event_timeout: 0.2 })).done() + await flushHooks() + + const event_statuses = middleware.records + .filter((record) => record.hook === 'event') + .map((record) => record.status) + .filter((status): status is 'pending' | 'started' | 'completed' => status !== undefined) + assert.deepEqual(event_statuses, ['pending', 'started', 'completed']) + assert.equal( + middleware.records.some((record) => record.hook 
=== 'result'), + false + ) + + bus.destroy() +}) + +test('middleware event lifecycle ordering is deterministic per event', async () => { + const event_statuses_by_id = new Map>() + + class LifecycleMiddleware extends RecordingMiddleware { + constructor() { + super('deterministic') + } + + async onEventChange(eventbus: EventBus, event: BaseEvent, status: 'pending' | 'started' | 'completed'): Promise { + const statuses = event_statuses_by_id.get(event.event_id) ?? [] + statuses.push(status) + event_statuses_by_id.set(event.event_id, statuses) + await super.onEventChange(eventbus, event, status) + } + } + + const bus = new EventBus('MiddlewareDeterministicBus', { middlewares: [LifecycleMiddleware], max_history_size: null }) + const Event = BaseEvent.extend('MiddlewareDeterministicEvent', {}) + + bus.on(Event, async () => { + await delay(0) + return 'ok' + }) + + const batch_count = 5 + const events_per_batch = 50 + for (let batch_index = 0; batch_index < batch_count; batch_index += 1) { + const events = Array.from({ length: events_per_batch }, (_unused, _event_index) => bus.emit(Event({ event_timeout: 0.2 }))) + await Promise.all(events.map((event) => event.done())) + await flushHooks() + + for (const event of events) { + assert.deepEqual(event_statuses_by_id.get(event.event_id), ['pending', 'started', 'completed']) + } + } + assert.equal(event_statuses_by_id.size, batch_count * events_per_batch) + + bus.destroy() +}) + +test('middleware result hooks never reverse from completed to started', async () => { + const middleware = new RecordingMiddleware('ordering') + const bus = new EventBus('MiddlewareOrderingBus', { middlewares: [middleware], event_handler_concurrency: 'parallel' }) + const Event = BaseEvent.extend('MiddlewareOrderingEvent', {}) + + bus.on(Event, async () => { + await delay(50) + return 'slow' + }) + bus.on(Event, async () => { + await delay(50) + return 'slow-2' + }) + + await bus.emit(Event({ event_timeout: 0.01 })).done() + await flushHooks() + + 
const statuses_by_handler = new Map() + for (const record of middleware.records.filter((record) => record.hook === 'result' && record.handler_id && record.status)) { + const statuses = statuses_by_handler.get(record.handler_id!) ?? [] + statuses.push(record.status!) + statuses_by_handler.set(record.handler_id!, statuses) + } + for (const statuses of statuses_by_handler.values()) { + const completed_index = statuses.indexOf('completed') + if (completed_index >= 0) { + assert.equal(statuses.slice(completed_index + 1).includes('started'), false) + } + } + + bus.destroy() +}) + +test('hard event timeout finalizes immediately without waiting for in-flight handlers', async () => { + const bus = new EventBus('MiddlewareHardTimeoutBus', { + event_handler_concurrency: 'parallel', + }) + const Event = BaseEvent.extend('MiddlewareHardTimeoutEvent', {}) + + bus.on(Event, async () => { + await delay(200) + return 'late-1' + }) + bus.on(Event, async () => { + await delay(200) + return 'late-2' + }) + + const started_at = Date.now() + const event = bus.emit(Event({ event_timeout: 0.01 })) + await event.done() + const elapsed_ms = Date.now() - started_at + + const initial_snapshot = Array.from(event.event_results.values()).map((result) => ({ + id: result.id, + status: result.status, + error_name: (result.error as { constructor?: { name?: string } } | undefined)?.constructor?.name ?? null, + })) + + assert.ok(elapsed_ms < 100, `event.done() took too long after timeout: ${elapsed_ms}ms`) + assert.equal( + initial_snapshot.every((result) => result.status === 'error'), + true + ) + + await delay(250) + const final_snapshot = Array.from(event.event_results.values()).map((result) => ({ + id: result.id, + status: result.status, + error_name: (result.error as { constructor?: { name?: string } } | undefined)?.constructor?.name ?? 
null, + })) + + assert.deepEqual(final_snapshot, initial_snapshot) + bus.destroy() +}) + +test('timeout/cancel/abort/result-schema taxonomy remains explicit', async () => { + const SchemaEvent = BaseEvent.extend('MiddlewareSchemaEvent', { + event_result_type: Number, + }) + const serial_bus = new EventBus('MiddlewareTaxonomySerialBus', { + event_handler_concurrency: 'serial', + }) + const parallel_bus = new EventBus('MiddlewareTaxonomyParallelBus', { + event_handler_concurrency: 'parallel', + }) + + serial_bus.on(SchemaEvent, async () => JSON.parse('"not-a-number"')) + const schema_event = serial_bus.emit(SchemaEvent({ event_timeout: 0.2 })) + await schema_event.done() + const schema_result = Array.from(schema_event.event_results.values())[0] + assert.ok(schema_result.error instanceof EventHandlerResultSchemaError) + + const SerialTimeoutEvent = BaseEvent.extend('MiddlewareSerialTimeoutEvent', {}) + serial_bus.on(SerialTimeoutEvent, async () => { + await delay(100) + return 'slow' + }) + serial_bus.on(SerialTimeoutEvent, async () => { + await delay(100) + return 'slow-2' + }) + const serial_timeout_event = serial_bus.emit(SerialTimeoutEvent({ event_timeout: 0.01 })) + await serial_timeout_event.done() + const serial_results = Array.from(serial_timeout_event.event_results.values()) + assert.equal( + serial_results.some((result) => result.error instanceof EventHandlerCancelledError), + true + ) + assert.equal( + serial_results.some((result) => result.error instanceof EventHandlerAbortedError || result.error instanceof EventHandlerTimeoutError), + true + ) + + const ParallelTimeoutEvent = BaseEvent.extend('MiddlewareParallelTimeoutEvent', {}) + parallel_bus.on(ParallelTimeoutEvent, async () => { + await delay(100) + return 'slow' + }) + parallel_bus.on(ParallelTimeoutEvent, async () => { + await delay(100) + return 'slow-2' + }) + const parallel_timeout_event = parallel_bus.emit(ParallelTimeoutEvent({ event_timeout: 0.01 })) + await parallel_timeout_event.done() + 
const parallel_results = Array.from(parallel_timeout_event.event_results.values()) + assert.equal( + parallel_results.some((result) => result.error instanceof EventHandlerAbortedError || result.error instanceof EventHandlerTimeoutError), + true + ) + assert.equal( + parallel_results.some((result) => result.error instanceof EventHandlerCancelledError), + false + ) + + serial_bus.destroy() + parallel_bus.destroy() +}) + +test('middleware hooks cover class/string/wildcard handler patterns', async () => { + const event_statuses_by_id = new Map() + const result_hook_statuses_by_handler = new Map() + const result_runtime_statuses_by_handler = new Map() + const handler_change_records: HandlerChangeRecord[] = [] + + class PatternRecordingMiddleware implements EventBusMiddleware { + async onEventChange(_eventbus: EventBus, event: BaseEvent, status: 'pending' | 'started' | 'completed'): Promise { + const statuses = event_statuses_by_id.get(event.event_id) ?? [] + statuses.push(status) + event_statuses_by_id.set(event.event_id, statuses) + } + + async onEventResultChange( + _eventbus: EventBus, + _event: BaseEvent, + event_result: { handler_id: string; status: string }, + status: 'pending' | 'started' | 'completed' + ): Promise { + const hook_statuses = result_hook_statuses_by_handler.get(event_result.handler_id) ?? [] + hook_statuses.push(status) + result_hook_statuses_by_handler.set(event_result.handler_id, hook_statuses) + + const runtime_statuses = result_runtime_statuses_by_handler.get(event_result.handler_id) ?? 
[] + runtime_statuses.push(event_result.status) + result_runtime_statuses_by_handler.set(event_result.handler_id, runtime_statuses) + } + + async onBusHandlersChange( + _eventbus: EventBus, + handler: { id: string; event_pattern: string; eventbus_id: string }, + registered: boolean + ): Promise { + handler_change_records.push({ + handler_id: handler.id, + event_pattern: handler.event_pattern, + registered, + eventbus_id: handler.eventbus_id, + }) + } + } + + const bus = new EventBus('MiddlewareHookPatternParityBus', { + middlewares: [new PatternRecordingMiddleware()], + }) + const PatternEvent = BaseEvent.extend('MiddlewarePatternEvent', {}) + + const class_entry = bus.on(PatternEvent, async () => 'class-result') + const string_entry = bus.on('MiddlewarePatternEvent', async () => 'string-result') + const wildcard_entry = bus.on('*', async (event) => `wildcard:${event.event_type}`) + + await flushHooks() + + const registered_records = handler_change_records.filter((record) => record.registered) + assert.equal(registered_records.length, 3) + + const expected_patterns = new Map([ + [class_entry.id, 'MiddlewarePatternEvent'], + [string_entry.id, 'MiddlewarePatternEvent'], + [wildcard_entry.id, '*'], + ]) + + assert.deepEqual(new Set(registered_records.map((record) => record.handler_id)), new Set(expected_patterns.keys())) + for (const record of registered_records) { + assert.equal(record.event_pattern, expected_patterns.get(record.handler_id)) + assert.equal(record.eventbus_id, bus.id) + } + + const event = bus.emit(PatternEvent({ event_timeout: 0.2 })) + await event.done() + await bus.waitUntilIdle() + await flushHooks() + + assert.equal(event.event_status, 'completed') + assert.deepEqual(event_statuses_by_id.get(event.event_id), ['pending', 'started', 'completed']) + assert.deepEqual(new Set(event.event_results.keys()), new Set(expected_patterns.keys())) + + for (const handler_id of expected_patterns.keys()) { + 
assert.deepEqual(result_hook_statuses_by_handler.get(handler_id), ['pending', 'started', 'completed']) + assert.deepEqual(result_runtime_statuses_by_handler.get(handler_id), ['pending', 'started', 'completed']) + } + + assert.equal(event.event_results.get(class_entry.id)?.result, 'class-result') + assert.equal(event.event_results.get(string_entry.id)?.result, 'string-result') + assert.equal(event.event_results.get(wildcard_entry.id)?.result, 'wildcard:MiddlewarePatternEvent') + + bus.off(PatternEvent, class_entry) + bus.off('MiddlewarePatternEvent', string_entry) + bus.off('*', wildcard_entry) + await flushHooks() + + const unregistered_records = handler_change_records.filter((record) => !record.registered) + assert.equal(unregistered_records.length, 3) + assert.deepEqual(new Set(unregistered_records.map((record) => record.handler_id)), new Set(expected_patterns.keys())) + for (const record of unregistered_records) { + assert.equal(record.event_pattern, expected_patterns.get(record.handler_id)) + } + + bus.destroy() +}) + +test('middleware hooks cover ad-hoc BaseEvent string + wildcard patterns', async () => { + const event_statuses_by_id = new Map() + const result_hook_statuses_by_handler = new Map() + const result_runtime_statuses_by_handler = new Map() + const handler_change_records: HandlerChangeRecord[] = [] + + class PatternRecordingMiddleware implements EventBusMiddleware { + async onEventChange(_eventbus: EventBus, event: BaseEvent, status: 'pending' | 'started' | 'completed'): Promise { + const statuses = event_statuses_by_id.get(event.event_id) ?? [] + statuses.push(status) + event_statuses_by_id.set(event.event_id, statuses) + } + + async onEventResultChange( + _eventbus: EventBus, + _event: BaseEvent, + event_result: { handler_id: string; status: string }, + status: 'pending' | 'started' | 'completed' + ): Promise { + const hook_statuses = result_hook_statuses_by_handler.get(event_result.handler_id) ?? 
[] + hook_statuses.push(status) + result_hook_statuses_by_handler.set(event_result.handler_id, hook_statuses) + + const runtime_statuses = result_runtime_statuses_by_handler.get(event_result.handler_id) ?? [] + runtime_statuses.push(event_result.status) + result_runtime_statuses_by_handler.set(event_result.handler_id, runtime_statuses) + } + + async onBusHandlersChange( + _eventbus: EventBus, + handler: { id: string; event_pattern: string; eventbus_id: string }, + registered: boolean + ): Promise { + handler_change_records.push({ + handler_id: handler.id, + event_pattern: handler.event_pattern, + registered, + eventbus_id: handler.eventbus_id, + }) + } + } + + const bus = new EventBus('MiddlewareHookStringPatternParityBus', { + middlewares: [new PatternRecordingMiddleware()], + }) + + const ad_hoc_event_type = 'AdHocPatternEvent' + const string_entry = bus.on(ad_hoc_event_type, async (event) => { + assert.equal(event.event_type, ad_hoc_event_type) + return `string:${event.event_type}` + }) + const wildcard_entry = bus.on('*', async (event) => `wildcard:${event.event_type}`) + + await flushHooks() + + const registered_records = handler_change_records.filter((record) => record.registered) + assert.equal(registered_records.length, 2) + + const expected_patterns = new Map([ + [string_entry.id, ad_hoc_event_type], + [wildcard_entry.id, '*'], + ]) + + assert.deepEqual(new Set(registered_records.map((record) => record.handler_id)), new Set(expected_patterns.keys())) + for (const record of registered_records) { + assert.equal(record.event_pattern, expected_patterns.get(record.handler_id)) + assert.equal(record.eventbus_id, bus.id) + } + + const event = bus.emit(new BaseEvent({ event_type: ad_hoc_event_type, event_timeout: 0.2 })) + await event.done() + await bus.waitUntilIdle() + await flushHooks() + + assert.equal(event.event_status, 'completed') + assert.deepEqual(event_statuses_by_id.get(event.event_id), ['pending', 'started', 'completed']) + assert.deepEqual(new 
Set(event.event_results.keys()), new Set(expected_patterns.keys())) + + for (const handler_id of expected_patterns.keys()) { + assert.deepEqual(result_hook_statuses_by_handler.get(handler_id), ['pending', 'started', 'completed']) + assert.deepEqual(result_runtime_statuses_by_handler.get(handler_id), ['pending', 'started', 'completed']) + } + + assert.equal(event.event_results.get(string_entry.id)?.result, `string:${ad_hoc_event_type}`) + assert.equal(event.event_results.get(wildcard_entry.id)?.result, `wildcard:${ad_hoc_event_type}`) + + bus.off(ad_hoc_event_type, string_entry) + bus.off('*', wildcard_entry) + await flushHooks() + + const unregistered_records = handler_change_records.filter((record) => !record.registered) + assert.equal(unregistered_records.length, 2) + assert.deepEqual(new Set(unregistered_records.map((record) => record.handler_id)), new Set(expected_patterns.keys())) + for (const record of unregistered_records) { + assert.equal(record.event_pattern, expected_patterns.get(record.handler_id)) + } + + bus.destroy() +}) diff --git a/bubus-ts/tests/optional_dependencies.test.ts b/bubus-ts/tests/optional_dependencies.test.ts new file mode 100644 index 0000000..ecf17fd --- /dev/null +++ b/bubus-ts/tests/optional_dependencies.test.ts @@ -0,0 +1,78 @@ +import assert from 'node:assert/strict' +import { readFileSync } from 'node:fs' +import { dirname, join } from 'node:path' +import { fileURLToPath } from 'node:url' +import { test } from 'node:test' + +const tests_dir = dirname(fileURLToPath(import.meta.url)) +const ts_root = join(tests_dir, '..') +const package_json_path = join(ts_root, 'package.json') +const src_dir = join(ts_root, 'src') + +type PackageJSON = { + dependencies?: Record + optionalDependencies?: Record + devDependencies?: Record + peerDependencies?: Record +} + +const loadPackageJson = (): PackageJSON => JSON.parse(readFileSync(package_json_path, 'utf8')) as PackageJSON + +test('bridge dependencies are optional in package.json', () => { + 
const package_json = loadPackageJson() + const dependencies = package_json.dependencies ?? {} + const optional_dependencies = package_json.optionalDependencies ?? {} + + assert.equal(Object.hasOwn(dependencies, 'ioredis'), false) + assert.equal(Object.hasOwn(dependencies, 'nats'), false) + assert.equal(Object.hasOwn(dependencies, 'pg'), false) + + assert.equal(Object.hasOwn(optional_dependencies, 'ioredis'), true) + assert.equal(Object.hasOwn(optional_dependencies, 'nats'), true) + assert.equal(Object.hasOwn(optional_dependencies, 'pg'), true) +}) + +test('package.json does not depend on third-party sqlite packages', () => { + const package_json = loadPackageJson() + const sqlite_package_names = ['sqlite3', 'better-sqlite3', '@sqlite.org/sqlite-wasm'] + const dependency_sections = [ + package_json.dependencies ?? {}, + package_json.optionalDependencies ?? {}, + package_json.devDependencies ?? {}, + package_json.peerDependencies ?? {}, + ] + + for (const section of dependency_sections) { + for (const sqlite_package_name of sqlite_package_names) { + assert.equal(Object.hasOwn(section, sqlite_package_name), false, `unexpected sqlite package: ${sqlite_package_name}`) + } + } +}) + +test('bridge modules do not statically import optional bridge packages', () => { + const bridge_modules = [ + { + path: join(src_dir, 'bridge_redis.ts'), + forbidden_patterns: [/from\s+['"]ioredis['"]/, /import\s+['"]ioredis['"]/], + required_pattern: /importOptionalDependency\('RedisEventBridge', 'ioredis'\)/, + }, + { + path: join(src_dir, 'bridge_nats.ts'), + forbidden_patterns: [/from\s+['"]nats['"]/, /import\s+['"]nats['"]/], + required_pattern: /importOptionalDependency\('NATSEventBridge', 'nats'\)/, + }, + { + path: join(src_dir, 'bridge_postgres.ts'), + forbidden_patterns: [/from\s+['"]pg['"]/, /import\s+['"]pg['"]/], + required_pattern: /importOptionalDependency\('PostgresEventBridge', 'pg'\)/, + }, + ] + + for (const bridge_module of bridge_modules) { + const source = 
readFileSync(bridge_module.path, 'utf8') + for (const forbidden_pattern of bridge_module.forbidden_patterns) { + assert.equal(forbidden_pattern.test(source), false, `${bridge_module.path} has eager optional dependency import`) + } + assert.equal(bridge_module.required_pattern.test(source), true, `${bridge_module.path} must use lazy optional dependency import`) + } +}) diff --git a/bubus-ts/tests/performance.browser.spec.cjs b/bubus-ts/tests/performance.browser.spec.cjs new file mode 100644 index 0000000..a1d12ff --- /dev/null +++ b/bubus-ts/tests/performance.browser.spec.cjs @@ -0,0 +1,117 @@ +const fs = require('fs') +const http = require('http') +const path = require('path') +const { test, expect } = require('playwright/test') + +test.describe('browser runtime perf', () => { + test.setTimeout(120_000) + + test('runs shared perf scenarios in Chromium JS runtime', async ({ page, browserName }) => { + expect(browserName).toBe('chromium') + + const rootDir = path.resolve(__dirname, '..') + const server = http.createServer((req, res) => { + const requestUrl = new URL(req.url || '/', 'http://127.0.0.1') + const pathname = decodeURIComponent(requestUrl.pathname) + + if (pathname === '/' || pathname === '/index.html') { + res.statusCode = 200 + res.setHeader('content-type', 'text/html; charset=utf-8') + res.end(` + + + + + + browser perf harness +`) + return + } + + const absolutePath = path.resolve(rootDir, `.${pathname}`) + const relativePath = path.relative(rootDir, absolutePath) + if (relativePath.startsWith('..') || path.isAbsolute(relativePath)) { + res.statusCode = 403 + res.end('forbidden') + return + } + if (!fs.existsSync(absolutePath) || fs.statSync(absolutePath).isDirectory()) { + res.statusCode = 404 + res.end('not found') + return + } + + const ext = path.extname(absolutePath) + if (ext === '.js' || ext === '.mjs' || ext === '.cjs') { + res.setHeader('content-type', 'text/javascript; charset=utf-8') + } else if (ext === '.map' || ext === '.json') { + 
res.setHeader('content-type', 'application/json; charset=utf-8') + } else if (ext === '.html') { + res.setHeader('content-type', 'text/html; charset=utf-8') + } else { + res.setHeader('content-type', 'text/plain; charset=utf-8') + } + + res.statusCode = 200 + res.end(fs.readFileSync(absolutePath)) + }) + + await new Promise((resolve, reject) => { + server.once('error', reject) + server.listen(0, '127.0.0.1', () => { + server.removeListener('error', reject) + resolve() + }) + }) + + const address = server.address() + if (!address || typeof address === 'string') { + await new Promise((resolve) => server.close(() => resolve())) + throw new Error('failed to resolve browser perf server address') + } + const baseUrl = `http://127.0.0.1:${address.port}` + + let result + try { + await page.goto(`${baseUrl}/`) + result = await page.evaluate(async () => { + const [api, scenarios] = await Promise.all([import('/dist/esm/index.js'), import('/tests/performance.scenarios.js')]) + const logs = [] + + const results = await scenarios.runAllPerfScenarios({ + runtimeName: 'chromium-js', + api: { + BaseEvent: api.BaseEvent, + EventBus: api.EventBus, + EventHandlerTimeoutError: api.EventHandlerTimeoutError, + EventHandlerCancelledError: api.EventHandlerCancelledError, + }, + now: () => performance.now(), + sleep: (ms) => new Promise((resolve) => setTimeout(resolve, ms)), + log: (message) => logs.push(message), + limits: { + singleRunMs: 30_000, + worstCaseMs: 60_000, + }, + }) + + return { logs, results } + }) + } finally { + await new Promise((resolve) => server.close(() => resolve())) + } + + for (const line of result.logs) { + console.log(line) + } + + expect(result.results.length).toBe(6) + }) +}) diff --git a/bubus-ts/tests/performance.runtime.ts b/bubus-ts/tests/performance.runtime.ts new file mode 100644 index 0000000..128616a --- /dev/null +++ b/bubus-ts/tests/performance.runtime.ts @@ -0,0 +1,92 @@ +import { BaseEvent, EventBus, EventHandlerCancelledError, 
EventHandlerTimeoutError } from '../src/index.js' +import { PERF_SCENARIO_IDS, runAllPerfScenarios, runPerfScenarioById } from './performance.scenarios.js' + +declare const Bun: { gc?: (full?: boolean) => void } | undefined +declare const Deno: + | { + args?: string[] + memoryUsage?: () => { rss: number; heapUsed: number } + [key: symbol]: unknown + } + | undefined + +const runtime = typeof Bun !== 'undefined' && Bun ? 'bun' : typeof Deno !== 'undefined' && Deno ? 'deno' : 'node' + +const getCliArgs = () => { + const processArgs = typeof process !== 'undefined' && Array.isArray(process.argv) ? process.argv.slice(2) : [] + if (processArgs.length > 0) return processArgs + return typeof Deno !== 'undefined' && Deno && Array.isArray(Deno.args) ? (Deno.args ?? []) : [] +} + +const getScenarioArg = () => { + const args = getCliArgs() + for (let i = 0; i < args.length; i += 1) { + const arg = args[i] + if (!arg) continue + if (arg.startsWith('--scenario=')) { + const value = arg.slice('--scenario='.length).trim() + return value.length > 0 ? value : null + } + if (arg === '--scenario') { + const value = args[i + 1]?.trim() + return value && value.length > 0 ? 
value : null + } + } + return null +} + +const getMemoryUsage = () => { + if (typeof process !== 'undefined' && typeof process.memoryUsage === 'function') { + return process.memoryUsage() + } + if (typeof Deno !== 'undefined' && Deno && typeof Deno.memoryUsage === 'function') { + return Deno.memoryUsage() + } + return { heapUsed: 0, rss: 0 } +} + +const forceGc = () => { + if (runtime === 'bun' && typeof Bun !== 'undefined' && Bun && typeof Bun.gc === 'function') { + Bun.gc(true) + Bun.gc(true) + return + } + + const maybeGlobalGc = (globalThis as { gc?: () => void }).gc + if (typeof maybeGlobalGc === 'function') { + for (let i = 0; i < 4; i += 1) maybeGlobalGc() + } +} + +const main = async () => { + const scenario = getScenarioArg() + console.log(`[${runtime}] runtime perf harness starting`) + + const input = { + runtimeName: runtime, + api: { BaseEvent, EventBus, EventHandlerTimeoutError, EventHandlerCancelledError }, + now: () => performance.now(), + sleep: (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)), + log: (message: string) => console.log(message), + getMemoryUsage, + forceGc, + limits: { + singleRunMs: 90_000, + worstCaseMs: 180_000, + maxHeapDeltaAfterGcMb: 0, + }, + } + + if (scenario) { + if (!PERF_SCENARIO_IDS.includes(scenario)) { + throw new Error(`Unknown --scenario value "${scenario}". 
Expected one of: ${PERF_SCENARIO_IDS.join(', ')}`) + } + await runPerfScenarioById(input, scenario) + } else { + await runAllPerfScenarios(input) + } + + console.log(`[${runtime}] runtime perf harness complete`) +} + +await main() diff --git a/bubus-ts/tests/performance.scenarios.d.ts b/bubus-ts/tests/performance.scenarios.d.ts new file mode 100644 index 0000000..80a2a45 --- /dev/null +++ b/bubus-ts/tests/performance.scenarios.d.ts @@ -0,0 +1,31 @@ +export type PerfScenarioResult = Record + +export type PerfScenarioInput = { + runtimeName: string + api: { + BaseEvent: unknown + EventBus: unknown + EventHandlerTimeoutError: unknown + EventHandlerCancelledError: unknown + } + now: () => number + sleep: (ms: number) => Promise + log: (message: string) => void + getMemoryUsage: () => { rss: number; heapUsed: number } + forceGc?: () => void + limits: { + singleRunMs: number + worstCaseMs: number + maxHeapDeltaAfterGcMb?: number + } +} + +export const PERF_SCENARIO_IDS: readonly string[] +export function runPerfScenarioById(input: PerfScenarioInput, scenarioId: string): Promise +export function runAllPerfScenarios(input: PerfScenarioInput): Promise +export function runPerf50kEvents(input: PerfScenarioInput): Promise +export function runPerfEphemeralBuses(input: PerfScenarioInput): Promise +export function runPerfSingleEventManyFixedHandlers(input: PerfScenarioInput): Promise +export function runPerfOnOffChurn(input: PerfScenarioInput): Promise +export function runPerfWorstCase(input: PerfScenarioInput): Promise +export function runCleanupEquivalence(input: PerfScenarioInput): Promise diff --git a/bubus-ts/tests/performance.scenarios.js b/bubus-ts/tests/performance.scenarios.js new file mode 100644 index 0000000..63655fe --- /dev/null +++ b/bubus-ts/tests/performance.scenarios.js @@ -0,0 +1,846 @@ +const defaultNow = () => performance.now() +const defaultSleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms)) + +const assert = (condition, message) => { + if 
(!condition) { + throw new Error(message) + } +} + +const mb = (bytes) => (bytes / 1024 / 1024).toFixed(1) +const kb = (bytes) => bytes / 1024 +const clampNonNegative = (value) => (value < 0 ? 0 : value) +const formatMsPerEvent = (value, unit = 'event') => `${value.toFixed(3)}ms/${unit}` +const formatKbPerEvent = (value) => `${value.toFixed(3)}kb/event` +const formatMs = (value) => `${value.toFixed(3)}ms` +const formatMb = (value) => `${value.toFixed(3)}mb` + +const HISTORY_LIMIT_STREAM = 512 +const HISTORY_LIMIT_ON_OFF = 128 +const HISTORY_LIMIT_EPHEMERAL_BUS = 128 +const HISTORY_LIMIT_FIXED_HANDLERS = 128 +const HISTORY_LIMIT_WORST_CASE = 128 +const TRIM_TARGET = 1 +const WORST_CASE_IMMEDIATE_TIMEOUT_MS = 0.0001 +const WORST_CASE_IMMEDIATE_TIMEOUT_SECONDS = WORST_CASE_IMMEDIATE_TIMEOUT_MS / 1000 + +const heapDeltaNoiseFloorMb = (runtimeName) => { + if (runtimeName === 'bun') return 192.0 + if (runtimeName === 'deno') return 4.5 + return 3.0 +} + +const measureMemory = (hooks) => { + if (typeof hooks.getMemoryUsage !== 'function') { + return null + } + return hooks.getMemoryUsage() +} + +const maybeForceGc = (hooks) => { + if (typeof hooks.forceGc === 'function') { + hooks.forceGc() + } +} + +const waitForRuntimeSettle = async (hooks) => { + // Let normal runtime scheduling/GC progress naturally without explicit GC forcing. + await hooks.sleep(50) +} + +const measureStableHeapUsed = async (hooks, mode = 'max', rounds = 12) => { + const heaps = [] + for (let i = 0; i < rounds; i += 1) { + maybeForceGc(hooks) + await hooks.sleep(12) + const mem = measureMemory(hooks) + if (mem) heaps.push(mem.heapUsed) + } + if (heaps.length === 0) return null + return mode === 'min' ? 
Math.min(...heaps) : Math.max(...heaps) +} + +const measureHeapDeltaAfterGc = async (hooks, baselineHeapUsed) => { + if (baselineHeapUsed === null || baselineHeapUsed === undefined) return null + await hooks.sleep(120) + const endHeapUsed = await measureStableHeapUsed(hooks, 'min', 24) + if (endHeapUsed === null) return null + return (endHeapUsed - baselineHeapUsed) / 1024 / 1024 +} + +const trimBusHistoryToOneEvent = async (hooks, bus, TrimEvent) => { + bus.event_history.max_history_size = TRIM_TARGET + bus.event_history.max_history_drop = true + let trimEvent = bus.emit(TrimEvent({})) + await trimEvent.done() + trimEvent = null + await bus.waitUntilIdle() + assert(bus.event_history.size <= TRIM_TARGET, `trim-to-1 failed for ${bus.toString()}: ${bus.event_history.size}/${TRIM_TARGET}`) +} + +const waitForRegistrySize = async (hooks, EventBus, expectedSize, attempts = 150) => { + for (let i = 0; i < attempts; i += 1) { + await hooks.sleep(40) + if (EventBus.all_instances.size <= expectedSize) { + return true + } + } + return EventBus.all_instances.size <= expectedSize +} + +const runCleanupBurst = async ({ hooks, EventBus, CleanupEvent, TrimEvent, busesPerMode, eventsPerBus, destroyMode }) => { + for (let i = 0; i < busesPerMode; i += 1) { + let bus = new EventBus(`CleanupEq-${destroyMode ? 'destroy' : 'scope'}-${i}`, { + max_history_size: HISTORY_LIMIT_EPHEMERAL_BUS, + max_history_drop: true, + }) + bus.on(CleanupEvent, () => {}) + + const pending = [] + for (let e = 0; e < eventsPerBus; e += 1) { + // Store completion promises (not event proxies) to avoid retaining bus-bound proxies across GC checks. 
+ pending.push( + bus + .emit(CleanupEvent({})) + .done() + .then(() => undefined) + ) + } + await Promise.all(pending) + pending.length = 0 + await bus.waitUntilIdle() + await trimBusHistoryToOneEvent(hooks, bus, TrimEvent) + + if (destroyMode) { + bus.destroy() + } + bus = null + } +} + +const runWarmup = async (input) => { + const hooks = withDefaults(input) + const { BaseEvent, EventBus } = hooks.api + const { PerfWarmupEvent: WarmEvent, PerfWarmupTrimEvent: WarmTrimEvent } = getEventClasses(BaseEvent) + + const bus = new EventBus('PerfWarmupBus', { max_history_size: 512, max_history_drop: true }) + bus.on(WarmEvent, () => {}) + + for (let i = 0; i < 2048; i += 256) { + const pending = [] + for (let j = 0; j < 256; j += 1) { + pending.push(bus.emit(WarmEvent({}))) + } + await Promise.all(pending.map((event) => event.done())) + await bus.waitUntilIdle() + } + + await trimBusHistoryToOneEvent(hooks, bus, WarmTrimEvent) + bus.destroy() + await waitForRuntimeSettle(hooks) +} + +const createMemoryTracker = (hooks) => { + const baselineRaw = measureMemory(hooks) + if (!baselineRaw) { + return { + baseline: null, + current: null, + sample: () => null, + peakHeapKbPerEvent: () => null, + peakRssKbPerEvent: () => null, + } + } + + const baseline = { rss: baselineRaw.rss, heapUsed: baselineRaw.heapUsed } + let current = baselineRaw + let peakHeapUsed = baselineRaw.heapUsed + let peakRss = baselineRaw.rss + + const sample = () => { + const snapshot = measureMemory(hooks) + if (!snapshot) return null + current = snapshot + if (snapshot.heapUsed > peakHeapUsed) peakHeapUsed = snapshot.heapUsed + if (snapshot.rss > peakRss) peakRss = snapshot.rss + return snapshot + } + + const peakHeapKbPerEvent = (events) => { + if (!events || !baseline) return null + const deltaBytes = clampNonNegative(peakHeapUsed - baseline.heapUsed) + return kb(deltaBytes) / events + } + + const peakRssKbPerEvent = (events) => { + if (!events || !baseline) return null + const deltaBytes = 
clampNonNegative(peakRss - baseline.rss) + return kb(deltaBytes) / events + } + + return { baseline, current: () => current, sample, peakHeapKbPerEvent, peakRssKbPerEvent } +} + +const record = (hooks, name, metrics) => { + if (typeof hooks.log === 'function') { + const perEventOnly = name === 'worst-case forwarding + timeouts' + const parts = [] + if (!perEventOnly && typeof metrics.totalEvents === 'number') parts.push(`events=${metrics.totalEvents}`) + if (!perEventOnly && typeof metrics.totalMs === 'number') parts.push(`total=${formatMs(metrics.totalMs)}`) + if (typeof metrics.msPerEvent === 'number') + parts.push(`latency=${formatMsPerEvent(metrics.msPerEvent, metrics.msPerEventUnit ?? 'event')}`) + if (typeof metrics.peakHeapKbPerEvent === 'number') parts.push(`peak_heap=${formatKbPerEvent(metrics.peakHeapKbPerEvent)}`) + if (typeof metrics.peakRssKbPerEvent === 'number') parts.push(`peak_rss=${formatKbPerEvent(metrics.peakRssKbPerEvent)}`) + if ( + typeof metrics.ramKbPerEvent === 'number' && + typeof metrics.peakHeapKbPerEvent !== 'number' && + typeof metrics.peakRssKbPerEvent !== 'number' + ) { + parts.push(`ram=${formatKbPerEvent(metrics.ramKbPerEvent)}`) + } + if (typeof metrics.throughput === 'number') parts.push(`throughput=${metrics.throughput}/s`) + if (typeof metrics.equivalent === 'boolean') parts.push(`equivalent=${metrics.equivalent ? 'yes' : 'no'}`) + if (typeof metrics.timeoutCount === 'number') parts.push(`timeouts=${metrics.timeoutCount}`) + if (typeof metrics.cancelCount === 'number') parts.push(`cancels=${metrics.cancelCount}`) + if (typeof metrics.heapDeltaAfterGcMb === 'number') parts.push(`heap_delta_after_gc=${formatMb(metrics.heapDeltaAfterGcMb)}`) + hooks.log(`[${hooks.runtimeName}] ${name}: ${parts.join(' ')}`) + } +} + +const withDefaults = (input) => { + const hooks = { + runtimeName: input.runtimeName ?? 'runtime', + now: input.now ?? defaultNow, + sleep: input.sleep ?? defaultSleep, + log: input.log ?? 
(() => {}), + getMemoryUsage: input.getMemoryUsage, + forceGc: input.forceGc, + limits: { + singleRunMs: input.limits?.singleRunMs ?? 90_000, + worstCaseMs: input.limits?.worstCaseMs ?? 180_000, + maxHeapDeltaAfterGcMb: input.limits?.maxHeapDeltaAfterGcMb ?? null, + heapDeltaNoiseFloorMb: input.limits?.heapDeltaNoiseFloorMb ?? heapDeltaNoiseFloorMb(input.runtimeName ?? 'runtime'), + }, + api: input.api, + } + return hooks +} + +const eventClassCache = new WeakMap() + +const getEventClasses = (BaseEvent) => { + const cached = eventClassCache.get(BaseEvent) + if (cached) return cached + + const classes = { + PerfSimpleEvent: BaseEvent.extend('PerfSimpleEvent', {}), + PerfTrimEvent: BaseEvent.extend('PerfTrimEvent', {}), + PerfTrimEventEphemeral: BaseEvent.extend('PerfTrimEventEphemeral', {}), + PerfRequestEvent: BaseEvent.extend('PerfRequestEvent', {}), + PerfTrimEventOnOff: BaseEvent.extend('PerfTrimEventOnOff', {}), + PerfFixedHandlersEvent: BaseEvent.extend('PerfFixedHandlersEvent', {}), + PerfTrimEventFixedHandlers: BaseEvent.extend('PerfTrimEventFixedHandlers', {}), + WCParent: BaseEvent.extend('WCParent', {}), + WCChild: BaseEvent.extend('WCChild', {}), + WCGrandchild: BaseEvent.extend('WCGrandchild', {}), + WCTrimEvent: BaseEvent.extend('WCTrimEvent', {}), + CleanupEqEvent: BaseEvent.extend('CleanupEqEvent', {}), + CleanupEqTrimEvent: BaseEvent.extend('CleanupEqTrimEvent', {}), + PerfWarmupEvent: BaseEvent.extend('PerfWarmupEvent', {}), + PerfWarmupTrimEvent: BaseEvent.extend('PerfWarmupTrimEvent', {}), + } + eventClassCache.set(BaseEvent, classes) + return classes +} + +export const runPerf50kEvents = async (input) => { + const hooks = withDefaults(input) + const { BaseEvent, EventBus } = hooks.api + const totalEvents = 50_000 + const batchSize = 512 + const { PerfSimpleEvent: SimpleEvent, PerfTrimEvent: TrimEvent } = getEventClasses(BaseEvent) + const bus = new EventBus('PerfBus', { max_history_size: HISTORY_LIMIT_STREAM, max_history_drop: true }) + + let 
processedCount = 0 + const sampledEarlyEvents = [] + bus.on(SimpleEvent, () => { + processedCount += 1 + }) + + const memory = createMemoryTracker(hooks) + const t0 = hooks.now() + + let dispatched = 0 + while (dispatched < totalEvents) { + const pending = [] + const thisBatch = Math.min(batchSize, totalEvents - dispatched) + for (let i = 0; i < thisBatch; i += 1) { + const dispatchedEvent = bus.emit(SimpleEvent({})) + pending.push(dispatchedEvent) + if (sampledEarlyEvents.length < 64) { + const original = dispatchedEvent._event_original ?? dispatchedEvent + sampledEarlyEvents.push(original) + } + dispatched += 1 + } + + await Promise.all(pending.map((event) => event.done())) + await bus.waitUntilIdle() + if (dispatched % 2048 === 0) memory.sample() + } + + const tDispatch = hooks.now() + memory.sample() + + await trimBusHistoryToOneEvent(hooks, bus, TrimEvent) + const tDone = hooks.now() + await waitForRuntimeSettle(hooks) + memory.sample() + const memDone = measureMemory(hooks) + + const dispatchMs = tDispatch - t0 + const awaitMs = tDone - tDispatch + const totalMs = tDone - t0 + const msPerEvent = totalMs / totalEvents + const peakHeapKbPerEvent = memory.peakHeapKbPerEvent(totalEvents) + const peakRssKbPerEvent = memory.peakRssKbPerEvent(totalEvents) + + assert(processedCount === totalEvents, `50k events processed ${processedCount}/${totalEvents}`) + assert(totalMs < hooks.limits.singleRunMs, `50k events took ${Math.round(totalMs)}ms (limit ${hooks.limits.singleRunMs}ms)`) + assert( + bus.event_history.size <= bus.event_history.max_history_size, + `50k events history exceeded limit: ${bus.event_history.size}/${bus.event_history.max_history_size}` + ) + + assert(sampledEarlyEvents.length > 0, 'expected sampled early events to be captured') + + let sampledEvictedCount = 0 + for (const event of sampledEarlyEvents) { + const isStillInHistory = bus.event_history.has(event.event_id) + assert(!isStillInHistory, `expected sampled early event to be evicted from history: 
${event.event_id}`) + sampledEvictedCount += 1 + assert(event.event_results.size === 0, `trimmed event still has event_results: ${event.event_id} (${event.event_results.size})`) + assert(event.bus === undefined, `trimmed event still has bus reference: ${event.event_id}`) + } + assert( + sampledEvictedCount === sampledEarlyEvents.length, + `expected all sampled events to be evicted: ${sampledEvictedCount}/${sampledEarlyEvents.length}` + ) + + const result = { + scenario: '50k events', + totalEvents, + totalMs, + dispatchMs, + awaitMs, + msPerEvent, + msPerEventLabel: formatMsPerEvent(msPerEvent), + ramKbPerEvent: peakHeapKbPerEvent, + ramKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakHeapKbPerEvent, + peakHeapKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakRssKbPerEvent, + peakRssKbPerEventLabel: peakRssKbPerEvent === null ? null : formatKbPerEvent(peakRssKbPerEvent), + throughput: Math.round(totalEvents / (totalMs / 1000)), + processedCount, + sampledEvictedCount, + } + + if (memory.baseline && memDone) { + result.heapBeforeMb = Number(mb(memory.baseline.heapUsed)) + result.heapDoneMb = Number(mb(memDone.heapUsed)) + } + + bus.destroy() + record(hooks, result.scenario, result) + return result +} + +export const runPerfEphemeralBuses = async (input) => { + const hooks = withDefaults(input) + const { BaseEvent, EventBus } = hooks.api + const totalBuses = 500 + const eventsPerBus = 100 + const totalEvents = totalBuses * eventsPerBus + const { PerfSimpleEvent: SimpleEvent, PerfTrimEventEphemeral: TrimEvent } = getEventClasses(BaseEvent) + + let processedCount = 0 + const memory = createMemoryTracker(hooks) + const t0 = hooks.now() + + for (let b = 0; b < totalBuses; b += 1) { + const bus = new EventBus(`ReqBus-${b}`, { max_history_size: HISTORY_LIMIT_EPHEMERAL_BUS, max_history_drop: true }) + bus.on(SimpleEvent, () => { + processedCount += 1 + }) + + const pending = [] 
+ for (let i = 0; i < eventsPerBus; i += 1) { + pending.push(bus.emit(SimpleEvent({}))) + } + + await Promise.all(pending.map((event) => event.done())) + await bus.waitUntilIdle() + await trimBusHistoryToOneEvent(hooks, bus, TrimEvent) + bus.destroy() + if (b % 10 === 0) memory.sample() + } + + const totalMs = hooks.now() - t0 + await waitForRuntimeSettle(hooks) + memory.sample() + const msPerEvent = totalMs / totalEvents + const peakHeapKbPerEvent = memory.peakHeapKbPerEvent(totalEvents) + const peakRssKbPerEvent = memory.peakRssKbPerEvent(totalEvents) + + assert(processedCount === totalEvents, `500x100 buses processed ${processedCount}/${totalEvents}`) + assert(totalMs < hooks.limits.singleRunMs, `500x100 buses took ${Math.round(totalMs)}ms (limit ${hooks.limits.singleRunMs}ms)`) + assert(EventBus.all_instances.size === 0, `500x100 buses leaked instances: ${EventBus.all_instances.size}`) + + const result = { + scenario: '500 buses x 100 events', + totalEvents, + totalMs, + msPerEvent, + msPerEventLabel: formatMsPerEvent(msPerEvent), + ramKbPerEvent: peakHeapKbPerEvent, + ramKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakHeapKbPerEvent, + peakHeapKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakRssKbPerEvent, + peakRssKbPerEventLabel: peakRssKbPerEvent === null ? 
null : formatKbPerEvent(peakRssKbPerEvent), + throughput: Math.round(totalEvents / (totalMs / 1000)), + processedCount, + } + record(hooks, result.scenario, result) + return result +} + +export const runPerfSingleEventManyFixedHandlers = async (input) => { + const hooks = withDefaults(input) + const { BaseEvent, EventBus } = hooks.api + const totalEvents = 1 + const totalHandlers = 50_000 + const { PerfFixedHandlersEvent: FixedHandlersEvent, PerfTrimEventFixedHandlers: TrimEvent } = getEventClasses(BaseEvent) + const bus = new EventBus('FixedHandlersBus', { + max_history_size: HISTORY_LIMIT_FIXED_HANDLERS, + max_history_drop: true, + event_handler_concurrency: 'parallel', + }) + + let processedCount = 0 + for (let i = 0; i < totalHandlers; i += 1) { + bus.on( + FixedHandlersEvent, + () => { + processedCount += 1 + }, + { id: `fixed-handler-${i}` } + ) + if (i % 1000 === 0) { + // Keep memory sampling overhead bounded during massive registration. + measureMemory(hooks) + } + } + + const memory = createMemoryTracker(hooks) + const t0 = hooks.now() + + const event = bus.emit(FixedHandlersEvent({})) + await event.done() + await bus.waitUntilIdle() + + const totalMs = hooks.now() - t0 + await waitForRuntimeSettle(hooks) + memory.sample() + const msPerEvent = totalMs / (totalEvents * totalHandlers) + const peakHeapKbPerEvent = memory.peakHeapKbPerEvent(totalEvents) + const peakRssKbPerEvent = memory.peakRssKbPerEvent(totalEvents) + + assert(processedCount === totalHandlers, `fixed-handlers processed ${processedCount}/${totalHandlers}`) + assert(totalMs < hooks.limits.singleRunMs, `fixed-handlers took ${Math.round(totalMs)}ms (limit ${hooks.limits.singleRunMs}ms)`) + assert(bus.handlers.size === totalHandlers, `fixed-handlers expected ${totalHandlers} registered handlers, got ${bus.handlers.size}`) + + await trimBusHistoryToOneEvent(hooks, bus, TrimEvent) + bus.destroy() + + const result = { + scenario: '1 event x 50k parallel handlers', + totalEvents, + totalMs, + 
msPerEvent, + msPerEventLabel: formatMsPerEvent(msPerEvent, 'event/handler'), + msPerEventUnit: 'event/handler', + ramKbPerEvent: peakHeapKbPerEvent, + ramKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakHeapKbPerEvent, + peakHeapKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakRssKbPerEvent, + peakRssKbPerEventLabel: peakRssKbPerEvent === null ? null : formatKbPerEvent(peakRssKbPerEvent), + throughput: Math.round(totalEvents / (totalMs / 1000)), + processedCount, + totalHandlers, + } + record(hooks, result.scenario, result) + return result +} + +export const runPerfOnOffChurn = async (input) => { + const hooks = withDefaults(input) + const { BaseEvent, EventBus } = hooks.api + const { PerfRequestEvent: RequestEvent, PerfTrimEventOnOff: TrimEvent } = getEventClasses(BaseEvent) + + const totalEvents = 50_000 + const bus = new EventBus('OneOffHandlerBus', { max_history_size: HISTORY_LIMIT_ON_OFF, max_history_drop: true }) + + let processedCount = 0 + + const memory = createMemoryTracker(hooks) + const t0 = hooks.now() + + const dispatchWithEphemeralHandler = async () => { + // Allocate/register exactly one handler for one event, then immediately remove it. + // Avoid pre-building handler arrays so memory samples reflect runtime churn, not idle closures. 
+ const oneOffHandler = () => { + processedCount += 1 + } + bus.on(RequestEvent, oneOffHandler) + + const ev = bus.emit(RequestEvent({})) + await ev.done() + + bus.off(RequestEvent, oneOffHandler) + } + + for (let i = 0; i < totalEvents; i += 1) { + await dispatchWithEphemeralHandler() + if (i % 1000 === 0) memory.sample() + } + + await bus.waitUntilIdle() + const totalMs = hooks.now() - t0 + const msPerEvent = totalMs / totalEvents + + assert(processedCount === totalEvents, `50k one-off handlers processed ${processedCount}/${totalEvents}`) + assert(totalMs < hooks.limits.singleRunMs, `50k on/off took ${Math.round(totalMs)}ms (limit ${hooks.limits.singleRunMs}ms)`) + assert(bus.handlers.size === 0, `50k on/off leaked handlers: ${bus.handlers.size}`) + + await trimBusHistoryToOneEvent(hooks, bus, TrimEvent) + await waitForRuntimeSettle(hooks) + memory.sample() + const peakHeapKbPerEvent = memory.peakHeapKbPerEvent(totalEvents) + const peakRssKbPerEvent = memory.peakRssKbPerEvent(totalEvents) + bus.destroy() + + const result = { + scenario: '50k one-off handlers over 50k events', + totalEvents, + totalMs, + msPerEvent, + msPerEventLabel: formatMsPerEvent(msPerEvent), + ramKbPerEvent: peakHeapKbPerEvent, + ramKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakHeapKbPerEvent, + peakHeapKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakRssKbPerEvent, + peakRssKbPerEventLabel: peakRssKbPerEvent === null ? 
null : formatKbPerEvent(peakRssKbPerEvent), + throughput: Math.round(totalEvents / (totalMs / 1000)), + processedCount, + } + record(hooks, result.scenario, result) + return result +} + +export const runPerfWorstCase = async (input) => { + const hooks = withDefaults(input) + const { BaseEvent, EventBus, EventHandlerTimeoutError, EventHandlerCancelledError } = hooks.api + + const { WCParent: ParentEvent, WCChild: ChildEvent, WCGrandchild: GrandchildEvent, WCTrimEvent: TrimEvent } = getEventClasses(BaseEvent) + + const totalIterations = 500 + const historyLimit = HISTORY_LIMIT_WORST_CASE + const busA = new EventBus('WCA', { max_history_size: historyLimit, max_history_drop: true }) + const busB = new EventBus('WCB', { max_history_size: historyLimit, max_history_drop: true }) + const busC = new EventBus('WCC', { max_history_size: historyLimit, max_history_drop: true }) + + let parentHandledA = 0 + let parentHandledB = 0 + let childHandled = 0 + let grandchildHandled = 0 + let timeoutCount = 0 + let cancelCount = 0 + + busB.on(ParentEvent, () => { + parentHandledB += 1 + }) + + busC.on(ChildEvent, async (event) => { + childHandled += 1 + const gc = event.bus.emit(GrandchildEvent({})) + busC.emit(gc) + if (event.event_timeout !== null) { + // Yield once so near-zero timeout paths execute without adding a large fixed delay. + await hooks.sleep(0) + } + await gc.done() + }) + + busC.on(GrandchildEvent, () => { + grandchildHandled += 1 + }) + + const memory = createMemoryTracker(hooks) + const t0 = hooks.now() + + for (let i = 0; i < totalIterations; i += 1) { + const shouldTimeout = i % 5 === 0 + + const ephemeralHandler = async (event) => { + parentHandledA += 1 + const child = event.bus.emit( + ChildEvent({ + // event_timeout is in seconds; use a near-zero timeout to exercise timeout handling overhead. + event_timeout: shouldTimeout ? 
WORST_CASE_IMMEDIATE_TIMEOUT_SECONDS : null, + }) + ) + busC.emit(child) + try { + await child.done() + } catch { + // Timeouts are expected for timeout iterations. + } + } + + busA.on(ParentEvent, ephemeralHandler) + const parent = ParentEvent({}) + const evA = busA.emit(parent) + busB.emit(parent) + await evA.done() + busA.off(ParentEvent, ephemeralHandler) + + if (i % 10 === 0) { + busA.find(ParentEvent, { future: 0.001 }) + } + if (i % 5 === 0) memory.sample() + } + + await busA.waitUntilIdle() + await busB.waitUntilIdle() + await busC.waitUntilIdle() + memory.sample() + + for (const event of busC.event_history.values()) { + for (const result of event.event_results.values()) { + if (result.error instanceof EventHandlerTimeoutError) timeoutCount += 1 + if (result.error instanceof EventHandlerCancelledError) cancelCount += 1 + } + } + + const totalMs = hooks.now() - t0 + const estimatedEvents = totalIterations * 3 + const msPerEvent = totalMs / estimatedEvents + + assert(parentHandledA === totalIterations, `worst-case parentA ${parentHandledA}/${totalIterations}`) + assert(parentHandledB === totalIterations, `worst-case parentB ${parentHandledB}/${totalIterations}`) + assert(busA.handlers.size === 0, `worst-case leaked busA handlers: ${busA.handlers.size}`) + assert(busA.event_history.size <= historyLimit, `worst-case busA history ${busA.event_history.size}/${historyLimit}`) + assert(busB.event_history.size <= historyLimit, `worst-case busB history ${busB.event_history.size}/${historyLimit}`) + assert(busC.event_history.size <= historyLimit, `worst-case busC history ${busC.event_history.size}/${historyLimit}`) + assert(totalMs < hooks.limits.worstCaseMs, `worst-case took ${Math.round(totalMs)}ms (limit ${hooks.limits.worstCaseMs}ms)`) + + await trimBusHistoryToOneEvent(hooks, busA, TrimEvent) + await trimBusHistoryToOneEvent(hooks, busB, TrimEvent) + await trimBusHistoryToOneEvent(hooks, busC, TrimEvent) + await waitForRuntimeSettle(hooks) + const 
peakHeapKbPerEvent = memory.peakHeapKbPerEvent(estimatedEvents) + const peakRssKbPerEvent = memory.peakRssKbPerEvent(estimatedEvents) + busA.destroy() + busB.destroy() + busC.destroy() + + const result = { + scenario: 'worst-case forwarding + timeouts', + totalEvents: estimatedEvents, + totalMs, + msPerEvent, + msPerEventLabel: formatMsPerEvent(msPerEvent), + ramKbPerEvent: peakHeapKbPerEvent, + ramKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakHeapKbPerEvent, + peakHeapKbPerEventLabel: peakHeapKbPerEvent === null ? null : formatKbPerEvent(peakHeapKbPerEvent), + peakRssKbPerEvent, + peakRssKbPerEventLabel: peakRssKbPerEvent === null ? null : formatKbPerEvent(peakRssKbPerEvent), + parentHandledA, + parentHandledB, + childHandled, + grandchildHandled, + timeoutCount, + cancelCount, + } + record(hooks, result.scenario, result) + assert(EventBus.all_instances.size === 0, `worst-case leaked instances: ${EventBus.all_instances.size}`) + + return result +} + +export const runCleanupEquivalence = async (input) => { + const hooks = withDefaults(input) + const { BaseEvent, EventBus } = hooks.api + const { CleanupEqEvent: CleanupEvent, CleanupEqTrimEvent: TrimEvent } = getEventClasses(BaseEvent) + + const busesPerMode = 80 + const eventsPerBus = 64 + const totalEvents = busesPerMode * eventsPerBus * 2 + const baselineRegistrySize = EventBus.all_instances.size + + const t0 = hooks.now() + + await runCleanupBurst({ + hooks, + EventBus, + CleanupEvent, + TrimEvent, + busesPerMode, + eventsPerBus, + destroyMode: true, + }) + assert( + EventBus.all_instances.size === baselineRegistrySize, + `cleanup equivalence destroy branch leaked instances: ${EventBus.all_instances.size}/${baselineRegistrySize}` + ) + + await runCleanupBurst({ + hooks, + EventBus, + CleanupEvent, + TrimEvent, + busesPerMode, + eventsPerBus, + destroyMode: false, + }) + + const scopeCollectionAttempts = hooks.runtimeName === 'deno' ? 
500 : 150 + let scopeCollected = await waitForRegistrySize(hooks, EventBus, baselineRegistrySize, scopeCollectionAttempts) + let scopeEquivalentByState = false + + if (!scopeCollected) { + const retained = Array.from(EventBus.all_instances) + const allRetainedIdle = retained.every( + (bus) => + bus.pending_event_queue.length === 0 && + bus.in_flight_event_ids.size === 0 && + bus.find_waiters.size === 0 && + bus.runloop_running === false && + bus.event_history.size <= TRIM_TARGET + ) + assert( + allRetainedIdle, + `cleanup equivalence scope branch retained active deno instances: ${EventBus.all_instances.size}/${baselineRegistrySize}` + ) + if (hooks.runtimeName === 'deno') { + assert( + retained.length <= 24, + `cleanup equivalence scope branch retained too many deno instances: ${retained.length} (expected <= 24)` + ) + } else { + assert( + retained.length <= busesPerMode, + `cleanup equivalence scope branch retained too many non-gc-forced instances: ${retained.length} (expected <= ${busesPerMode})` + ) + } + scopeEquivalentByState = true + + // Some runtimes may defer finalizing weak refs even after explicit waits. + // Destroy retained idle buses so following scenarios start from a clean baseline. 
+ for (const bus of retained) { + bus.destroy() + } + scopeCollected = await waitForRegistrySize(hooks, EventBus, baselineRegistrySize, 100) + } + + const equivalent = scopeCollected || scopeEquivalentByState + assert(equivalent, `cleanup equivalence scope branch retained instances: ${EventBus.all_instances.size}/${baselineRegistrySize}`) + + const totalMs = hooks.now() - t0 + const msPerEvent = totalMs / totalEvents + + const result = { + scenario: 'cleanup destroy vs scope equivalence', + totalEvents, + totalMs, + msPerEvent, + msPerEventLabel: formatMsPerEvent(msPerEvent), + ramKbPerEvent: null, + peakHeapKbPerEvent: null, + peakRssKbPerEvent: null, + equivalent, + } + record(hooks, result.scenario, result) + return result +} + +const runWithLeakCheck = async (input, scenarioId, scenarioFn) => { + const hooks = withDefaults(input) + let baselineHeapUsed = null + if (typeof hooks.getMemoryUsage === 'function') { + // Leak checks compare retained floor before/after work; min/min reduces allocator jitter noise. 
+ baselineHeapUsed = await measureStableHeapUsed(hooks, 'min', 8) + } + + const result = await scenarioFn(input) + + if (baselineHeapUsed === null) { + return result + } + + const heapDeltaAfterGcMb = await measureHeapDeltaAfterGc(hooks, baselineHeapUsed) + if (heapDeltaAfterGcMb === null) { + return result + } + + const normalizedHeapDeltaAfterGcMb = clampNonNegative(heapDeltaAfterGcMb) + result.heapDeltaAfterGcMb = Number(normalizedHeapDeltaAfterGcMb.toFixed(3)) + if (typeof hooks.log === 'function') { + hooks.log(`[${hooks.runtimeName}] ${result.scenario}: heap_delta_after_gc=${formatMb(result.heapDeltaAfterGcMb)}`) + } + + const maxHeapDeltaAfterGcMb = hooks.limits.maxHeapDeltaAfterGcMb + const heapNoiseFloorMb = hooks.limits.heapDeltaNoiseFloorMb + if (typeof maxHeapDeltaAfterGcMb === 'number') { + const allowedMb = maxHeapDeltaAfterGcMb + heapNoiseFloorMb + assert( + normalizedHeapDeltaAfterGcMb <= allowedMb, + `${scenarioId} retained ${normalizedHeapDeltaAfterGcMb.toFixed(3)}mb heap after GC (limit ${allowedMb.toFixed(3)}mb = ${maxHeapDeltaAfterGcMb.toFixed(3)}mb + ${heapNoiseFloorMb.toFixed(3)}mb noise floor)` + ) + } + + return result +} + +const PERF_SCENARIO_RUNNERS = { + '50k-events': runPerf50kEvents, + '500-buses-x-100-events': runPerfEphemeralBuses, + '1-event-x-50k-parallel-handlers': runPerfSingleEventManyFixedHandlers, + '50k-one-off-handlers': runPerfOnOffChurn, + 'worst-case-forwarding-timeouts': runPerfWorstCase, + 'cleanup-equivalence': runCleanupEquivalence, +} + +export const PERF_SCENARIO_IDS = Object.freeze(Object.keys(PERF_SCENARIO_RUNNERS)) + +export const runPerfScenarioById = async (input, scenarioId) => { + const scenarioFn = PERF_SCENARIO_RUNNERS[scenarioId] + if (!scenarioFn) { + throw new Error(`unknown perf scenario "${scenarioId}", expected one of: ${PERF_SCENARIO_IDS.join(', ')}`) + } + await runWarmup(input) + return runWithLeakCheck(input, scenarioId, scenarioFn) +} + +export const runAllPerfScenarios = async (input) => { + 
const results = [] + for (const scenarioId of PERF_SCENARIO_IDS) { + results.push(await runPerfScenarioById(input, scenarioId)) + } + return results +} diff --git a/bubus-ts/tests/retry.test.ts b/bubus-ts/tests/retry.test.ts new file mode 100644 index 0000000..dafb0d1 --- /dev/null +++ b/bubus-ts/tests/retry.test.ts @@ -0,0 +1,960 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { retry, clearSemaphoreRegistry, RetryTimeoutError, SemaphoreTimeoutError } from '../src/index.js' + +const delay = (ms: number): Promise => new Promise((resolve) => setTimeout(resolve, ms)) + +// ─── Basic retry behavior ──────────────────────────────────────────────────── + +test('retry: function succeeds on first attempt with no retries needed', async () => { + const fn = retry({ max_attempts: 3 })(async () => 'ok') + assert.equal(await fn(), 'ok') +}) + +test('retry: function retries on failure and eventually succeeds', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(async () => { + calls++ + if (calls < 3) throw new Error(`fail ${calls}`) + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: throws after exhausting all attempts', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(async () => { + calls++ + throw new Error('always fails') + }) + await assert.rejects(fn, { message: 'always fails' }) + assert.equal(calls, 3) +}) + +test('retry: max_attempts=1 means no retries (single attempt)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 1 })(async () => { + calls++ + throw new Error('fail') + }) + await assert.rejects(fn, { message: 'fail' }) + assert.equal(calls, 1) +}) + +test('retry: default max_attempts=1 means single attempt', async () => { + let calls = 0 + const fn = retry()(async () => { + calls++ + throw new Error('fail') + }) + await assert.rejects(fn, { message: 'fail' }) + assert.equal(calls, 1) +}) + +// ─── retry_after delay 
─────────────────────────────────────────────────────── + +test('retry: retry_after introduces delay between attempts', async () => { + let calls = 0 + const timestamps: number[] = [] + const fn = retry({ max_attempts: 3, retry_after: 0.05 })(async () => { + calls++ + timestamps.push(performance.now()) + if (calls < 3) throw new Error('fail') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) + + // Check that delays were at least ~50ms between attempts + const gap1 = timestamps[1] - timestamps[0] + const gap2 = timestamps[2] - timestamps[1] + assert.ok(gap1 >= 40, `expected >=40ms gap, got ${gap1.toFixed(1)}ms`) + assert.ok(gap2 >= 40, `expected >=40ms gap, got ${gap2.toFixed(1)}ms`) +}) + +// ─── Exponential backoff ───────────────────────────────────────────────────── + +test('retry: retry_backoff_factor increases delay between attempts', async () => { + let calls = 0 + const timestamps: number[] = [] + const fn = retry({ max_attempts: 4, retry_after: 0.03, retry_backoff_factor: 2.0 })(async () => { + calls++ + timestamps.push(performance.now()) + if (calls < 4) throw new Error('fail') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 4) + + // Delays: 30ms, 60ms, 120ms (0.03 * 2^0, 0.03 * 2^1, 0.03 * 2^2) + const gap1 = timestamps[1] - timestamps[0] + const gap2 = timestamps[2] - timestamps[1] + const gap3 = timestamps[3] - timestamps[2] + + assert.ok(gap1 >= 20, `gap1=${gap1.toFixed(1)}ms, expected >=20ms`) + assert.ok(gap2 >= 45, `gap2=${gap2.toFixed(1)}ms, expected >=45ms (should be ~60ms)`) + assert.ok(gap3 >= 90, `gap3=${gap3.toFixed(1)}ms, expected >=90ms (should be ~120ms)`) + // Verify backoff is actually increasing + assert.ok(gap2 > gap1, 'gap2 should be larger than gap1') + assert.ok(gap3 > gap2, 'gap3 should be larger than gap2') +}) + +// ─── retry_on_errors filtering ─────────────────────────────────────────────── + +class NetworkError extends Error { + constructor(message: string = 'network 
error') { + super(message) + this.name = 'NetworkError' + } +} + +class ValidationError extends Error { + constructor(message: string = 'validation error') { + super(message) + this.name = 'ValidationError' + } +} + +test('retry: retry_on_errors retries only matching error types', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async () => { + calls++ + if (calls < 3) throw new NetworkError() + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors does not retry non-matching errors', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [NetworkError] })(async () => { + calls++ + throw new ValidationError() + }) + await assert.rejects(fn, { name: 'ValidationError' }) + // Should have thrown immediately without retrying + assert.equal(calls, 1) +}) + +test('retry: retry_on_errors accepts string error name', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: ['NetworkError'] })(async () => { + calls++ + if (calls < 3) throw new NetworkError() + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors string matcher does not retry non-matching names', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: ['NetworkError'] })(async () => { + calls++ + throw new ValidationError() + }) + await assert.rejects(fn, { name: 'ValidationError' }) + assert.equal(calls, 1) +}) + +test('retry: retry_on_errors accepts RegExp pattern', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, retry_on_errors: [/network/i] })(async () => { + calls++ + if (calls < 3) throw new NetworkError('Network timeout occurred') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +test('retry: retry_on_errors RegExp does not retry non-matching errors', async () => { + let calls = 0 + const fn = 
retry({ max_attempts: 3, retry_on_errors: [/network/i] })(async () => { + calls++ + throw new ValidationError('bad input') + }) + await assert.rejects(fn, { name: 'ValidationError' }) + assert.equal(calls, 1) +}) + +test('retry: retry_on_errors mixes class, string, and RegExp matchers', async () => { + let calls = 0 + const fn = retry({ max_attempts: 5, retry_on_errors: [TypeError, 'NetworkError', /timeout/i] })(async () => { + calls++ + if (calls === 1) throw new TypeError('type error') + if (calls === 2) throw new NetworkError() + if (calls === 3) throw new Error('Connection timeout') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 4) +}) + +test('retry: retry_on_errors with multiple error types', async () => { + let calls = 0 + const fn = retry({ max_attempts: 5, retry_on_errors: [NetworkError, TypeError] })(async () => { + calls++ + if (calls === 1) throw new NetworkError() + if (calls === 2) throw new TypeError('type error') + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +// ─── Per-attempt timeout ───────────────────────────────────────────────────── + +test('retry: timeout triggers RetryTimeoutError on slow attempts', async () => { + let calls = 0 + const fn = retry({ max_attempts: 1, timeout: 0.05 })(async () => { + calls++ + await delay(200) + return 'ok' + }) + await assert.rejects(fn, (error: unknown) => { + assert.ok(error instanceof RetryTimeoutError) + assert.equal(error.attempt, 1) + return true + }) + assert.equal(calls, 1) +}) + +test('retry: timeout allows fast attempts to succeed', async () => { + const fn = retry({ max_attempts: 1, timeout: 1 })(async () => { + await delay(5) + return 'fast' + }) + assert.equal(await fn(), 'fast') +}) + +test('retry: timed-out attempts are retried when max_attempts > 1', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3, timeout: 0.05 })(async () => { + calls++ + if (calls < 3) { + await delay(200) // will timeout + return 'slow' + } 
+ return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 3) +}) + +// ─── Semaphore concurrency control ────────────────────────────────────────── + +test('retry: semaphore_limit controls max concurrent executions', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ max_attempts: 1, semaphore_limit: 2, semaphore_name: 'test_sem_limit' })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(50) + active-- + }) + + // Launch 6 concurrent calls β€” should only run 2 at a time + await Promise.all([fn(), fn(), fn(), fn(), fn(), fn()]) + assert.equal(max_active, 2, 'should never exceed semaphore_limit=2') +}) + +test('retry: semaphore handoff keeps concurrency bounded during nextTick scheduling', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + let unblock_first!: () => void + const first_block = new Promise((resolve) => { + unblock_first = resolve + }) + let third_done_resolve!: () => void + let third_done_reject!: (reason?: unknown) => void + const third_done = new Promise((resolve, reject) => { + third_done_resolve = resolve + third_done_reject = reject + }) + + let call_count = 0 + const fn = retry({ max_attempts: 1, semaphore_limit: 1, semaphore_name: 'test_sem_handoff' })(async () => { + call_count += 1 + const current_call = call_count + + active += 1 + max_active = Math.max(max_active, active) + try { + if (current_call === 1) { + await first_block + } + await delay(5) + } finally { + active -= 1 + } + }) + + const first = fn() + await delay(5) + const second = fn() + await delay(5) + unblock_first() + + void Promise.resolve().then(() => { + process.nextTick(() => { + void fn().then( + () => third_done_resolve(), + (error) => third_done_reject(error) + ) + }) + }) + + await Promise.all([first, second, third_done]) + assert.equal(call_count, 3) + assert.equal(max_active, 1, 'should never exceed semaphore_limit=1 during handoff') +}) 
+ +test('retry: semaphore_lax=false throws SemaphoreTimeoutError when slots are full', async () => { + clearSemaphoreRegistry() + + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'test_sem_lax_false', + semaphore_lax: false, + semaphore_timeout: 0.05, + })(async () => { + await delay(200) // hold the semaphore for a while + return 'ok' + }) + + // Start one call to grab the semaphore + const first = fn() + + // Give the first call time to acquire the semaphore + await delay(10) + + // Second call should timeout trying to acquire semaphore + await assert.rejects(fn(), (error: unknown) => { + assert.ok(error instanceof SemaphoreTimeoutError) + assert.equal(error.semaphore_name, 'test_sem_lax_false') + return true + }) + + // Let the first call finish + assert.equal(await first, 'ok') +}) + +test('retry: semaphore_lax=true (default) proceeds without semaphore on timeout', async () => { + clearSemaphoreRegistry() + + let calls = 0 + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'test_sem_lax_true', + semaphore_lax: true, + semaphore_timeout: 0.05, + })(async () => { + calls++ + await delay(200) + return 'ok' + }) + + // Start first call to grab the semaphore + const first = fn() + await delay(10) + + // Second call should proceed anyway (lax mode) + const second = fn() + const results = await Promise.all([first, second]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(calls, 2) +}) + +// ─── Preserves function metadata ───────────────────────────────────────────── + +test('retry: preserves function name', () => { + async function myNamedFunction(): Promise { + return 'ok' + } + const wrapped = retry()(myNamedFunction) + assert.equal(wrapped.name, 'myNamedFunction') +}) + +// ─── Preserves `this` context ──────────────────────────────────────────────── + +test('retry: preserves this context for methods', async () => { + class MyService { + value = 42 + fetch = retry({ max_attempts: 2 })(async 
function (this: MyService) { + return this.value + }) + } + + const svc = new MyService() + assert.equal(await svc.fetch(), 42) +}) + +// ─── Works with synchronous functions ──────────────────────────────────────── + +test('retry: wraps sync functions (result becomes a promise)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 3 })(() => { + calls++ + if (calls < 2) throw new Error('sync fail') + return 'sync ok' + }) + assert.equal(await fn(), 'sync ok') + assert.equal(calls, 2) +}) + +// ─── Edge cases ────────────────────────────────────────────────────────────── + +test('retry: max_attempts=0 is treated as 1 (minimum)', async () => { + let calls = 0 + const fn = retry({ max_attempts: 0 })(async () => { + calls++ + return 'ok' + }) + assert.equal(await fn(), 'ok') + assert.equal(calls, 1) +}) + +test('retry: passes arguments through to wrapped function', async () => { + const fn = retry({ max_attempts: 1 })(async (a: number, b: string) => `${a}-${b}`) + assert.equal(await fn(1, 'hello'), '1-hello') +}) + +test('retry: semaphore is held across all retry attempts', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + let total_calls = 0 + + const fn = retry({ + max_attempts: 3, + semaphore_limit: 1, + semaphore_name: 'test_sem_across_retries', + })(async () => { + active++ + max_active = Math.max(max_active, active) + total_calls++ + await delay(10) + active-- + // Odd calls fail, even calls succeed β€” each invocation needs 2 attempts + if (total_calls % 2 === 1) throw new Error('fail') + return 'ok' + }) + + // Run 3 calls concurrently β€” they should run serially because semaphore_limit=1 + // The semaphore should be held across retries, so only 1 active at a time + const results = await Promise.all([fn(), fn(), fn()]) + assert.equal(max_active, 1, 'semaphore should enforce serial execution even during retries') + assert.deepEqual(results, ['ok', 'ok', 'ok']) + assert.equal(total_calls, 6, 'each of 3 calls should 
have taken 2 attempts') +}) + +test('retry: semaphore released even when all attempts fail', async () => { + clearSemaphoreRegistry() + + const fn = retry({ + max_attempts: 2, + semaphore_limit: 1, + semaphore_name: 'test_sem_release_on_fail', + })(async () => { + throw new Error('always fails') + }) + + // First call fails, should release semaphore + await assert.rejects(fn) + + // Second call should be able to acquire the semaphore (not deadlocked) + await assert.rejects(fn) +}) + +// ─── TC39 decorator syntax on class methods ────────────────────────────────── + +test('retry: works on class method via manual wrapping pattern', async () => { + // Since TC39 Stage 3 decorators require experimentalDecorators or TS 5.0+ native support, + // we test the equivalent pattern: applying retry() to a method post-definition. + class ApiClient { + base_url = 'https://example.com' + calls = 0 + + fetchData = retry({ max_attempts: 3 })(async function (this: ApiClient) { + this.calls++ + if (this.calls < 3) throw new Error('api error') + return `data from ${this.base_url}` + }) + } + + const client = new ApiClient() + assert.equal(await client.fetchData(), 'data from https://example.com') + assert.equal(client.calls, 3) +}) + +// ─── Re-entrancy / deadlock prevention ─────────────────────────────────────── + +test('retry: re-entrant call on same semaphore does not deadlock', async () => { + clearSemaphoreRegistry() + + const inner = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'shared_sem', + })(async () => { + return 'inner ok' + }) + + const outer = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'shared_sem', + })(async () => { + // This would deadlock without re-entrancy tracking: + // outer holds the semaphore, inner tries to acquire the same one + const result = await inner() + return `outer got: ${result}` + }) + + assert.equal(await outer(), 'outer got: inner ok') +}) + +test('retry: recursive function with semaphore does not 
deadlock', async () => { + clearSemaphoreRegistry() + + let depth = 0 + const recurse: (n: number) => Promise = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'recursive_sem', + })(async (n: number): Promise => { + depth++ + if (n <= 1) return 1 + return n + (await recurse(n - 1)) + }) + + const result = await recurse(5) + assert.equal(result, 15) // 5 + 4 + 3 + 2 + 1 + assert.equal(depth, 5) +}) + +test('retry: different semaphore names do not interfere with re-entrancy', async () => { + clearSemaphoreRegistry() + + let inner_active = 0 + let inner_max_active = 0 + + const inner = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'inner_sem', + })(async () => { + inner_active++ + inner_max_active = Math.max(inner_max_active, inner_active) + await delay(20) + inner_active-- + return 'inner ok' + }) + + const outer = retry({ + max_attempts: 1, + semaphore_limit: 2, + semaphore_name: 'outer_sem', + })(async () => { + return await inner() + }) + + // Run 3 outer calls concurrently + // outer_sem allows 2 concurrent, but inner_sem only allows 1 + const results = await Promise.all([outer(), outer(), outer()]) + assert.deepEqual(results, ['inner ok', 'inner ok', 'inner ok']) + assert.equal(inner_max_active, 1, 'inner semaphore should still enforce limit=1') +}) + +test('retry: three-level nested re-entrancy does not deadlock', async () => { + clearSemaphoreRegistry() + + const level3 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => 'level3') + + const level2 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => { + const r = await level3() + return `level2>${r}` + }) + + const level1 = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_name: 'nested_sem', + })(async () => { + const r = await level2() + return `level1>${r}` + }) + + assert.equal(await level1(), 'level1>level2>level3') +}) + +// ─── Semaphore scope 
───────────────────────────────────────────────────────── + +test('retry: semaphore_scope=class shares semaphore across instances of same class', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'done' + }) + } + + const a = new Worker() + const b = new Worker() + const c = new Worker() + + await Promise.all([a.run(), b.run(), c.run()]) + assert.equal(max_active, 1, 'class scope: all instances should share one semaphore') +}) + +test('retry: semaphore_scope=instance gives each instance its own semaphore', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'done' + }) + } + + const a = new Worker() + const b = new Worker() + + // Same instance: serialized (limit=1 per instance) + // Different instances: can run in parallel (separate semaphores) + await Promise.all([a.run(), b.run()]) + assert.equal(max_active, 2, 'instance scope: different instances should get separate semaphores') +}) + +test('retry: semaphore_scope=instance serializes calls on same instance', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Worker { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'work', + })(async function (this: Worker) { + active++ + max_active = Math.max(max_active, active) + await delay(20) + active-- + return 'done' + }) + } + + const a = new Worker() + await Promise.all([a.run(), a.run(), a.run()]) + 
assert.equal(max_active, 1, 'instance scope: same instance calls should serialize') +}) + +test('retry: semaphore_name function uses call args for keying', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const work = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'global', + semaphore_name: (a: string, b: string) => `${a}-${b}`, + })(async (_a: string, _b: string) => { + active++ + max_active = Math.max(max_active, active) + await delay(20) + active-- + return 'done' + }) + + await Promise.all([work('a', 'b'), work('a', 'b')]) + assert.equal(max_active, 1, 'semaphore_name(args): same args should serialize') + + active = 0 + max_active = 0 + await Promise.all([work('a', 'b'), work('c', 'd')]) + assert.ok(max_active >= 2, 'semaphore_name(args): different args should not share a semaphore') +}) + +test('retry: semaphore_scope=class isolates different classes', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Alpha { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'run', + })(async function (this: Alpha) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + }) + } + + class Beta { + run = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'run', + })(async function (this: Beta) { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + }) + } + + await Promise.all([new Alpha().run(), new Beta().run()]) + assert.equal(max_active, 2, 'class scope: different classes should get separate semaphores') +}) + +// ─── TC39 Stage 3 decorator syntax (RECOMMENDED PATTERN) ──────────────────── +// +// The primary supported pattern for event bus handlers is: +// +// class Service { +// constructor(bus) { +// bus.on(Event, this.on_Event.bind(this)) +// } +// +// @retry({ max_attempts: 3, ... }) +// async on_Event(event) { ... 
} +// } +// +// Retry/timeout is a handler-level concern. Event processing itself has no error +// state β€” only individual handlers produce errors/timeouts that need retrying. +// Event-level and handler-level concurrency on the bus is still controllable via +// event_concurrency / event_handler_concurrency options (those are separate). + +test('retry: @retry() TC39 decorator on class method retries on failure', async () => { + clearSemaphoreRegistry() + + class ApiService { + calls = 0 + + @retry({ max_attempts: 3 }) + async fetchData(): Promise { + this.calls++ + if (this.calls < 3) throw new Error('api error') + return 'data' + } + } + + const svc = new ApiService() + assert.equal(await svc.fetchData(), 'data') + assert.equal(svc.calls, 3) +}) + +test('retry: @retry() TC39 decorator preserves this context', async () => { + class Config { + endpoint = 'https://api.example.com' + + @retry({ max_attempts: 2 }) + async getEndpoint(): Promise { + return this.endpoint + } + } + + const cfg = new Config() + assert.equal(await cfg.getEndpoint(), 'https://api.example.com') +}) + +test('retry: @retry() TC39 decorator with semaphore_scope=class', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Service { + @retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'handle', + }) + async handle(): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + const a = new Service() + const b = new Service() + await Promise.all([a.handle(), b.handle()]) + assert.equal(max_active, 1, '@retry class scope: all instances share one semaphore') +}) + +test('retry: @retry() TC39 decorator with semaphore_scope=instance', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + class Service { + @retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'handle', + }) + async 
handle(): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + } + } + + const a = new Service() + const b = new Service() + await Promise.all([a.handle(), b.handle()]) + assert.equal(max_active, 2, '@retry instance scope: different instances get separate semaphores') +}) + +// ─── Scope fallback to global ─────────────────────────────────────────────── + +test('retry: semaphore_scope=class falls back to global for standalone functions', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'class', + semaphore_name: 'standalone_class', + })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + // Two concurrent calls should serialize since they share the same global-fallback semaphore + const results = await Promise.all([fn(), fn()]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(max_active, 1, 'class scope on standalone fn should fall back to global and serialize') +}) + +test('retry: semaphore_scope=instance falls back to global for standalone functions', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const fn = retry({ + max_attempts: 1, + semaphore_limit: 1, + semaphore_scope: 'instance', + semaphore_name: 'standalone_instance', + })(async () => { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }) + + // Two concurrent calls should serialize since they share the same global-fallback semaphore + const results = await Promise.all([fn(), fn()]) + assert.deepEqual(results, ['ok', 'ok']) + assert.equal(max_active, 1, 'instance scope on standalone fn should fall back to global and serialize') +}) + +// ─── HOF pattern: retry({...})(fn.bind(instance)) β€” bind BEFORE wrapping ──── +// NOTE: This falls back to global scope because JS cannot 
extract [[BoundThis]] +// from a bound function. The handler works correctly (this is preserved inside +// the handler), but the semaphore scoping cannot see the bound instance. +// Recommendation: use retry({...})(fn).bind(instance) instead. + +test('retry: HOF retry()(fn.bind(instance)) β€” scope falls back to global (bind before wrap)', async () => { + clearSemaphoreRegistry() + + let active = 0 + let max_active = 0 + + const instance_a = { name: 'a' } + const instance_b = { name: 'b' } + + const make_handler = (inst: object) => + retry({ + max_attempts: 1, + semaphore_scope: 'instance', + semaphore_limit: 1, + semaphore_name: 'handler_bind_before', + })( + async function (this: any, _event: any): Promise { + active++ + max_active = Math.max(max_active, active) + await delay(30) + active-- + return 'ok' + }.bind(inst) + ) + + const handler_a = make_handler(instance_a) + const handler_b = make_handler(instance_b) + + // Both handlers fall back to global scope (same semaphore), so they serialize + await Promise.all([handler_a('event1'), handler_b('event2')]) + assert.equal(max_active, 1, 'bind-before-wrap: scoping falls back to global (serialized)') +}) diff --git a/bubus-ts/tests/subtests/eventbus_cross_runtime_features.test.ts b/bubus-ts/tests/subtests/eventbus_cross_runtime_features.test.ts new file mode 100644 index 0000000..c6f72e4 --- /dev/null +++ b/bubus-ts/tests/subtests/eventbus_cross_runtime_features.test.ts @@ -0,0 +1,348 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' + +import { BaseEvent, EventBus } from '../../src/index.js' +import { async_local_storage, hasAsyncLocalStorage } from '../../src/async_context.js' + +type ContextStore = { + request_id?: string +} + +const QueueJumpRootEvent = BaseEvent.extend('QueueJumpRootEvent', {}) +const QueueJumpChildEvent = BaseEvent.extend('QueueJumpChildEvent', {}) +const QueueJumpSiblingEvent = BaseEvent.extend('QueueJumpSiblingEvent', {}) + +const ConcurrencyIntersectionEvent = 
BaseEvent.extend('ConcurrencyIntersectionEvent', {}) + +const TimeoutEnforcementEvent = BaseEvent.extend('TimeoutEnforcementEvent', { + event_timeout: 0.02, +}) +const TimeoutFollowupEvent = BaseEvent.extend('TimeoutFollowupEvent', {}) + +const ZeroHistoryEvent = BaseEvent.extend('ZeroHistoryEvent', {}) + +const ContextParentEvent = BaseEvent.extend('ContextParentEvent', {}) +const ContextChildEvent = BaseEvent.extend('ContextChildEvent', {}) +const PendingVisibilityEvent = BaseEvent.extend('PendingVisibilityEvent', {}) +const BackpressureEvent = BaseEvent.extend('BackpressureEvent', {}) + +test('queue-jump preserves parent/child lineage and find visibility', async () => { + const bus = new EventBus('ParityQueueJumpBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + }) + const execution_order: string[] = [] + let child_event_id: string | null = null + + bus.on(QueueJumpRootEvent, async (event) => { + execution_order.push('root:start') + const child = event.bus!.emit(QueueJumpChildEvent({})) + await child.done() + execution_order.push('root:end') + return 'root-ok' + }) + + bus.on(QueueJumpChildEvent, async (event) => { + child_event_id = event.event_id + execution_order.push('child') + await new Promise((resolve) => setTimeout(resolve, 5)) + return 'child-ok' + }) + + bus.on(QueueJumpSiblingEvent, async () => { + execution_order.push('sibling') + return 'sibling-ok' + }) + + const root = bus.emit(QueueJumpRootEvent({})) + const sibling = bus.emit(QueueJumpSiblingEvent({})) + await root.done() + await sibling.done() + await bus.waitUntilIdle() + + assert.deepEqual(execution_order, ['root:start', 'child', 'root:end', 'sibling']) + + const found_child = await bus.find(QueueJumpChildEvent, { + child_of: root, + past: true, + future: false, + }) + assert.ok(found_child) + assert.ok(child_event_id) + assert.equal(found_child!.event_id, child_event_id) + assert.equal(found_child!.event_parent_id, root.event_id) + assert.equal( + 
root.event_children.some((child) => child.event_id === found_child!.event_id), + true + ) + + bus.destroy() +}) + +test('concurrency intersection: parallel events with serial handlers stays serial per-event', async () => { + const bus = new EventBus('ParityConcurrencyIntersectionBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'serial', + max_history_size: null, + }) + + const current_by_event = new Map() + const max_by_event = new Map() + let global_current = 0 + let global_max = 0 + + const tracked_handler = async (event: BaseEvent): Promise => { + const current = (current_by_event.get(event.event_id) ?? 0) + 1 + current_by_event.set(event.event_id, current) + max_by_event.set(event.event_id, Math.max(max_by_event.get(event.event_id) ?? 0, current)) + + global_current += 1 + global_max = Math.max(global_max, global_current) + + await new Promise((resolve) => setTimeout(resolve, 10)) + + current_by_event.set(event.event_id, Math.max(0, (current_by_event.get(event.event_id) ?? 
1) - 1)) + global_current -= 1 + + return 'ok' + } + + bus.on(ConcurrencyIntersectionEvent, tracked_handler) + bus.on(ConcurrencyIntersectionEvent, tracked_handler) + + const events = Array.from({ length: 8 }, () => bus.emit(ConcurrencyIntersectionEvent({}))) + await Promise.all(events.map((event) => event.done())) + await bus.waitUntilIdle() + + for (const event of events) { + assert.equal(max_by_event.get(event.event_id), 1) + assert.equal( + Array.from(event.event_results.values()).every((result) => result.status === 'completed'), + true + ) + } + + assert.ok(global_max >= 2) + bus.destroy() +}) + +test('timeout enforcement preserves follow-up processing and queue state', async () => { + const bus = new EventBus('ParityTimeoutEnforcementBus', { + event_handler_concurrency: 'parallel', + }) + + bus.on(TimeoutEnforcementEvent, async () => { + await new Promise((resolve) => setTimeout(resolve, 200)) + return 'slow-a' + }) + + bus.on(TimeoutEnforcementEvent, async () => { + await new Promise((resolve) => setTimeout(resolve, 200)) + return 'slow-b' + }) + + bus.on(TimeoutFollowupEvent, async () => 'followup-ok') + + const timed_out = await bus.emit(TimeoutEnforcementEvent({})).done() + assert.equal(timed_out.event_status, 'completed') + assert.equal( + Array.from(timed_out.event_results.values()).every((result) => result.status === 'error'), + true + ) + + const followup = await bus.emit(TimeoutFollowupEvent({})).done() + assert.equal( + Array.from(followup.event_results.values()).every((result) => result.status === 'completed'), + true + ) + assert.equal(Array.from(followup.event_results.values())[0]?.result, 'followup-ok') + + await bus.waitUntilIdle() + assert.equal(bus.pending_event_queue.length, 0) + assert.equal(bus.in_flight_event_ids.size, 0) + bus.destroy() +}) + +test('zero-history backpressure with find future still resolves new events', async () => { + const bus = new EventBus('ParityZeroHistoryBus', { + max_history_size: 0, + max_history_drop: false, + 
}) + + bus.on(ZeroHistoryEvent, async (event) => `ok:${(event as BaseEvent & { value?: string }).value ?? ''}`) + + const first = await bus.emit(ZeroHistoryEvent({ value: 'first' } as Record)).done() + assert.equal(bus.event_history.has(first.event_id), false) + + const past = await bus.find(ZeroHistoryEvent, { past: true, future: false }) + assert.equal(past, null) + + let captured_future_id: string | null = null + const later = (async () => { + await new Promise((resolve) => setTimeout(resolve, 20)) + const future_event = bus.emit(ZeroHistoryEvent({ value: 'future' } as Record)) + captured_future_id = future_event.event_id + })() + + const future_match = await bus.find(ZeroHistoryEvent, (event) => (event as BaseEvent & { value?: string }).value === 'future', { + past: false, + future: 1, + }) + await later + + assert.ok(future_match) + assert.equal((future_match as BaseEvent & { value?: string }).value, 'future') + assert.ok(captured_future_id) + assert.equal(future_match!.event_id, captured_future_id) + + await bus.waitUntilIdle() + assert.equal(bus.event_history.size, 0) + bus.destroy() +}) + +test('context propagates through forwarding and child dispatch with lineage intact', async () => { + assert.ok(hasAsyncLocalStorage(), 'AsyncLocalStorage must be available') + assert.ok(async_local_storage) + + const storage = async_local_storage! + const bus_a = new EventBus('ParityContextForwardA') + const bus_b = new EventBus('ParityContextForwardB') + + let captured_parent_request_id: string | null = null + let captured_child_request_id: string | null = null + let parent_event_id: string | null = null + let child_parent_id: string | null = null + + bus_a.on('*', bus_b.emit) + + bus_b.on(ContextParentEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + captured_parent_request_id = store?.request_id ?? 
null + parent_event_id = event.event_id + + const child = event.bus!.emit(ContextChildEvent({})) + await child.done() + return 'parent-ok' + }) + + bus_b.on(ContextChildEvent, async (event) => { + const store = storage.getStore() as ContextStore | undefined + captured_child_request_id = store?.request_id ?? null + child_parent_id = event.event_parent_id + return 'child-ok' + }) + + const request_id = 'fc81f432-98cd-7a06-824c-dafed74761bb' + const parent = await storage.run({ request_id }, async () => bus_a.emit(ContextParentEvent({})).done()) + await bus_b.waitUntilIdle() + + assert.equal(captured_parent_request_id, request_id) + assert.equal(captured_child_request_id, request_id) + assert.ok(parent_event_id) + assert.equal(child_parent_id, parent_event_id) + assert.equal(parent.event_path[0]?.startsWith('ParityContextForwardA#'), true) + assert.equal( + parent.event_path.some((path) => path.startsWith('ParityContextForwardB#')), + true + ) + + const found_child = await bus_b.find(ContextChildEvent, { + child_of: parent, + past: true, + future: false, + }) + assert.ok(found_child) + assert.equal(found_child!.event_parent_id, parent.event_id) + + bus_a.destroy() + bus_b.destroy() +}) + +test('pending queue find visibility transitions to completed after release', async () => { + const bus = new EventBus('ParityPendingFindBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + max_history_size: null, + }) + + const blocking_control: { release?: () => void } = {} + const blocking_handler_started = new Promise((resolve) => { + bus.on(PendingVisibilityEvent, async (event) => { + const tag = (event as BaseEvent & { tag?: string }).tag + if (tag === 'blocking') { + resolve() + await new Promise((inner_resolve) => { + blocking_control.release = inner_resolve + }) + } + return `ok:${tag ?? 
''}` + }) + }) + + const blocking = bus.emit(PendingVisibilityEvent({ tag: 'blocking' } as Record)) + await blocking_handler_started + + const queued = bus.emit(PendingVisibilityEvent({ tag: 'queued' } as Record)) + await new Promise((resolve) => setTimeout(resolve, 10)) + + const pending = await bus.find(PendingVisibilityEvent, (event) => (event as BaseEvent & { tag?: string }).tag === 'queued', { + past: true, + future: false, + event_status: 'pending', + }) + assert.ok(pending) + assert.equal(pending!.event_id, queued.event_id) + + const release = blocking_control.release + assert.ok(release) + release() + await blocking.done() + await queued.done() + await bus.waitUntilIdle() + + const completed = await bus.find(PendingVisibilityEvent, (event) => (event as BaseEvent & { tag?: string }).tag === 'queued', { + past: true, + future: false, + event_status: 'completed', + }) + assert.ok(completed) + assert.equal(completed!.event_id, queued.event_id) + assert.equal(bus.pending_event_queue.length, 0) + assert.equal(bus.in_flight_event_ids.size, 0) + + bus.destroy() +}) + +test('history backpressure rejects overflow and preserves findable history', async () => { + const bus = new EventBus('ParityBackpressureBus', { + max_history_size: 1, + max_history_drop: false, + }) + + bus.on(BackpressureEvent, async (event) => `ok:${(event as BaseEvent & { value?: string }).value ?? 
''}`) + + const first = await bus.emit(BackpressureEvent({ value: 'first' } as Record)).done() + assert.equal(bus.event_history.size, 1) + assert.equal(bus.event_history.has(first.event_id), true) + + const found_first = await bus.find(BackpressureEvent, (event) => (event as BaseEvent & { value?: string }).value === 'first', { + past: true, + future: false, + }) + assert.ok(found_first) + assert.equal(found_first!.event_id, first.event_id) + + await assert.rejects( + async () => { + await bus.emit(BackpressureEvent({ value: 'second' } as Record)).done() + }, + (error: unknown) => error instanceof Error && error.message.includes('history limit reached') + ) + + assert.equal(bus.event_history.size, 1) + assert.equal(bus.event_history.has(first.event_id), true) + assert.equal(bus.pending_event_queue.length, 0) + assert.equal(bus.in_flight_event_ids.size, 0) + + bus.destroy() +}) diff --git a/bubus-ts/tsconfig.json b/bubus-ts/tsconfig.json new file mode 100644 index 0000000..f653c22 --- /dev/null +++ b/bubus-ts/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "target": "ES2022", + "lib": ["ES2024", "DOM"], + "module": "ESNext", + "moduleResolution": "Bundler", + "strict": true, + "skipLibCheck": true, + "noEmitOnError": true, + "declaration": true, + "emitDeclarationOnly": false, + "outDir": "dist/types", + "rootDir": "src", + "forceConsistentCasingInFileNames": true, + "useDefineForClassFields": true + }, + "include": ["src"] +} diff --git a/bubus-ts/tsconfig.typecheck.json b/bubus-ts/tsconfig.typecheck.json new file mode 100644 index 0000000..3c0a49a --- /dev/null +++ b/bubus-ts/tsconfig.typecheck.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "noEmit": true, + "declaration": false, + "emitDeclarationOnly": false, + "rootDir": "." 
+ }, + "include": ["src", "tests", "examples"] +} diff --git a/bubus/__init__.py b/bubus/__init__.py index df6e6e2..a3778e2 100644 --- a/bubus/__init__.py +++ b/bubus/__init__.py @@ -1,14 +1,68 @@ -"""Event bus for the browser-use agent.""" +"""Event bus library.""" -from bubus.models import BaseEvent, EventHandler, EventResult, PythonIdentifierStr, PythonIdStr, UUIDStr -from bubus.service import EventBus +from . import events_suck +from .base_event import ( + BaseEvent, + EventConcurrencyMode, + EventHandlerCompletionMode, + EventHandlerConcurrencyMode, + EventResult, + EventStatus, + PythonIdentifierStr, + PythonIdStr, + UUIDStr, +) +from .bridges import HTTPEventBridge, SocketEventBridge +from .event_bus import EventBus +from .event_handler import ( + EventHandler, + EventHandlerAbortedError, + EventHandlerCancelledError, + EventHandlerResultSchemaError, + EventHandlerTimeoutError, +) +from .event_history import EventHistory +from .middlewares import ( + AutoErrorEventMiddleware, + AutoHandlerChangeEventMiddleware, + AutoReturnEventMiddleware, + BusHandlerRegisteredEvent, + BusHandlerUnregisteredEvent, + EventBusMiddleware, + LoggerEventBusMiddleware, + OtelTracingMiddleware, + SQLiteHistoryMirrorMiddleware, + WALEventBusMiddleware, +) __all__ = [ 'EventBus', + 'EventBusMiddleware', + 'BusHandlerRegisteredEvent', + 'BusHandlerUnregisteredEvent', + 'HTTPEventBridge', + 'SocketEventBridge', + 'LoggerEventBusMiddleware', + 'OtelTracingMiddleware', + 'SQLiteHistoryMirrorMiddleware', + 'AutoErrorEventMiddleware', + 'AutoHandlerChangeEventMiddleware', + 'AutoReturnEventMiddleware', + 'WALEventBusMiddleware', + 'EventHistory', 'BaseEvent', + 'EventStatus', 'EventResult', 'EventHandler', + 'EventHandlerCancelledError', + 'EventHandlerResultSchemaError', + 'EventHandlerTimeoutError', + 'EventHandlerAbortedError', + 'EventHandlerConcurrencyMode', + 'EventHandlerCompletionMode', + 'EventConcurrencyMode', 'UUIDStr', 'PythonIdStr', 'PythonIdentifierStr', + 'events_suck', ] 
diff --git a/bubus/base_event.py b/bubus/base_event.py new file mode 100644 index 0000000..9d3ea24 --- /dev/null +++ b/bubus/base_event.py @@ -0,0 +1,1675 @@ +import asyncio +import contextvars +import inspect +import logging +import os +from collections.abc import AsyncIterator, Callable, Coroutine, Generator +from contextlib import asynccontextmanager +from datetime import UTC, datetime +from enum import StrEnum +from functools import partial +from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Generic, Literal, Self, TypeAlias +from uuid import UUID + +from pydantic import ( + AfterValidator, + BaseModel, + ConfigDict, + Field, + PrivateAttr, + computed_field, + field_serializer, + field_validator, + model_serializer, + model_validator, +) +from typing_extensions import TypeVar # needed to get TypeVar(default=...) above python 3.11 +from uuid_extensions import uuid7str + +from bubus.event_handler import ( + ContravariantEventHandlerCallable, + EventHandler, + EventHandlerAbortedError, + EventHandlerCancelledError, + EventHandlerResultSchemaError, + EventHandlerTimeoutError, + NormalizedEventHandlerCallable, +) +from bubus.helpers import ( # pyright: ignore[reportPrivateUsage] + _run_with_slow_monitor, # pyright: ignore[reportPrivateUsage] + cancel_and_await, + extract_basemodel_generic_arg, + monotonic_datetime, +) +from bubus.jsonschema import ( + pydantic_model_from_json_schema, + pydantic_model_to_json_schema, + result_type_identifier_from_schema, + validate_result_against_type, +) + +if TYPE_CHECKING: + from bubus.event_bus import EventBus + from bubus.lock_manager import ReentrantLock + + +logger = logging.getLogger('bubus') + +BUBUS_LOGGING_LEVEL = os.getenv('BUBUS_LOGGING_LEVEL', 'WARNING').upper() # WARNING normally, otherwise DEBUG when testing +LIBRARY_VERSION = os.getenv('LIBRARY_VERSION', '0.0.1') + +logger.setLevel(BUBUS_LOGGING_LEVEL) + + +class EventStatus(StrEnum): + """Lifecycle status used for events and middleware transition hooks. 
+ + Event statuses are strictly ``pending`` -> ``started`` -> ``completed``. + Handler-level failures are represented on ``EventResult.status == 'error'`` + and ``EventResult.error``, not as an event status. + """ + + PENDING = 'pending' + STARTED = 'started' + COMPLETED = 'completed' # errored events are also considered completed + + +def validate_event_name(s: str) -> str: + assert str(s).isidentifier() and not str(s).startswith('_'), f'Invalid event name: {s}' + return str(s) + + +def validate_python_id_str(s: str) -> str: + assert str(s).replace('.', '').isdigit(), f'Invalid Python ID: {s}' + return str(s) + + +def validate_event_path_entry_str(s: str) -> str: + entry = str(s) + assert '#' in entry, f'Invalid event_path entry: {entry} (expected BusName#abcd)' + bus_name, short_id = entry.rsplit('#', 1) + assert bus_name.isidentifier() and short_id.isalnum() and len(short_id) == 4, ( + f'Invalid event_path entry: {entry} (expected BusName#abcd)' + ) + return entry + + +def validate_uuid_str(s: str) -> str: + uuid = UUID(str(s)) + return str(uuid) + + +UUIDStr: TypeAlias = Annotated[str, AfterValidator(validate_uuid_str)] +PythonIdStr: TypeAlias = Annotated[str, AfterValidator(validate_python_id_str)] +PythonIdentifierStr: TypeAlias = Annotated[str, AfterValidator(validate_event_name)] +EventPathEntryStr: TypeAlias = Annotated[str, AfterValidator(validate_event_path_entry_str)] + + +class EventHandlerConcurrencyMode(StrEnum): + SERIAL = 'serial' + PARALLEL = 'parallel' + + +class EventHandlerCompletionMode(StrEnum): + ALL = 'all' + FIRST = 'first' + + +class EventConcurrencyMode(StrEnum): + GLOBAL_SERIAL = 'global-serial' + BUS_SERIAL = 'bus-serial' + PARALLEL = 'parallel' + + +T_EventResultType = TypeVar('T_EventResultType', bound=Any, default=None) +# TypeVar for BaseEvent and its subclasses +RESERVED_USER_EVENT_FIELDS = frozenset({'bus', 'first', 'toString', 'toJSON', 'fromJSON'}) +# We use contravariant=True because if a handler accepts BaseEvent, +# it can 
also handle any subclass of BaseEvent +T_Event = TypeVar('T_Event', bound='BaseEvent[Any]', contravariant=True, default='BaseEvent[Any]') +EventResultFilter = Callable[['EventResult[Any]'], bool] + + +def _default_format_exception_for_log(exc: BaseException) -> str: + from traceback import TracebackException + + return ''.join(TracebackException.from_exception(exc, capture_locals=False).format()) + + +def _as_any(value: Any) -> Any: + return value + + +def _normalize_any_dict(value: Any) -> dict[str, Any]: + if not isinstance(value, dict): + return {} + value_any = _as_any(value) + normalized: dict[str, Any] = {} + for key, item in value_any.items(): + normalized[str(key)] = item + return normalized + + +def _normalize_any_list(value: Any) -> list[Any]: + if isinstance(value, list): + value_any = _as_any(value) + return [item for item in value_any] + return [] + + +def _normalize_any_tuple(value: Any) -> tuple[Any, ...]: + if isinstance(value, tuple): + value_any = _as_any(value) + return tuple(item for item in value_any) + return () + + +# Keep EventResult and BaseEvent co-located in this module. +# Cross-file generic forward refs between these two models caused fragile +# incomplete-model states and import-order dependent rebuild behavior in Pydantic. +# Context: +# - https://github.com/pydantic/pydantic/issues/1873 +# - https://github.com/pydantic/pydantic/issues/707 +# - https://stackoverflow.com/questions/77582955/how-can-i-separate-two-pydantic-models-into-different-files-when-these-models-ha +# - https://github.com/pydantic/pydantic/issues/11532 +class EventResult(BaseModel, Generic[T_EventResultType]): + """Individual result from a single handler.""" + + model_config = ConfigDict( + extra='forbid', + arbitrary_types_allowed=True, + validate_assignment=False, # Validation handled in update() for flexible result types. 
+ validate_default=True, + revalidate_instances='always', + ) + + # Automatically set fields, setup at Event init and updated by EventBus._run_handler() + id: str = Field(default_factory=uuid7str) + status: Literal['pending', 'started', 'completed', 'error'] = 'pending' + event_id: str + handler: EventHandler = Field(default_factory=EventHandler) + result_type: Any = Field(default=None, exclude=True, repr=False) + timeout: float | None = None + started_at: str | None = None + + # Result fields, updated by EventBus._run_handler() + result: T_EventResultType | 'BaseEvent[Any]' | None = None + error: BaseException | None = None + completed_at: str | None = None + + # Completion signal + _handler_completed_signal: asyncio.Event | None = PrivateAttr(default=None) + + # Child events emitted during handler execution + event_children: list['BaseEvent[Any]'] = Field(default_factory=lambda: []) + + @staticmethod + def _serialize_datetime_json(value: str | datetime | None) -> str | None: + if value is None: + return None + if isinstance(value, datetime): + value = value.astimezone(UTC).isoformat().replace('+00:00', 'Z') + return monotonic_datetime(value) + + @staticmethod + def _serialize_error_json(value: BaseException | None) -> dict[str, Any] | None: + if value is None: + return None + return {'type': type(value).__name__, 'message': str(value)} + + @classmethod + def _serialize_jsonable(cls, value: Any) -> Any: + if value is None or isinstance(value, (str, int, float, bool)): + return value + if isinstance(value, datetime): + return cls._serialize_datetime_json(value) + if isinstance(value, BaseException): + return cls._serialize_error_json(value) + if isinstance(value, BaseEvent): + return value.model_dump(mode='json') + if isinstance(value, BaseModel): + return value.model_dump(mode='json') + if isinstance(value, list): + return [cls._serialize_jsonable(item) for item in _normalize_any_list(value)] + if isinstance(value, tuple): + return [cls._serialize_jsonable(item) 
for item in _normalize_any_tuple(value)] + if isinstance(value, dict): + serialized_dict: dict[str, Any] = {} + for key, item in _normalize_any_dict(value).items(): + serialized_dict[str(key)] = cls._serialize_jsonable(item) + return serialized_dict + return repr(value) + + @model_validator(mode='before') + @classmethod + def _hydrate_handler_from_flat_json(cls, data: Any) -> Any: + if not isinstance(data, dict): + return data + payload = _normalize_any_dict(data) + if isinstance(payload.get('handler'), dict): + raise ValueError('EventResult JSON no longer accepts nested handler payloads; use flat handler_* fields') + + if 'handler' not in payload: + raw_handler_id = payload.pop('handler_id', None) + raw_handler_name = payload.pop('handler_name', None) + raw_handler_file_path = payload.pop('handler_file_path', None) + raw_handler_timeout = payload.pop('handler_timeout', None) + raw_handler_slow_timeout = payload.pop('handler_slow_timeout', None) + raw_handler_registered_at = payload.pop('handler_registered_at', None) + raw_handler_event_pattern = payload.pop('handler_event_pattern', None) + raw_eventbus_name = payload.pop('eventbus_name', None) + raw_eventbus_id = payload.pop('eventbus_id', None) + + has_flat_handler_fields = any( + value is not None + for value in ( + raw_handler_id, + raw_handler_name, + raw_handler_file_path, + raw_handler_timeout, + raw_handler_slow_timeout, + raw_handler_registered_at, + raw_handler_event_pattern, + raw_eventbus_name, + raw_eventbus_id, + ) + ) + if has_flat_handler_fields: + handler_payload: dict[str, Any] = { + 'handler_name': raw_handler_name or 'anonymous', + 'event_pattern': raw_handler_event_pattern or '*', + 'eventbus_name': raw_eventbus_name or 'EventBus', + 'eventbus_id': raw_eventbus_id or '00000000-0000-0000-0000-000000000000', + } + if raw_handler_id is not None: + handler_payload['id'] = raw_handler_id + if raw_handler_file_path is not None: + handler_payload['handler_file_path'] = raw_handler_file_path + if 
raw_handler_timeout is not None: + handler_payload['handler_timeout'] = raw_handler_timeout + if raw_handler_slow_timeout is not None: + handler_payload['handler_slow_timeout'] = raw_handler_slow_timeout + if raw_handler_registered_at is not None: + handler_payload['handler_registered_at'] = raw_handler_registered_at + payload['handler'] = handler_payload + + raw_children = payload.get('event_children') + if isinstance(raw_children, list) and all(isinstance(item, str) for item in _normalize_any_list(raw_children)): + payload['event_children'] = [] + + raw_error = payload.get('error') + if raw_error is not None and not isinstance(raw_error, BaseException): + if isinstance(raw_error, str): + payload['error'] = Exception(raw_error) + elif isinstance(raw_error, dict): + raw_error_json = _normalize_any_dict(raw_error) + raw_message = raw_error_json.get('message') + message = raw_message if isinstance(raw_message, str) else str(raw_error_json) + payload['error'] = Exception(message) + else: + payload['error'] = Exception(str(raw_error)) + + for at_key in ('started_at', 'completed_at'): + if at_key not in payload: + continue + raw_at_value = payload.get(at_key) + if raw_at_value is None: + continue + if isinstance(raw_at_value, datetime): + normalized_value = ( + raw_at_value.replace(tzinfo=UTC) if raw_at_value.tzinfo is None else raw_at_value.astimezone(UTC) + ) + payload[at_key] = monotonic_datetime(normalized_value.isoformat().replace('+00:00', 'Z')) + else: + payload[at_key] = monotonic_datetime(str(raw_at_value)) + + return payload + + @model_serializer(mode='plain', when_used='json') + def _serialize_event_result_json(self) -> dict[str, Any]: + handler = self.handler + return { + 'id': self.id, + 'status': self.status, + 'event_id': self.event_id, + 'handler_id': self.handler_id, + 'handler_name': self.handler_name, + 'handler_file_path': handler.handler_file_path, + 'handler_timeout': handler.handler_timeout, + 'handler_slow_timeout': handler.handler_slow_timeout, 
+ 'handler_registered_at': monotonic_datetime(handler.handler_registered_at), + 'handler_event_pattern': handler.event_pattern, + 'eventbus_id': self.eventbus_id, + 'eventbus_name': self.eventbus_name, + 'started_at': self._serialize_datetime_json(self.started_at), + 'completed_at': self._serialize_datetime_json(self.completed_at), + 'result': self._serialize_jsonable(self.result), + 'error': self._serialize_error_json(self.error), + 'event_children': [child.event_id for child in self.event_children], + } + + @computed_field(return_type=str) + @property + def handler_id(self) -> str: + return self.handler.id + + @computed_field(return_type=str) + @property + def handler_name(self) -> str: + return self.handler.handler_name + + @computed_field(return_type=str) + @property + def eventbus_id(self) -> str: + return self.handler.eventbus_id + + @computed_field(return_type=str) + @property + def eventbus_name(self) -> str: + return self.handler.eventbus_name + + @property + def eventbus_label(self) -> str: + return self.handler.eventbus_label + + @property + def handler_completed_signal(self) -> asyncio.Event | None: + """Lazily create asyncio.Event when accessed.""" + if self._handler_completed_signal is None: + try: + asyncio.get_running_loop() + self._handler_completed_signal = asyncio.Event() + except RuntimeError: + pass + return self._handler_completed_signal + + def __str__(self) -> str: + """Constant-time summary for hot-path logging.""" + handler_qualname = f'{self.eventbus_label}.{self.handler_name}' + if self.status == 'pending': + outcome = 'pending' + elif self.status == 'started': + outcome = 'started' + elif self.error is not None: + outcome = f'error:{type(self.error).__name__}' + elif self.result is None: + outcome = 'result:none' + elif isinstance(self.result, BaseEvent): + outcome = 'event' + else: + outcome = f'result:{type(self.result).__name__}' + return f'{handler_qualname}() -> {outcome} ({self.status})' + + def __repr__(self) -> str: + icon = 
'πŸƒ' if self.status == 'pending' else 'βœ…' if self.status == 'completed' else '❌' + return f'{self.handler.label}() {icon}' + + def __await__(self) -> Generator[Self, Any, T_EventResultType | 'BaseEvent[Any]' | None]: + """ + Wait for this result to complete and return the result or raise error. + Does not execute the handler itself, only waits for completion. + """ + + async def wait_for_handler_to_complete_and_return_result() -> T_EventResultType | 'BaseEvent[Any]' | None: + assert self.handler_completed_signal is not None, 'EventResult cannot be awaited outside of an async context' + try: + await asyncio.wait_for(self.handler_completed_signal.wait(), timeout=self.timeout) + except TimeoutError: + raise TimeoutError( + f'Event handler {self.eventbus_label}.{self.handler_name}(#{self.event_id[-4:]}) timed out after {self.timeout}s' + ) + + if self.status == 'error' and self.error is not None: + raise self.error + return self.result + + return wait_for_handler_to_complete_and_return_result().__await__() + + def update(self, **kwargs: Any) -> Self: + """Update the EventResult with provided kwargs, called by EventBus during handler execution.""" + + # Common mistake: returning an exception object instead of setting error. 
+ if 'result' in kwargs and isinstance(kwargs['result'], BaseException): + logger.warning( + f'β„Ή Event handler {self.handler_name} returned an exception object, auto-converting to EventResult(result=None, status="error", error={kwargs["result"]})' + ) + kwargs['error'] = kwargs['result'] + kwargs['status'] = 'error' + kwargs['result'] = None + + if 'result' in kwargs: + result: Any = kwargs['result'] + self.status = 'completed' + if self.result_type is not None and result is not None: + if isinstance(result, BaseEvent): + self.result = result + else: + try: + validated_result = validate_result_against_type(self.result_type, result) + self.result = validated_result + except Exception as cast_error: + schema_id = result_type_identifier_from_schema(self.result_type) or 'unknown' + self.error = EventHandlerResultSchemaError( + f'Event handler returned a value that did not match expected event_result_type ' + f'({schema_id}): {result} -> {type(cast_error).__name__}: {cast_error}' + ) + self.result = None + self.status = 'error' + else: + self.result = result + + if 'error' in kwargs: + assert isinstance(kwargs['error'], (BaseException, str)), ( + f'Invalid error type: {type(kwargs["error"]).__name__} {kwargs["error"]}' + ) + self.error = kwargs['error'] if isinstance(kwargs['error'], BaseException) else Exception(kwargs['error']) + self.status = 'error' + + if 'status' in kwargs: + assert kwargs['status'] in ('pending', 'started', 'completed', 'error'), f'Invalid status: {kwargs["status"]}' + self.status = kwargs['status'] + + if self.status != 'pending' and not self.started_at: + self.started_at = monotonic_datetime() + if self.status in ('completed', 'error') and not self.completed_at: + self.completed_at = monotonic_datetime() + if self.handler_completed_signal: + self.handler_completed_signal.set() + return self + + def _create_slow_handler_warning_timer( + self, + event: 'BaseEvent[T_EventResultType]', + eventbus: 'EventBus', + handler_slow_timeout: float | None, 
+ ) -> Callable[[], Coroutine[Any, Any, None]] | None: + should_warn_for_slow_handler = handler_slow_timeout is not None and ( + self.timeout is None or self.timeout > handler_slow_timeout + ) + if not should_warn_for_slow_handler: + return None + return partial(self._slow_handler_monitor, event=event, eventbus=eventbus, handler_slow_timeout=handler_slow_timeout) + + async def _slow_handler_monitor( + self, + *, + event: 'BaseEvent[T_EventResultType]', + eventbus: 'EventBus', + handler_slow_timeout: float | None, + ) -> None: + assert handler_slow_timeout is not None + await asyncio.sleep(handler_slow_timeout) + if self.status != 'started': + return + started_at = self.started_at or event.event_started_at or event.event_created_at + started_at_dt = datetime.fromisoformat(started_at) + elapsed_seconds = max(0.0, (datetime.now(UTC) - started_at_dt).total_seconds()) + logger.warning( + '⚠️ Slow event handler: %s.on(%s#%s, %s) still running after %.1fs', + eventbus.label, + event.event_type, + event.event_id[-4:], + self.handler.label, + elapsed_seconds, + ) + + async def _call_handler( + self, + event: 'BaseEvent[T_EventResultType]', + handler: NormalizedEventHandlerCallable[T_EventResultType], + dispatch_context: contextvars.Context | None, + ) -> T_EventResultType | 'BaseEvent[Any]' | None: + handler_task: asyncio.Task[Any] | None = None + handler_return_value: T_EventResultType | BaseEvent[Any] | None = None + try: + if dispatch_context is None: + handler_return_value = await handler(event) + else: + merged_context = dispatch_context.copy() + for variable, value in contextvars.copy_context().items(): + merged_context.run(variable.set, value) + handler_task = asyncio.create_task( + handler(event), + context=merged_context, + ) + handler_return_value = await handler_task + return handler_return_value + finally: + await cancel_and_await(handler_task, timeout=0.1) + + @asynccontextmanager + async def _run_with_timeout(self, event: 'BaseEvent[T_EventResultType]') -> 
AsyncIterator[None]: + """Apply handler timeout and normalize timeout expiry to EventHandlerTimeoutError.""" + timeout_scope = asyncio.timeout(self.timeout) + try: + async with timeout_scope: + yield + except TimeoutError as exc: + if not timeout_scope.expired(): + raise + timeout_error = self._on_handler_timeout(event) + raise timeout_error from exc + + def _on_handler_timeout(self, event: 'BaseEvent[T_EventResultType]') -> EventHandlerTimeoutError: + children = f' and interrupted any processing of {len(event.event_children)} child events' if event.event_children else '' + timeout_error = EventHandlerTimeoutError( + f'Event handler {self.handler.label}({event}) timed out after {self.timeout}s{children}' + ) + self.update(error=timeout_error) + event._cancel_pending_child_processing(timeout_error) # pyright: ignore[reportPrivateUsage] + + from bubus.logging import log_timeout_tree + + log_timeout_tree(event, self) + return timeout_error + + def _on_handler_error( + self, + event: 'BaseEvent[T_EventResultType]', + eventbus: 'EventBus', + exc: Exception, + *, + format_exception_for_log: Callable[[BaseException], str], + ) -> None: + normalized_error: Exception = exc + if isinstance(exc, TimeoutError) and not isinstance(exc, EventHandlerTimeoutError): + timeout_message = str(exc) or f'Event handler {self.handler.label}({event}) timed out' + normalized_error = EventHandlerTimeoutError(timeout_message) + + self.update(error=normalized_error) + red = '\033[91m' + reset = '\033[0m' + logger.error( + f'❌ {eventbus} Error in event handler {self.handler_name}({event}) -> \n{red}{type(normalized_error).__name__}({normalized_error}){reset}\n{format_exception_for_log(normalized_error)}', + ) + + async def _on_handler_exit(self) -> None: + return + + async def run_handler( + self, + event: 'BaseEvent[T_EventResultType]', + *, + eventbus: 'EventBus', + timeout: float | None, + handler_slow_timeout: float | None = None, + notify_event_started: bool = False, + 
format_exception_for_log: Callable[[BaseException], str] | None = None, + ) -> T_EventResultType | 'BaseEvent[Any]' | None: + """Execute one handler inside the unified runtime scope stack. + + Runtime layering for one handler execution: + - per-event handler lock + - handler execution context manager (ContextVars + dispatch lock mirror) + - optional timeout wrapper (hard cap) + - optional slow-handler background monitor + - handler call + result/error normalization + """ + _format_exception_for_log_callable = format_exception_for_log or _default_format_exception_for_log + + handler: NormalizedEventHandlerCallable[T_EventResultType] | None = self.handler._handler_async # pyright: ignore[reportPrivateUsage] + if handler is None: + raise RuntimeError(f'EventResult {self.id} has no callable attached to handler {self.handler.id}') + + self.timeout = timeout + self.result_type = event.event_result_type + + dispatch_context = event._get_dispatch_context() # pyright: ignore[reportPrivateUsage] + slow_handler_monitor: Callable[[], Coroutine[Any, Any, None]] | None = None + try: + async with eventbus.locks._run_with_handler_lock(eventbus, event, self): # pyright: ignore[reportPrivateUsage] + if self.status in ('error', 'completed'): + return self.result + self.update(status='started') + event._mark_started(self.started_at) # pyright: ignore[reportPrivateUsage] + await eventbus.on_event_result_change(event, self, EventStatus.STARTED) + if notify_event_started: + await eventbus.on_event_change(event, EventStatus.STARTED) + + with eventbus._run_with_handler_dispatch_context(event, self.handler_id): # pyright: ignore[reportPrivateUsage] + slow_handler_monitor = self._create_slow_handler_warning_timer(event, eventbus, handler_slow_timeout) + async with self._run_with_timeout(event): + async with _run_with_slow_monitor( + slow_handler_monitor, + task_name=f'{eventbus}.slow_handler_monitor({event}, {self.handler.label})', + ): + handler_return_value = await self._call_handler( + 
event, + handler, + dispatch_context, + ) + self.update(result=handler_return_value) + return self.result + + except asyncio.CancelledError: + handler_interrupted_error = EventHandlerAbortedError( + f'Event handler {self.handler.label}({event}) was interrupted because of a parent timeout' + ) + self.update(error=handler_interrupted_error) + raise + + except Exception as exc: + self._on_handler_error( + event, + eventbus, + exc, + format_exception_for_log=_format_exception_for_log_callable, + ) + raise + finally: + await self._on_handler_exit() + + def log_tree( + self, + indent: str = '', + is_last: bool = True, + event_children_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, + ) -> None: + """Print this result and its child events with proper tree formatting.""" + from bubus.logging import log_event_result_tree + + log_event_result_tree(self, indent, is_last, event_children_by_parent) + + +class BaseEvent(BaseModel, Generic[T_EventResultType]): + """ + The base model used for all Events that flow through the EventBus system. + """ + + model_config = ConfigDict( + extra='allow', + arbitrary_types_allowed=True, + # Allow ergonomic subclass defaults like `class MyEvent(BaseEvent): event_version = '1.2.3'` + # without requiring repetitive type annotations on every override. + ignored_types=(str,), + # Runtime lifecycle updates mutate event fields very frequently; avoid + # assignment-time model rebuilding and keep state updates stable/O(1). 
+ validate_assignment=False, + validate_default=True, + revalidate_instances='always', + ) + + # Class-level cache for auto-extracted event_result_type from BaseEvent[T] + _event_result_type_cache: ClassVar[Any | None] = None + + event_type: PythonIdentifierStr = Field(default='UndefinedEvent', description='Event type name', max_length=64) + event_version: str = Field( + default=LIBRARY_VERSION, + description='Event type version tag, defaults to LIBRARY_VERSION env var or "0.0.1" if not overridden', + ) + event_timeout: float | None = Field( + default=None, description='Timeout in seconds for event to finish processing (bus default applied at dispatch)' + ) + event_slow_timeout: float | None = Field( + default=None, description='Optional per-event slow processing warning threshold in seconds' + ) + event_concurrency: EventConcurrencyMode | None = Field( + default=None, + description=( + 'Event scheduling strategy relative to other events: ' + "'global-serial' | 'bus-serial' | 'parallel'. " + 'None defers to the bus default.' + ), + ) + event_handler_timeout: float | None = Field(default=None, description='Optional per-event handler timeout cap in seconds') + event_handler_slow_timeout: float | None = Field( + default=None, description='Optional per-event slow handler warning threshold in seconds' + ) + event_handler_concurrency: EventHandlerConcurrencyMode | None = Field( + default=None, + description=( + "Handler scheduling strategy: 'serial' runs one handler at a time, 'parallel' runs handlers concurrently. " + 'None defers to the bus default.' + ), + ) + event_handler_completion: EventHandlerCompletionMode | None = Field( + default=None, + description=( + "Handler completion strategy: 'all' waits for all handlers, 'first' resolves on first successful result. " + 'None defers to the bus default.' 
+ ), + ) + event_result_type: Any = Field( + default=None, description='Schema/type for handler result validation (serialized as JSON Schema)' + ) + + @field_validator('event_result_type', mode='before') + @classmethod + def _deserialize_event_result_type(cls, value: Any) -> Any: + return pydantic_model_from_json_schema(value) + + @field_serializer('event_result_type', when_used='json') + def event_result_type_serializer(self, value: Any) -> dict[str, Any] | None: + """Serialize event_result_type to JSON Schema for cross-language transport.""" + return pydantic_model_to_json_schema(value) + + # Runtime metadata + event_id: UUIDStr = Field(default_factory=uuid7str, max_length=36) + event_path: list[EventPathEntryStr] = Field(default_factory=list, description='Path tracking for event routing') + event_parent_id: UUIDStr | None = Field( + default=None, description='ID of the parent event that triggered this event', max_length=36 + ) + event_emitted_by_handler_id: str | None = Field( + default=None, + description='Handler id that emitted this event when dispatched from inside a handler', + ) + event_pending_bus_count: int = Field( + default=0, + description='Number of buses that currently have this event pending/in-flight', + ) + + # Completion tracking fields + event_created_at: str = Field( + default_factory=monotonic_datetime, + description='Timestamp when event was first dispatched to an EventBus aka marked pending', + ) + event_status: EventStatus = Field( + default=EventStatus.PENDING, + description='Current event lifecycle status: pending, started, or completed', + ) + event_started_at: str | None = Field( + default=None, + description='Timestamp when event processing first started', + ) + event_completed_at: str | None = Field( + default=None, + description='Timestamp when event was completed by all handlers and child events', + ) + + event_results: dict[PythonIdStr, EventResult[T_EventResultType]] = Field( + default_factory=dict, exclude=True + ) # Results 
indexed by str(id(handler_func)) + + # Completion signal + _event_completed_signal: asyncio.Event | None = PrivateAttr(default=None) + _event_is_complete_flag: bool = PrivateAttr(default=False) + _lock_for_event_handler: 'ReentrantLock | None' = PrivateAttr(default=None) + + # Dispatch-time context for ContextVar propagation to handlers + # Captured when emit() is called, used when executing handlers via ctx.run() + _event_dispatch_context: contextvars.Context | None = PrivateAttr(default=None) + + def __hash__(self) -> int: + """Make events hashable using their unique event_id""" + return hash(self.event_id) + + def __str__(self) -> str: + """Compact O(1) summary for hot-path logging.""" + completed_signal = self._event_completed_signal + if ( + self.event_status == EventStatus.COMPLETED + or self._event_is_complete_flag + or (completed_signal is not None and completed_signal.is_set()) + ): + icon = 'βœ…' + elif self.event_status == EventStatus.STARTED: + icon = 'πŸƒ' + else: + icon = '⏳' + + bus_hint = self.event_path[-1] if self.event_path else '?' + return f'{bus_hint}β–Ά {self.event_type}#{self.event_id[-4:]} {icon}' + + def _remove_self_from_queue(self, bus: 'EventBus') -> bool: + """Remove this event from the bus's queue if present. Returns True if removed.""" + return bus.remove_event_from_pending_queue(self) + + @staticmethod + def _iter_eventbuses_for_registry(registry_owner: 'EventBus | None' = None) -> list['EventBus']: + from bubus.event_bus import EventBus, get_current_eventbus + + if registry_owner is not None: + return list(type(registry_owner).all_instances) + current_bus = get_current_eventbus() + if current_bus is not None: + return list(type(current_bus).all_instances) + return list(EventBus.iter_all_instances()) + + def _is_queued_on_any_bus(self, ignore_bus: 'EventBus | None' = None) -> bool: + """ + Check whether this event is currently queued on any live EventBus. 
+ + This prevents premature completion when an event has been forwarded to + another bus but that bus hasn't processed it yet. + """ + for bus in self._iter_eventbuses_for_registry(ignore_bus): + if not bus: + continue + if ignore_bus is not None and bus is ignore_bus: + continue + if bus.is_event_inflight_or_queued(self.event_id): + return True + return False + + async def _process_self_on_all_buses(self) -> None: + """ + Process this specific event on all buses where it's queued. + + This handles the case where an event is forwarded to multiple buses - + we need to process it on each bus, but we only process THIS event, + not other events in the queues (to avoid overshoot). + + The loop continues until the event's completion signal is set, which + happens after all handlers on all buses have completed. + """ + max_iterations = 1000 # Prevent infinite loops + iterations = 0 + + # Cache the signal - in async context it will always be created + completed_signal = self.event_completed_signal + assert completed_signal is not None, 'event_completed_signal should exist in async context' + claimed_processed_bus_ids: set[int] = set() + try: + while not completed_signal.is_set() and iterations < max_iterations: + iterations += 1 + processed_any = False + + # Look for this specific event in all bus queues and process it + for bus in self._iter_eventbuses_for_registry(): + if not bus: + continue + processed_on_bus = False + + if self._remove_self_from_queue(bus): + # Fast path: event is still in the queue, claim and process it via EventBus.step + # so completion/finalization uses the same logic as the runloop. + await bus.step(event=self) + bus.mark_pending_queue_task_done() + processed_on_bus = True + else: + # Slow path: another task already claimed queue.get() and set + # processing state, but may be blocked on an event-level lock held + # by the awaiting parent handler. Process once here to make progress. 
+ bus_key = id(bus) + event_lock = bus.locks.get_lock_for_event(bus, self) + if ( + event_lock is not None + and bus.is_event_processing(self.event_id) + and bus_key not in claimed_processed_bus_ids + ): + await bus.step(event=self) + claimed_processed_bus_ids.add(bus_key) + processed_on_bus = True + + if processed_on_bus: + processed_any = True + if completed_signal.is_set(): + break + + if completed_signal.is_set(): + break + + if not processed_any: + # Event not in any queue, yield control and wait + await asyncio.sleep(0) + + except asyncio.CancelledError: + if logger.isEnabledFor(logging.DEBUG): + logger.debug('Polling loop cancelled for %s', self) + raise + + async def _wait_for_completion_inside_handler(self) -> None: + """ + Wait for this event to complete when called from inside a handler. + + Processes this specific event on all buses where it appears (handling + the forwarding case), but doesn't process other events (avoiding overshoot). + """ + await self._process_self_on_all_buses() + + def __await__(self) -> Generator[Self, Any, Any]: + """Immediate await path (queue-jump when inside a handler), returns self.""" + + async def wait_for_handlers_to_complete_then_return_event(): + if self._event_is_complete_flag: + return self + assert self.event_completed_signal is not None + from bubus.event_bus import in_handler_context + + is_inside_handler = in_handler_context() + is_not_yet_complete = not self._event_is_complete_flag and not self.event_completed_signal.is_set() + + if is_not_yet_complete and is_inside_handler: + await self._wait_for_completion_inside_handler() + else: + await self.event_completed() + + return self + + return wait_for_handlers_to_complete_then_return_event().__await__() + + async def event_completed(self) -> Self: + """Queue-order await path (never queue-jumps), returns self.""" + if self._event_is_complete_flag: + return self + assert self.event_completed_signal is not None + await self.event_completed_signal.wait() + return self + 
+ async def first( + self, + timeout: float | None = None, + *, + raise_if_any: bool = False, + raise_if_none: bool = False, + ) -> T_EventResultType | None: + """ + Resolve with the first successful non-None handler result for this event. + + This switches the event to ``event_handler_completion='first'`` before awaiting completion. + """ + self.event_handler_completion = EventHandlerCompletionMode.FIRST + await self + return await self.event_result(timeout=timeout, raise_if_any=raise_if_any, raise_if_none=raise_if_none) + + @model_validator(mode='before') + @classmethod + def _normalize_and_validate_event_input(cls, data: Any) -> Any: + """Normalize event input once, enforce reserved namespaces, and apply built-in defaults.""" + if not isinstance(data, dict): + return data + params = _normalize_any_dict(data) + + for key in RESERVED_USER_EVENT_FIELDS: + if key in params: + raise ValueError(f'Field "{key}" is reserved for BaseEvent runtime APIs and cannot be set in event payload') + + for key in params: + if key.startswith('event_') and key not in BaseEvent.model_fields: + raise ValueError(f'Field "{key}" starts with "event_" but is not a recognized BaseEvent field') + if key.startswith('model_'): + raise ValueError(f'Field "{key}" starts with "model_" and is reserved for Pydantic model internals') + + for at_key in ('event_created_at', 'event_started_at', 'event_completed_at'): + if at_key not in params: + continue + raw_value = params[at_key] + if raw_value is None: + continue + if isinstance(raw_value, datetime): + normalized_value = raw_value.replace(tzinfo=UTC) if raw_value.tzinfo is None else raw_value.astimezone(UTC) + params[at_key] = monotonic_datetime(normalized_value.isoformat().replace('+00:00', 'Z')) + else: + params[at_key] = monotonic_datetime(str(raw_value)) + + is_class_default_unchanged = cls.model_fields['event_type'].default == 'UndefinedEvent' + is_event_type_not_provided = 'event_type' not in params or params['event_type'] == 'UndefinedEvent' 
+ if is_class_default_unchanged and is_event_type_not_provided: + params['event_type'] = cls.__name__ + + if 'event_result_type' not in params: + if 'event_result_type' in cls.model_fields: + field = cls.model_fields['event_result_type'] + if field.default is not None and field.default != BaseEvent.model_fields['event_result_type'].default: + params['event_result_type'] = field.default + return params + + if cls._event_result_type_cache is not None: + params['event_result_type'] = cls._event_result_type_cache + return params + + extracted_type = extract_basemodel_generic_arg(cls) + cls._event_result_type_cache = extracted_type + if extracted_type is not None: + params['event_result_type'] = extracted_type + + return params + + @model_validator(mode='after') + def _hydrate_event_result_types_from_event(self) -> Self: + """Rehydrate per-handler result_type from the event-level event_result_type.""" + if self.event_results: + first_result = next(iter(self.event_results.values())) + if first_result.result_type != self.event_result_type: + for event_result in self.event_results.values(): + event_result.result_type = self.event_result_type + return self + + @property + def event_completed_signal(self) -> asyncio.Event | None: + """Lazily create asyncio.Event when accessed""" + if self._event_completed_signal is None: + try: + asyncio.get_running_loop() + self._event_completed_signal = asyncio.Event() + except RuntimeError: + pass # Keep it None if no event loop + return self._event_completed_signal + + @property + def event_children(self) -> list['BaseEvent[Any]']: + """Get all child events dispatched from within this event's handlers""" + children: list[BaseEvent[Any]] = [] + for event_result in self.event_results.values(): + children.extend(event_result.event_children) + return children + + def _mark_started(self, started_at: str | datetime | None = None) -> None: + """Mark event runtime state as started, preserving the earliest start timestamp.""" + if 
self.event_status == EventStatus.COMPLETED: + return + + if isinstance(started_at, datetime): + normalized_value = started_at.replace(tzinfo=UTC) if started_at.tzinfo is None else started_at.astimezone(UTC) + resolved_started_at = monotonic_datetime(normalized_value.isoformat().replace('+00:00', 'Z')) + elif started_at is None: + resolved_started_at = monotonic_datetime() + else: + resolved_started_at = monotonic_datetime(started_at) + if self.event_started_at is None or resolved_started_at < self.event_started_at: + self.event_started_at = resolved_started_at + if self.event_status == EventStatus.PENDING: + self.event_status = EventStatus.STARTED + self.event_completed_at = None + self._event_is_complete_flag = False + + def _create_pending_handler_results( + self, + handlers: dict[PythonIdStr, EventHandler | ContravariantEventHandlerCallable[Any]], + *, + eventbus: 'EventBus | None' = None, + timeout: float | None = None, + ) -> 'dict[PythonIdStr, EventResult[T_EventResultType]]': + """Ensure EventResult placeholders exist for provided handlers before execution. + + Any stale timing/error data from prior runs is cleared so consumers immediately see a fresh pending state. 
+ """ + pending_results: dict[PythonIdStr, EventResult[T_EventResultType]] = {} + self._event_is_complete_flag = False + self.event_completed_at = None + if self.event_status == EventStatus.COMPLETED: + self.event_status = EventStatus.PENDING + self.event_started_at = None + for handler_id, handler in handlers.items(): + event_result = self.event_result_update( + handler=handler, + eventbus=eventbus, + status='pending', + ) + # Reset runtime fields so we never reuse stale data + event_result.result = None + event_result.error = None + event_result.started_at = None + event_result.completed_at = None + event_result.status = 'pending' + event_result.timeout = timeout if timeout is not None else self.event_timeout + event_result.result_type = self.event_result_type + pending_results[handler_id] = event_result + return pending_results + + @staticmethod + def _is_first_mode_winning_result(event_result: 'EventResult[Any]') -> bool: + return ( + event_result.status == 'completed' + and event_result.error is None + and event_result.result is not None + and not isinstance(event_result.result, BaseEvent) + ) + + async def _mark_remaining_first_mode_result_cancelled( + self, + event_result: 'EventResult[Any]', + *, + eventbus: 'EventBus', + ) -> None: + if event_result.status in ('completed', 'error'): + return + event_result.update(error=EventHandlerCancelledError('Cancelled: first() resolved')) + await eventbus.on_event_result_change(self, event_result, EventStatus.COMPLETED) + + async def _run_handlers( + self: 'BaseEvent[T_EventResultType]', + *, + eventbus: 'EventBus', + handlers: dict[PythonIdStr, EventHandler] | None = None, + timeout: float | None = None, + ) -> None: + """Run all handlers for this event using the bus concurrency/completion configuration.""" + applicable_handlers = handlers if (handlers is not None) else eventbus._get_handlers_for_event(self) # pyright: ignore[reportPrivateUsage] + if not applicable_handlers: + return + + pending_handler_map: 
dict[PythonIdStr, EventHandler | ContravariantEventHandlerCallable[Any]] = dict(applicable_handlers) + pending_results = self._create_pending_handler_results( + pending_handler_map, + eventbus=eventbus, + timeout=timeout if timeout is not None else self.event_timeout, + ) + eventbus._resolve_find_waiters(self) # pyright: ignore[reportPrivateUsage] + if eventbus.middlewares: + for pending_result in pending_results.values(): + await eventbus.on_event_result_change(self, pending_result, EventStatus.PENDING) + + completion_mode = self.event_handler_completion or eventbus.event_handler_completion + concurrency_mode = self.event_handler_concurrency or eventbus.event_handler_concurrency + handler_items = list(applicable_handlers.items()) + + if concurrency_mode == EventHandlerConcurrencyMode.PARALLEL: + if completion_mode == EventHandlerCompletionMode.FIRST: + handler_tasks: dict[asyncio.Task[Any], PythonIdStr] = {} + local_handler_ids: set[PythonIdStr] = set(applicable_handlers.keys()) + for handler_id, handler_entry in applicable_handlers.items(): + handler_tasks[asyncio.create_task(eventbus._run_handler(self, handler_entry, timeout=timeout))] = handler_id # pyright: ignore[reportPrivateUsage] + + pending_tasks: set[asyncio.Task[Any]] = set(handler_tasks.keys()) + winner_handler_id: PythonIdStr | None = None + + try: + while pending_tasks: + done_tasks, pending_tasks = await asyncio.wait(pending_tasks, return_when=asyncio.FIRST_COMPLETED) + for done_task in done_tasks: + try: + await done_task + except Exception: + pass + + done_handler_id = handler_tasks[done_task] + completed_result = self.event_results.get(done_handler_id) + if completed_result is not None and self._is_first_mode_winning_result(completed_result): + winner_handler_id = done_handler_id + break + + if winner_handler_id is not None: + break + + if winner_handler_id is not None: + for pending_task in pending_tasks: + pending_task.cancel() + if pending_tasks: + await asyncio.gather(*pending_tasks, 
return_exceptions=True) + + for handler_id, event_result in self.event_results.items(): + if handler_id not in local_handler_ids or handler_id == winner_handler_id: + continue + await self._mark_remaining_first_mode_result_cancelled(event_result, eventbus=eventbus) + elif pending_tasks: + await asyncio.gather(*pending_tasks, return_exceptions=True) + except asyncio.CancelledError: + for pending_task in pending_tasks: + pending_task.cancel() + if pending_tasks: + await asyncio.gather(*pending_tasks, return_exceptions=True) + raise + return + + parallel_tasks = [ + asyncio.create_task(eventbus._run_handler(self, handler_entry, timeout=timeout)) # pyright: ignore[reportPrivateUsage] + for _, handler_entry in handler_items + ] + try: + for task in parallel_tasks: + try: + await task + except Exception: + pass + except asyncio.CancelledError: + for task in parallel_tasks: + if not task.done(): + task.cancel() + await asyncio.gather(*parallel_tasks, return_exceptions=True) + raise + return + + for index, (handler_id, handler_entry) in enumerate(handler_items): + try: + await eventbus._run_handler(self, handler_entry, timeout=timeout) # pyright: ignore[reportPrivateUsage] + except Exception as e: + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + '❌ %s Handler %s#%s(%s) failed with %s: %s', + eventbus, + handler_entry.handler_name, + handler_entry.id[-4:], + self, + type(e).__name__, + e, + ) + + if completion_mode != EventHandlerCompletionMode.FIRST: + continue + + completed_result = self.event_results.get(handler_id) + if completed_result is None or not self._is_first_mode_winning_result(completed_result): + continue + + for remaining_handler_id, _ in handler_items[index + 1 :]: + remaining_result = self.event_results.get(remaining_handler_id) + if remaining_result is None: + continue + await self._mark_remaining_first_mode_result_cancelled(remaining_result, eventbus=eventbus) + break + + @staticmethod + def _event_result_is_truthy(event_result: 
'EventResult[T_EventResultType]') -> bool: + # omit BaseEvent results, they are forwarded event refs not actual return values + return ( + event_result.status == 'completed' + and event_result.result is not None + and not isinstance(event_result.result, BaseException) + and not event_result.error + and not isinstance(event_result.result, BaseEvent) + ) + + def _collect_handler_errors(self, include_cancelled: bool) -> list[Exception]: + """Collect handler errors as Exception instances for aggregation.""" + collected_errors: list[Exception] = [] + for event_result in self.event_results.values(): + original_error = event_result.error + if original_error is None and isinstance(event_result.result, BaseException): + original_error = event_result.result + + if original_error is None: + continue + + if isinstance(original_error, asyncio.CancelledError) and not include_cancelled: + continue + + if isinstance(original_error, Exception): + collected_errors.append(original_error) + continue + + wrapped = RuntimeError( + f'Non-Exception handler error from {event_result.eventbus_label}.{event_result.handler_name}: ' + f'{type(original_error).__name__}: {original_error}' + ) + wrapped.__cause__ = original_error + collected_errors.append(wrapped) + return collected_errors + + async def event_result( + self, + timeout: float | None = None, + include: EventResultFilter = _event_result_is_truthy, + raise_if_any: bool = True, + raise_if_none: bool = True, + ) -> T_EventResultType | None: + """Get the first non-None result from the event handlers""" + if not self._event_is_complete_flag: + completed_signal = self._event_completed_signal + if completed_signal is None: + completed_signal = self.event_completed_signal + assert completed_signal is not None, 'EventResult cannot be awaited outside of an async context' + await asyncio.wait_for(completed_signal.wait(), timeout=timeout or self.event_timeout) + + for event_result in self.event_results.values(): + try: + await event_result + 
except (Exception, asyncio.CancelledError): + pass + + valid_results: dict[PythonIdStr, EventResult[T_EventResultType]] = { + handler_key: event_result for handler_key, event_result in self.event_results.items() if include(event_result) + } + error_results = [ + event_result + for event_result in self.event_results.values() + if event_result.error or isinstance(event_result.result, BaseException) + ] + + if raise_if_any and error_results: + if len(error_results) == 1: + single_result = error_results[0] + single_error = single_result.error if single_result.error is not None else single_result.result + if isinstance(single_error, BaseException): + raise single_error + raise Exception(str(single_error)) + + collected_errors = self._collect_handler_errors(include_cancelled=True) + raise ExceptionGroup( + f'Event {self.event_type}#{self.event_id[-4:]} had {len(collected_errors)} handler error(s)', + collected_errors, + ) + + if raise_if_none and not valid_results: + raise ValueError( + f'Expected at least one handler to return a non-None result, but none did! 
{self} -> {self.event_results}' + ) + + for event_result in valid_results.values(): + assert event_result.result is not None, f'EventResult {event_result} has no result' + + results = list(valid_results.values()) + if not results: + return None + return self._coerce_typed_result_value(results[0].result) + + async def event_results_list( + self, + timeout: float | None = None, + include: EventResultFilter = _event_result_is_truthy, + raise_if_any: bool = True, + raise_if_none: bool = True, + ) -> list[T_EventResultType | None]: + """Get all result values in a list [handler1_result, handler2_result, ...]""" + if not self._event_is_complete_flag: + completed_signal = self._event_completed_signal + if completed_signal is None: + completed_signal = self.event_completed_signal + assert completed_signal is not None, 'EventResult cannot be awaited outside of an async context' + await asyncio.wait_for(completed_signal.wait(), timeout=timeout or self.event_timeout) + + for event_result in self.event_results.values(): + try: + await event_result + except (Exception, asyncio.CancelledError): + pass + + valid_results: dict[PythonIdStr, EventResult[T_EventResultType]] = { + handler_key: event_result for handler_key, event_result in self.event_results.items() if include(event_result) + } + error_results = [ + event_result + for event_result in self.event_results.values() + if event_result.error or isinstance(event_result.result, BaseException) + ] + + if raise_if_any and error_results: + if len(error_results) == 1: + single_result = error_results[0] + single_error = single_result.error if single_result.error is not None else single_result.result + if isinstance(single_error, BaseException): + raise single_error + raise Exception(str(single_error)) + + collected_errors = self._collect_handler_errors(include_cancelled=True) + raise ExceptionGroup( + f'Event {self.event_type}#{self.event_id[-4:]} had {len(collected_errors)} handler error(s)', + collected_errors, + ) + + if 
raise_if_none and not valid_results: + raise ValueError( + f'Expected at least one handler to return a non-None result, but none did! {self} -> {self.event_results}' + ) + + for event_result in valid_results.values(): + assert event_result.result is not None, f'EventResult {event_result} has no result' + + results_list: list[T_EventResultType | None] = [] + for event_result in valid_results.values(): + results_list.append(self._coerce_typed_result_value(event_result.result)) + return results_list + + def event_result_update( + self, + handler: EventHandler | ContravariantEventHandlerCallable[Any], + eventbus: 'EventBus | None' = None, + **kwargs: Any, + ) -> 'EventResult[T_EventResultType]': + """Create or update an EventResult for a handler""" + + from bubus.event_bus import EventBus + + assert eventbus is None or isinstance(eventbus, EventBus) + if ( + eventbus is None + and not isinstance(handler, EventHandler) + and inspect.ismethod(handler) + and isinstance(handler.__self__, EventBus) + ): + eventbus = handler.__self__ + + if isinstance(handler, EventHandler): + handler_entry = handler + if eventbus is None and handler_entry.eventbus_id != '00000000-0000-0000-0000-000000000000': + for bus in self._iter_eventbuses_for_registry(eventbus): + if bus and bus.id == handler_entry.eventbus_id: + eventbus = bus + break + if ( + eventbus is None + and handler_entry.eventbus_id + and handler_entry.eventbus_id != '00000000-0000-0000-0000-000000000000' + ): + expected_label = handler_entry.eventbus_label + for bus in self._iter_eventbuses_for_registry(eventbus): + if bus and bus.label == expected_label: + eventbus = bus + break + else: + handler_entry = EventHandler.from_callable( + handler=handler, + event_pattern=self.event_type, + eventbus_name=str(eventbus.name if eventbus is not None else 'EventBus'), + eventbus_id=str(eventbus.id if eventbus is not None else '00000000-0000-0000-0000-000000000000'), + ) + + handler_id: PythonIdStr = handler_entry.id + + # Get or 
create EventResult + if handler_id not in self.event_results: + self.event_results[handler_id] = EventResult[T_EventResultType]( + event_id=self.event_id, + handler=handler_entry, + status=kwargs.get('status', 'pending'), + timeout=self.event_timeout, + result_type=self.event_result_type, + ) + # logger.debug(f'Created EventResult for handler {handler_id}: {handler and EventHandler._get_callable_handler_name(handler)}') + + # Update the EventResult with provided kwargs + existing_result = self.event_results[handler_id] + if existing_result.handler.id != handler_entry.id: + existing_result.handler = handler_entry + + existing_result.update(**kwargs) + if existing_result.status == 'started' and existing_result.started_at is not None: + self._mark_started(existing_result.started_at) + if 'timeout' in kwargs: + existing_result.timeout = kwargs['timeout'] + if kwargs.get('status') in ('pending', 'started'): + self.event_completed_at = None + # logger.debug( + # f'Updated EventResult for handler {handler_id}: status={self.event_results[handler_id].status}, total_results={len(self.event_results)}' + # ) + # Don't mark complete here - let the EventBus do it after all handlers are done + return existing_result + + @staticmethod + def _coerce_typed_result_value( + value: Any, + ) -> T_EventResultType | None: + # Handlers may return BaseEvent instances when forwarding via + # bus.emit. Typed accessors expose only typed payload values, + # so forwarded events are normalized to None. 
+ if isinstance(value, BaseEvent): + return None + return value + + def _mark_completed(self, current_bus: 'EventBus | None' = None) -> None: + """Check if all handlers are done and signal completion""" + completed_signal = self._event_completed_signal + if completed_signal is not None and completed_signal.is_set(): + self._event_is_complete_flag = True + if self.event_completed_at is None: + self.event_completed_at = monotonic_datetime() + if self.event_started_at is None: + self.event_started_at = self.event_completed_at + self.event_status = EventStatus.COMPLETED + return + + # If there are no results at all, the event is complete. + if not self.event_results: + # Even with no local handlers, forwarded copies may still be queued elsewhere. + if self._is_queued_on_any_bus(ignore_bus=current_bus): + return + if not self._are_all_children_complete(): + return + self.event_completed_at = self.event_completed_at or monotonic_datetime() + if self.event_started_at is None: + self.event_started_at = self.event_completed_at + self._event_is_complete_flag = True + self.event_status = EventStatus.COMPLETED + if completed_signal is not None: + completed_signal.set() + self._event_dispatch_context = None + return + + # Check if all handler results are done. + for result in self.event_results.values(): + if result.status not in ('completed', 'error'): + return + + # Forwarded events may still be waiting in another bus queue. + # Don't mark complete until all queue copies have been consumed. + if self._is_queued_on_any_bus(ignore_bus=current_bus): + return + + # Recursively check if all child events are also complete + if not self._are_all_children_complete(): + return + + # All handlers and all child events are done. 
+ latest_completed: str | None = None + for result in self.event_results.values(): + completed_at = result.completed_at + if completed_at is None: + continue + if latest_completed is None or completed_at > latest_completed: + latest_completed = completed_at + self.event_completed_at = latest_completed or self.event_completed_at or monotonic_datetime() + if self.event_started_at is None: + self.event_started_at = self.event_completed_at + self._event_is_complete_flag = True + self.event_status = EventStatus.COMPLETED + if completed_signal is not None: + completed_signal.set() + # Clear dispatch context to avoid memory leaks (it holds references to ContextVars) + self._event_dispatch_context = None + + def _mark_pending(self) -> Self: + """Reset mutable runtime state so this event can be dispatched again as pending.""" + self.event_status = EventStatus.PENDING + self.event_started_at = None + self._event_is_complete_flag = False + self.event_completed_at = None + self.event_results.clear() + self._lock_for_event_handler = None + self._event_dispatch_context = None + try: + asyncio.get_running_loop() + self._event_completed_signal = asyncio.Event() + except RuntimeError: + self._event_completed_signal = None + return self + + def event_reset(self) -> Self: + """Return a fresh copy of this event with pending runtime state.""" + fresh_event = self.__class__.model_validate(self.model_dump(mode='python')) + fresh_event.event_id = uuid7str() + return fresh_event._mark_pending() + + def _get_handler_lock(self) -> 'ReentrantLock | None': + return self._lock_for_event_handler + + def _set_handler_lock(self, lock: 'ReentrantLock | None') -> None: + self._lock_for_event_handler = lock + + def _get_dispatch_context(self) -> contextvars.Context | None: + return self._event_dispatch_context + + def _set_dispatch_context(self, dispatch_context: contextvars.Context | None) -> None: + self._event_dispatch_context = dispatch_context + + def _are_all_children_complete(self, _visited: 
set[str] | None = None) -> bool: + """Recursively check if all child events and their descendants are complete""" + if _visited is None: + _visited = set() + + # Prevent infinite recursion on circular references + if self.event_id in _visited: + return True + _visited.add(self.event_id) + + for child_event in self.event_children: + if child_event.event_status != 'completed': + if logger.isEnabledFor(logging.DEBUG): + logger.debug('Event %s has incomplete child %s', self, child_event) + return False + # Recursively check child's children + if not child_event._are_all_children_complete(_visited): + return False + return True + + def _cancel_pending_child_processing(self, error: BaseException) -> None: + """Cancel any pending child events that were dispatched during handler execution""" + if not isinstance(error, EventHandlerCancelledError): + error = EventHandlerCancelledError( + f'Cancelled pending handler as a result of parent error {error}' + ) # keep the word "pending" in the error, checked by print_handler_line() + for child_event in self.event_children: + for result in child_event.event_results.values(): + if result.status == 'pending': + # print('CANCELLING CHILD HANDLER', result, 'due to', error) + result.update(error=error) + child_event._cancel_pending_child_processing(error) + + def event_log_safe_summary(self) -> dict[str, Any]: + """only event metadata without contents, avoid potentially sensitive event contents in logs""" + return {k: v for k, v in self.model_dump(mode='json').items() if k.startswith('event_') and 'results' not in k} + + def event_log_tree( + self, + indent: str = '', + is_last: bool = True, + event_children_by_parent: 'dict[str | None, list[BaseEvent[Any]]] | None' = None, + ) -> None: + """Print this event and its results with proper tree formatting""" + from bubus.logging import log_event_tree + + log_event_tree(self, indent, is_last, event_children_by_parent) + + @property + def bus(self) -> 'EventBus': + """Get the EventBus that is 
currently processing this event""" + from bubus.event_bus import get_current_event, get_current_eventbus, in_handler_context + + if not in_handler_context(): + raise AttributeError('bus property can only be accessed from within an event handler') + + current_bus = get_current_eventbus() + current_event = get_current_event() + if current_bus is not None and current_event is not None and current_event.event_id == self.event_id: + return current_bus + + # The event_path contains all buses this event has passed through + # The last one in the path is the one currently processing + if not self.event_path: + raise RuntimeError('Event has no event_path - was it dispatched?') + + current_bus_label = self.event_path[-1] + + # Find the bus by label (BusName#abcd). + # Create a list copy to avoid "Set changed size during iteration" error + for bus in self._iter_eventbuses_for_registry(current_bus): + if bus and bus.label == current_bus_label: + return bus + + raise RuntimeError(f'Could not find active EventBus for path entry {current_bus_label}') + + @property + def event_bus(self) -> 'EventBus': + return self.bus + + +def attr_name_allowed_on_event(key: str) -> bool: + allowed_unprefixed_attrs = {'first', 'bus'} + return key in pydantic_builtin_attrs or key in event_builtin_attrs or key.startswith('_') or key in allowed_unprefixed_attrs + + +# PSA: All BaseEvent buil-in attrs and methods must be prefixed with "event_" in order to avoid clashing with data contents (which share a namespace with the metadata) +# This is the same approach Pydantic uses for their special `model_*` attrs (and BaseEvent is also a pydantic model, so model_ prefixes are reserved too) +# resist the urge to nest the event data in an inner object unless absolutely necessary, flat simplifies most of the code and makes it easier to read JSON logs with less nesting +pydantic_builtin_attrs = dir(BaseModel) +event_builtin_attrs = {key for key in dir(BaseEvent) if key.startswith('event_')} +illegal_attrs = 
{key for key in dir(BaseEvent) if not attr_name_allowed_on_event(key)} +assert not illegal_attrs, ( + 'All BaseEvent attrs and methods must be prefixed with "event_" in order to avoid clashing ' + 'with BaseEvent subclass fields used to store event contents (which share a namespace with the event_ metadata). ' + f'not allowed: {illegal_attrs}' +) + +# Resolve forward refs after both core models are defined. +EventResult.model_rebuild() +BaseEvent.model_rebuild() +EventHandler.model_rebuild() diff --git a/bubus/bridge_jsonl.py b/bubus/bridge_jsonl.py new file mode 100644 index 0000000..9e78199 --- /dev/null +++ b/bubus/bridge_jsonl.py @@ -0,0 +1,156 @@ +"""JSONL bridge for forwarding events between runtimes. + +This bridge is intentionally simple: +- emit/dispatch appends one raw event JSON object per line +- listener polls the file and emits any unseen lines +""" + +from __future__ import annotations + +import asyncio +import json +from collections.abc import Callable +from pathlib import Path +from typing import Any + +from uuid_extensions import uuid7str + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus, EventPatternType, in_handler_context + + +class JSONLEventBridge: + def __init__(self, path: str, *, poll_interval: float = 0.25, name: str | None = None): + self.path = Path(path) + self.poll_interval = poll_interval + self._inbound_bus = EventBus(name=name or f'JSONLEventBridge_{uuid7str()[-8:]}', max_history_size=0) + + self._running = False + self._start_task: asyncio.Task[None] | None = None + self._start_lock = asyncio.Lock() + self._listener_task: asyncio.Task[None] | None = None + self._byte_offset = 0 + self._pending_line = '' + + def on(self, event_pattern: EventPatternType, handler: Callable[[BaseEvent[Any]], Any]) -> None: + self._ensure_started() + self._inbound_bus.on(event_pattern, handler) + + async def emit(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + self._ensure_started() + + payload = 
event.model_dump(mode='json') + self.path.parent.mkdir(parents=True, exist_ok=True) + + await asyncio.to_thread(self._append_line, json.dumps(payload, separators=(',', ':'))) + + if in_handler_context(): + return None + return event + + async def dispatch(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + return await self.emit(event) + + async def start(self) -> None: + current_task = asyncio.current_task() + if self._start_task is not None and self._start_task is not current_task and not self._start_task.done(): + await self._start_task + return + + if self._running: + return + + try: + async with self._start_lock: + if self._running: + return + self.path.parent.mkdir(parents=True, exist_ok=True) + self.path.touch(exist_ok=True) + self._byte_offset = self.path.stat().st_size + self._pending_line = '' + self._running = True + if self._listener_task is None or self._listener_task.done(): + self._listener_task = asyncio.create_task(self._listen_loop()) + finally: + if self._start_task is current_task: + self._start_task = None + + async def close(self, *, clear: bool = True) -> None: + if self._start_task is not None: + self._start_task.cancel() + await asyncio.gather(self._start_task, return_exceptions=True) + self._start_task = None + self._running = False + if self._listener_task is not None: + self._listener_task.cancel() + await asyncio.gather(self._listener_task, return_exceptions=True) + self._listener_task = None + await self._inbound_bus.stop(clear=clear) + + def _ensure_started(self) -> None: + if self._running: + return + try: + asyncio.get_running_loop() + except RuntimeError: + return + if self._start_task is None or self._start_task.done(): + self._start_task = asyncio.create_task(self.start()) + + async def _listen_loop(self) -> None: + while self._running: + try: + await self._poll_new_lines() + except asyncio.CancelledError: + raise + except Exception: + pass + await asyncio.sleep(self.poll_interval) + + async def _poll_new_lines(self) -> 
None: + previous_offset = self._byte_offset + appended_text, new_offset = await asyncio.to_thread(self._read_appended_text, previous_offset) + self._byte_offset = new_offset + + if new_offset < previous_offset: + self._pending_line = '' + + if not appended_text: + return + + combined_text = self._pending_line + appended_text + new_lines = combined_text.split('\n') + self._pending_line = new_lines.pop() if new_lines else '' + + for line in new_lines: + line = line.strip() + if not line: + continue + try: + payload = json.loads(line) + except Exception: + continue + await self._dispatch_inbound_payload(payload) + + async def _dispatch_inbound_payload(self, payload: Any) -> None: + event = BaseEvent[Any].model_validate(payload).event_reset() + self._inbound_bus.emit(event) + + def _read_appended_text(self, offset: int) -> tuple[str, int]: + try: + with self.path.open('r', encoding='utf-8') as fp: + fp.seek(0, 2) + file_size = fp.tell() + + start_offset = 0 if file_size < offset else offset + if file_size == start_offset: + return '', file_size + + fp.seek(start_offset) + return fp.read(), fp.tell() + except FileNotFoundError: + return '', 0 + + def _append_line(self, payload: str) -> None: + with self.path.open('a', encoding='utf-8') as fp: + fp.write(payload + '\n') diff --git a/bubus/bridge_nats.py b/bubus/bridge_nats.py new file mode 100644 index 0000000..c243b0c --- /dev/null +++ b/bubus/bridge_nats.py @@ -0,0 +1,131 @@ +"""NATS bridge for forwarding events between runtimes. 
+ +Optional dependency: nats-py +""" + +from __future__ import annotations + +import asyncio +import importlib +import json +from collections.abc import Callable +from typing import Any + +from uuid_extensions import uuid7str + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus, EventPatternType, in_handler_context +from bubus.helpers import QueueShutDown + + +class NATSEventBridge: + def __init__(self, server: str, subject: str, *, name: str | None = None): + self.server = server + self.subject = subject + self._inbound_bus = EventBus(name=name or f'NATSEventBridge_{uuid7str()[-8:]}', max_history_size=0) + + self._running = False + self._start_task: asyncio.Task[None] | None = None + self._start_lock = asyncio.Lock() + self._nc: Any | None = None + + def on(self, event_pattern: EventPatternType, handler: Callable[[BaseEvent[Any]], Any]) -> None: + self._ensure_started() + self._inbound_bus.on(event_pattern, handler) + + async def emit(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + self._ensure_started() + if self._nc is None: + await self.start() + + payload = event.model_dump(mode='json') + assert self._nc is not None + await self._nc.publish(self.subject, json.dumps(payload, separators=(',', ':')).encode('utf-8')) + + if in_handler_context(): + return None + return event + + async def dispatch(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + return await self.emit(event) + + async def start(self) -> None: + current_task = asyncio.current_task() + if self._start_task is not None and self._start_task is not current_task and not self._start_task.done(): + await self._start_task + return + + if self._running: + return + + try: + async with self._start_lock: + if self._running: + return + + nats_module = self._load_nats() + nc = await nats_module.connect(self.server) + + async def _on_msg(msg: Any) -> None: + try: + payload = json.loads(msg.data.decode('utf-8')) + except Exception: + return + try: + await 
self._dispatch_inbound_payload(payload) + except QueueShutDown: + return + + try: + await nc.subscribe(self.subject, cb=_on_msg) + except Exception: + try: + await nc.close() + except Exception: + pass + raise + + self._nc = nc + self._running = True + finally: + if self._start_task is current_task: + self._start_task = None + + async def close(self, *, clear: bool = True) -> None: + if self._start_task is not None: + self._start_task.cancel() + await asyncio.gather(self._start_task, return_exceptions=True) + self._start_task = None + self._running = False + if self._nc is not None: + try: + await self._nc.drain() + except Exception: + pass + try: + await self._nc.close() + except Exception: + pass + self._nc = None + await self._inbound_bus.stop(clear=clear) + + def _ensure_started(self) -> None: + if self._running: + return + try: + asyncio.get_running_loop() + except RuntimeError: + return + if self._start_task is None or self._start_task.done(): + self._start_task = asyncio.create_task(self.start()) + + async def _dispatch_inbound_payload(self, payload: Any) -> None: + event = BaseEvent[Any].model_validate(payload).event_reset() + self._inbound_bus.emit(event) + + @staticmethod + def _load_nats() -> Any: + try: + return importlib.import_module('nats') + except ModuleNotFoundError as exc: + raise RuntimeError('NATSEventBridge requires optional dependency: pip install nats-py') from exc diff --git a/bubus/bridge_postgres.py b/bubus/bridge_postgres.py new file mode 100644 index 0000000..3314d73 --- /dev/null +++ b/bubus/bridge_postgres.py @@ -0,0 +1,333 @@ +"""PostgreSQL LISTEN/NOTIFY + flat-table bridge for forwarding events. 
+ +Optional dependency: asyncpg + +Connection URL format: + postgresql://user:pass@host:5432/dbname[/tablename]?sslmode=require + +Schema shape: +- event_id (PRIMARY KEY) +- event_created_at (indexed) +- event_type (indexed) +- event_payload (full event JSON payload) +- one TEXT column per event_* field storing JSON-serialized values +""" + +from __future__ import annotations + +import asyncio +import importlib +import json +import re +from collections.abc import Callable +from typing import Any, TypeGuard +from urllib.parse import urlsplit, urlunsplit + +from uuid_extensions import uuid7str + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus, EventPatternType, in_handler_context + +_IDENTIFIER_RE = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$') +_DEFAULT_POSTGRES_TABLE = 'bubus_events' +_DEFAULT_POSTGRES_CHANNEL = 'bubus_events' +_EVENT_PAYLOAD_COLUMN = 'event_payload' + + +def _validate_identifier(identifier: str, *, label: str) -> str: + if not _IDENTIFIER_RE.match(identifier): + raise ValueError(f'Invalid {label}: {identifier!r}. Use only [A-Za-z0-9_] and start with a letter/_') + return identifier + + +def _parse_table_url(table_url: str) -> tuple[str, str]: + parsed = urlsplit(table_url) + segments = [segment for segment in parsed.path.split('/') if segment] + if len(segments) < 1: + raise ValueError( + 'PostgresEventBridge URL must include at least database in path, e.g. 
' + 'postgresql://user:pass@host:5432/dbname[/tablename]' + ) + + db_name = segments[0] + table_name = _validate_identifier(segments[1], label='table name') if len(segments) >= 2 else _DEFAULT_POSTGRES_TABLE + + dsn_path = f'/{db_name}' + dsn = urlunsplit((parsed.scheme, parsed.netloc, dsn_path, parsed.query, parsed.fragment)) + return dsn, table_name + + +def _index_name(table: str, suffix: str) -> str: + return _validate_identifier(f'{table}_{suffix}'[:63], label='index name') + + +def _is_str_keyed_dict(value: Any) -> TypeGuard[dict[str, Any]]: + return isinstance(value, dict) + + +def _split_bridge_payload(payload: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]: + event_fields: dict[str, Any] = {} + event_payload: dict[str, Any] = dict(payload) + for key, value in payload.items(): + if key.startswith('event_'): + event_fields[key] = value + return event_fields, event_payload + + +class PostgresEventBridge: + def __init__(self, table_url: str, channel: str | None = None, *, name: str | None = None): + self.table_url = table_url + self.dsn, self.table = _parse_table_url(table_url) + derived_channel = channel or _DEFAULT_POSTGRES_CHANNEL + self.channel = _validate_identifier(derived_channel[:63], label='channel name') + self._inbound_bus = EventBus(name=name or f'PostgresEventBridge_{uuid7str()[-8:]}', max_history_size=0) + + self._running = False + self._write_conn: Any | None = None + self._listen_conn: Any | None = None + self._listener_callback: Any | None = None + self._start_task: asyncio.Task[None] | None = None + self._start_lock = asyncio.Lock() + self._listen_query_lock = asyncio.Lock() + self._table_columns: set[str] = {'event_id', 'event_created_at', 'event_type', _EVENT_PAYLOAD_COLUMN} + + def on(self, event_pattern: EventPatternType, handler: Callable[[BaseEvent[Any]], Any]) -> None: + self._ensure_started() + self._inbound_bus.on(event_pattern, handler) + + async def emit(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + 
self._ensure_started() + if self._write_conn is None: + await self.start() + + payload = event.model_dump(mode='json') + event_fields, event_payload = _split_bridge_payload(payload) + write_payload = {**event_fields, _EVENT_PAYLOAD_COLUMN: event_payload} + payload_keys = sorted(write_payload.keys()) + await self._ensure_columns(payload_keys) + + columns_sql = ', '.join(f'"{key}"' for key in payload_keys) + placeholders_sql = ', '.join(f'${index}' for index in range(1, len(payload_keys) + 1)) + values = [ + json.dumps(write_payload[key], separators=(',', ':')) if write_payload[key] is not None else None + for key in payload_keys + ] + + update_fields = [key for key in payload_keys if key != 'event_id'] + if update_fields: + updates_sql = ', '.join(f'"{key}" = EXCLUDED."{key}"' for key in update_fields) + upsert_sql = ( + f'INSERT INTO "{self.table}" ({columns_sql}) VALUES ({placeholders_sql}) ' + f'ON CONFLICT ("event_id") DO UPDATE SET {updates_sql}' + ) + else: + upsert_sql = ( + f'INSERT INTO "{self.table}" ({columns_sql}) VALUES ({placeholders_sql}) ON CONFLICT ("event_id") DO NOTHING' + ) + + assert self._write_conn is not None + await self._write_conn.execute(upsert_sql, *values) + event_id_payload = json.dumps(payload['event_id'], separators=(',', ':')) + await self._write_conn.execute('SELECT pg_notify($1, $2)', self.channel, event_id_payload) + + if in_handler_context(): + return None + return event + + async def dispatch(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + return await self.emit(event) + + async def start(self) -> None: + current_task = asyncio.current_task() + if self._start_task is not None and self._start_task is not current_task and not self._start_task.done(): + await self._start_task + return + + if self._running: + return + + try: + async with self._start_lock: + if self._running: + return + + asyncpg = self._load_asyncpg() + write_conn = await asyncpg.connect(self.dsn) + listen_conn = await asyncpg.connect(self.dsn) + 
listener_callback: Any | None = None + try: + self._write_conn = write_conn + self._listen_conn = listen_conn + await self._ensure_table_exists() + await self._refresh_column_cache() + await self._ensure_columns(['event_id', 'event_created_at', 'event_type', _EVENT_PAYLOAD_COLUMN]) + await self._ensure_base_indexes() + + async def _dispatch_event_id(event_id: str) -> None: + try: + await self._dispatch_by_event_id(event_id) + except Exception: + return + + def _listener(_connection: Any, _pid: int, _channel: str, payload: str) -> None: + asyncio.create_task(_dispatch_event_id(payload)) + + listener_callback = _listener + await listen_conn.add_listener(self.channel, listener_callback) + self._listener_callback = listener_callback + self._running = True + except Exception: + if listener_callback is not None: + try: + await listen_conn.remove_listener(self.channel, listener_callback) + except Exception: + pass + try: + await listen_conn.close() + except Exception: + pass + try: + await write_conn.close() + except Exception: + pass + if self._listen_conn is listen_conn: + self._listen_conn = None + if self._write_conn is write_conn: + self._write_conn = None + if self._listener_callback is listener_callback: + self._listener_callback = None + raise + finally: + if self._start_task is current_task: + self._start_task = None + + async def close(self, *, clear: bool = True) -> None: + if self._start_task is not None: + self._start_task.cancel() + await asyncio.gather(self._start_task, return_exceptions=True) + self._start_task = None + self._running = False + if self._listen_conn is not None: + if self._listener_callback is not None: + try: + await self._listen_conn.remove_listener(self.channel, self._listener_callback) + except Exception: + pass + self._listener_callback = None + try: + await self._listen_conn.close() + except Exception: + pass + self._listen_conn = None + if self._write_conn is not None: + try: + await self._write_conn.close() + except Exception: + pass 
+ self._write_conn = None + await self._inbound_bus.stop(clear=clear) + + def _ensure_started(self) -> None: + if self._running: + return + if self._start_task is not None and not self._start_task.done(): + return + try: + asyncio.get_running_loop() + except RuntimeError: + return + self._start_task = asyncio.create_task(self.start()) + + async def _dispatch_by_event_id(self, event_id: str) -> None: + async with self._listen_query_lock: + assert self._listen_conn is not None + row = await self._listen_conn.fetchrow(f'SELECT * FROM "{self.table}" WHERE "event_id" = $1', event_id) + if row is None: + return + + row_values = dict(row) + payload: dict[str, Any] = {} + + raw_event_payload = row_values.get(_EVENT_PAYLOAD_COLUMN) + if isinstance(raw_event_payload, str): + try: + parsed_event_payload: Any = json.loads(raw_event_payload) + if _is_str_keyed_dict(parsed_event_payload): + payload.update(parsed_event_payload) + except Exception: + pass + + for key, raw_value in row_values.items(): + if key == _EVENT_PAYLOAD_COLUMN or raw_value is None: + continue + if not key.startswith('event_'): + continue + try: + decoded_value = json.loads(raw_value) + except Exception: + decoded_value = raw_value + + payload[key] = decoded_value + + await self._dispatch_inbound_payload(payload) + + async def _dispatch_inbound_payload(self, payload: Any) -> None: + event = BaseEvent[Any].model_validate(payload).event_reset() + self._inbound_bus.emit(event) + + async def _ensure_table_exists(self) -> None: + assert self._write_conn is not None + await self._write_conn.execute( + f''' + CREATE TABLE IF NOT EXISTS "{self.table}" ( + "event_id" TEXT PRIMARY KEY, + "event_created_at" TEXT, + "event_type" TEXT, + "event_payload" TEXT + ) + ''' + ) + + async def _ensure_base_indexes(self) -> None: + assert self._write_conn is not None + event_created_at_idx = _index_name(self.table, 'event_created_at_idx') + event_type_idx = _index_name(self.table, 'event_type_idx') + + await 
self._write_conn.execute( + f'CREATE INDEX IF NOT EXISTS "{event_created_at_idx}" ON "{self.table}" ("event_created_at")' + ) + await self._write_conn.execute(f'CREATE INDEX IF NOT EXISTS "{event_type_idx}" ON "{self.table}" ("event_type")') + + async def _refresh_column_cache(self) -> None: + assert self._write_conn is not None + rows = await self._write_conn.fetch( + """ + SELECT column_name + FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = $1 + """, + self.table, + ) + self._table_columns = {str(row['column_name']) for row in rows} + + async def _ensure_columns(self, keys: list[str]) -> None: + for key in keys: + _validate_identifier(key, label='event field name') + if key != _EVENT_PAYLOAD_COLUMN and not key.startswith('event_'): + raise ValueError(f'Invalid event field name for bridge column: {key!r}. Only event_* fields become columns') + + missing_columns = [key for key in keys if key not in self._table_columns] + if not missing_columns: + return + + assert self._write_conn is not None + for key in missing_columns: + await self._write_conn.execute(f'ALTER TABLE "{self.table}" ADD COLUMN IF NOT EXISTS "{key}" TEXT') + self._table_columns.add(key) + + @staticmethod + def _load_asyncpg() -> Any: + try: + return importlib.import_module('asyncpg') + except ModuleNotFoundError as exc: + raise RuntimeError('PostgresEventBridge requires optional dependency: pip install asyncpg') from exc diff --git a/bubus/bridge_redis.py b/bubus/bridge_redis.py new file mode 100644 index 0000000..996e62e --- /dev/null +++ b/bubus/bridge_redis.py @@ -0,0 +1,228 @@ +"""Redis pub/sub bridge for forwarding events between runtimes. 
+ +Optional dependency: redis>=5 (uses redis.asyncio) + +Usage: + # channel from URL path + bridge = RedisEventBridge('redis://user:pass@localhost:6379/1/my_channel') + + # explicit channel override + bridge = RedisEventBridge('redis://user:pass@localhost:6379/1', channel='my_channel') + +Connection URL format: + redis://user:pass@host:6379/1/channel_name + +The first path segment is the Redis logical DB index. +An optional second path segment is used as the pub/sub channel. +If channel is omitted in both URL and constructor, defaults to "bubus_events". +""" + +from __future__ import annotations + +import asyncio +import importlib +import json +from collections.abc import Callable +from typing import Any, cast +from urllib.parse import urlsplit, urlunsplit + +from uuid_extensions import uuid7str + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus, EventPatternType, in_handler_context + +_DEFAULT_REDIS_CHANNEL = 'bubus_events' +_DB_INIT_KEY = '__bubus:bridge_init__' + + +def _parse_redis_url(redis_url: str, channel: str | None) -> tuple[str, str]: + parsed = urlsplit(redis_url) + scheme = parsed.scheme.lower() + if scheme not in ('redis', 'rediss'): + raise ValueError(f'RedisEventBridge URL must use redis:// or rediss://, got: {redis_url}') + + path_segments = [segment for segment in parsed.path.split('/') if segment] + if len(path_segments) > 2: + raise ValueError(f'RedisEventBridge URL path must be / or //, got: {parsed.path or "/"}') + + db_index = '0' + channel_from_url: str | None = None + if path_segments: + db_index = path_segments[0] + if not db_index.isdigit(): + raise ValueError(f'RedisEventBridge URL db path segment must be numeric, got: {db_index!r} in {redis_url}') + if len(path_segments) == 2: + channel_from_url = path_segments[1] + + resolved_channel = channel or channel_from_url or _DEFAULT_REDIS_CHANNEL + if not resolved_channel: + raise ValueError('RedisEventBridge channel must not be empty') + + normalized_path = 
f'/{db_index}' + normalized_url = urlunsplit((parsed.scheme, parsed.netloc, normalized_path, parsed.query, parsed.fragment)) + return normalized_url, resolved_channel + + +class RedisEventBridge: + def __init__(self, redis_url: str, channel: str | None = None, *, name: str | None = None): + self.url, self.channel = _parse_redis_url(redis_url, channel) + self._inbound_bus = EventBus(name=name or f'RedisEventBridge_{uuid7str()[-8:]}', max_history_size=0) + + self._running = False + self._start_task: asyncio.Task[None] | None = None + self._start_lock = asyncio.Lock() + self._listener_task: asyncio.Task[None] | None = None + self._redis_pub: Any | None = None + self._redis_sub: Any | None = None + self._pubsub: Any | None = None + + def on(self, event_pattern: EventPatternType, handler: Callable[[BaseEvent[Any]], Any]) -> None: + self._ensure_started() + self._inbound_bus.on(event_pattern, handler) + + async def emit(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + self._ensure_started() + if self._redis_pub is None: + await self.start() + + payload = event.model_dump(mode='json') + assert self._redis_pub is not None + await self._redis_pub.publish(self.channel, json.dumps(payload, separators=(',', ':'))) + + if in_handler_context(): + return None + return event + + async def dispatch(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + return await self.emit(event) + + async def start(self) -> None: + current_task = asyncio.current_task() + if self._start_task is not None and self._start_task is not current_task and not self._start_task.done(): + await self._start_task + return + + if self._running: + return + + try: + async with self._start_lock: + if self._running: + return + + redis_asyncio = self._load_redis_asyncio() + redis_pub = redis_asyncio.from_url(self.url, decode_responses=True) + redis_sub = redis_asyncio.from_url(self.url, decode_responses=True) + pubsub = redis_sub.pubsub() + + try: + # Redis logical DBs are created lazily; writing a 
short-lived key initializes/validates the selected DB. + await redis_pub.set(_DB_INIT_KEY, '1', ex=60, nx=True) + await pubsub.subscribe(self.channel) + except Exception: + await self._close_pubsub(pubsub) + await self._close_redis_client(redis_sub) + await self._close_redis_client(redis_pub) + raise + + self._redis_pub = redis_pub + self._redis_sub = redis_sub + self._pubsub = pubsub + self._running = True + if self._listener_task is None or self._listener_task.done(): + self._listener_task = asyncio.create_task(self._listen_loop()) + finally: + if self._start_task is current_task: + self._start_task = None + + async def close(self, *, clear: bool = True) -> None: + if self._start_task is not None: + self._start_task.cancel() + await asyncio.gather(self._start_task, return_exceptions=True) + self._start_task = None + self._running = False + if self._listener_task is not None: + self._listener_task.cancel() + await asyncio.gather(self._listener_task, return_exceptions=True) + self._listener_task = None + + if self._pubsub is not None: + await self._close_pubsub(self._pubsub) + self._pubsub = None + if self._redis_sub is not None: + await self._close_redis_client(self._redis_sub) + self._redis_sub = None + if self._redis_pub is not None: + await self._close_redis_client(self._redis_pub) + self._redis_pub = None + + await self._inbound_bus.stop(clear=clear) + + def _ensure_started(self) -> None: + if self._running: + return + try: + asyncio.get_running_loop() + except RuntimeError: + return + if self._start_task is None or self._start_task.done(): + self._start_task = asyncio.create_task(self.start()) + + async def _listen_loop(self) -> None: + assert self._pubsub is not None + try: + async for message in self._pubsub.listen(): + if not self._running: + break + if not isinstance(message, dict): + continue + message_dict = cast(dict[str, Any], message) + if message_dict.get('type') != 'message': + continue + + raw_data = message_dict.get('data') + if 
isinstance(raw_data, bytes): + data = raw_data.decode('utf-8') + elif isinstance(raw_data, str): + data = raw_data + else: + continue + + try: + payload = json.loads(data) + except Exception: + continue + await self._dispatch_inbound_payload(payload) + except asyncio.CancelledError: + raise + except Exception: + if self._running: + await asyncio.sleep(0.05) + + async def _dispatch_inbound_payload(self, payload: Any) -> None: + event = BaseEvent[Any].model_validate(payload).event_reset() + self._inbound_bus.emit(event) + + async def _close_pubsub(self, pubsub: Any) -> None: + try: + await pubsub.unsubscribe(self.channel) + except Exception: + pass + try: + await pubsub.close() + except Exception: + pass + + @staticmethod + async def _close_redis_client(client: Any) -> None: + try: + await client.close() + except Exception: + pass + + @staticmethod + def _load_redis_asyncio() -> Any: + try: + return importlib.import_module('redis.asyncio') + except ModuleNotFoundError as exc: + raise RuntimeError('RedisEventBridge requires optional dependency: pip install redis') from exc diff --git a/bubus/bridge_sqlite.py b/bubus/bridge_sqlite.py new file mode 100644 index 0000000..56556dd --- /dev/null +++ b/bubus/bridge_sqlite.py @@ -0,0 +1,331 @@ +"""SQLite flat-table bridge for forwarding events between runtimes. + +Uses Python stdlib sqlite3 and polling for new rows. 
+Schema mirrors Postgres bridge shape: +- event_id (PRIMARY KEY) +- event_created_at (indexed) +- event_type (indexed) +- event_payload (full event JSON payload) +- one TEXT column per event_* field storing JSON-serialized values +""" + +from __future__ import annotations + +import asyncio +import json +import re +import sqlite3 +import time +from collections.abc import Callable +from contextlib import closing +from pathlib import Path +from typing import Any, TypeGuard + +from uuid_extensions import uuid7str + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus, EventPatternType, in_handler_context + +_IDENTIFIER_RE = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$') +_EVENT_PAYLOAD_COLUMN = 'event_payload' + + +def _validate_identifier(identifier: str, *, label: str) -> str: + if not _IDENTIFIER_RE.match(identifier): + raise ValueError(f'Invalid {label}: {identifier!r}. Use only [A-Za-z0-9_] and start with a letter/_') + return identifier + + +def _is_str_keyed_dict(value: Any) -> TypeGuard[dict[str, Any]]: + return isinstance(value, dict) + + +def _split_bridge_payload(payload: dict[str, Any]) -> tuple[dict[str, Any], dict[str, Any]]: + event_fields: dict[str, Any] = {} + event_payload: dict[str, Any] = dict(payload) + for key, value in payload.items(): + if key.startswith('event_'): + event_fields[key] = value + return event_fields, event_payload + + +class SQLiteEventBridge: + def __init__( + self, + path: str, + table: str = 'bubus_events', + *, + poll_interval: float = 0.25, + name: str | None = None, + ): + self.path = Path(path) + self.table = _validate_identifier(table, label='table name') + self.poll_interval = poll_interval + self._inbound_bus = EventBus(name=name or f'SQLiteEventBridge_{uuid7str()[-8:]}', max_history_size=0) + + self._running = False + self._start_task: asyncio.Task[None] | None = None + self._start_lock = asyncio.Lock() + self._listener_task: asyncio.Task[None] | None = None + self._last_seen_event_created_at = '' + 
self._last_seen_event_id = '' + self._table_columns: set[str] = {'event_id', 'event_created_at', 'event_type', _EVENT_PAYLOAD_COLUMN} + + def on(self, event_pattern: EventPatternType, handler: Callable[[BaseEvent[Any]], Any]) -> None: + self._ensure_started() + self._inbound_bus.on(event_pattern, handler) + + async def emit(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + self._ensure_started() + if not self._running: + await self.start() + + payload = event.model_dump(mode='json') + event_fields, event_payload = _split_bridge_payload(payload) + write_payload = {**event_fields, _EVENT_PAYLOAD_COLUMN: event_payload} + payload_keys = sorted(write_payload.keys()) + + await asyncio.to_thread(self._ensure_columns, payload_keys) + await asyncio.to_thread(self._upsert_payload, write_payload, payload_keys) + + if in_handler_context(): + return None + return event + + async def dispatch(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + return await self.emit(event) + + async def start(self) -> None: + current_task = asyncio.current_task() + if self._start_task is not None and self._start_task is not current_task and not self._start_task.done(): + await self._start_task + return + + if self._running: + return + + try: + async with self._start_lock: + if self._running: + return + self.path.parent.mkdir(parents=True, exist_ok=True) + await asyncio.to_thread(self._init_db) + await asyncio.to_thread(self._refresh_column_cache) + await asyncio.to_thread( + self._ensure_columns, ['event_id', 'event_created_at', 'event_type', _EVENT_PAYLOAD_COLUMN] + ) + await asyncio.to_thread(self._ensure_base_indexes) + await asyncio.to_thread(self._set_cursor_to_latest_row) + self._running = True + if self._listener_task is None or self._listener_task.done(): + self._listener_task = asyncio.create_task(self._listen_loop()) + finally: + if self._start_task is current_task: + self._start_task = None + + async def close(self, *, clear: bool = True) -> None: + self._running = False + 
if self._start_task is not None: + self._start_task.cancel() + await asyncio.gather(self._start_task, return_exceptions=True) + self._start_task = None + if self._listener_task is not None: + self._listener_task.cancel() + await asyncio.gather(self._listener_task, return_exceptions=True) + self._listener_task = None + await self._inbound_bus.stop(clear=clear) + + def _ensure_started(self) -> None: + if self._running: + return + try: + asyncio.get_running_loop() + except RuntimeError: + return + if self._start_task is None or self._start_task.done(): + self._start_task = asyncio.create_task(self.start()) + + async def _listen_loop(self) -> None: + while self._running: + try: + rows = await asyncio.to_thread( + self._fetch_new_rows, + self._last_seen_event_created_at, + self._last_seen_event_id, + ) + for row in rows: + event_created_at = str(row.get('event_created_at') or '') + event_id = str(row.get('event_id') or '') + if event_created_at or event_id: + self._last_seen_event_created_at = event_created_at + self._last_seen_event_id = event_id + + payload: dict[str, Any] = {} + raw_event_payload = row.get(_EVENT_PAYLOAD_COLUMN) + if isinstance(raw_event_payload, str): + try: + decoded_event_payload: Any = json.loads(raw_event_payload) + if _is_str_keyed_dict(decoded_event_payload): + payload.update(decoded_event_payload) + except Exception: + pass + + for key, raw_value in row.items(): + if key == _EVENT_PAYLOAD_COLUMN or raw_value is None or not key.startswith('event_'): + continue + if isinstance(raw_value, str): + try: + payload[key] = json.loads(raw_value) + except Exception: + payload[key] = raw_value + else: + payload[key] = raw_value + + await self._dispatch_inbound_payload(payload) + except asyncio.CancelledError: + raise + except Exception: + pass + await asyncio.sleep(self.poll_interval) + + async def _dispatch_inbound_payload(self, payload: Any) -> None: + event = BaseEvent[Any].model_validate(payload).event_reset() + self._inbound_bus.emit(event) + + def 
_connect(self) -> sqlite3.Connection: + # Under concurrent bridge startup/teardown across processes, sqlite can + # intermittently fail with "unable to open database file" while the + # parent path is being materialized. Recover by ensuring parent exists + # and retrying a bounded number of times. + connect_attempts = 20 + conn: sqlite3.Connection | None = None + last_error: sqlite3.OperationalError | None = None + for _ in range(connect_attempts): + try: + conn = sqlite3.connect(str(self.path), timeout=30.0) + break + except sqlite3.OperationalError as exc: + message = str(exc).lower() + if 'unable to open database file' not in message: + raise + last_error = exc + self.path.parent.mkdir(parents=True, exist_ok=True) + time.sleep(0.05) + if conn is None: + assert last_error is not None + raise last_error + + conn.execute('PRAGMA busy_timeout=30000') + for _ in range(20): + try: + conn.execute('PRAGMA journal_mode=WAL') + break + except sqlite3.OperationalError as exc: + if 'locked' not in str(exc).lower(): + raise + time.sleep(0.05) + conn.row_factory = sqlite3.Row + return conn + + def _init_db(self) -> None: + with closing(self._connect()) as conn: + conn.execute( + f''' + CREATE TABLE IF NOT EXISTS "{self.table}" ( + "event_id" TEXT PRIMARY KEY, + "event_created_at" TEXT, + "event_type" TEXT, + "event_payload" JSON + ) + ''' + ) + conn.commit() + + def _refresh_column_cache(self) -> None: + with closing(self._connect()) as conn: + rows = conn.execute(f'PRAGMA table_info("{self.table}")').fetchall() + self._table_columns = {str(row['name']) for row in rows} + + def _ensure_columns(self, keys: list[str]) -> None: + for key in keys: + _validate_identifier(key, label='event field name') + if key != _EVENT_PAYLOAD_COLUMN and not key.startswith('event_'): + raise ValueError(f'Invalid event field name for bridge column: {key!r}. 
Only event_* fields become columns') + + missing_columns = [key for key in keys if key not in self._table_columns] + if not missing_columns: + return + + with closing(self._connect()) as conn: + for key in missing_columns: + column_type = 'JSON' if key == _EVENT_PAYLOAD_COLUMN else 'TEXT' + conn.execute(f'ALTER TABLE "{self.table}" ADD COLUMN "{key}" {column_type}') + self._table_columns.add(key) + conn.commit() + + def _ensure_base_indexes(self) -> None: + event_created_at_index = f'{self.table}_event_created_at_idx' + event_type_index = f'{self.table}_event_type_idx' + + with closing(self._connect()) as conn: + conn.execute(f'CREATE INDEX IF NOT EXISTS "{event_created_at_index}" ON "{self.table}" ("event_created_at")') + conn.execute(f'CREATE INDEX IF NOT EXISTS "{event_type_index}" ON "{self.table}" ("event_type")') + conn.commit() + + def _upsert_payload(self, payload: dict[str, Any], payload_keys: list[str]) -> None: + columns_sql = ', '.join(f'"{key}"' for key in payload_keys) + placeholders_sql = ', '.join('json(?)' if key == _EVENT_PAYLOAD_COLUMN else '?' 
for key in payload_keys) + values = [json.dumps(payload[key], separators=(',', ':')) if payload[key] is not None else None for key in payload_keys] + + update_fields = [key for key in payload_keys if key != 'event_id'] + if update_fields: + updates_sql = ', '.join(f'"{key}" = excluded."{key}"' for key in update_fields) + upsert_sql = ( + f'INSERT INTO "{self.table}" ({columns_sql}) VALUES ({placeholders_sql}) ' + f'ON CONFLICT("event_id") DO UPDATE SET {updates_sql}' + ) + else: + upsert_sql = ( + f'INSERT INTO "{self.table}" ({columns_sql}) VALUES ({placeholders_sql}) ON CONFLICT("event_id") DO NOTHING' + ) + + with closing(self._connect()) as conn: + conn.execute(upsert_sql, values) + conn.commit() + + def _set_cursor_to_latest_row(self) -> None: + with closing(self._connect()) as conn: + row = conn.execute( + f''' + SELECT + COALESCE("event_created_at", '') AS event_created_at, + COALESCE("event_id", '') AS event_id + FROM "{self.table}" + ORDER BY COALESCE("event_created_at", '') DESC, COALESCE("event_id", '') DESC + LIMIT 1 + ''' + ).fetchone() + if row is None: + self._last_seen_event_created_at = '' + self._last_seen_event_id = '' + return + self._last_seen_event_created_at = str(row['event_created_at'] or '') + self._last_seen_event_id = str(row['event_id'] or '') + + def _fetch_new_rows(self, last_event_created_at: str, last_event_id: str) -> list[dict[str, Any]]: + with closing(self._connect()) as conn: + rows = conn.execute( + f''' + SELECT * + FROM "{self.table}" + WHERE + COALESCE("event_created_at", '') > ? + OR ( + COALESCE("event_created_at", '') = ? + AND COALESCE("event_id", '') > ? 
+ ) + ORDER BY COALESCE("event_created_at", '') ASC, COALESCE("event_id", '') ASC + ''', + (last_event_created_at, last_event_created_at, last_event_id), + ).fetchall() + return [dict(row) for row in rows] diff --git a/bubus/bridges.py b/bubus/bridges.py new file mode 100644 index 0000000..443f492 --- /dev/null +++ b/bubus/bridges.py @@ -0,0 +1,375 @@ +"""IPC bridges for forwarding EventBus instances over HTTP or unix sockets.""" + +from __future__ import annotations + +import asyncio +import importlib +import json +import logging +from collections.abc import Callable +from pathlib import Path +from typing import TYPE_CHECKING, Any, Literal +from urllib.parse import urlparse +from urllib.request import Request, urlopen + +from anyio import Path as AnyPath +from uuid_extensions import uuid7str + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus, EventPatternType, in_handler_context + +logger = logging.getLogger('bubus.bridges') +UNIX_SOCKET_MAX_PATH_CHARS = 90 +__all__ = [ + 'HTTPEventBridge', + 'SocketEventBridge', + 'NATSEventBridge', + 'RedisEventBridge', + 'PostgresEventBridge', + 'JSONLEventBridge', + 'SQLiteEventBridge', +] + +if TYPE_CHECKING: + from .bridge_jsonl import JSONLEventBridge + from .bridge_nats import NATSEventBridge + from .bridge_postgres import PostgresEventBridge + from .bridge_redis import RedisEventBridge + from .bridge_sqlite import SQLiteEventBridge + +_LAZY_BRIDGE_MODULES: dict[str, str] = { + 'NATSEventBridge': '.bridge_nats', + 'RedisEventBridge': '.bridge_redis', + 'PostgresEventBridge': '.bridge_postgres', + 'JSONLEventBridge': '.bridge_jsonl', + 'SQLiteEventBridge': '.bridge_sqlite', +} + + +class _Endpoint: + def __init__( + self, + raw: str, + scheme: Literal['unix', 'http', 'https'], + *, + host: str | None = None, + port: int | None = None, + path: str | None = None, + ): + self.raw = raw + self.scheme = scheme + self.host = host + self.port = port + self.path = path + + +def 
_parse_endpoint(raw_endpoint: str) -> _Endpoint: + parsed = urlparse(raw_endpoint) + scheme = parsed.scheme.lower() + + if scheme == 'unix': + socket_path = parsed.path or parsed.netloc + if not socket_path: + raise ValueError(f'Invalid unix endpoint (missing socket path): {raw_endpoint}') + socket_path_len = len(socket_path.encode('utf-8')) + if socket_path_len > UNIX_SOCKET_MAX_PATH_CHARS: + raise ValueError( + f'Unix socket path is too long ({socket_path_len} chars), max is {UNIX_SOCKET_MAX_PATH_CHARS}: {socket_path}' + ) + return _Endpoint(raw_endpoint, 'unix', path=socket_path) + + if scheme == 'http': + if not parsed.hostname: + raise ValueError(f'Invalid HTTP endpoint (missing hostname): {raw_endpoint}') + request_path = parsed.path or '/' + if parsed.query: + request_path = f'{request_path}?{parsed.query}' + port = parsed.port if parsed.port is not None else 80 + return _Endpoint(raw_endpoint, 'http', host=parsed.hostname, port=port, path=request_path) + + if scheme == 'https': + if not parsed.hostname: + raise ValueError(f'Invalid HTTP endpoint (missing hostname): {raw_endpoint}') + request_path = parsed.path or '/' + if parsed.query: + request_path = f'{request_path}?{parsed.query}' + port = parsed.port if parsed.port is not None else 443 + return _Endpoint(raw_endpoint, 'https', host=parsed.hostname, port=port, path=request_path) + + raise ValueError(f'Unsupported endpoint scheme: {raw_endpoint}') + + +class EventBridge: + """Shared bridge implementation exposing EventBus-like on/emit/dispatch.""" + + def __init__( + self, + send_to: str | None = None, + listen_on: str | None = None, + *, + name: str | None = None, + ): + self.send_to = _parse_endpoint(send_to) if send_to else None + self.listen_on = _parse_endpoint(listen_on) if listen_on else None + internal_name = name or f'EventBridge_{uuid7str()[-8:]}' + self._inbound_bus = EventBus(name=internal_name, max_history_size=0) + + self._server: asyncio.AbstractServer | None = None + self._start_lock = 
asyncio.Lock() + self._listen_socket_path: Path | None = None + self._autostart_task: asyncio.Task[None] | None = None + + def on(self, event_pattern: EventPatternType, handler: Callable[[BaseEvent[Any]], Any]) -> None: + self._ensure_listener_started() + self._inbound_bus.on(event_pattern, handler) + + async def emit(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + if self.send_to is None: + raise RuntimeError(f'{self.__class__.__name__}.emit() requires send_to=...') + + payload = event.model_dump(mode='json') + + if self.send_to.scheme == 'unix': + await self._send_unix(self.send_to, payload) + else: + await self._send_http(self.send_to, payload) + + if in_handler_context(): + return None + return event + + async def dispatch(self, event: BaseEvent[Any]) -> BaseEvent[Any] | None: + return await self.emit(event) + + async def start(self) -> None: + if self.listen_on is None or self._server is not None: + return + + async with self._start_lock: + if self._server is not None: + return + + endpoint = self.listen_on + assert endpoint is not None + if endpoint.scheme == 'unix': + socket_path = Path(endpoint.path or '') + if not socket_path.is_absolute(): + raise ValueError(f'unix listen_on path must be absolute, got: {endpoint.raw}') + socket_path.parent.mkdir(parents=True, exist_ok=True) + async_socket_path = AnyPath(socket_path) + if await async_socket_path.exists(): + await async_socket_path.unlink() + self._listen_socket_path = socket_path + self._server = await asyncio.start_unix_server(self._handle_unix_client, path=str(socket_path)) + return + + if endpoint.scheme != 'http': + raise ValueError(f'listen_on only supports unix:// or http:// endpoints, got: {endpoint.raw}') + assert endpoint.host is not None + assert endpoint.port is not None + self._server = await asyncio.start_server(self._handle_http_client, host=endpoint.host, port=endpoint.port) + + async def close(self, *, clear: bool = True) -> None: + if self._autostart_task is not None: + await 
asyncio.gather(self._autostart_task, return_exceptions=True) + self._autostart_task = None + + if self._server is not None: + self._server.close() + await self._server.wait_closed() + self._server = None + + if self._listen_socket_path and self._listen_socket_path.exists(): + self._listen_socket_path.unlink() + self._listen_socket_path = None + + await self._inbound_bus.stop(clear=clear) + + def _ensure_listener_started(self) -> None: + if self.listen_on is None or self._server is not None: + return + if self._autostart_task is not None and not self._autostart_task.done(): + return + try: + loop = asyncio.get_running_loop() + except RuntimeError: + return + self._autostart_task = loop.create_task(self.start()) + + async def _handle_unix_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None: + try: + while True: + line = await reader.readline() + if not line: + break + if not line.strip(): + continue + await self._handle_incoming_bytes(line) + finally: + writer.close() + await writer.wait_closed() + + async def _handle_http_client(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None: + try: + raw_headers = await reader.readuntil(b'\r\n\r\n') + except asyncio.IncompleteReadError: + await self._write_http_response(writer, status=400, body='incomplete request') + return + except asyncio.LimitOverrunError: + await self._write_http_response(writer, status=400, body='headers too large') + return + + header_lines = raw_headers.decode('utf-8', errors='replace').split('\r\n') + if not header_lines or not header_lines[0]: + await self._write_http_response(writer, status=400, body='missing request line') + return + + parts = header_lines[0].split(' ', 2) + if len(parts) != 3: + await self._write_http_response(writer, status=400, body='invalid request line') + return + method, request_target, _version = parts + + headers: dict[str, str] = {} + for line in header_lines[1:]: + if not line: + continue + name, separator, value = 
line.partition(':') + if not separator: + continue + headers[name.strip().lower()] = value.strip() + + if method.upper() != 'POST': + await self._write_http_response(writer, status=405, body='method not allowed') + return + + expected_path = (self.listen_on.path if self.listen_on else None) or '/' + if request_target != expected_path: + await self._write_http_response(writer, status=404, body='not found') + return + + content_length = headers.get('content-length', '0') + try: + body_size = int(content_length) + except ValueError: + await self._write_http_response(writer, status=400, body='invalid content-length') + return + + if body_size < 0: + await self._write_http_response(writer, status=400, body='invalid content-length') + return + + try: + body = await reader.readexactly(body_size) + except asyncio.IncompleteReadError: + await self._write_http_response(writer, status=400, body='incomplete body') + return + + try: + await self._handle_incoming_bytes(body) + except Exception as exc: # pragma: no cover + logger.exception('Failed to process inbound IPC event: %s', exc) + await self._write_http_response(writer, status=500, body='failed to process event') + return + + await self._write_http_response(writer, status=202, body='accepted') + + async def _handle_incoming_bytes(self, payload: bytes) -> None: + message = json.loads(payload.decode('utf-8')) + event = BaseEvent[Any].model_validate(message).event_reset() + self._inbound_bus.emit(event) + + async def _send_unix(self, endpoint: _Endpoint, payload: dict[str, Any]) -> None: + socket_path = endpoint.path or '' + if not socket_path: + raise ValueError(f'Invalid unix endpoint: {endpoint.raw}') + + _reader, writer = await asyncio.open_unix_connection(path=socket_path) + writer.write(json.dumps(payload, separators=(',', ':')).encode('utf-8')) + writer.write(b'\n') + await writer.drain() + writer.close() + await writer.wait_closed() + + async def _send_http(self, endpoint: _Endpoint, payload: dict[str, Any]) -> None: 
+ payload_bytes = json.dumps(payload, separators=(',', ':')).encode('utf-8') + request = Request( + endpoint.raw, + data=payload_bytes, + headers={ + 'content-type': 'application/json', + 'content-length': str(len(payload_bytes)), + }, + method='POST', + ) + + def _post() -> int: + with urlopen(request, timeout=10) as response: + return int(response.status) + + status_code = await asyncio.to_thread(_post) + if status_code < 200 or status_code >= 300: + raise RuntimeError(f'IPC HTTP send failed with status {status_code}: {endpoint.raw}') + + @staticmethod + async def _write_http_response(writer: asyncio.StreamWriter, *, status: int, body: str) -> None: + reasons = { + 202: 'Accepted', + 400: 'Bad Request', + 404: 'Not Found', + 405: 'Method Not Allowed', + 500: 'Internal Server Error', + } + reason = reasons.get(status, 'OK') + body_bytes = body.encode('utf-8') + headers = [ + f'HTTP/1.1 {status} {reason}', + f'content-length: {len(body_bytes)}', + 'content-type: text/plain; charset=utf-8', + 'connection: close', + '', + '', + ] + writer.write('\r\n'.join(headers).encode('utf-8')) + writer.write(body_bytes) + await writer.drain() + writer.close() + await writer.wait_closed() + + +class HTTPEventBridge(EventBridge): + """Bridge events over HTTP(S) endpoints.""" + + def __init__(self, send_to: str | None = None, listen_on: str | None = None, *, name: str | None = None): + if send_to and _parse_endpoint(send_to).scheme == 'unix': + raise ValueError('HTTPEventBridge send_to must be http:// or https://') + if listen_on and _parse_endpoint(listen_on).scheme != 'http': + raise ValueError('HTTPEventBridge listen_on must be http://') + super().__init__(send_to=send_to, listen_on=listen_on, name=name or f'HTTPEventBridge_{uuid7str()[-8:]}') + + +class SocketEventBridge(EventBridge): + """Bridge events over a unix domain socket path.""" + + def __init__(self, path: str | None = None, *, name: str | None = None): + if path is None: + send_to = None + listen_on = None + else: + 
normalized = path[7:] if path.startswith('unix://') else path + if not normalized: + raise ValueError('SocketEventBridge path must not be empty') + send_to = f'unix://{normalized}' + listen_on = f'unix://{normalized}' + + super().__init__(send_to=send_to, listen_on=listen_on, name=name or f'SocketEventBridge_{uuid7str()[-8:]}') + + +def __getattr__(name: str) -> Any: + module_name = _LAZY_BRIDGE_MODULES.get(name) + if module_name is None: + raise AttributeError(name) + module = importlib.import_module(module_name, __package__) + value = getattr(module, name) + globals()[name] = value + return value diff --git a/bubus/event_bus.py b/bubus/event_bus.py new file mode 100644 index 0000000..640ac41 --- /dev/null +++ b/bubus/event_bus.py @@ -0,0 +1,2312 @@ +import asyncio +import contextvars +import inspect +import json +import logging +import warnings +import weakref +from collections import defaultdict +from collections.abc import Callable, Coroutine, Iterator, Sequence +from contextlib import contextmanager +from contextvars import ContextVar +from dataclasses import dataclass +from datetime import UTC, datetime, timedelta +from functools import partial +from typing import Any, Literal, TypeVar, overload +from uuid import UUID + +from uuid_extensions import uuid7str + +uuid7str: Callable[[], str] = uuid7str + +from bubus.base_event import ( + BUBUS_LOGGING_LEVEL, + BaseEvent, + EventConcurrencyMode, + EventHandlerCompletionMode, + EventHandlerConcurrencyMode, + EventResult, + EventStatus, + PythonIdentifierStr, + PythonIdStr, + T_Event, + T_EventResultType, + UUIDStr, +) +from bubus.event_handler import ( + ContravariantEventHandlerCallable, + EventHandler, + EventHandlerAbortedError, + EventHandlerCallable, + EventHandlerCancelledError, +) +from bubus.event_history import EventHistory +from bubus.helpers import ( + CleanShutdownQueue, + QueueShutDown, + _run_with_slow_monitor, # pyright: ignore[reportPrivateUsage] + log_filtered_traceback, +) +from bubus.lock_manager 
import LockManager, LockManagerProtocol, ReentrantLock +from bubus.middlewares import EventBusMiddleware + +logger = logging.getLogger('bubus') +logger.setLevel(BUBUS_LOGGING_LEVEL) + +T_ExpectedEvent = TypeVar('T_ExpectedEvent', bound=BaseEvent[Any]) +T_OnEvent = TypeVar('T_OnEvent', bound=BaseEvent[Any]) + +EventPatternType = PythonIdentifierStr | Literal['*'] | type[BaseEvent[Any]] +# Middleware entries can be provided as already-initialized instances or middleware classes. +EventBusMiddlewareInput = EventBusMiddleware | type[EventBusMiddleware] + + +def _as_any(value: Any) -> Any: + return value + + +@dataclass(slots=True, eq=False) +class _FindWaiter: + event_key: str + matches: Callable[[BaseEvent[Any]], bool] + future: asyncio.Future[BaseEvent[Any] | None] + timeout_handle: asyncio.TimerHandle | None = None + + +class GlobalEventBusRegistry: + """Weak global registry of EventBus instances.""" + + def __init__(self) -> None: + self._buses: weakref.WeakSet['EventBus'] = weakref.WeakSet() + + def add(self, bus: 'EventBus') -> None: + self._buses.add(bus) + + def discard(self, bus: 'EventBus') -> None: + self._buses.discard(bus) + + def has(self, bus: 'EventBus') -> bool: + return bus in self._buses + + @property + def size(self) -> int: + return len(self._buses) + + def __iter__(self) -> Iterator['EventBus']: + return iter(self._buses) + + def __len__(self) -> int: + return len(self._buses) + + def __contains__(self, bus: object) -> bool: + return bus in self._buses + + +def get_current_event() -> BaseEvent[Any] | None: + """Return the currently active event in this async context, if any.""" + return EventBus.current_event_context.get() + + +def get_current_handler_id() -> str | None: + """Return the currently active handler id in this async context, if any.""" + return EventBus.current_handler_id_context.get() + + +def get_current_eventbus() -> 'EventBus | None': + """Return the currently active EventBus in this async context, if any.""" + return 
EventBus.current_eventbus_context.get() + + +def in_handler_context() -> bool: + """Return True when called from inside an executing handler context.""" + return get_current_handler_id() is not None + + +class EventBus: + """ + Async event bus with write-ahead logging and guaranteed FIFO processing. + + Features: + - Enqueue events synchronously, await their results using 'await Event()' + - FIFP Write-ahead logging with UUIDs and timestamps, + - Serial event processing, configurable handler concurrency per event ('serial' | 'parallel') + """ + + # Track all EventBus instances (using weakrefs to allow garbage collection) + all_instances: GlobalEventBusRegistry = GlobalEventBusRegistry() + _lock_for_event_global_serial: ReentrantLock = ReentrantLock() + + # Per-loop EventBus registry and original close method tracking. + _loop_eventbus_instances: weakref.WeakKeyDictionary[asyncio.AbstractEventLoop, weakref.WeakSet['EventBus']] = ( + weakref.WeakKeyDictionary() + ) + _loop_original_close: weakref.WeakKeyDictionary[asyncio.AbstractEventLoop, Callable[[], None]] = weakref.WeakKeyDictionary() + + # Context variables for current event/handler/bus execution scope. 
+ current_event_context: ContextVar[BaseEvent[Any] | None] = ContextVar('current_event', default=None) + current_handler_id_context: ContextVar[str | None] = ContextVar('current_handler_id', default=None) + current_eventbus_context: ContextVar['EventBus | None'] = ContextVar('current_eventbus', default=None) + + # Class Attributes + name: PythonIdentifierStr = 'EventBus' + event_concurrency: EventConcurrencyMode = EventConcurrencyMode.BUS_SERIAL + event_timeout: float | None = 60.0 + event_slow_timeout: float | None = 300.0 + event_handler_concurrency: EventHandlerConcurrencyMode = EventHandlerConcurrencyMode.SERIAL + event_handler_completion: EventHandlerCompletionMode = EventHandlerCompletionMode.ALL + event_handler_slow_timeout: float | None = 30.0 + event_handler_detect_file_paths: bool = True + + # Runtime State + id: UUIDStr = '00000000-0000-0000-0000-000000000000' + handlers: dict[PythonIdStr, EventHandler] + handlers_by_key: dict[str, list[PythonIdStr]] + pending_event_queue: CleanShutdownQueue[BaseEvent[Any]] | None + event_history: EventHistory[BaseEvent[Any]] + + _is_running: bool = False + _runloop_task: asyncio.Task[None] | None = None + _parallel_event_tasks: set[asyncio.Task[None]] + _pending_middleware_tasks: set[asyncio.Task[None]] + _on_idle: asyncio.Event | None = None + in_flight_event_ids: set[str] + processing_event_ids: set[str] + _duplicate_handler_name_check_limit: int = 256 + _pending_handler_changes: list[tuple[EventHandler, bool]] + find_waiters: set[_FindWaiter] + _lock_for_event_bus_serial: ReentrantLock + locks: LockManagerProtocol + + def __init_subclass__(cls, **kwargs: Any) -> None: + super().__init_subclass__(**kwargs) + # Subclasses get isolated global registries/locks by default. 
+ cls.all_instances = GlobalEventBusRegistry() + cls._lock_for_event_global_serial = ReentrantLock() + cls._loop_eventbus_instances = weakref.WeakKeyDictionary() + cls._loop_original_close = weakref.WeakKeyDictionary() + + @classmethod + def iter_all_instances(cls) -> Iterator['EventBus']: + """Iterate live EventBus instances for ``cls`` and all subclasses.""" + pending_classes: list[type[EventBus]] = [cls] + seen_classes: set[type[EventBus]] = set() + seen_buses: set[int] = set() + while pending_classes: + bus_class = pending_classes.pop() + if bus_class in seen_classes: + continue + seen_classes.add(bus_class) + pending_classes.extend(bus_class.__subclasses__()) + for bus in list(bus_class.all_instances): + bus_id = id(bus) + if bus_id in seen_buses: + continue + seen_buses.add(bus_id) + yield bus + + def __init__( + self, + name: PythonIdentifierStr | None = None, + event_concurrency: EventConcurrencyMode | str | None = None, + event_handler_concurrency: EventHandlerConcurrencyMode | str = EventHandlerConcurrencyMode.SERIAL, + event_handler_completion: EventHandlerCompletionMode | str = EventHandlerCompletionMode.ALL, + max_history_size: int | None = 100, # Keep only 100 events in history + max_history_drop: bool = False, + event_timeout: float | None = 60.0, + event_slow_timeout: float | None = 300.0, + event_handler_slow_timeout: float | None = 30.0, + event_handler_detect_file_paths: bool = True, + middlewares: Sequence[EventBusMiddlewareInput] | None = None, + id: UUIDStr | str | None = None, + ): + self.id = str(UUID(str(id))) if id is not None else uuid7str() + self.name = name or f'{self.__class__.__name__}_{self.id[-8:]}' + assert self.name.isidentifier(), f'EventBus name must be a unique identifier string, got: {self.name}' + + # Force garbage collection to clean up any dead EventBus instances in the WeakSet + # gc.collect() # Commented out - this is expensive and causes 5s delays when creating many EventBus instances + + # Check for name uniqueness 
among existing instances + # We'll collect potential conflicts and check if they're still alive + original_name = self.name + conflicting_buses: list[EventBus] = [] + + for existing_bus in list(type(self).all_instances): # Make a list copy to avoid modification during iteration + if existing_bus is not self and existing_bus.name == self.name: + # Since stop() renames buses to _stopped_{id}, any bus with a matching + # user-specified name is either running or never-started - both should + # be considered conflicts. This makes name conflict detection deterministic. + conflicting_buses.append(existing_bus) + + # If we found conflicting buses, auto-generate a unique suffix + if conflicting_buses: + # Generate a unique suffix using the last 8 chars of a UUID + unique_suffix = uuid7str()[-8:] + self.name = f'{original_name}_{unique_suffix}' + + warnings.warn( + f'⚠️ EventBus with name "{original_name}" already exists. ' + f'Auto-generated unique name: "{self.name}" to avoid conflicts. ' + f'Consider using unique names or stop(clear=True) on unused buses.', + UserWarning, + stacklevel=2, + ) + + self.pending_event_queue = None + self.event_history = EventHistory(max_history_size=max_history_size, max_history_drop=max_history_drop) + self.handlers = {} + self.handlers_by_key = defaultdict(list) + self._lock_for_event_bus_serial = ReentrantLock() + self.locks = LockManager() + self._parallel_event_tasks = set() + self._pending_middleware_tasks = set() + try: + self.event_concurrency = EventConcurrencyMode(event_concurrency or EventConcurrencyMode.BUS_SERIAL) + except ValueError as exc: + raise AssertionError( + f'event_concurrency must be "global-serial", "bus-serial", or "parallel", got: {event_concurrency!r}' + ) from exc + try: + self.event_handler_concurrency = EventHandlerConcurrencyMode( + event_handler_concurrency or EventHandlerConcurrencyMode.SERIAL + ) + except ValueError as exc: + raise AssertionError( + f'event_handler_concurrency must be "serial" or "parallel", 
got: {event_handler_concurrency!r}' + ) from exc + try: + self.event_handler_completion = EventHandlerCompletionMode(event_handler_completion or EventHandlerCompletionMode.ALL) + except ValueError as exc: + raise AssertionError(f'event_handler_completion must be "all" or "first", got: {event_handler_completion!r}') from exc + self.event_timeout = event_timeout + self.event_slow_timeout = event_slow_timeout + self.event_handler_slow_timeout = event_handler_slow_timeout + self.event_handler_detect_file_paths = bool(event_handler_detect_file_paths) + assert self.event_timeout is None or self.event_timeout > 0, ( + f'event_timeout must be > 0 or None, got: {self.event_timeout!r}' + ) + assert self.event_slow_timeout is None or self.event_slow_timeout > 0, ( + f'event_slow_timeout must be > 0 or None, got: {self.event_slow_timeout!r}' + ) + assert self.event_handler_slow_timeout is None or self.event_handler_slow_timeout > 0, ( + f'event_handler_slow_timeout must be > 0 or None, got: {self.event_handler_slow_timeout!r}' + ) + self._on_idle = None + self.middlewares = self._normalize_middlewares(middlewares) + self.in_flight_event_ids = set() + self.processing_event_ids = set() + self._pending_handler_changes = [] + self.find_waiters = set() + + # Register this instance + type(self).all_instances.add(self) + + @staticmethod + def _normalize_middlewares(middlewares: Sequence[EventBusMiddlewareInput] | None) -> list[EventBusMiddleware]: + """Normalize middleware inputs to concrete middleware instances. + + Accepts mixed instance/class entries and instantiates class entries once + during bus construction, preserving registration order. 
+ """ + normalized: list[EventBusMiddleware] = [] + for middleware in middlewares or (): + if isinstance(middleware, EventBusMiddleware): + normalized.append(middleware) + continue + normalized.append(middleware()) + return normalized + + def __del__(self): + """Auto-cleanup on garbage collection""" + # Most cleanup should have been done by the event loop close hook + # This is just a fallback for any remaining cleanup + + # Signal the run loop to stop + self._is_running = False + if self.pending_event_queue: + try: + # Wake any blocked queue.get() in the weak runloop so it can exit. + self.pending_event_queue.shutdown(immediate=True) + except Exception: + pass + if self._runloop_task and not self._runloop_task.done(): + try: + setattr(self._runloop_task, '_log_destroy_pending', False) + self._runloop_task.cancel() + except Exception: + pass + for task in tuple(self._parallel_event_tasks): + if task.done(): + continue + try: + task.cancel() + except Exception: + pass + for task in tuple(self._pending_middleware_tasks): + if task.done(): + continue + try: + task.cancel() + except Exception: + pass + + # Our custom queue handles cleanup properly in shutdown() + # No need for manual cleanup here + + # Check total memory usage across all EventBus instances + try: + self._check_total_memory_usage() + except Exception: + # Don't let memory check errors prevent cleanup + pass + + def __str__(self) -> str: + icon = '🟒' if self._is_running else 'πŸ”΄' + queue_size = self.pending_event_queue.qsize() if self.pending_event_queue else 0 + return f'{self.label}{icon}(queue={queue_size} active={len(self.in_flight_event_ids)} history={len(self.event_history)} handlers={len(self.handlers)})' + + @property + def label(self) -> str: + return f'{self.name}#{self.id[-4:]}' + + def __repr__(self) -> str: + return str(self) + + @property + def event_bus_serial_lock(self) -> ReentrantLock: + """Public accessor for the bus-serial event lock used by LockManager.""" + return 
self._lock_for_event_bus_serial + + @property + def event_global_serial_lock(self) -> ReentrantLock: + """Class-scoped global-serial lock (isolated per EventBus subclass).""" + return type(self)._lock_for_event_global_serial + + @staticmethod + def _stub_handler_callable(_event: BaseEvent[Any]) -> None: + return None + + @classmethod + def _event_json_payload(cls, event: BaseEvent[Any]) -> dict[str, Any]: + payload = event.model_dump(mode='json') + payload['event_results'] = [result.model_dump(mode='json') for result in event.event_results.values()] + return payload + + @staticmethod + def _normalize_json_object(value: Any) -> dict[str, Any] | None: + if not isinstance(value, dict): + return None + value_any = _as_any(value) + normalized: dict[str, Any] = {} + for raw_key, raw_value in value_any.items(): + key_value: Any = raw_key + item_value: Any = raw_value + if isinstance(key_value, str): + normalized[key_value] = item_value + return normalized + + @classmethod + def _normalize_json_object_required(cls, value: Any, error_message: str) -> dict[str, Any]: + normalized = cls._normalize_json_object(value) + if normalized is None: + raise TypeError(error_message) + return normalized + + @staticmethod + def _normalize_json_string_list(value: Any) -> list[str] | None: + if not isinstance(value, list): + return None + value_any = _as_any(value) + normalized: list[str] = [] + for raw_item in value_any: + item_value: Any = raw_item + if isinstance(item_value, str): + normalized.append(item_value) + return normalized + + @staticmethod + def _upsert_handler_index(handlers_by_key: dict[str, list[PythonIdStr]], event_pattern: str, handler_id: PythonIdStr) -> None: + ids = handlers_by_key.setdefault(event_pattern, []) + if handler_id not in ids: + ids.append(handler_id) + + @classmethod + def _hydrate_handler_from_event_result( + cls, bus: 'EventBus', event: BaseEvent[Any], result_payload: dict[str, Any] + ) -> EventHandler: + if isinstance(result_payload.get('handler'), 
dict): + raise ValueError('Legacy nested EventResult.handler payload is not supported') + raw_handler_id = result_payload.get('handler_id') + handler_id: str | None = raw_handler_id if isinstance(raw_handler_id, str) and raw_handler_id else None + if handler_id is None: + raise ValueError('EventResult JSON payload must include handler_id') + if handler_id in bus.handlers: + return bus.handlers[handler_id] + handler_payload: dict[str, Any] = {'id': handler_id} + handler_payload.setdefault('handler_name', result_payload.get('handler_name') or 'anonymous') + handler_payload.setdefault('handler_file_path', result_payload.get('handler_file_path')) + if result_payload.get('handler_timeout') is not None: + handler_payload.setdefault('handler_timeout', result_payload.get('handler_timeout')) + if result_payload.get('handler_slow_timeout') is not None: + handler_payload.setdefault('handler_slow_timeout', result_payload.get('handler_slow_timeout')) + if result_payload.get('handler_registered_at') is not None: + handler_payload.setdefault('handler_registered_at', result_payload.get('handler_registered_at')) + handler_payload.setdefault('event_pattern', result_payload.get('handler_event_pattern') or event.event_type) + handler_payload.setdefault('eventbus_name', result_payload.get('eventbus_name') or bus.name) + handler_payload.setdefault('eventbus_id', result_payload.get('eventbus_id') or bus.id) + + handler_entry = EventHandler.model_validate({**handler_payload, 'handler': cls._stub_handler_callable}) + resolved_handler_id = handler_entry.id + bus.handlers[resolved_handler_id] = handler_entry + cls._upsert_handler_index(bus.handlers_by_key, handler_entry.event_pattern, resolved_handler_id) + return handler_entry + + @classmethod + def _hydrate_event_result_from_json( + cls, + *, + bus: 'EventBus', + event: BaseEvent[Any], + result_payload: dict[str, Any], + ) -> tuple[EventResult[Any], list[str]]: + handler_entry = cls._hydrate_handler_from_event_result(bus, event, 
result_payload) + model_payload = dict(result_payload) + child_ids = [child_id for child_id in model_payload.pop('event_children', []) if isinstance(child_id, str)] + has_result = 'result' in model_payload + raw_result = model_payload.pop('result', None) + for flat_key in ( + 'handler_id', + 'handler_name', + 'handler_file_path', + 'handler_timeout', + 'handler_slow_timeout', + 'handler_registered_at', + 'handler_event_pattern', + 'eventbus_name', + 'eventbus_id', + ): + model_payload.pop(flat_key, None) + model_payload['event_id'] = event.event_id + model_payload['handler'] = handler_entry + event_result = EventResult.model_validate(model_payload) + if has_result: + event_result.result = raw_result + event_result.handler = handler_entry + return event_result, child_ids + + def model_dump(self) -> dict[str, Any]: + handlers_payload: dict[str, dict[str, Any]] = {} + for handler_entry in self.handlers.values(): + handlers_payload[handler_entry.id] = handler_entry.model_dump(mode='json', exclude={'handler'}) + + handlers_by_key_payload: dict[str, list[str]] = { + str(key): [str(handler_id) for handler_id in handler_ids] for key, handler_ids in self.handlers_by_key.items() + } + for handler_entry in self.handlers.values(): + self._upsert_handler_index(handlers_by_key_payload, handler_entry.event_pattern, handler_entry.id) + + event_history_payload: dict[str, dict[str, Any]] = {} + for event in self.event_history.values(): + event_history_payload[event.event_id] = self._event_json_payload(event) + + pending_event_ids: list[str] = [] + if self.pending_event_queue is not None: + for queued_event in self.pending_event_queue.iter_items(): + event_id = queued_event.event_id + if event_id not in event_history_payload: + event_history_payload[event_id] = self._event_json_payload(queued_event) + pending_event_ids.append(event_id) + + return { + 'id': self.id, + 'name': self.name, + 'max_history_size': self.event_history.max_history_size, + 'max_history_drop': 
self.event_history.max_history_drop, + 'event_concurrency': str(self.event_concurrency), + 'event_timeout': self.event_timeout, + 'event_slow_timeout': self.event_slow_timeout, + 'event_handler_concurrency': str(self.event_handler_concurrency), + 'event_handler_completion': str(self.event_handler_completion), + 'event_handler_slow_timeout': self.event_handler_slow_timeout, + 'event_handler_detect_file_paths': self.event_handler_detect_file_paths, + 'handlers': handlers_payload, + 'handlers_by_key': handlers_by_key_payload, + 'event_history': event_history_payload, + 'pending_event_queue': pending_event_ids, + } + + def model_dump_json(self, *, indent: int | None = None) -> str: + return json.dumps(self.model_dump(), ensure_ascii=False, default=str, indent=indent) + + @classmethod + def validate(cls, data: Any) -> 'EventBus': + raw_payload: Any = data + if isinstance(data, (bytes, bytearray)): + raw_payload = data.decode('utf-8') + if isinstance(raw_payload, str): + raw_payload = json.loads(raw_payload) + payload = cls._normalize_json_object(raw_payload) + if payload is None: + raise TypeError(f'EventBus.validate() expects dict or JSON string, got: {type(data).__name__}') + name = payload.get('name') + requested_name = str(name) if isinstance(name, str) else None + bus = cls( + name=None, + event_concurrency=payload.get('event_concurrency'), + event_handler_concurrency=payload.get('event_handler_concurrency') or EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=payload.get('event_handler_completion') or EventHandlerCompletionMode.ALL, + max_history_size=payload.get('max_history_size') + if isinstance(payload.get('max_history_size'), int) or payload.get('max_history_size') is None + else 100, + max_history_drop=bool(payload.get('max_history_drop', False)), + event_timeout=payload.get('event_timeout') + if isinstance(payload.get('event_timeout'), (int, float)) or payload.get('event_timeout') is None + else None, + 
event_slow_timeout=payload.get('event_slow_timeout') + if isinstance(payload.get('event_slow_timeout'), (int, float)) or payload.get('event_slow_timeout') is None + else 300.0, + event_handler_slow_timeout=payload.get('event_handler_slow_timeout') + if isinstance(payload.get('event_handler_slow_timeout'), (int, float)) + or payload.get('event_handler_slow_timeout') is None + else 30.0, + event_handler_detect_file_paths=bool(payload.get('event_handler_detect_file_paths', True)), + id=payload.get('id') if isinstance(payload.get('id'), str) else None, + ) + if requested_name is not None: + bus.name = requested_name + + bus.handlers.clear() + bus.handlers_by_key = defaultdict(list) + bus.event_history.clear() + + raw_handlers = cls._normalize_json_object_required( + payload.get('handlers'), + 'EventBus.validate() expects handlers to be an id-keyed object', + ) + for raw_handler_id, raw_handler_payload in raw_handlers.items(): + handler_payload_json = cls._normalize_json_object(raw_handler_payload) + if handler_payload_json is None: + continue + handler_payload: dict[str, Any] = dict(handler_payload_json) + if 'id' not in handler_payload: + handler_payload['id'] = raw_handler_id + handler_entry = EventHandler.model_validate({**handler_payload, 'handler': cls._stub_handler_callable}) + bus.handlers[handler_entry.id] = handler_entry + + raw_handlers_by_key = cls._normalize_json_object_required( + payload.get('handlers_by_key'), + 'EventBus.validate() expects handlers_by_key to be an object', + ) + for raw_key, raw_ids in raw_handlers_by_key.items(): + handler_id_list = cls._normalize_json_string_list(raw_ids) + if handler_id_list is None: + continue + handler_ids: list[PythonIdStr] = [] + for handler_id in handler_id_list: + handler_ids.append(handler_id) + bus.handlers_by_key[raw_key] = handler_ids + + pending_child_links: list[tuple[EventResult[Any], list[str]]] = [] + + raw_event_history = cls._normalize_json_object_required( + payload.get('event_history'), + 
'EventBus.validate() expects event_history to be an id-keyed object', + ) + history_items: list[tuple[str, Any]] = list(raw_event_history.items()) + + for event_id_hint, event_payload_any in history_items: + event_payload_json = cls._normalize_json_object(event_payload_any) + if event_payload_json is None: + continue + event_payload: dict[str, Any] = dict(event_payload_json) + raw_event_results = event_payload.pop('event_results', []) + if 'event_id' not in event_payload or not isinstance(event_payload.get('event_id'), str): + event_payload['event_id'] = event_id_hint + try: + event = BaseEvent[Any].model_validate(event_payload) + except Exception: + continue + + hydrated_results: dict[PythonIdStr, EventResult[Any]] = {} + result_items: list[dict[str, Any]] = [] + if isinstance(raw_event_results, list): + raw_event_results_any = _as_any(raw_event_results) + for raw_item in raw_event_results_any: + result_payload = cls._normalize_json_object(raw_item) + if result_payload is None: + continue + result_items.append(dict(result_payload)) + + for result_payload in result_items: + try: + event_result, child_ids = cls._hydrate_event_result_from_json( + bus=bus, + event=event, + result_payload=result_payload, + ) + except Exception: + continue + hydrated_results[event_result.handler_id] = event_result + pending_child_links.append((event_result, child_ids)) + + event.event_results = hydrated_results + bus.event_history[event.event_id] = event + + pending_event_ids: list[str] = [] + raw_pending_queue = cls._normalize_json_string_list(payload.get('pending_event_queue')) + if raw_pending_queue is None: + raise TypeError('EventBus.validate() expects pending_event_queue to be a list of event ids') + pending_event_ids.extend(raw_pending_queue) + + for event_result, child_ids in pending_child_links: + event_result.event_children = [bus.event_history[child_id] for child_id in child_ids if child_id in bus.event_history] + + if pending_event_ids: + queue = 
CleanShutdownQueue[BaseEvent[Any]](maxsize=0) + for event_id in pending_event_ids: + event = bus.event_history.get(event_id) + if event is None: + continue + queue.put_nowait(event) + bus.pending_event_queue = queue + bus._on_idle = asyncio.Event() + if queue.qsize() == 0 and not bus._has_inflight_events_fast(): + bus._on_idle.set() + else: + bus._on_idle.clear() + else: + bus.pending_event_queue = None + bus._on_idle = None + + bus._is_running = False + bus._runloop_task = None + bus._parallel_event_tasks = set() + bus._pending_middleware_tasks = set() + bus.in_flight_event_ids = set() + bus.processing_event_ids = set() + return bus + + async def on_event_change(self, event: BaseEvent[Any], status: EventStatus) -> None: + if not self.middlewares: + return + for middleware in self.middlewares: + await middleware.on_event_change(self, event, status) + + async def on_event_result_change(self, event: BaseEvent[Any], event_result: EventResult[Any], status: EventStatus) -> None: + if not self.middlewares: + return + for middleware in self.middlewares: + await middleware.on_event_result_change(self, event, event_result, status) + + def remove_event_from_pending_queue(self, event: BaseEvent[Any]) -> bool: + if self.pending_event_queue is None: + return False + return self.pending_event_queue.remove_item(event) + + def mark_pending_queue_task_done(self) -> None: + if self.pending_event_queue is None: + return + try: + self.pending_event_queue.task_done() + except ValueError: + pass + + def queue_contains_event_id(self, event_id: str) -> bool: + if self.pending_event_queue is None: + return False + for queued_event in self.pending_event_queue.iter_items(): + if queued_event.event_id == event_id: + return True + return False + + def is_event_inflight_or_queued(self, event_id: str) -> bool: + if event_id in self.in_flight_event_ids: + return True + if event_id in self.processing_event_ids: + return True + return self.queue_contains_event_id(event_id) + + def 
is_event_processing(self, event_id: str) -> bool: + return event_id in self.processing_event_ids + + def _resolve_find_waiters(self, event: BaseEvent[Any]) -> None: + if not self.find_waiters: + return + for waiter in tuple(self.find_waiters): + if (waiter.event_key != '*' and event.event_type != waiter.event_key) or not waiter.matches(event): + continue + if waiter.timeout_handle is not None: + waiter.timeout_handle.cancel() + self.find_waiters.discard(waiter) + if not waiter.future.done(): + waiter.future.set_result(event) + + async def _wait_for_future_match( + self, + event_key: str, + matches: Callable[[BaseEvent[Any]], bool], + future: bool | float, + ) -> BaseEvent[Any] | None: + if future is False: + return None + event_match_future: asyncio.Future[BaseEvent[Any] | None] = asyncio.get_running_loop().create_future() + waiter = _FindWaiter(event_key=event_key, matches=matches, future=event_match_future) + if future is not True: + timeout_seconds = float(future) + + def _on_wait_timeout() -> None: + self.find_waiters.discard(waiter) + if waiter.timeout_handle is not None: + waiter.timeout_handle.cancel() + waiter.timeout_handle = None + if not event_match_future.done(): + event_match_future.set_result(None) + + waiter.timeout_handle = asyncio.get_running_loop().call_later(timeout_seconds, _on_wait_timeout) + self.find_waiters.add(waiter) + try: + return await event_match_future + finally: + self.find_waiters.discard(waiter) + if waiter.timeout_handle is not None: + waiter.timeout_handle.cancel() + waiter.timeout_handle = None + + async def on_bus_handlers_change(self, handler: EventHandler, registered: bool) -> None: + if not self.middlewares: + return + for middleware in self.middlewares: + await middleware.on_bus_handlers_change(self, handler, registered) + + def _schedule_middleware_task(self, task: asyncio.Task[None]) -> None: + self._pending_middleware_tasks.add(task) + if self._on_idle is not None: + self._on_idle.clear() + + def _on_done(done_task: 
asyncio.Task[None]) -> None: + self._pending_middleware_tasks.discard(done_task) + if self._on_idle and self.pending_event_queue: + if not self._has_inflight_events_fast() and self.pending_event_queue.qsize() == 0: + self._on_idle.set() + if done_task.cancelled(): + return + exc = done_task.exception() + if exc is not None: + logger.error('❌ %s middleware task failed: %s(%r)', self, type(exc).__name__, exc) + + task.add_done_callback(_on_done) + + def _notify_handler_change(self, handler: EventHandler, registered: bool) -> None: + if not self.middlewares: + return + try: + loop = asyncio.get_running_loop() + except RuntimeError: + # Preserve .on()/.off() notifications registered before an event loop starts. + self._pending_handler_changes.append((handler.model_copy(deep=False), registered)) + return + task = loop.create_task(self.on_bus_handlers_change(handler, registered)) + self._schedule_middleware_task(task) + + def _flush_pending_handler_changes(self) -> None: + if not self._pending_handler_changes or not self.middlewares: + return + loop = asyncio.get_running_loop() + queued = list(self._pending_handler_changes) + self._pending_handler_changes.clear() + for handler, registered in queued: + task = loop.create_task(self.on_bus_handlers_change(handler, registered)) + self._schedule_middleware_task(task) + + @staticmethod + def _resolve_event_slow_timeout(event: BaseEvent[Any], eventbus: 'EventBus') -> float | None: + event_slow_timeout = event.event_slow_timeout + if event_slow_timeout is not None: + return event_slow_timeout + return eventbus.event_slow_timeout + + @staticmethod + def _resolve_handler_slow_timeout(event: BaseEvent[Any], handler: EventHandler, eventbus: 'EventBus') -> float | None: + if 'handler_slow_timeout' in handler.model_fields_set: + return handler.handler_slow_timeout + if event.event_handler_slow_timeout is not None: + return event.event_handler_slow_timeout + if event.event_slow_timeout is not None: + return event.event_slow_timeout + 
return eventbus.event_handler_slow_timeout + + @staticmethod + def _resolve_handler_timeout( + event: BaseEvent[Any], + handler: EventHandler, + eventbus: 'EventBus', + timeout_override: float | None = None, + ) -> float | None: + if 'handler_timeout' in handler.model_fields_set: + resolved_handler_timeout = handler.handler_timeout + elif event.event_handler_timeout is not None: + resolved_handler_timeout = event.event_handler_timeout + else: + resolved_handler_timeout = eventbus.event_timeout + + resolved_event_timeout = event.event_timeout if event.event_timeout is not None else eventbus.event_timeout + + if resolved_handler_timeout is None and resolved_event_timeout is None: + resolved_timeout = None + elif resolved_handler_timeout is None: + resolved_timeout = resolved_event_timeout + elif resolved_event_timeout is None: + resolved_timeout = resolved_handler_timeout + else: + resolved_timeout = min(resolved_handler_timeout, resolved_event_timeout) + + if timeout_override is None: + return resolved_timeout + if resolved_timeout is None: + return timeout_override + return min(resolved_timeout, timeout_override) + + async def _slow_event_warning_monitor(self, event: BaseEvent[Any], event_slow_timeout: float) -> None: + await asyncio.sleep(event_slow_timeout) + if event.event_status == EventStatus.COMPLETED: + return + running_handler_count = sum(1 for result in event.event_results.values() if result.status == 'started') + started_at = event.event_started_at or event.event_created_at + started_at_dt = datetime.fromisoformat(started_at) + elapsed_seconds = max(0.0, (datetime.now(UTC) - started_at_dt).total_seconds()) + logger.warning( + '⚠️ Slow event processing: %s.on(%s#%s, %d handlers) still running after %.2fs', + self.label, + event.event_type, + event.event_id[-4:], + running_handler_count, + elapsed_seconds, + ) + + def _has_inflight_events_fast(self) -> bool: + return bool( + self.in_flight_event_ids or self.processing_event_ids or self._parallel_event_tasks 
or self._pending_middleware_tasks + ) + + def _mark_event_complete_on_all_buses(self, event: BaseEvent[Any]) -> None: + event_id = event.event_id + for bus in list(type(self).all_instances): + if bus: + bus.in_flight_event_ids.discard(event_id) + if bus.event_history.max_history_size == 0: + # max_history_size=0 means "keep only in-flight events". + # As soon as an event is completed, drop it from history. + bus.event_history.remove_event(event_id) + + @property + def events_pending(self) -> list[BaseEvent[Any]]: + """Get events that haven't started processing yet (does not include events still being enqueued in self.event_queue).""" + return [ + event + for event in self.event_history.values() + if event.event_status != EventStatus.COMPLETED and event.event_status != EventStatus.STARTED + ] + + @property + def events_started(self) -> list[BaseEvent[Any]]: + """Get events currently being processed""" + return [ + event + for event in self.event_history.values() + if event.event_status != EventStatus.COMPLETED and event.event_status == EventStatus.STARTED + ] + + @property + def events_completed(self) -> list[BaseEvent[Any]]: + """Get events that have completed processing""" + return [event for event in self.event_history.values() if event.event_status == EventStatus.COMPLETED] + + # Overloads for typed event patterns with specific handler signatures + # Order matters - more specific types must come before general ones + + # Class pattern registration keeps strict event typing. + @overload + def on(self, event_pattern: type[T_OnEvent], handler: ContravariantEventHandlerCallable[T_OnEvent]) -> EventHandler: ... + + # String and wildcard registration is looser: any BaseEvent subclass handler is allowed. + @overload + def on( + self, + event_pattern: PythonIdentifierStr | Literal['*'], + handler: ContravariantEventHandlerCallable[T_OnEvent], + ) -> EventHandler: ... + + # I dont think this is needed, but leaving it here for now + # 9. 
Coroutine[Any, Any, Any] - direct coroutine + # @overload + # def on(self, event_pattern: EventPatternType, handler: Coroutine[Any, Any, Any]) -> None: ... + + def on( + self, + event_pattern: EventPatternType, + handler: ContravariantEventHandlerCallable[T_OnEvent], + ) -> EventHandler: + """ + Subscribe to events matching a pattern, event type name, or event model class. + Use event_pattern='*' to subscribe to all events. Handler can be sync or async function or method. + + Examples: + eventbus.on('TaskStartedEvent', handler) # Specific event type + eventbus.on(TaskStartedEvent, handler) # Event model class + eventbus.on('*', handler) # Subscribe to all events + eventbus.on('*', other_eventbus.emit) # Forward all events to another EventBus + + Note: When forwarding events between buses, all handler results are automatically + flattened into the original event's results, so EventResults sees all handlers + from all buses as a single flat collection. + """ + assert isinstance(event_pattern, str) or isinstance(event_pattern, type), ( + f'Invalid event pattern: {event_pattern}, must be a string event type or subclass of BaseEvent' + ) + assert inspect.isfunction(handler) or inspect.ismethod(handler) or inspect.iscoroutinefunction(handler), ( + f'Invalid handler: {handler}, must be a sync or async function or method' + ) + + # Normalize event key to string event_type or wildcard. + event_key = EventHistory.normalize_event_pattern(event_pattern) + + # Ensure event_key is definitely a string at this point + assert isinstance(event_key, str) + + # Check for duplicate handler names. Keep this bounded so large handler + # registrations (e.g. perf scenarios with tens of thousands of handlers) + # do not degrade into O(n^2) registration time. 
+ new_handler_name = EventHandler.get_callable_handler_name(handler) + existing_handler_ids = self.handlers_by_key.get(event_key, []) + if existing_handler_ids and len(existing_handler_ids) <= self._duplicate_handler_name_check_limit: + for existing_handler_id in existing_handler_ids: + existing_handler = self.handlers.get(existing_handler_id) + if existing_handler and existing_handler.handler_name == new_handler_name: + warnings.warn( + f"⚠️ {self} Handler {new_handler_name} already registered for event '{event_key}'. " + f'This may make it difficult to filter event results by handler name. ' + f'Consider using unique function names.', + UserWarning, + stacklevel=2, + ) + break + + # Register handler entry and index it by event key. + handler_entry = EventHandler.from_callable( + handler=handler, + event_pattern=event_key, + eventbus_name=self.name, + eventbus_id=self.id, + detect_handler_file_path=self.event_handler_detect_file_paths, + ) + assert handler_entry.id is not None + self.handlers[handler_entry.id] = handler_entry + self.handlers_by_key[event_key].append(handler_entry.id) + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + 'πŸ‘‚ %s.on(%s, %s) Registered event handler #%s', + self, + event_key, + handler_entry.handler_name, + handler_entry.id[-4:], + ) + self._notify_handler_change(handler_entry, registered=True) + return handler_entry + + @overload + def off( + self, event_pattern: type[T_Event], handler: EventHandlerCallable | PythonIdStr | EventHandler | None = None + ) -> None: ... + + @overload + def off( + self, + event_pattern: PythonIdentifierStr | Literal['*'], + handler: EventHandlerCallable | PythonIdStr | EventHandler | None = None, + ) -> None: ... 
+ + def off( + self, + event_pattern: EventPatternType, + handler: EventHandlerCallable | PythonIdStr | EventHandler | None = None, + ) -> None: + """Deregister handlers for an event pattern by id, callable, EventHandler, or all.""" + event_key = EventHistory.normalize_event_pattern(event_pattern) + indexed_ids = list(self.handlers_by_key.get(event_key, [])) + if not indexed_ids: + return + + requested_id: str | None = None + requested_callable: EventHandlerCallable | None = None + if isinstance(handler, EventHandler): + requested_id = handler.id + elif isinstance(handler, str): + requested_id = handler + elif handler is not None: + requested_callable = handler + + for handler_id in indexed_ids: + entry = self.handlers.get(handler_id) + if entry is None: + self._remove_indexed_handler(event_key, handler_id) + continue + + should_remove = False + if handler is None: + should_remove = True + elif requested_id is not None and entry.id == requested_id: + should_remove = True + elif requested_callable is not None and entry.handler is requested_callable: + should_remove = True + + if should_remove: + self.handlers.pop(handler_id, None) + self._remove_indexed_handler(event_key, handler_id) + self._notify_handler_change(entry, registered=False) + + def emit(self, event: T_ExpectedEvent) -> T_ExpectedEvent: + """ + Enqueue an event for processing and immediately return an Event(status='pending') version (synchronous). + You can await the returned Event(status='pending') object to block until it is done being executed aka Event(status='completed'), + or you can interact with the unawaited Event(status='pending') before its handlers have finished. + + (The first EventBus.emit() call auto-starts the bus processing loop task if not already running) + + >>> completed_event = await eventbus.emit(SomeEvent()) + # 1. enqueues the event synchronously + # 2. returns an awaitable SomeEvent() with pending results in .event_results + # 3. 
awaits the SomeEvent() which waits until all pending results are complete and returns the completed SomeEvent() + + >>> result_value = await eventbus.emit(SomeEvent()).event_result() + # 1. enqueues the event synchronously + # 2. returns a pending SomeEvent() with pending results in .event_results + # 3. awaiting .event_result() waits until all pending results are complete, and returns the raw result value of the first one + """ + + try: + asyncio.get_running_loop() + except RuntimeError: + raise RuntimeError(f'{self}.emit() called but no event loop is running! Event not queued: {event.event_type}') + + assert event.event_id, 'Missing event.event_id: UUIDStr = uuid7str()' + assert event.event_created_at, 'Missing event.event_created_at: str = monotonic_datetime()' + assert event.event_type and event.event_type.isidentifier(), 'Missing event.event_type: str' + + # Automatically set event_parent_id from context when emitting a NEW child event. + # If we are forwarding the same event object from inside its own handler, keep the + # existing parent linkage untouched to avoid self-parent cycles. 
+ if event.event_parent_id is None: + current_event: BaseEvent[Any] | None = EventBus.current_event_context.get() + if current_event is not None and event.event_id != current_event.event_id: + event.event_parent_id = current_event.event_id + + # Capture emit-time context for propagation to handlers (GitHub issue #20) + # This ensures ContextVars set before emit() are accessible in handlers + if event._get_dispatch_context() is None: # pyright: ignore[reportPrivateUsage] + event._set_dispatch_context(contextvars.copy_context()) # pyright: ignore[reportPrivateUsage] + + # Track child events - if we're inside a handler, add this event to the handler's event_children list + # Only track if this is a NEW event (not forwarding an existing event) + current_handler_id = EventBus.current_handler_id_context.get() + if current_handler_id is not None: + current_event = EventBus.current_event_context.get() + if current_event is not None and current_handler_id in current_event.event_results: + # Only add as child if it's a different event (not forwarding the same event) + if event.event_id != current_event.event_id: + if event.event_emitted_by_handler_id is None: + event.event_emitted_by_handler_id = current_handler_id + current_event.event_results[current_handler_id].event_children.append(event) + + # Add this EventBus label to the event_path if not already there + if self.label not in event.event_path: + # preserve identity of the original object instead of creating a new one, so that the original object remains awaitable to get the result + # NOT: event = event.model_copy(update={'event_path': event.event_path + [self.name]}) + event.event_path.append(self.label) + else: + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + '⚠️ %s.emit(%s) - Bus already in path, not adding again. 
Path: %s', + self, + event.event_type, + event.event_path, + ) + + assert event.event_path, 'Missing event.event_path: list[str] (with at least one bus label recorded in it)' + assert all( + '#' in entry + and entry.rsplit('#', 1)[0].isidentifier() + and entry.rsplit('#', 1)[1].isalnum() + and len(entry.rsplit('#', 1)[1]) == 4 + for entry in event.event_path + ), f'Event.event_path must be a list of EventBus labels BusName#abcd, got: {event.event_path}' + + # NOTE: + # emit() is intentionally synchronous and runs on the same event-loop + # thread as the runloop task. Blocking here for "pressure" would deadlock + # naive flood loops because the runloop cannot progress until emit() returns. + # So pressure is handled by policy: + # - max_history_drop=True -> absorb and trim oldest history entries + # - max_history_drop=False -> reject new emits at max_history_size + if ( + self.event_history.max_history_size is not None + and self.event_history.max_history_size > 0 + and not self.event_history.max_history_drop + ): + if len(self.event_history) >= self.event_history.max_history_size: + # Before rejecting, opportunistically evict already-completed history entries. + # This preserves max_history_drop=False semantics (never dropping in-flight events) + # while avoiding needless backpressure when only completed entries are occupying the cap. + self.event_history.trim_event_history(owner_label=str(self)) + if len(self.event_history) >= self.event_history.max_history_size: + raise RuntimeError( + f'{self} history limit reached ({len(self.event_history)}/{self.event_history.max_history_size}); ' + 'set event_history.max_history_drop=True to drop old history instead of rejecting new events' + ) + + # Auto-start if needed + self._flush_pending_handler_changes() + self._start() + # Ensure every emitted event has a completion signal tied to this loop. + # Completion logic always sets this signal; consumers like event_results_* await it. 
+ _ = event.event_completed_signal + + # Put event in queue synchronously using put_nowait + if self.pending_event_queue: + try: + self.pending_event_queue.put_nowait(event) + # Only add to history after successfully queuing + self.event_history[event.event_id] = event + self.in_flight_event_ids.add(event.event_id) + # Resolve future find waiters immediately on emit so callers + # don't wait for queue position or handler execution. + self._resolve_find_waiters(event) + if logger.isEnabledFor(logging.INFO): + logger.info( + 'πŸ—£οΈ %s.emit(%s) ➑️ %s#%s (#%d %s)', + self, + event.event_type, + event.event_type, + event.event_id[-4:], + self.pending_event_queue.qsize(), + event.event_status, + ) + except asyncio.QueueFull: + # Don't add to history if we can't queue it + logger.error( + f'⚠️ {self} Event queue is full! Dropping event and aborting {event.event_type}:\n{event.model_dump_json()}' + ) + raise # could also block indefinitely until queue has space, but dont drop silently or delete events + else: + logger.warning('⚠️ %s.emit() called but event_queue is None! Event not queued: %s', self, event.event_type) + + # Note: We do NOT pre-create EventResults here anymore. + # EventResults are created only when handlers actually start executing. + # This avoids "orphaned" pending results for handlers that get filtered out later. + + # Amortize cleanup work by trimming only after a soft overage; this keeps + # hot emit fast under large naive floods while still bounding memory. 
+ if ( + self.event_history.max_history_size is not None + and self.event_history.max_history_size > 0 + and self.event_history.max_history_drop + ): + soft_limit = max(self.event_history.max_history_size, int(self.event_history.max_history_size * 1.2)) + if len(self.event_history) > soft_limit: + self.event_history.trim_event_history(owner_label=str(self)) + + return event + + def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: + """Convenience synonym for :meth:`emit`.""" + return self.emit(event) + + def _remove_indexed_handler(self, event_pattern: str, handler_id: PythonIdStr) -> None: + ids = self.handlers_by_key.get(event_pattern) + if not ids: + return + if handler_id in ids: + ids.remove(handler_id) + if not ids: + self.handlers_by_key.pop(event_pattern, None) + + @overload + async def find( + self, + event_type: type[T_ExpectedEvent], + where: None = None, + child_of: BaseEvent[Any] | None = None, + past: bool | float | timedelta | None = None, + future: bool | float | None = None, + **event_fields: Any, + ) -> T_ExpectedEvent | None: ... + + @overload + async def find( + self, + event_type: type[T_ExpectedEvent], + where: Callable[[T_ExpectedEvent], bool], + child_of: BaseEvent[Any] | None = None, + past: bool | float | timedelta | None = None, + future: bool | float | None = None, + **event_fields: Any, + ) -> T_ExpectedEvent | None: ... + + @overload + async def find( + self, + event_type: PythonIdentifierStr | Literal['*'], + where: Callable[[BaseEvent[Any]], bool] | None = None, + child_of: BaseEvent[Any] | None = None, + past: bool | float | timedelta | None = None, + future: bool | float | None = None, + **event_fields: Any, + ) -> BaseEvent[Any] | None: ... 
+ + async def find( + self, + event_type: EventPatternType, + where: Callable[[Any], bool] | None = None, + child_of: BaseEvent[Any] | None = None, + past: bool | float | timedelta | None = None, + future: bool | float | None = None, + **event_fields: Any, + ) -> BaseEvent[Any] | None: + """ + Find an event matching criteria in history and/or future. + + Mirrors TS `EventBus.find` behavior: + - Default behavior with no options: `past=True`, `future=False` + - Search history and return the most recent match + - Optionally wait for future emits + - Supports exact-match equality filters via keyword args for any event field + + Args: + event_type: The event type string or model class to find + where: Predicate function for filtering (default: lambda _: True) + child_of: Only match events that are descendants of this parent event + past: Controls history search behavior: + - True: search all history + - False: skip history search + - float: search events from last N seconds only + - timedelta: search events from last N seconds + future: Controls future wait behavior: + - True: wait forever for matching event + - False: don't wait for future events + - float: wait up to N seconds for matching event + **event_fields: Optional exact-match filters for any event field + (for example `event_status='completed'`, `user_id='u-1'`) + + Returns: + Matching event or None if not found/timeout + """ + return await self.event_history.find( + event_type, + where=where, + child_of=child_of, + past=past, + future=future, + event_is_child_of=self.event_is_child_of, + wait_for_future_match=self._wait_for_future_match, + **event_fields, + ) + + def event_is_child_of(self, event: BaseEvent[Any], ancestor: BaseEvent[Any]) -> bool: + """ + Check if event is a descendant of ancestor (child, grandchild, etc.). + + Walks up the parent chain from event looking for ancestor. + Returns True if ancestor is found in the chain, False otherwise. 
+ + Args: + event: The potential descendant event + ancestor: The potential ancestor event + + Returns: + True if event is a descendant of ancestor, False otherwise + """ + current_id = event.event_parent_id + visited: set[str] = set() + + while current_id and current_id not in visited: + if current_id == ancestor.event_id: + return True + visited.add(current_id) + + # Find parent event in any bus's history + parent = self.event_history.get(current_id) + if parent is None: + # Check other buses + for bus in list(type(self).all_instances): + if bus is not self and current_id in bus.event_history: + parent = bus.event_history[current_id] + break + if parent is None: + break + current_id = parent.event_parent_id + + return False + + def event_is_parent_of(self, event: BaseEvent[Any], descendant: BaseEvent[Any]) -> bool: + return self.event_is_child_of(descendant, event) + + def _start(self) -> None: + """Start the event bus if not already running""" + if not self._is_running: + try: + loop = asyncio.get_running_loop() + + # Hook into the event loop's close method to cleanup before it closes + # this is necessary to silence "RuntimeError: no running event loop" and "event loop is closed" errors on shutdown + eventbus_class = type(self) + registered_eventbuses: weakref.WeakSet[EventBus] | None = eventbus_class._loop_eventbus_instances.get(loop) + if registered_eventbuses is None: + registered_eventbuses = weakref.WeakSet() + eventbus_class._loop_eventbus_instances[loop] = registered_eventbuses + + if loop not in eventbus_class._loop_original_close: + original_close = loop.close + eventbus_class._loop_original_close[loop] = original_close + + def close_with_cleanup() -> None: + empty_eventbuses: weakref.WeakSet[EventBus] = weakref.WeakSet() + # Clean up all registered EventBuses before closing the loop + for eventbus in list(eventbus_class._loop_eventbus_instances.get(loop, empty_eventbuses)): + try: + # Stop the eventbus while loop is still running + if 
eventbus._is_running: + eventbus._is_running = False + + # Shutdown the queue properly - our custom queue will handle cleanup + if eventbus.pending_event_queue: + eventbus.pending_event_queue.shutdown(immediate=True) + + if eventbus._runloop_task and not eventbus._runloop_task.done(): + # Suppress warning before cancelling + setattr(eventbus._runloop_task, '_log_destroy_pending', False) + eventbus._runloop_task.cancel() + except Exception: + pass + + # Now close the loop + original_close() + + setattr(loop, 'close', close_with_cleanup) + + # Register this EventBus instance in the per-loop WeakSet. + registered_eventbuses.add(self) + + # Create async objects if needed + if self.pending_event_queue is None: + # Keep queue unbounded so naive emit floods can enqueue without + # artificial queue caps; queue stores event object references. + self.pending_event_queue = CleanShutdownQueue[BaseEvent[Any]](maxsize=0) + self._on_idle = asyncio.Event() + if not self._has_inflight_events_fast() and self.pending_event_queue.qsize() == 0: + self._on_idle.set() + else: + self._on_idle.clear() + + # Create and start the run loop task. + # Use a weakref-based runner so an unreferenced EventBus can be GC'd + # without requiring explicit stop(clear=True) by callers. + # Run loops must start with a clean context. If emit() is called + # from inside a handler, lock-depth ContextVars would otherwise leak + # into the new task and bypass event lock acquisition. 
+ self._runloop_task = loop.create_task( + EventBus._run_loop_weak(weakref.ref(self)), + name=f'{self}._run_loop', + context=contextvars.Context(), + ) + self._is_running = True + except RuntimeError: + # No event loop - will start when one becomes available + pass + + async def stop(self, timeout: float | None = None, clear: bool = False) -> None: + """Stop the event bus, optionally waiting for events to complete + + Args: + timeout: Maximum time to wait for pending events to complete + clear: If True, clear event history and remove from global tracking to free memory + """ + if not self._is_running and not self._parallel_event_tasks: + for waiter in tuple(self.find_waiters): + self.find_waiters.discard(waiter) + if waiter.timeout_handle is not None: + waiter.timeout_handle.cancel() + if not waiter.future.done(): + waiter.future.set_result(None) + return + + # Wait for completion if timeout specified and > 0 + # timeout=0 means "don't wait", so skip the wait entirely + if timeout is not None and timeout > 0: + try: + await self.wait_until_idle(timeout=timeout) + except TimeoutError: + pass + + queue_size = self.pending_event_queue.qsize() if self.pending_event_queue else 0 + has_inflight = self._has_inflight_events_fast() + if queue_size or has_inflight: + logger.debug( + '⚠️ %s stopping with pending events: queue=%d inflight=%s history=%d', + self, + queue_size, + has_inflight, + len(self.event_history), + ) + + # Signal shutdown + self._is_running = False + + # Shutdown the queue to unblock any pending get() operations + if self.pending_event_queue: + self.pending_event_queue.shutdown() + + # print('STOPPING', self.event_history) + + # Wait for the run loop task to finish / force-cancel it if it's hanging + if self._runloop_task and not self._runloop_task.done(): + await asyncio.wait({self._runloop_task}, timeout=0.1) + try: + self._runloop_task.cancel() + except Exception: + pass + + if self._parallel_event_tasks: + for task in list(self._parallel_event_tasks): 
+ if not task.done(): + task.cancel() + await asyncio.gather(*list(self._parallel_event_tasks), return_exceptions=True) + self._parallel_event_tasks.clear() + + # Clear references + self._runloop_task = None + self.in_flight_event_ids.clear() + self.processing_event_ids.clear() + for waiter in tuple(self.find_waiters): + self.find_waiters.discard(waiter) + if waiter.timeout_handle is not None: + waiter.timeout_handle.cancel() + if not waiter.future.done(): + waiter.future.set_result(None) + if self._on_idle: + self._on_idle.set() + + # Rename the bus to release the name. This ensures stopped buses don't + # cause name conflicts with new buses using the same name. This makes + # name conflict detection deterministic (not dependent on GC timing). + self.name = f'_stopped_{self.id[-8:]}' + + # Clear event history and handlers if requested (for memory cleanup) + if clear: + self.event_history.clear() + self.handlers.clear() + self.handlers_by_key.clear() + self.in_flight_event_ids.clear() + + # Remove from global instance tracking + if self in type(self).all_instances: + type(self).all_instances.discard(self) + + # Remove from event loop's tracking if present + try: + loop = asyncio.get_running_loop() + registered_eventbuses = type(self)._loop_eventbus_instances.get(loop) + if registered_eventbuses is not None: + registered_eventbuses.discard(self) + except RuntimeError: + # No running loop, that's fine + pass + + logger.debug('🧹 %s cleared event history and removed from global tracking', self) + + logger.debug('πŸ›‘ %s shut down %s', self, 'gracefully' if timeout is not None else 'immediately') + + # Check total memory usage across all instances + try: + self._check_total_memory_usage() + except Exception: + # Don't let memory check errors prevent shutdown + pass + + async def wait_until_idle(self, timeout: float | None = None) -> None: + """Wait until the event bus is idle (no events being processed and all handlers completed)""" + + self._start() + assert 
self._on_idle and self.pending_event_queue, 'EventBus._start() must be called before wait_until_idle() is reached' + + start_time = asyncio.get_event_loop().time() + remaining_timeout = timeout + + try: + # Wait until both queue and inflight execution are empty. + # Avoid relying on queue.join() because unfinished-task counters can + # drift under queue-jump paths while observable runtime state is idle. + while True: + queue_empty = self.pending_event_queue.qsize() == 0 + has_inflight = self._has_inflight_events_fast() + if queue_empty and not has_inflight: + self._on_idle.set() + break + + if timeout is not None: + elapsed = asyncio.get_event_loop().time() - start_time + remaining_timeout = max(0, timeout - elapsed) + if remaining_timeout <= 0: + raise TimeoutError() + + # Wait again for an idle transition. + idle_task = asyncio.create_task(self._on_idle.wait()) + await asyncio.wait_for(idle_task, timeout=remaining_timeout) + await asyncio.sleep(0) # Yield again + + except TimeoutError: + logger.warning( + 'βŒ›οΈ %s Timeout waiting for event bus to be idle after %ss (history=%d)', + self, + timeout, + len(self.event_history), + ) + + @staticmethod + async def _run_loop_weak(bus_ref: 'weakref.ReferenceType[EventBus]') -> None: + """ + Weakref-based run loop. + + This runner avoids holding a strong EventBus reference while idle, + allowing unreferenced buses to be garbage-collected naturally without + an explicit stop(). 
+ """ + try: + while True: + bus = bus_ref() + if bus is None or not bus._is_running: + break + + queue = bus.pending_event_queue + on_idle = bus._on_idle + del bus + + if queue is None or on_idle is None: + await asyncio.sleep(0.01) + continue + + event: BaseEvent[Any] | None = None + try: + event = await queue.get() + except QueueShutDown: + break + except asyncio.CancelledError: + break + except RuntimeError as e: + if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): + break + logger.exception(f'❌ Weak run loop runtime error: {type(e).__name__} {e}', exc_info=True) + continue + except Exception as e: + logger.exception(f'❌ Weak run loop error: {type(e).__name__} {e}', exc_info=True) + continue + + bus = bus_ref() + if bus is None: + try: + queue.task_done() + except Exception: + pass + break + + try: + if bus._on_idle: + bus._on_idle.clear() + + bus.processing_event_ids.add(event.event_id) + + event_lock = bus.locks.get_lock_for_event(bus, event) + if event_lock is None: + + async def process_parallel_event( + bus: 'EventBus' = bus, + event: BaseEvent[Any] = event, + queue: CleanShutdownQueue[BaseEvent[Any]] = queue, + ) -> None: + try: + await bus.step(event=event) + finally: + try: + queue.task_done() + except ValueError: + pass + + task = asyncio.create_task( + process_parallel_event(), + name=f'{bus}._process_event({event.event_id[-4:]})', + ) + bus._parallel_event_tasks.add(task) + + def _on_done( + done_task: asyncio.Task[None], *, bus_ref: 'weakref.ReferenceType[EventBus]' = weakref.ref(bus) + ): + live_bus = bus_ref() + if live_bus is not None: + live_bus._parallel_event_tasks.discard(done_task) + if ( + live_bus._on_idle + and live_bus.pending_event_queue + and not live_bus._has_inflight_events_fast() + and live_bus.pending_event_queue.qsize() == 0 + ): + live_bus._on_idle.set() + if done_task.cancelled(): + return + try: + exc = done_task.exception() + except asyncio.CancelledError: + return + if exc is not None: + 
logger.exception('❌ Weak run loop parallel event task error: %s %s', type(exc).__name__, exc) + + task.add_done_callback(_on_done) + else: + try: + await bus.step(event=event) + finally: + try: + queue.task_done() + except ValueError: + pass + if bus._on_idle and bus.pending_event_queue: + if not bus._has_inflight_events_fast() and bus.pending_event_queue.qsize() == 0: + bus._on_idle.set() + except QueueShutDown: + break + except asyncio.CancelledError: + break + except RuntimeError as e: + if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): + break + logger.exception(f'❌ Weak run loop runtime error: {type(e).__name__} {e}', exc_info=True) + except Exception as e: + logger.exception(f'❌ Weak run loop error: {type(e).__name__} {e}', exc_info=True) + finally: + del bus + finally: + bus = bus_ref() + if bus is not None: + bus._is_running = False + + async def _get_next_event(self, wait_for_timeout: float = 0.1) -> 'BaseEvent[Any] | None': + """Get the next event from the queue""" + + assert self._on_idle and self.pending_event_queue, 'EventBus._start() must be called before _get_next_event()' + if not self._is_running: + return None + + try: + # Create a task for queue.get() so we can cancel it cleanly + get_next_queued_event = asyncio.create_task(self.pending_event_queue.get()) + setattr(get_next_queued_event, '_log_destroy_pending', False) # Suppress warnings on cleanup + + # Wait for next event with timeout + has_next_event, _pending = await asyncio.wait({get_next_queued_event}, timeout=wait_for_timeout) + if has_next_event: + # Check if we're still running before returning the event + if not self._is_running: + get_next_queued_event.cancel() + return None + return await get_next_queued_event # await to actually resolve it to the next event + else: + # Get task timed out, cancel it cleanly to suppress warnings + get_next_queued_event.cancel() + + # Check if we're idle, if so, set the idle flag + if not self._has_inflight_events_fast() and 
self.pending_event_queue.qsize() == 0: + self._on_idle.set() + return None + + except (asyncio.CancelledError, RuntimeError, QueueShutDown): + # Clean cancellation during shutdown or queue was shut down + return None + + async def _finalize_local_event_processing(self, event: BaseEvent[Any]) -> None: + """ + Clear local in-flight markers and run completion propagation exactly once. + + This is shared by both `step()` and the weak runloop path so completion + semantics stay identical regardless of which runner consumed the event. + """ + self.processing_event_ids.discard(event.event_id) + # Local bus consumed this event instance (or observed completion), so it + # should not remain in this bus's active set. + self.in_flight_event_ids.discard(event.event_id) + + newly_completed_events = self._mark_event_tree_complete_if_ready(event) + for completed_event in newly_completed_events: + await self.on_event_change(completed_event, EventStatus.COMPLETED) + + def _mark_event_tree_complete_if_ready(self, root_event: BaseEvent[Any]) -> list[BaseEvent[Any]]: + """ + Re-check completion for `root_event` and descendants in post-order. + + Timeout/cancellation paths can update child result statuses after an + earlier completion check. Running this post-order pass ensures children + are marked complete before their parents are re-evaluated. + """ + newly_completed: list[BaseEvent[Any]] = [] + visited_event_ids: set[str] = set() + + def visit(event: BaseEvent[Any]) -> None: + if event.event_id in visited_event_ids: + return + visited_event_ids.add(event.event_id) + + for child_event in event.event_children: + visit(child_event) + + was_complete = event.event_status == EventStatus.COMPLETED + # Only the root event may still appear "in-flight" on this bus during finalization. + # Descendants are not currently being processed in this frame, so they must consider + # queues on this bus too (otherwise queued children can be marked complete too early). 
+ current_bus = self if event.event_id == root_event.event_id else None + event._mark_completed(current_bus=current_bus) # pyright: ignore[reportPrivateUsage] + just_completed = (not was_complete) and event.event_status == EventStatus.COMPLETED + if just_completed: + self._mark_event_complete_on_all_buses(event) + newly_completed.append(event) + + visit(root_event) + return newly_completed + + async def step( + self, event: 'BaseEvent[Any] | None' = None, timeout: float | None = None, wait_for_timeout: float = 0.1 + ) -> 'BaseEvent[Any] | None': + """ + Consume and process a single event from the queue (one iteration of the run loop). + + This is the high-level "consumer" method that: + 1. Dequeues the next event (or uses one passed in) + 2. Acquires the event lock selected by concurrency mode + 3. Calls _process_event() to execute handlers + 4. Marks the queue task as done (only if event came from queue) + 5. Manages idle state signaling + + Use this method when manually driving the event loop (e.g., in tests). + For automatic processing, use emit() which queues events for the run loop. + + Args: + event: Optional event to process directly (bypasses queue if provided) + timeout: Handler execution timeout in seconds + wait_for_timeout: How long to wait for next event from queue (default: 0.1s) + + Returns: + The processed event, or None if queue was empty/shutdown + + Warning: + Passing an event directly (bypassing the queue) is for advanced use only, be aware if: + + - **Event not in queue**: Works fine, handlers execute normally. + - **Event already completed**: Handlers will run AGAIN, overwriting previous + results. No guard against double-processing. + - **Event in queue but not next**: Event processes immediately, but STAYS + in queue. The run loop will process it again later (double-processing). 
+ + See Also: + emit: Queues an event for normal async processing by the bus's existing run loop (recommended) + _process_event: Lower-level method that executes handlers (called by step) + """ + assert self._on_idle and self.pending_event_queue, 'EventBus._start() must be called before step()' + + # Track if we got the event from the queue + from_queue = False + + # Wait for next event with timeout to periodically check idle state + if event is None: + event = await self._get_next_event(wait_for_timeout=wait_for_timeout) + from_queue = True + if event is None: + return None + + if logger.isEnabledFor(logging.DEBUG): + logger.debug('πŸƒ %s.step(%s) STARTING', self, event) + + # Lifecycle note: + # - Every `step()` transition starts in "not idle" state. + # - The caller may be the runloop (`from_queue=True`) or a direct queue-jump + # call (`from_queue=False`) from `BaseEvent._process_self_on_all_buses()`. + # - Idle can be restored only after queue bookkeeping and in-flight markers + # are both fully reconciled in `finally`. + self._on_idle.clear() + + # Acquire the event lock selected by event/bus concurrency policy. + self.processing_event_ids.add(event.event_id) + try: + async with self.locks._run_with_event_lock(self, event): # pyright: ignore[reportPrivateUsage] + # Process the event + if event.event_status != EventStatus.COMPLETED: + await self._process_event(event, timeout=timeout) + + # Queue lifecycle: + # - `queue.get()` increments `_unfinished_tasks`. + # - We must call `task_done()` exactly once for that consume path. + # - Direct `step(event=...)` calls bypass `queue.get()` and therefore + # must not call `task_done()`. + if from_queue: + self.pending_event_queue.task_done() + finally: + await self._finalize_local_event_processing(event) + # Idle lifecycle reconciliation: + # - The runloop normally restores `_on_idle` after each queue turn. 
+ # - Direct `step(event=...)` calls have no subsequent runloop turn, so if + # this step drained the last in-flight work, `_on_idle` could remain + # permanently cleared and `wait_until_idle()` would block forever. + # - Setting `_on_idle` here when queue+inflight are both empty keeps idle + # semantics identical for runloop and direct-step execution paths. + if self._on_idle and self.pending_event_queue: + if not self._has_inflight_events_fast() and self.pending_event_queue.qsize() == 0: + self._on_idle.set() + + if logger.isEnabledFor(logging.DEBUG): + logger.debug('βœ… %s.step(%s) COMPLETE', self, event) + return event + + def _create_slow_event_warning_timer( + self, + event: BaseEvent[Any], + ) -> Callable[[], Coroutine[Any, Any, None]] | None: + event_slow_timeout = self._resolve_event_slow_timeout(event, self) + if event_slow_timeout is None: + return None + return partial(self._slow_event_warning_monitor, event, event_slow_timeout) + + async def _mark_event_complete_if_ready(self, event: BaseEvent[Any]) -> None: + was_complete = event.event_status == EventStatus.COMPLETED + event._mark_completed(current_bus=self) # pyright: ignore[reportPrivateUsage] + just_completed = (not was_complete) and event.event_status == EventStatus.COMPLETED + if just_completed: + self._mark_event_complete_on_all_buses(event) + await self.on_event_change(event, EventStatus.COMPLETED) + + async def _propagate_parent_completion(self, event: BaseEvent[Any]) -> None: + current = event + checked_ids: set[str] = set() + + while current.event_parent_id and current.event_parent_id not in checked_ids: + checked_ids.add(current.event_parent_id) + + parent_event = None + parent_bus: EventBus | None = None + for bus in list(type(self).all_instances): + if bus and current.event_parent_id in bus.event_history: + parent_event = bus.event_history[current.event_parent_id] + parent_bus = bus + break + + if not parent_event: + break + + was_complete = parent_event.event_status == 
EventStatus.COMPLETED + if not was_complete: + parent_event._mark_completed(current_bus=parent_bus) # pyright: ignore[reportPrivateUsage] + just_completed = (not was_complete) and parent_event.event_status == EventStatus.COMPLETED + if parent_bus and just_completed: + self._mark_event_complete_on_all_buses(parent_event) + await parent_bus.on_event_change(parent_event, EventStatus.COMPLETED) + + current = parent_event + + def _trim_event_history_if_needed(self) -> None: + if ( + self.event_history.max_history_size is not None + and self.event_history.max_history_size > 0 + and self.event_history.max_history_drop + and len(self.event_history) > self.event_history.max_history_size + ): + self.event_history.trim_event_history(owner_label=str(self)) + + async def _process_event(self, event: BaseEvent[Any], timeout: float | None = None) -> None: + """ + Execute all applicable handlers for an event (low-level, assumes lock is held). + + This is the core event handling method that: + 1. Finds all applicable handlers (type-specific + wildcard) + 2. Creates pending EventResult placeholders + 3. Executes handlers (serially or in parallel based on bus config) + 4. Marks the event as complete when all handlers finish + 5. Propagates completion status up the parent event chain + 6. Cleans up event history if over size limit + + IMPORTANT: This method assumes the caller already applied `locks._run_with_event_lock(...)` + for the event execution. + For safe external use, call step() instead which handles locking. + + Args: + event: The event to handle + timeout: Handler execution timeout in seconds (defaults to event.event_timeout) + + Warning: + This is a low-level method with no safety guards. Behavior in edge cases: + + - **Event not in queue**: Works fine, handlers execute normally. This method + does not interact with the queue at all. + - **Event already completed**: Handlers run AGAIN, ``_create_pending_handler_results()`` + overwrites previous results. 
No guard against double-processing. + - **Event in queue but not next**: Works fine for this call, but event stays + in queue and will be processed again later by the run loop. + - **Another event being processed (lock held elsewhere)**: If called without + holding the lock, concurrent handler execution may cause race conditions. + If called from within a handler (lock is re-entrant), causes nested processing. + - **This exact event already being processed**: Recursive/re-entrant processing. + Handlers run again while already running, results overwritten mid-execution. + Likely to cause undefined behavior. + + See Also: + step: High-level method that acquires lock and calls _process_event + emit: Queues an event for async processing (recommended) + """ + # Get applicable handlers + applicable_handlers = self._get_handlers_for_event(event) + slow_event_monitor_factory = self._create_slow_event_warning_timer(event) + resolved_event_timeout = ( + timeout if timeout is not None else (event.event_timeout if event.event_timeout is not None else self.event_timeout) + ) + + await self.on_event_change(event, EventStatus.PENDING) + + # Execute handlers + timeout_scope = asyncio.timeout(resolved_event_timeout) + try: + async with timeout_scope: + async with _run_with_slow_monitor( + slow_event_monitor_factory, + task_name=f'{self}.slow_event_monitor({event})', + ): + await event._run_handlers( # pyright: ignore[reportPrivateUsage] + eventbus=self, + handlers=applicable_handlers, + timeout=resolved_event_timeout, + ) + except TimeoutError: + if not timeout_scope.expired(): + raise + assert resolved_event_timeout is not None + await self._finalize_event_timeout(event, resolved_event_timeout) + + await self._mark_event_complete_if_ready(event) + await self._propagate_parent_completion(event) + self._trim_event_history_if_needed() + + def _get_handlers_for_event(self, event: BaseEvent[Any]) -> dict[PythonIdStr, EventHandler]: + """Get all handlers that should process the given 
event, filtering out those that would create loops""" + applicable_handlers: list[EventHandler] = [] + + for key in (event.event_type, '*'): + indexed_ids = self.handlers_by_key.get(key, []) + if not indexed_ids: + continue + for handler_id in indexed_ids: + handler_entry = self.handlers.get(handler_id) + if handler_entry: + applicable_handlers.append(handler_entry) + + # Filter out handlers that would create loops and build id->handler mapping + # Use handler id as key to preserve all handlers even with duplicate names + filtered_handlers: dict[PythonIdStr, EventHandler] = {} + for handler_entry in applicable_handlers: + if self._would_create_loop(event, handler_entry): + continue + else: + assert handler_entry.id is not None + filtered_handlers[handler_entry.id] = handler_entry + # logger.debug(f' Found handler {handler_entry.handler_name}#{handler_entry.id[-4:]}()') + + return filtered_handlers + + @contextmanager + def _run_with_handler_dispatch_context(self, event: BaseEvent[T_EventResultType], handler_id: str): + """Scope ContextVar state for one handler execution. + + This is the single handler execution context manager used by + ``EventResult.run_handler(...)`` for both sync and async handlers. It sets + current event/handler/bus ContextVars and mirrors event-lock ownership + into copied dispatch contexts via ``locks._run_with_handler_dispatch_context(...)``. 
+ """ + event_token = EventBus.current_event_context.set(event) + current_handler_token = EventBus.current_handler_id_context.set(handler_id) + current_eventbus_token = EventBus.current_eventbus_context.set(self) + try: + with self.locks._run_with_handler_dispatch_context(self, event): # pyright: ignore[reportPrivateUsage] + yield + finally: + EventBus.current_event_context.reset(event_token) + EventBus.current_handler_id_context.reset(current_handler_token) + EventBus.current_eventbus_context.reset(current_eventbus_token) + + async def _finalize_event_timeout(self, event: BaseEvent[Any], timeout_seconds: float) -> None: + """Finalize event-level hard timeout across pending/started handler results. + + - pending results become ``EventHandlerCancelledError`` + - started results become ``EventHandlerAbortedError`` + - child event processing is cancelled through event-level propagation + """ + timeout_error = TimeoutError( + f'Event {self.label}.on({event.event_type}#{event.event_id[-4:]}) timed out after {timeout_seconds}s' + ) + event._cancel_pending_child_processing(timeout_error) # pyright: ignore[reportPrivateUsage] + for event_result in event.event_results.values(): + if event_result.status == 'pending': + event_result.update( + error=EventHandlerCancelledError( + f'Cancelled: event {event.event_type}#{event.event_id[-4:]} timed out after {timeout_seconds}s' + ) + ) + await self.on_event_result_change(event, event_result, EventStatus.COMPLETED) + elif event_result.status == 'started': + event_result.update( + error=EventHandlerAbortedError( + f'Event handler {event_result.handler.label}({event}) was interrupted because the event timed out after {timeout_seconds}s' + ) + ) + await self.on_event_result_change(event, event_result, EventStatus.COMPLETED) + + async def _run_handler( + self, + event: 'BaseEvent[T_EventResultType]', + handler_entry: EventHandler, + timeout: float | None = None, + ) -> T_EventResultType | BaseEvent[Any] | None: + """Safely execute a 
single handler with middleware support and EventResult orchestration.""" + + handler_id = handler_entry.id + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + ' ↳ %s._run_handler(%s, handler=%s#%s)', + self, + event, + handler_entry.handler_name, + handler_id[-4:], + ) + + resolved_timeout = self._resolve_handler_timeout(event, handler_entry, self, timeout_override=timeout) + resolved_slow_timeout = self._resolve_handler_slow_timeout(event, handler_entry, self) + + if handler_id not in event.event_results: + new_results = event._create_pending_handler_results( # pyright: ignore[reportPrivateUsage] + {handler_id: handler_entry}, eventbus=self, timeout=resolved_timeout + ) + for pending_result in new_results.values(): + await self.on_event_result_change(event, pending_result, EventStatus.PENDING) + + first_handler_id = next(iter(event.event_results), None) + event_result = event.event_results[handler_id] + + try: + result_value = await event_result.run_handler( + event, + eventbus=self, + timeout=resolved_timeout, + handler_slow_timeout=resolved_slow_timeout, + notify_event_started=(first_handler_id == handler_id), + format_exception_for_log=log_filtered_traceback, + ) + + result_type_name = type(result_value).__name__ if result_value is not None else 'None' + if logger.isEnabledFor(logging.DEBUG): + logger.debug( + ' ↳ Handler %s#%s returned: %s', + handler_entry.handler_name, + handler_id[-4:], + result_type_name, + ) + + await self.on_event_result_change(event, event_result, EventStatus.COMPLETED) + return result_value + + except asyncio.CancelledError: + await self.on_event_result_change(event, event_result, EventStatus.COMPLETED) + raise + except Exception: + await self.on_event_result_change(event, event_result, EventStatus.COMPLETED) + raise + + def _would_create_loop(self, event: BaseEvent[Any], handler_entry: EventHandler) -> bool: + """Check if calling this handler would create a loop""" + handler = handler_entry.handler + if handler is None: + 
return False + + # First check: If handler is another EventBus emit/dispatch method, check if we're forwarding to another bus that it's already been processed by + if inspect.ismethod(handler): + bound_self = handler.__self__ + bound_name = handler.__name__ + if isinstance(bound_self, EventBus) and bound_name in ('emit', 'dispatch'): + target_bus = bound_self + if target_bus.label in event.event_path: + logger.debug( + f'⚠️ {self} handler {handler_entry.label}({event}) skipped to prevent infinite forwarding loop with {target_bus.label}' + ) + return True + + # Second check: if this handler already has an in-flight/completed result for this + # event on this bus, avoid re-entrancy. Rehydrated events can legitimately contain + # pending placeholders from a previous process; those must remain runnable. + handler_id = handler_entry.id + if handler_id in event.event_results: + existing_result = event.event_results[handler_id] + if existing_result.status == 'started': + logger.debug( + f'⚠️ {self} handler {handler_entry.label}({event}) is already {existing_result.status} for event {event.event_id} (preventing recursive call)' + ) + return True + if existing_result.status == 'pending' and event.event_status == EventStatus.STARTED: + logger.debug( + f'⚠️ {self} handler {handler_entry.label}({event}) is already pending while event is started for event {event.event_id} (preventing recursive call)' + ) + return True + if existing_result.status in ('completed', 'error') or existing_result.completed_at is not None: + logger.debug( + f'⚠️ {self} handler {handler_entry.label}({event}) already completed @ {existing_result.completed_at} for event {event.event_id} (will not re-run)' + ) + return True + + # Third check: For non-forwarding handlers, check recursion depth + # Forwarding handlers (EventBus.emit / EventBus.dispatch) are allowed to forward at any depth + is_forwarding_handler = ( + inspect.ismethod(handler) and isinstance(handler.__self__, EventBus) and handler.__name__ 
in ('emit', 'dispatch') + ) + + if not is_forwarding_handler: + # Only check recursion for regular handlers, not forwarding + recursion_depth = self._handler_dispatched_ancestor(event, handler_id) + if recursion_depth > 2: + raise RuntimeError( + f'Infinite loop detected: Handler {handler_entry.label} ' + f'has recursively processed {recursion_depth} levels of events. ' + f'Current event: {event}, Handler: {handler_id}' + ) + elif recursion_depth == 2: + logger.warning( + f'⚠️ {self} handler {handler_entry.label} ' + f'at maximum recursion depth (2 levels) - next level will raise exception' + ) + + return False + + def _handler_dispatched_ancestor( + self, event: BaseEvent[Any], handler_id: str, visited: set[str] | None = None, depth: int = 0 + ) -> int: + """Check how many times this handler appears in the ancestry chain. Returns the depth count.""" + # Prevent infinite recursion in case of circular parent references + if visited is None: + visited = set() + if event.event_id in visited: + return depth + visited.add(event.event_id) + + # If this event has no parent, it's a root event - no ancestry to check + if not event.event_parent_id: + return depth + + # Find parent event in any bus's history + parent_event = None + # Create a list copy to avoid "Set changed size during iteration" error + for bus in list(type(self).all_instances): + if event.event_parent_id in bus.event_history: + parent_event = bus.event_history[event.event_parent_id] + break + + if not parent_event: + return depth + + # Check if this handler processed the parent event + if handler_id in parent_event.event_results: + result = parent_event.event_results[handler_id] + if result.status in ('pending', 'started', 'completed'): + # This handler processed the parent event, increment depth + depth += 1 + + # Recursively check the parent's ancestry + return self._handler_dispatched_ancestor(parent_event, handler_id, visited, depth) + + def cleanup_excess_events(self) -> int: + """ + Clean up excess 
events from event_history based on max_history_size. + + Returns: + Number of events removed from history + """ + return self.event_history.cleanup_excess_events() + + def log_tree(self) -> str: + """Print a nice pretty formatted tree view of all events in the history including their results and child events recursively""" + from bubus.logging import log_eventbus_tree + + return log_eventbus_tree(self) + + def _check_total_memory_usage(self) -> None: + """Check total memory usage across all EventBus instances and warn if >50MB""" + import sys + + total_bytes = 0 + bus_details: list[tuple[str, int, int, int]] = [] + + # Iterate through all EventBus instances + # Create a list copy to avoid "Set changed size during iteration" error + for bus in list(type(self).all_instances): + try: + bus_bytes = 0 + + # Count events in history + for event in bus.event_history.values(): + bus_bytes += sys.getsizeof(event) + # Also count the event's data + for attr_value in event.__dict__.values(): + if isinstance(attr_value, (str, bytes, list, dict)): + bus_bytes += sys.getsizeof(_as_any(attr_value)) + + # Count events in queue + if bus.pending_event_queue: + for event in bus.pending_event_queue.iter_items(): + bus_bytes += sys.getsizeof(event) + for attr_value in event.__dict__.values(): + if isinstance(attr_value, (str, bytes, list, dict)): + bus_bytes += sys.getsizeof(_as_any(attr_value)) + + total_bytes += bus_bytes + bus_details.append( + ( + bus.label, + bus_bytes, + len(bus.event_history), + bus.pending_event_queue.qsize() if bus.pending_event_queue else 0, + ) + ) + except Exception: + # Skip buses that can't be measured + continue + + total_mb = total_bytes / (1024 * 1024) + + if total_mb > 50: + # Build detailed breakdown + details: list[str] = [] + for name, bytes_used, history_size, queue_size in sorted(bus_details, key=lambda x: x[1], reverse=True): + mb = bytes_used / (1024 * 1024) + if mb > 0.1: # Only show buses using >0.1MB + details.append(f' - {name}: {mb:.1f}MB 
(history={history_size}, queue={queue_size})') + + warning_msg = ( + f'\n⚠️ WARNING: Total EventBus memory usage is {total_mb:.1f}MB (>50MB limit)\n' + f'Active EventBus instances: {len(type(self).all_instances)}\n' + ) + if details: + warning_msg += 'Memory breakdown:\n' + '\n'.join(details[:5]) # Show top 5 + if len(details) > 5: + warning_msg += f'\n ... and {len(details) - 5} more' + + warning_msg += '\nConsider:\n' + warning_msg += ' - Reducing max_history_size\n' + warning_msg += ' - Clearing completed EventBus instances with stop(clear=True)\n' + warning_msg += ' - Reducing event payload sizes\n' + + logger.warning(warning_msg) diff --git a/bubus/event_handler.py b/bubus/event_handler.py new file mode 100644 index 0000000..5681449 --- /dev/null +++ b/bubus/event_handler.py @@ -0,0 +1,422 @@ +import asyncio +import inspect +import os +from collections.abc import Awaitable, Callable, Coroutine +from datetime import UTC, datetime +from functools import lru_cache +from pathlib import Path +from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, cast, overload, runtime_checkable +from uuid import NAMESPACE_DNS, UUID, uuid5 +from weakref import ref as weakref + +from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator +from typing_extensions import TypeVar + +from bubus.helpers import monotonic_datetime + +if TYPE_CHECKING: + from bubus.base_event import BaseEvent + + +# TypeVar for BaseEvent and its subclasses +# We use contravariant=True because if a handler accepts BaseEvent, +# it can also handle any subclass of BaseEvent +T_Event = TypeVar('T_Event', bound='BaseEvent[Any]', contravariant=True, default='BaseEvent[Any]') + +# For protocols with __func__ attributes, we need an invariant TypeVar +T_EventInvariant = TypeVar('T_EventInvariant', bound='BaseEvent[Any]', default='BaseEvent[Any]') +T_EventResult = TypeVar('T_EventResult', default=Any) +T_HandlerEvent = TypeVar('T_HandlerEvent', bound='BaseEvent[Any]', 
default='BaseEvent[Any]') +T_HandlerReturn = TypeVar('T_HandlerReturn', default=Any) + + +class EventHandlerCancelledError(asyncio.CancelledError): + """Handler was cancelled before starting or before producing a result.""" + + +class EventHandlerTimeoutError(TimeoutError): + """Handler exceeded its configured handler timeout.""" + + +class EventHandlerAbortedError(asyncio.CancelledError): + """Handler was interrupted while running (for example by event hard-timeout).""" + + +class EventHandlerResultSchemaError(ValueError): + """Handler returned a value incompatible with the event_result_type schema.""" + + +@runtime_checkable +class EventHandlerFunc(Protocol[T_Event]): + """Protocol for sync event handler functions.""" + + def __call__(self, event: T_Event, /) -> Any: ... + + +@runtime_checkable +class AsyncEventHandlerFunc(Protocol[T_Event]): + """Protocol for async event handler functions.""" + + async def __call__(self, event: T_Event, /) -> Any: ... + + +@runtime_checkable +class EventHandlerMethod(Protocol[T_Event]): + """Protocol for instance method event handlers.""" + + def __call__(self, self_: Any, event: T_Event, /) -> Any: ... + + __self__: Any + __name__: str + + +@runtime_checkable +class AsyncEventHandlerMethod(Protocol[T_Event]): + """Protocol for async instance method event handlers.""" + + async def __call__(self, self_: Any, event: T_Event, /) -> Any: ... + + __self__: Any + __name__: str + + +@runtime_checkable +class EventHandlerClassMethod(Protocol[T_EventInvariant]): + """Protocol for class method event handlers.""" + + def __call__(self, cls: type[Any], event: T_EventInvariant, /) -> Any: ... + + __self__: type[Any] + __name__: str + __func__: Callable[[type[Any], T_EventInvariant], Any] + + +@runtime_checkable +class AsyncEventHandlerClassMethod(Protocol[T_EventInvariant]): + """Protocol for async class method event handlers.""" + + async def __call__(self, cls: type[Any], event: T_EventInvariant, /) -> Any: ... 
+ + __self__: type[Any] + __name__: str + __func__: Callable[[type[Any], T_EventInvariant], Awaitable[Any]] + + +# Event handlers are normalized to bound single-argument callables at registration time. +EventHandlerCallable: TypeAlias = EventHandlerFunc['BaseEvent[Any]'] | AsyncEventHandlerFunc['BaseEvent[Any]'] + +# Normalized async callable shape used at call sites that require a Coroutine. +NormalizedEventHandlerCallable: TypeAlias = Callable[ + ['BaseEvent[T_EventResult]'], + Coroutine[Any, Any, T_EventResult | 'BaseEvent[Any]' | None], +] + +# Internal normalized one-argument callable used for invocation at runtime. +_InvokableEventHandlerCallable: TypeAlias = Callable[['BaseEvent[Any]'], Any | Awaitable[Any]] + +# ContravariantEventHandlerCallable allows subtype-specific handlers. +ContravariantEventHandlerCallable: TypeAlias = EventHandlerFunc[T_Event] | AsyncEventHandlerFunc[T_Event] + +HANDLER_ID_NAMESPACE: UUID = uuid5(NAMESPACE_DNS, 'bubus-handler') + + +def _format_handler_source_path(path: str, line_no: int | None = None) -> str: + normalized = str(Path(path).expanduser().resolve()) + home = str(Path.home()) + if normalized == home: + display = '~' + elif normalized.startswith(home + os.sep): + display = f'~{normalized[len(home) :]}' + else: + display = normalized + return f'{display}:{line_no}' if line_no else display + + +class _HandlerCacheKey: + __slots__ = ('handler_ref', 'handler_id', '_hash') + + def __init__(self, handler: Callable[..., Any]) -> None: + # Some callables override __eq__ without __hash__ and become unhashable. + # Use identity-based hashing for a stable cache key without retaining handlers. 
+ self.handler_ref = weakref(handler) + self.handler_id = id(handler) + self._hash = self.handler_id + + def __hash__(self) -> int: + return self._hash + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _HandlerCacheKey): + return False + if self.handler_id != other.handler_id: + return False + return self.handler_ref() is other.handler_ref() + + +@lru_cache(maxsize=100) +def _get_callable_handler_file_path(handler_key: _HandlerCacheKey) -> str | None: + """Best-effort, low-overhead source location for a handler callable.""" + handler = handler_key.handler_ref() + if handler is None: + return None + target: Any = handler.__func__ if inspect.ismethod(handler) else handler + target = inspect.unwrap(target) + + code_obj = getattr(target, '__code__', None) + if code_obj is not None: + file_path = getattr(code_obj, 'co_filename', None) + line_no = getattr(code_obj, 'co_firstlineno', None) + if isinstance(file_path, str) and file_path.strip(): + return _format_handler_source_path(file_path, int(line_no) if isinstance(line_no, int) else None) + + try: + source_file = inspect.getsourcefile(target) or inspect.getfile(target) + except (OSError, TypeError): + source_file = None + + line_no: int | None = None + try: + _, line_no = inspect.getsourcelines(target) + except (OSError, TypeError): + line_no = None + + if isinstance(source_file, str) and source_file.strip(): + return _format_handler_source_path(source_file, line_no) + + module = inspect.getmodule(target) + module_file = getattr(module, '__file__', None) if module is not None else None + if isinstance(module_file, str) and module_file.strip(): + return _format_handler_source_path(module_file, line_no) + + return None + + +@overload +def _normalize_handler_callable( + handler: Callable[[T_HandlerEvent], Coroutine[Any, Any, T_HandlerReturn]], +) -> Callable[[T_HandlerEvent], Coroutine[Any, Any, T_HandlerReturn]]: ... 
+ + +@overload +def _normalize_handler_callable( + handler: Callable[[T_HandlerEvent], Awaitable[T_HandlerReturn]], +) -> Callable[[T_HandlerEvent], Coroutine[Any, Any, T_HandlerReturn]]: ... + + +@overload +def _normalize_handler_callable( + handler: Callable[[T_HandlerEvent], T_HandlerReturn], +) -> Callable[[T_HandlerEvent], Coroutine[Any, Any, T_HandlerReturn]]: ... + + +def _normalize_handler_callable( + handler: Callable[[T_HandlerEvent], object], +) -> Callable[[T_HandlerEvent], Coroutine[Any, Any, T_HandlerReturn]]: + """Normalize one handler callable to a single async call signature.""" + if not callable(handler): + raise ValueError(f'Handler {handler!r} must be callable, got: {type(handler)}') + + if inspect.iscoroutinefunction(handler): + return cast(Callable[[T_HandlerEvent], Coroutine[Any, Any, T_HandlerReturn]], handler) + + async def normalized_handler(event: T_HandlerEvent) -> T_HandlerReturn: + handler_result = handler(event) + # BaseEvent implements __await__ for ergonomic `await event`, but handler + # return values of BaseEvent must be treated as plain results (forwarded + # child event refs), not awaited here. 
+ if inspect.isawaitable(handler_result): + from bubus.base_event import BaseEvent + + if isinstance(handler_result, BaseEvent): + return cast(T_HandlerReturn, handler_result) + return cast(T_HandlerReturn, await handler_result) + return cast(T_HandlerReturn, handler_result) + + return normalized_handler + + +class EventHandler(BaseModel): + """Serializable metadata wrapper around a registered event handler callable.""" + + model_config = ConfigDict( + extra='forbid', + arbitrary_types_allowed=True, + validate_assignment=False, + validate_default=True, + revalidate_instances='always', + ) + + id: str = '' + handler: EventHandlerCallable | None = Field(default=None, exclude=True, repr=False) + handler_name: str = 'anonymous' + handler_file_path: str | None = None + handler_timeout: float | None = None + handler_slow_timeout: float | None = None + handler_registered_at: str = Field(default_factory=monotonic_datetime) + event_pattern: str = '*' + eventbus_name: str = 'EventBus' + eventbus_id: str = '00000000-0000-0000-0000-000000000000' + + @field_validator('handler_name', mode='before') + @classmethod + def _validate_handler_name_field(cls, value: Any) -> str: + if isinstance(value, str): + normalized = value.strip() + if normalized: + return normalized + return 'anonymous' + + @field_validator('eventbus_name') + @classmethod + def _validate_eventbus_name_field(cls, value: str) -> str: + normalized = str(value) + assert normalized.isidentifier() and not normalized.startswith('_'), f'Invalid event bus name: {value!r}' + return normalized + + @field_validator('handler_registered_at', mode='before') + @classmethod + def _normalize_handler_registered_at(cls, value: Any) -> str: + if isinstance(value, datetime): + normalized_value = value.replace(tzinfo=UTC) if value.tzinfo is None else value.astimezone(UTC) + normalized_input = normalized_value.isoformat().replace('+00:00', 'Z') + return monotonic_datetime(normalized_input) + if value is None: + return 
monotonic_datetime() + return monotonic_datetime(str(value)) + + @property + def eventbus_label(self) -> str: + return f'{self.eventbus_name}#{self.eventbus_id[-4:]}' + + @staticmethod + def get_callable_handler_name(handler: Callable[..., Any]) -> str: + assert hasattr(handler, '__name__'), f'Handler {handler} has no __name__ attribute!' + if inspect.ismethod(handler): + return f'{type(handler.__self__).__name__}.{handler.__name__}' + if callable(handler): + handler_module = getattr(handler, '__module__', '') + handler_name = getattr(handler, '__name__', type(handler).__name__) + return f'{handler_module}.{handler_name}' + raise ValueError(f'Invalid handler: {handler} {type(handler)}, expected a function, coroutine, or method') + + @model_validator(mode='before') + @classmethod + def _populate_handler_name(cls, data: Any) -> Any: + if not isinstance(data, dict): + return data + params = cast(dict[str, Any], data) + if params.get('id') is None: + params.pop('id', None) + handler = params.get('handler') + if handler is not None and not params.get('handler_name'): + try: + derived_name = cls.get_callable_handler_name(cast(EventHandlerCallable, handler)) + params['handler_name'] = derived_name.strip() or 'function' + except Exception: + params['handler_name'] = 'function' + return params + + def model_post_init(self, __context: Any) -> None: + if not self.id: + self.id = self.compute_handler_id() + + @property + def _handler_async(self) -> NormalizedEventHandlerCallable[Any] | None: + """Return the normalized async callable view of `handler`.""" + if self.handler is None: + return None + return cast( + NormalizedEventHandlerCallable[Any], + _normalize_handler_callable(cast(Callable[[Any], object], self.handler)), + ) + + def compute_handler_id(self) -> str: + """Match TS handler-id algorithm: uuidv5(seed, HANDLER_ID_NAMESPACE).""" + file_path = self.handler_file_path or 'unknown' + seed = 
f'{self.eventbus_id}|{self.handler_name}|{file_path}|{self.handler_registered_at}|{self.event_pattern}' + return str(uuid5(HANDLER_ID_NAMESPACE, seed)) + + @property + def label(self) -> str: + assert self.id, 'EventHandler.id must be set' + return f'{self.handler_name}#{self.id[-4:]}' + + def __str__(self) -> str: + has_name = self.handler_name and self.handler_name != 'anonymous' + assert self.id, 'EventHandler.id must be set' + display = f'{self.handler_name}()' if has_name else f'function#{self.id[-4:]}()' + return f'{display} @ {self.handler_file_path}' if self.handler_file_path else display + + def __call__(self, event: 'BaseEvent[Any]') -> Any: + if self.handler is None: + raise RuntimeError(f'EventHandler {self.id} has no callable attached') + handler_callable = cast(Callable[[Any], Any], self.handler) + return handler_callable(event) + + @classmethod + def from_callable( + cls, + *, + handler: ContravariantEventHandlerCallable[Any], + event_pattern: str, + eventbus_name: str, + eventbus_id: str, + detect_handler_file_path: bool = True, + id: str | None = None, + handler_file_path: str | None = None, + handler_timeout: float | None = None, + handler_slow_timeout: float | None = None, + handler_registered_at: str | datetime | None = None, + ) -> 'EventHandler': + resolved_file_path = handler_file_path + if resolved_file_path is None and detect_handler_file_path: + resolved_file_path = _get_callable_handler_file_path(_HandlerCacheKey(handler)) + + handler_params: dict[str, Any] = { + 'handler': handler, + 'handler_file_path': resolved_file_path, + 'handler_registered_at': monotonic_datetime( + ( + handler_registered_at.replace(tzinfo=UTC) + if handler_registered_at.tzinfo is None + else handler_registered_at.astimezone(UTC) + ) + .isoformat() + .replace('+00:00', 'Z') + if isinstance(handler_registered_at, datetime) + else handler_registered_at + ), + 'event_pattern': event_pattern, + 'eventbus_name': eventbus_name, + 'eventbus_id': eventbus_id, + } + try: + 
derived_name = cls.get_callable_handler_name(handler) + handler_params['handler_name'] = derived_name.strip() or 'function' + except Exception: + handler_params['handler_name'] = 'function' + if id is not None: + handler_params['id'] = id + if handler_timeout is not None: + handler_params['handler_timeout'] = handler_timeout + if handler_slow_timeout is not None: + handler_params['handler_slow_timeout'] = handler_slow_timeout + + entry = cls(**handler_params) + if not entry.id: + entry.id = entry.compute_handler_id() + return entry + + +__all__ = [ + 'AsyncEventHandlerClassMethod', + 'AsyncEventHandlerFunc', + 'AsyncEventHandlerMethod', + 'ContravariantEventHandlerCallable', + 'EventHandler', + 'EventHandlerCallable', + 'EventHandlerClassMethod', + 'EventHandlerFunc', + 'EventHandlerMethod', + 'NormalizedEventHandlerCallable', +] diff --git a/bubus/event_history.py b/bubus/event_history.py new file mode 100644 index 0000000..2a9e1e1 --- /dev/null +++ b/bubus/event_history.py @@ -0,0 +1,273 @@ +from __future__ import annotations + +import logging +from collections.abc import Awaitable, Callable +from datetime import UTC, datetime, timedelta +from typing import Any, Generic, Literal, TypeVar, overload + +from .base_event import BaseEvent, UUIDStr +from .helpers import monotonic_datetime + +BaseEventT = TypeVar('BaseEventT', bound=BaseEvent[Any]) +TExpectedEvent = TypeVar('TExpectedEvent', bound=BaseEvent[Any]) +EventPatternType = str | Literal['*'] | type[BaseEvent[Any]] + +logger = logging.getLogger('bubus') + + +class EventHistory(dict[UUIDStr, BaseEventT], Generic[BaseEventT]): + """Ordered event history map with query and trim helpers.""" + + __slots__ = ('max_history_size', 'max_history_drop', '_warned_about_dropping_uncompleted_events') + + def __init__(self, max_history_size: int | None = 100, max_history_drop: bool = False): + super().__init__() + self.max_history_size = max_history_size + self.max_history_drop = max_history_drop + 
self._warned_about_dropping_uncompleted_events = False + + def add_event(self, event: BaseEventT) -> None: + self[event.event_id] = event + + def get_event(self, event_id: str) -> BaseEventT | None: + return self.get(event_id) + + def remove_event(self, event_id: str) -> bool: + if event_id not in self: + return False + del self[event_id] + return True + + def has_event(self, event_id: str) -> bool: + return event_id in self + + @staticmethod + def normalize_event_pattern(event_pattern: EventPatternType) -> str: + if event_pattern == '*': + return '*' + if isinstance(event_pattern, str): + return event_pattern + event_type_field = event_pattern.model_fields.get('event_type') + event_type_default = event_type_field.default if event_type_field is not None else None + if isinstance(event_type_default, str) and event_type_default not in ('', 'UndefinedEvent'): + return event_type_default + return event_pattern.__name__ + + def event_is_child_of(self, event: BaseEvent[Any], ancestor: BaseEvent[Any]) -> bool: + current_id = event.event_parent_id + visited: set[str] = set() + + while current_id and current_id not in visited: + if current_id == ancestor.event_id: + return True + visited.add(current_id) + parent = self.get(current_id) + if parent is None: + return False + current_id = parent.event_parent_id + + return False + + @overload + async def find( + self, + event_type: type[TExpectedEvent], + where: None = None, + child_of: BaseEvent[Any] | None = None, + past: bool | float | timedelta | None = None, + future: bool | float | None = None, + event_is_child_of: Callable[[BaseEvent[Any], BaseEvent[Any]], bool] | None = None, + wait_for_future_match: Callable[ + [str, Callable[[BaseEvent[Any]], bool], bool | float], + Awaitable[BaseEvent[Any] | None], + ] + | None = None, + **event_fields: Any, + ) -> TExpectedEvent | None: ... 
+ + @overload + async def find( + self, + event_type: type[TExpectedEvent], + where: Callable[[TExpectedEvent], bool], + child_of: BaseEvent[Any] | None = None, + past: bool | float | timedelta | None = None, + future: bool | float | None = None, + event_is_child_of: Callable[[BaseEvent[Any], BaseEvent[Any]], bool] | None = None, + wait_for_future_match: Callable[ + [str, Callable[[BaseEvent[Any]], bool], bool | float], + Awaitable[BaseEvent[Any] | None], + ] + | None = None, + **event_fields: Any, + ) -> TExpectedEvent | None: ... + + @overload + async def find( + self, + event_type: str | Literal['*'], + where: Callable[[BaseEvent[Any]], bool] | None = None, + child_of: BaseEvent[Any] | None = None, + past: bool | float | timedelta | None = None, + future: bool | float | None = None, + event_is_child_of: Callable[[BaseEvent[Any], BaseEvent[Any]], bool] | None = None, + wait_for_future_match: Callable[ + [str, Callable[[BaseEvent[Any]], bool], bool | float], + Awaitable[BaseEvent[Any] | None], + ] + | None = None, + **event_fields: Any, + ) -> BaseEvent[Any] | None: ... 
+ + async def find( + self, + event_type: EventPatternType, + where: Callable[[Any], bool] | None = None, + child_of: BaseEvent[Any] | None = None, + past: bool | float | timedelta | None = None, + future: bool | float | None = None, + event_is_child_of: Callable[[BaseEvent[Any], BaseEvent[Any]], bool] | None = None, + wait_for_future_match: Callable[ + [str, Callable[[BaseEvent[Any]], bool], bool | float], + Awaitable[BaseEvent[Any] | None], + ] + | None = None, + **event_fields: Any, + ) -> BaseEvent[Any] | None: + resolved_past_input = True if past is None else past + if isinstance(resolved_past_input, timedelta): + resolved_past: bool | float = max(0.0, resolved_past_input.total_seconds()) + elif isinstance(resolved_past_input, bool): + resolved_past = resolved_past_input + else: + resolved_past = max(0.0, float(resolved_past_input)) + + resolved_future_input = False if future is None else future + if isinstance(resolved_future_input, bool): + resolved_future: bool | float = resolved_future_input + else: + resolved_future = max(0.0, float(resolved_future_input)) + + if resolved_past is False and resolved_future is False: + return None + + event_key = self.normalize_event_pattern(event_type) + where_predicate: Callable[[BaseEvent[Any]], bool] + if where is None: + where_predicate = lambda _: True + else: + where_predicate = where + + child_check = event_is_child_of or self.event_is_child_of + cutoff: str | None = None + if resolved_past is not True: + cutoff_dt = datetime.now(UTC) - timedelta(seconds=float(resolved_past)) + cutoff = monotonic_datetime(cutoff_dt.isoformat().replace('+00:00', 'Z')) + missing = object() + + def matches(event: BaseEvent[Any]) -> bool: + return ( + (event_key == '*' or event.event_type == event_key) + and (child_of is None or child_check(event, child_of)) + and all( + getattr(event, field_name, missing) == expected_value for field_name, expected_value in event_fields.items() + ) + and where_predicate(event) + ) + + if resolved_past 
is not False: + events = list(self.values()) + for event in reversed(events): + if cutoff is not None and event.event_created_at < cutoff: + continue + if matches(event): + return event + + if resolved_future is False or wait_for_future_match is None: + return None + + return await wait_for_future_match(event_key, matches, resolved_future) + + def cleanup_excess_events(self, *, on_remove: Callable[[BaseEventT], None] | None = None) -> int: + if self.max_history_size is None: + return 0 + if self.max_history_size == 0: + return self.trim_event_history(on_remove=on_remove) + remove_count = len(self) - self.max_history_size + if remove_count <= 0: + return 0 + + removed_count = 0 + for event_id in list(self.keys())[:remove_count]: + event = self.pop(event_id, None) + if event is None: + continue + if on_remove: + on_remove(event) + removed_count += 1 + + return removed_count + + def trim_event_history( + self, + *, + on_remove: Callable[[BaseEventT], None] | None = None, + owner_label: str | None = None, + ) -> int: + if self.max_history_size is None: + return 0 + + if self.max_history_size == 0: + completed_event_ids = [event_id for event_id, event in self.items() if event.event_status == 'completed'] + removed_count = 0 + for event_id in completed_event_ids: + event = self.get(event_id) + if event is None: + continue + del self[event_id] + if on_remove: + on_remove(event) + removed_count += 1 + return removed_count + + if not self.max_history_drop or len(self) <= self.max_history_size: + return 0 + + remaining_overage = len(self) - self.max_history_size + removed_count = 0 + + def remove_event(event_id: str, event: BaseEventT) -> None: + nonlocal removed_count + del self[event_id] + if on_remove: + on_remove(event) + removed_count += 1 + + for event_id, event in list(self.items()): + if remaining_overage <= 0: + break + if event.event_status != 'completed': + continue + remove_event(event_id, event) + remaining_overage -= 1 + + dropped_uncompleted = 0 + for 
event_id, event in list(self.items()): + if remaining_overage <= 0: + break + if event.event_status != 'completed': + dropped_uncompleted += 1 + remove_event(event_id, event) + remaining_overage -= 1 + + if dropped_uncompleted > 0 and not self._warned_about_dropping_uncompleted_events: + self._warned_about_dropping_uncompleted_events = True + owner = owner_label or 'EventBus' + logger.warning( + '[bubus] ⚠️ Bus %s has exceeded max_history_size=%s and is dropping oldest history entries ' + '(even uncompleted events). Increase max_history_size or set max_history_drop=False to reject.', + owner, + self.max_history_size, + ) + + return removed_count diff --git a/bubus/event_result.py b/bubus/event_result.py new file mode 100644 index 0000000..9e247aa --- /dev/null +++ b/bubus/event_result.py @@ -0,0 +1,11 @@ +from bubus.base_event import EventResult + +__all__ = ['EventResult'] + +# EventResult cannot be defined in a separate file from BaseEvent +# because Pydantic needs to be able to reference BaseEvent and vice versa in the same file. 
+# This is a known issue with Pydantic and generic models: +# https://github.com/pydantic/pydantic/issues/1873 +# https://github.com/pydantic/pydantic/issues/707 +# https://stackoverflow.com/questions/77582955/how-can-i-separate-two-pydantic-models-into-different-files-when-these-models-ha +# https://github.com/pydantic/pydantic/issues/11532 diff --git a/bubus/events_suck.py b/bubus/events_suck.py new file mode 100644 index 0000000..760da39 --- /dev/null +++ b/bubus/events_suck.py @@ -0,0 +1,205 @@ +from __future__ import annotations + +import inspect +import types +from collections.abc import Awaitable, Callable, Mapping +from types import SimpleNamespace +from typing import Any, Protocol, TypeVar, cast, get_args, get_origin + +from pydantic.fields import FieldInfo +from pydantic_core import PydanticUndefined + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus +from bubus.helpers import extract_basemodel_generic_arg + +EventClass = type[BaseEvent[Any]] +_BASE_EVENT_FIELD_NAMES = frozenset(BaseEvent.model_fields) +_EMPTY = inspect.Parameter.empty +T_Result = TypeVar('T_Result') + + +class _HasBus(Protocol): + bus: EventBus + + +class GeneratedEvents(SimpleNamespace): + by_name: dict[str, EventClass] + + +def _custom_event_fields(event_cls: EventClass) -> list[tuple[str, FieldInfo]]: + return [ + (field_name, field) for field_name, field in event_cls.model_fields.items() if field_name not in _BASE_EVENT_FIELD_NAMES + ] + + +def _event_field_default(field: FieldInfo) -> Any: + default = field.get_default(call_default_factory=False) + if default is PydanticUndefined: + return None + return default + + +def _event_result_annotation(event_cls: EventClass) -> Any: + generic_result_type = extract_basemodel_generic_arg(event_cls) + if generic_result_type is not None: + return generic_result_type + + result_field = event_cls.model_fields.get('event_result_type') + if result_field is not None and result_field.default not in (None, 
PydanticUndefined): + return result_field.default + + return Any + + +def _callable_params(func: Callable[..., Any]) -> tuple[list[inspect.Parameter], bool, Any]: + signature = inspect.signature(func) + params = list(signature.parameters.values()) + has_var_kwargs = any(param.kind == inspect.Parameter.VAR_KEYWORD for param in params) + if params and params[0].name in {'self', 'cls'}: + params = params[1:] + + filtered: list[inspect.Parameter] = [] + for param in params: + if param.kind == inspect.Parameter.VAR_POSITIONAL: + raise TypeError(f'events_suck does not support *args in {func!r}') + if param.kind == inspect.Parameter.POSITIONAL_ONLY: + raise TypeError(f'events_suck does not support positional-only params in {func!r}') + if param.kind == inspect.Parameter.VAR_KEYWORD: + continue + filtered.append(param) + + return_annotation = signature.return_annotation if signature.return_annotation is not _EMPTY else Any + return filtered, has_var_kwargs, return_annotation + + +def _event_payload(event: BaseEvent[Any]) -> dict[str, Any]: + payload = { + field_name: getattr(event, field_name) + for field_name in event.__class__.model_fields + if field_name not in _BASE_EVENT_FIELD_NAMES + } + extras = event.model_extra + if isinstance(extras, dict): + payload.update(extras) + return payload + + +def _annotation_allows_none(annotation: Any) -> bool: + if annotation is None or annotation is type(None): + return True + origin = get_origin(annotation) + if origin is None: + return False + return any(arg is type(None) for arg in get_args(annotation)) + + +def _make_event_class(event_name: str, func: Callable[..., Any]) -> EventClass: + if not event_name.isidentifier() or event_name.startswith('_'): + raise ValueError(f'Invalid event name: {event_name!r}') + + params, _, return_annotation = _callable_params(func) + annotations: dict[str, Any] = {'event_result_type': Any} + namespace: dict[str, Any] = {'__module__': __name__, 'event_type': event_name, 'event_result_type': 
return_annotation} + for param in params: + annotation = param.annotation if param.annotation is not _EMPTY else Any + annotations[param.name] = annotation + if param.default is not _EMPTY: + namespace[param.name] = param.default + elif _annotation_allows_none(annotation): + namespace[param.name] = None + namespace['__annotations__'] = annotations + try: + event_base = cast(type[Any], BaseEvent[return_annotation]) + except Exception: + event_base = BaseEvent + event_cls = types.new_class(event_name, (event_base,), exec_body=lambda ns: ns.update(namespace)) + return cast(EventClass, event_cls) + + +def make_events(events: Mapping[str, Callable[..., Any]]) -> GeneratedEvents: + by_name = {event_name: _make_event_class(event_name, func) for event_name, func in events.items()} + return GeneratedEvents(**by_name, by_name=by_name) + + +def make_handler(func: Callable[..., T_Result | Awaitable[T_Result]]) -> Callable[[BaseEvent[Any]], Awaitable[T_Result]]: + params, has_var_kwargs, _ = _callable_params(func) + + async def _handler(event: BaseEvent[Any]) -> T_Result: + payload = _event_payload(event) + kwargs: dict[str, Any] = {} + for param in params: + if param.name in payload: + kwargs[param.name] = payload.pop(param.name) + elif param.default is _EMPTY: + raise TypeError(f'Missing required event field {param.name!r} for handler {func!r}') + if has_var_kwargs: + kwargs.update(payload) + result = func(**kwargs) + if inspect.isawaitable(result): + return await result + return result + + return _handler + + +def _build_event_method(class_name: str, method_name: str, event_cls: EventClass): + event_fields = _custom_event_fields(event_cls) + event_field_names = tuple(field_name for field_name, _ in event_fields) + + parameters = [inspect.Parameter('self', inspect.Parameter.POSITIONAL_OR_KEYWORD)] + for field_name, field in event_fields: + field_annotation = field.annotation if field.annotation is not None else Any + field_default = inspect.Parameter.empty if 
field.is_required() else _event_field_default(field) + parameters.append( + inspect.Parameter( + field_name, + inspect.Parameter.KEYWORD_ONLY, + default=field_default, + annotation=field_annotation, + ) + ) + parameters.append(inspect.Parameter('extra', inspect.Parameter.VAR_KEYWORD, annotation=Any)) + signature = inspect.Signature(parameters=parameters, return_annotation=_event_result_annotation(event_cls)) + + async def _method(self: _HasBus, *args: Any, **kwargs: Any) -> Any: + bound = signature.bind(self, *args, **kwargs) + payload: dict[str, Any] = { + field_name: bound.arguments[field_name] for field_name in event_field_names if field_name in bound.arguments + } + payload.update(cast(dict[str, Any], bound.arguments.get('extra', {}))) + return await self.bus.emit(event_cls(**payload)).event_result() + + _method.__name__ = method_name + _method.__qualname__ = f'{class_name}.{method_name}' + _method.__annotations__ = { + **{field_name: (field.annotation if field.annotation is not None else Any) for field_name, field in event_fields}, + 'extra': Any, + 'return': signature.return_annotation, + } + cast(Any, _method).__signature__ = signature + return _method + + +def wrap(class_name: str, methods: Mapping[str, EventClass]) -> type[Any]: + if not class_name.isidentifier() or class_name.startswith('_'): + raise ValueError(f'Invalid class name: {class_name!r}') + + def __init__(self: _HasBus, bus: EventBus | None = None) -> None: + self.bus = bus or EventBus(f'{class_name}Bus') + + namespace: dict[str, Any] = { + '__module__': __name__, + '__annotations__': {'bus': EventBus}, + '__init__': __init__, + } + + for method_name, event_cls in methods.items(): + if not method_name.isidentifier() or method_name.startswith('_'): + raise ValueError(f'Invalid method name: {method_name!r}') + namespace[method_name] = _build_event_method(class_name, method_name, event_cls) + + return cast(type[Any], type(class_name, (), namespace)) + + +__all__ = ['GeneratedEvents', 
'make_events', 'make_handler', 'wrap'] diff --git a/bubus/helpers.py b/bubus/helpers.py index 574e605..3823dea 100644 --- a/bubus/helpers.py +++ b/bubus/helpers.py @@ -1,40 +1,249 @@ import asyncio import logging -import tempfile -import threading +import re import time -from collections.abc import Callable, Coroutine +import traceback +from collections import deque +from collections.abc import Awaitable, Callable, Coroutine +from contextlib import asynccontextmanager +from datetime import UTC, datetime from functools import wraps -from pathlib import Path -from typing import Any, Literal, ParamSpec, TypeVar +from typing import Any, ParamSpec, TypeVar, cast -import portalocker +# Define generic type variables for return type and parameters +R = TypeVar('R') +P = ParamSpec('P') +QueueEntryType = TypeVar('QueueEntryType') + +_MONOTONIC_DATETIME_REGEX = re.compile( + r'^(\d{4})-(\d{2})-(\d{2})' + r'T(\d{2}):(\d{2}):(\d{2})' + r'(?:\.(\d{1,9}))?' + r'(Z|[+-]\d{2}:\d{2})$' +) +_MONOTONIC_DATETIME_LENGTH = 30 # YYYY-MM-DDTHH:MM:SS.fffffffffZ +_MONOTONIC_EPOCH_ANCHOR_NS = time.time_ns() +_MONOTONIC_CLOCK_ANCHOR_NS = time.monotonic_ns() +_last_monotonic_datetime_ns = _MONOTONIC_EPOCH_ANCHOR_NS + + +def _format_epoch_ns_to_iso(epoch_ns: int) -> str: + seconds, fractional_ns = divmod(epoch_ns, 1_000_000_000) + base_datetime = datetime.fromtimestamp(seconds, tz=UTC) + if base_datetime.year <= 1990 or base_datetime.year >= 2500: + raise ValueError('Datetime year must be >1990 and <2500') + normalized = f'{base_datetime.strftime("%Y-%m-%dT%H:%M:%S")}.{fractional_ns:09d}Z' + assert len(normalized) == _MONOTONIC_DATETIME_LENGTH, ( + f'Expected canonical datetime length {_MONOTONIC_DATETIME_LENGTH}, got {len(normalized)}: {normalized!r}' + ) + return normalized + + +def monotonic_datetime(isostring: str | None = None) -> str: + """Return canonical UTC ISO datetime with exactly 9 fractional digits.""" + if isostring is not None: + match = 
_MONOTONIC_DATETIME_REGEX.fullmatch(isostring) + if match is None: + raise ValueError(f'Invalid ISO datetime: {isostring!r}') + + parsed = datetime.fromisoformat(isostring.replace('Z', '+00:00')) + if parsed.tzinfo is None: + raise ValueError(f'ISO datetime must include timezone: {isostring!r}') + parsed_utc = parsed.astimezone(UTC) + if parsed_utc.year <= 1990 or parsed_utc.year >= 2500: + raise ValueError(f'Datetime year must be >1990 and <2500: {isostring!r}') + + fractional = (match.group(7) or '').ljust(9, '0') + normalized = f'{parsed_utc.strftime("%Y-%m-%dT%H:%M:%S")}.{fractional}Z' + assert len(normalized) == _MONOTONIC_DATETIME_LENGTH, ( + f'Expected canonical datetime length {_MONOTONIC_DATETIME_LENGTH}, got {len(normalized)}: {normalized!r}' + ) + return normalized + + global _last_monotonic_datetime_ns + elapsed_ns = time.monotonic_ns() - _MONOTONIC_CLOCK_ANCHOR_NS + epoch_ns = _MONOTONIC_EPOCH_ANCHOR_NS + elapsed_ns + if epoch_ns <= _last_monotonic_datetime_ns: + epoch_ns = _last_monotonic_datetime_ns + 1 + _last_monotonic_datetime_ns = epoch_ns + return _format_epoch_ns_to_iso(epoch_ns) + + +async def run_with_timeout(awaitable: Awaitable[R], timeout: float | None = None) -> R: + """Await `awaitable` with optional timeout.""" + if timeout is None: + return await awaitable + return await asyncio.wait_for(awaitable, timeout=timeout) + + +async def cancel_and_await(task: asyncio.Task[Any] | None, timeout: float | None = None) -> None: + """Best-effort task cancellation helper that suppresses cancellation-time noise.""" + if task is None: + return + if not task.done(): + task.cancel() + try: + await run_with_timeout(task, timeout=timeout) + except (asyncio.CancelledError, TimeoutError): + pass + except Exception: + pass -# Silence portalocker debug messages -portalocker_logger = logging.getLogger('portalocker.utils') -portalocker_logger.setLevel(logging.WARNING) -# Silence root level portalocker logs too -portalocker_root_logger = 
logging.getLogger('portalocker') -portalocker_root_logger.setLevel(logging.WARNING) +@asynccontextmanager +async def _run_with_slow_monitor( + monitor_factory: Callable[[], Coroutine[Any, Any, Any]] | None, + *, + task_name: str | None = None, +): + """Run an optional slow-monitor task scoped to the surrounding execution. -PSUTIL_AVAILABLE = False -try: - import psutil # type: ignore[import] + The monitor is started on enter and always cancelled on exit. + """ + task: asyncio.Task[Any] | None = None + if monitor_factory is not None: + if task_name is None: + task = asyncio.create_task(monitor_factory()) + else: + task = asyncio.create_task(monitor_factory(), name=task_name) + try: + yield task + finally: + await cancel_and_await(task) + + +class QueueShutDown(Exception): + """Raised when putting on to or getting from a shut-down Queue.""" - PSUTIL_AVAILABLE = True # type: ignore[assignment] -except ImportError: - psutil = None pass -logger = logging.getLogger(__name__) +class CleanShutdownQueue(asyncio.Queue[QueueEntryType]): + """asyncio.Queue subclass that handles shutdown cleanly without warnings.""" + + _is_shutdown: bool = False + _queue: deque[QueueEntryType] + _getters: deque[asyncio.Future[QueueEntryType]] + _putters: deque[asyncio.Future[QueueEntryType]] + + def iter_items(self) -> tuple[QueueEntryType, ...]: + """Return a snapshot of queued items in FIFO order.""" + return tuple(self._queue) + + def remove_item(self, item: QueueEntryType) -> bool: + """Remove one matching queued item if present.""" + if item not in self._queue: + return False + self._queue.remove(item) + return True + + def shutdown(self, immediate: bool = True): + """Shutdown the queue and clean up all pending futures.""" + del immediate + self._is_shutdown = True + + # Cancel all waiting getters without triggering warnings + while self._getters: + getter = self._getters.popleft() + if not getter.done(): + # Set exception instead of cancelling to avoid "Event loop is closed" errors + 
getter.set_exception(QueueShutDown()) + + # Cancel all waiting putters + while self._putters: + putter = self._putters.popleft() + if not putter.done(): + putter.set_exception(QueueShutDown()) + + async def get(self) -> QueueEntryType: + """Remove and return an item from the queue, with shutdown support.""" + while self.empty(): + if self._is_shutdown: + raise QueueShutDown + + getter: asyncio.Future[QueueEntryType] = asyncio.get_running_loop().create_future() + assert isinstance(getter, asyncio.Future) + self._getters.append(getter) + try: + await getter + except: + # Clean up the getter if we're cancelled + getter.cancel() # Just in case getter is not done yet. + try: + self._getters.remove(getter) + except ValueError: + pass + # Re-raise the exception + raise + return self.get_nowait() -# Define generic type variables for return type and parameters -R = TypeVar('R') -T = TypeVar('T') -P = ParamSpec('P') + async def put(self, item: QueueEntryType) -> None: + """Put an item into the queue, with shutdown support.""" + while self.full(): + if self._is_shutdown: + raise QueueShutDown + + putter: asyncio.Future[QueueEntryType] = asyncio.get_running_loop().create_future() + assert isinstance(putter, asyncio.Future) + self._putters.append(putter) + try: + await putter + except: + putter.cancel() # Just in case putter is not done yet. 
+ try: + self._putters.remove(putter) + except ValueError: + pass + raise + + return self.put_nowait(item) + + def put_nowait(self, item: QueueEntryType) -> None: + """Put an item into the queue without blocking, with shutdown support.""" + if self._is_shutdown: + raise QueueShutDown + return super().put_nowait(item) + + def get_nowait(self) -> QueueEntryType: + """Remove and return an item if one is immediately available, with shutdown support.""" + if self._is_shutdown and self.empty(): + raise QueueShutDown + return super().get_nowait() + + +def extract_basemodel_generic_arg(cls: type) -> Any: + """ + Extract T_EventResultType Generic arg from BaseEvent[T_EventResultType] subclasses using pydantic generic metadata. + Needed because pydantic messes with the mro and obscures the Generic from the bases list. + https://github.com/pydantic/pydantic/issues/8410 + """ + + def _extract_arg_from_metadata(metadata_value: Any) -> Any: + metadata = cast(dict[str, Any], metadata_value) + origin: Any = metadata.get('origin') + args: tuple[Any, ...] = cast(tuple[Any, ...], metadata.get('args') or ()) + if not args: + return None + # Avoid importing BaseEvent here to keep helpers.py decoupled from models.py. 
+ if getattr(origin, '__name__', None) == 'BaseEvent' and getattr(origin, '__module__', None) == 'bubus.base_event': + return args[0] + return None + + # Direct check first for speed - most subclasses will have it directly + if hasattr(cls, '__pydantic_generic_metadata__'): + generic_arg = _extract_arg_from_metadata(getattr(cls, '__pydantic_generic_metadata__')) + if generic_arg is not None: + return generic_arg + + # Only check MRO if direct check failed + for parent in cls.__mro__[1:]: + if hasattr(parent, '__pydantic_generic_metadata__'): + generic_arg = _extract_arg_from_metadata(getattr(parent, '__pydantic_generic_metadata__')) + if generic_arg is not None: + return generic_arg + + return None def time_execution( @@ -68,463 +277,30 @@ async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: return decorator -# Global semaphore registry for retry decorator -GLOBAL_RETRY_SEMAPHORES: dict[str, asyncio.Semaphore] = {} -GLOBAL_RETRY_SEMAPHORE_LOCK = threading.Lock() - -# Multiprocess semaphore support -MULTIPROCESS_SEMAPHORE_DIR = Path(tempfile.gettempdir()) / 'browser_use_semaphores' -MULTIPROCESS_SEMAPHORE_DIR.mkdir(exist_ok=True) - -# Global multiprocess semaphore registry -# Multiprocess semaphores are not cached due to internal state issues causing "Already locked" errors -MULTIPROCESS_SEMAPHORE_LOCK = threading.Lock() - -# Global overload detection state -_last_overload_check = 0.0 -_overload_check_interval = 5.0 # Check every 5 seconds -_active_retry_operations = 0 -_active_operations_lock = threading.Lock() - - -def _check_system_overload() -> tuple[bool, str]: - """Check if system is overloaded and return (is_overloaded, reason)""" - if not PSUTIL_AVAILABLE: - return False, '' - - assert psutil is not None - try: - # Get system stats - cpu_percent = psutil.cpu_percent(interval=0.1) - memory = psutil.virtual_memory() +def log_filtered_traceback(exc: BaseException) -> str: + """Format traceback while filtering noisy asyncio/stdlib frames.""" + trace_exc = 
traceback.TracebackException.from_exception(exc, capture_locals=False) - # Check thresholds - reasons: list[str] = [] - is_overloaded = False - - if cpu_percent > 85: - is_overloaded = True - reasons.append(f'CPU: {cpu_percent:.1f}%') - - if memory.percent > 85: - is_overloaded = True - reasons.append(f'Memory: {memory.percent:.1f}%') - - # Check number of concurrent operations - with _active_operations_lock: - if _active_retry_operations > 30: - is_overloaded = True - reasons.append(f'Active operations: {_active_retry_operations}') - - return is_overloaded, ', '.join(reasons) - except Exception: - return False, '' - - -def _get_semaphore_key( - func_name: str, - semaphore_name: str | None, - semaphore_scope: Literal['multiprocess', 'global', 'class', 'self'], - args: tuple[Any, ...], -) -> str: - """Determine the semaphore key based on scope.""" - base_name = semaphore_name or func_name - - if semaphore_scope == 'multiprocess': - return base_name - elif semaphore_scope == 'global': - return base_name - elif semaphore_scope == 'class' and args and hasattr(args[0], '__class__'): - class_name = args[0].__class__.__name__ - return f'{class_name}.{base_name}' - elif semaphore_scope == 'self' and args: - instance_id = id(args[0]) - return f'{instance_id}.{base_name}' - else: - # Fallback to global if we can't determine scope - return base_name - - -def _get_or_create_semaphore( - sem_key: str, - semaphore_limit: int, - semaphore_scope: Literal['multiprocess', 'global', 'class', 'self'], -) -> Any: - """Get or create a semaphore based on scope.""" - if semaphore_scope == 'multiprocess': - # Don't cache multiprocess semaphores - they have internal state issues - # Create a new instance each time to avoid "Already locked" errors - with MULTIPROCESS_SEMAPHORE_LOCK: - # Ensure the directory exists (it might have been cleaned up in cloud environments) - MULTIPROCESS_SEMAPHORE_DIR.mkdir(exist_ok=True, parents=True) - - # Clean up any stale lock files before creating semaphore 
- lock_pattern = f'{sem_key}.*.lock' - for lock_file in MULTIPROCESS_SEMAPHORE_DIR.glob(lock_pattern): - try: - # Try to remove lock files older than 5 minutes - if lock_file.stat().st_mtime < time.time() - 300: - lock_file.unlink(missing_ok=True) - except Exception: - pass # Ignore errors when cleaning up - - # Use a more aggressive timeout for lock acquisition - try: - semaphore = portalocker.utils.NamedBoundedSemaphore( - maximum=semaphore_limit, - name=sem_key, - directory=str(MULTIPROCESS_SEMAPHORE_DIR), - timeout=0.1, # Very short timeout for internal lock acquisition - ) - return semaphore - except FileNotFoundError as e: - # In some cloud environments, the lock file creation might fail - # Try once more after ensuring directory exists - logger.warning(f'Lock file creation failed: {e}. Retrying after ensuring directory exists.') - MULTIPROCESS_SEMAPHORE_DIR.mkdir(exist_ok=True, parents=True) - - # Create a fallback asyncio semaphore instead of multiprocess - logger.warning(f'Falling back to asyncio semaphore for {sem_key} due to filesystem issues') - with GLOBAL_RETRY_SEMAPHORE_LOCK: - fallback_key = f'multiprocess_fallback_{sem_key}' - if fallback_key not in GLOBAL_RETRY_SEMAPHORES: - GLOBAL_RETRY_SEMAPHORES[fallback_key] = asyncio.Semaphore(semaphore_limit) - return GLOBAL_RETRY_SEMAPHORES[fallback_key] - else: - with GLOBAL_RETRY_SEMAPHORE_LOCK: - if sem_key not in GLOBAL_RETRY_SEMAPHORES: - GLOBAL_RETRY_SEMAPHORES[sem_key] = asyncio.Semaphore(semaphore_limit) - return GLOBAL_RETRY_SEMAPHORES[sem_key] - - -def _calculate_semaphore_timeout( - semaphore_timeout: float | None, - timeout: float, - semaphore_limit: int, -) -> float: - """Calculate the timeout for semaphore acquisition.""" - if semaphore_timeout is None: - # Default: wait time is if all other slots are occupied with max timeout operations - # Ensure minimum of timeout value when limit=1 - return max(timeout, timeout * (semaphore_limit - 1)) - else: - # Use provided timeout, but ensure minimum 
of 0.01 if 0 was passed - return max(0.01, semaphore_timeout) if semaphore_timeout == 0 else semaphore_timeout - - -async def _acquire_multiprocess_semaphore( - semaphore: Any, - sem_timeout: float, - sem_key: str, - semaphore_lax: bool, - semaphore_limit: int, - timeout: float, -) -> tuple[bool, Any]: - """Acquire a multiprocess semaphore with retries and exponential backoff.""" - start_time = time.time() - retry_delay = 0.1 # Start with 100ms - backoff_factor = 2.0 - max_single_attempt = 1.0 # Max time for a single acquire attempt - recreate_attempts = 0 - max_recreate_attempts = 3 - - while time.time() - start_time < sem_timeout: - try: - # Calculate remaining time - remaining_time = sem_timeout - (time.time() - start_time) - if remaining_time <= 0: - break - - # Use minimum of remaining time or max single attempt - attempt_timeout = min(remaining_time, max_single_attempt) - - # Use a temporary thread to run the blocking operation - multiprocess_lock = await asyncio.to_thread( - lambda: semaphore.acquire(timeout=attempt_timeout, check_interval=0.1, fail_when_locked=False) - ) - if multiprocess_lock: - return True, multiprocess_lock - - # If we didn't get the lock, wait before retrying - if remaining_time > retry_delay: - await asyncio.sleep(retry_delay) - retry_delay = min(retry_delay * backoff_factor, 1.0) # Cap at 1 second - - except (FileNotFoundError, OSError) as e: - # Handle case where lock file disappears - if isinstance(e, FileNotFoundError) or 'No such file or directory' in str(e): - recreate_attempts += 1 - if recreate_attempts <= max_recreate_attempts: - logger.warning( - f'Semaphore lock file disappeared for "{sem_key}". Attempting to recreate (attempt {recreate_attempts}/{max_recreate_attempts})...' 
- ) - - # Ensure directory exists - with MULTIPROCESS_SEMAPHORE_LOCK: - MULTIPROCESS_SEMAPHORE_DIR.mkdir(exist_ok=True, parents=True) - - # Try to recreate the semaphore - try: - semaphore = await asyncio.to_thread( - lambda: portalocker.utils.NamedBoundedSemaphore( - maximum=semaphore_limit, - name=sem_key, - directory=str(MULTIPROCESS_SEMAPHORE_DIR), - timeout=0.1, - ) - ) - # Continue with the new semaphore - continue - except Exception as recreate_error: - logger.error(f'Failed to recreate semaphore: {recreate_error}') - # If recreation fails and we're in lax mode, return without lock - if semaphore_lax: - logger.warning(f'Failed to recreate semaphore "{sem_key}", proceeding without concurrency limit') - return False, None - raise - else: - # Max recreate attempts exceeded - if semaphore_lax: - logger.warning( - f'Max semaphore recreation attempts exceeded for "{sem_key}", proceeding without concurrency limit' - ) - return False, None - raise - else: - # Other OS errors - raise - - except (AssertionError, Exception) as e: - # Handle "Already locked" error by skipping this attempt - if 'Already locked' in str(e) or isinstance(e, AssertionError): - # Lock file might be stale from a previous process crash - # Wait before retrying - remaining_time = sem_timeout - (time.time() - start_time) - if remaining_time > retry_delay: - await asyncio.sleep(retry_delay) - retry_delay = min(retry_delay * backoff_factor, 1.0) - continue - elif 'Could not acquire' not in str(e) and not isinstance(e, TimeoutError): - raise - - # Timeout reached - if not semaphore_lax: - raise TimeoutError( - f'Failed to acquire multiprocess semaphore "{sem_key}" within {sem_timeout}s ' - f'(limit={semaphore_limit}, timeout={timeout}s per operation)' + def _filter(_: traceback.TracebackException): + trace_exc.stack = traceback.StackSummary.from_list( + [f for f in trace_exc.stack if 'asyncio/tasks.py' not in f.filename and 'lib/python' not in f.filename] ) - logger.warning( - f'Failed to acquire 
multiprocess semaphore "{sem_key}" after {sem_timeout:.1f}s, proceeding without concurrency limit' - ) - return False, None - - -async def _acquire_asyncio_semaphore( - semaphore: asyncio.Semaphore, - sem_timeout: float, - sem_key: str, - semaphore_lax: bool, - semaphore_limit: int, - timeout: float, - sem_start: float, -) -> bool: - """Acquire an asyncio semaphore.""" - try: - async with asyncio.timeout(sem_timeout): - await semaphore.acquire() - return True - except TimeoutError: - sem_wait_time = time.time() - sem_start - if not semaphore_lax: - raise TimeoutError( - f'Failed to acquire semaphore "{sem_key}" within {sem_timeout}s ' - f'(limit={semaphore_limit}, timeout={timeout}s per operation)' - ) - logger.warning( - f'Failed to acquire semaphore "{sem_key}" after {sem_wait_time:.1f}s, proceeding without concurrency limit' - ) - return False - - -async def _execute_with_retries( - func: Callable[P, Coroutine[Any, Any, T]], - args: P.args, # type: ignore - kwargs: P.kwargs, # type: ignore - retries: int, - timeout: float, - wait: float, - backoff_factor: float, - retry_on: tuple[type[Exception], ...] | None, - start_time: float, - sem_start: float, - semaphore_limit: int | None, -) -> T: - """Execute the function with retry logic.""" - for attempt in range(retries + 1): - try: - # Execute with per-attempt timeout - async with asyncio.timeout(timeout): - return await func(*args, **kwargs) # type: ignore[reportCallIssue] - - except Exception as e: - # Check if we should retry this exception - if retry_on is not None and not isinstance(e, retry_on): - raise - - if attempt < retries: - # Calculate wait time with backoff - current_wait = wait * (backoff_factor**attempt) - - # Only log warning on the final retry attempt (second-to-last overall attempt) - if attempt == retries - 1: - logger.warning( - f'{func.__name__} failed (attempt {attempt + 1}/{retries + 1}): ' - f'{type(e).__name__}: {e}. Waiting {current_wait:.1f}s before retry...' 
- ) - # else: - # # For earlier attempts, skip logging to reduce noise - # logger.debug( - # f'{func.__name__} failed (attempt {attempt + 1}/{retries + 1}): ' - # f'{type(e).__name__}: {e}. Waiting {current_wait:.1f}s before retry...' - # ) - await asyncio.sleep(current_wait) - else: - # Final failure - total_time = time.time() - start_time - sem_wait = time.time() - sem_start - total_time if semaphore_limit else 0 - sem_str = f'Semaphore wait: {sem_wait:.1f}s. ' if sem_wait > 0 else '' - logger.error( - f'{func.__name__} failed after {retries + 1} attempts over {total_time:.1f}s. ' - f'{sem_str}Final error: {type(e).__name__}: {e}' - ) - raise - - # This should never be reached, but satisfies type checker - raise RuntimeError('Unexpected state in retry logic') - - -def _track_active_operations(increment: bool = True) -> None: - """Track active retry operations.""" - global _active_retry_operations - with _active_operations_lock: - if increment: - _active_retry_operations += 1 - else: - _active_retry_operations = max(0, _active_retry_operations - 1) - - -def _check_system_overload_if_needed() -> None: - """Check for system overload if enough time has passed since last check.""" - global _last_overload_check - current_time = time.time() - if current_time - _last_overload_check > _overload_check_interval: - _last_overload_check = current_time - is_overloaded, reason = _check_system_overload() - if is_overloaded: - logger.warning(f'⚠️ System overload detected: {reason}. Consider reducing concurrent operations to prevent hanging.') - - -def retry( - wait: float = 3, - retries: int = 3, - timeout: float = 5, - retry_on: tuple[type[Exception], ...] 
| None = None, - backoff_factor: float = 1.0, - semaphore_limit: int | None = None, - semaphore_name: str | None = None, - semaphore_lax: bool = True, - semaphore_scope: Literal['multiprocess', 'global', 'class', 'self'] = 'global', - semaphore_timeout: float | None = None, -): - """ - Retry decorator with semaphore support for async functions. - - Args: - wait: Seconds to wait between retries - retries: Number of retry attempts after initial failure - timeout: Per-attempt timeout in seconds - retry_on: Tuple of exception types to retry on (None = retry all exceptions) - backoff_factor: Multiplier for wait time after each retry (1.0 = no backoff) - semaphore_limit: Max concurrent executions (creates semaphore if needed) - semaphore_name: Name for semaphore (defaults to function name) - semaphore_lax: If True, continue without semaphore on acquisition failure - semaphore_scope: Scope for semaphore sharing: - - 'global': All calls share one semaphore (default) - - 'class': All instances of a class share one semaphore - - 'self': Each instance gets its own semaphore - - 'multiprocess': All processes on the machine share one semaphore - semaphore_timeout: Max time to wait for semaphore acquisition (None = timeout * (limit - 1)) or 0.01s - - Example: - @retry(wait=3, retries=3, timeout=5, semaphore_limit=3, semaphore_scope='self') - async def some_function(self, ...): - # Limited to 5s per attempt, retries up to 3 times on failure - # Max 3 concurrent executions per instance - - Notes: - - semaphore aquision happens once at start time, it's not retried - - semaphore_timeout is only used if semaphore_limit is set. - - if semaphore_timeout is set to 0, it will wait forever for a semaphore slot to become available. 
- - if semaphore_timeout is set to None, it will wait for the default (timeout * (semaphore_limit - 1)) +0.01s - - retries are 0-indexed, so retries=1 means the function will be called 2 times total (1 initial + 1 retry) - """ - - def decorator(func: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, Coroutine[Any, Any, T]]: - @wraps(func) - async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: # type: ignore[return] - # Initialize semaphore-related variables - semaphore: Any = None - semaphore_acquired = False - multiprocess_lock: Any = None - sem_start = time.time() - - # Handle semaphore if specified - if semaphore_limit is not None: - # Get semaphore key and create/retrieve semaphore - sem_key = _get_semaphore_key(func.__name__, semaphore_name, semaphore_scope, args) - semaphore = _get_or_create_semaphore(sem_key, semaphore_limit, semaphore_scope) - - # Calculate timeout for semaphore acquisition - sem_timeout = _calculate_semaphore_timeout(semaphore_timeout, timeout, semaphore_limit) - - # Acquire semaphore based on type - if semaphore_scope == 'multiprocess': - semaphore_acquired, multiprocess_lock = await _acquire_multiprocess_semaphore( - semaphore, sem_timeout, sem_key, semaphore_lax, semaphore_limit, timeout - ) - else: - semaphore_acquired = await _acquire_asyncio_semaphore( - semaphore, sem_timeout, sem_key, semaphore_lax, semaphore_limit, timeout, sem_start - ) - - # Track active operations and check system overload - _track_active_operations(increment=True) - _check_system_overload_if_needed() - - # Execute function with retries - start_time = time.time() - try: - return await _execute_with_retries( - func, args, kwargs, retries, timeout, wait, backoff_factor, retry_on, start_time, sem_start, semaphore_limit - ) - finally: - # Clean up: decrement active operations and release semaphore - _track_active_operations(increment=False) - - if semaphore_acquired and semaphore: - try: - if semaphore_scope == 'multiprocess' and multiprocess_lock: - await 
asyncio.to_thread(lambda: multiprocess_lock.release()) - elif semaphore: - semaphore.release() - except (FileNotFoundError, OSError) as e: - # Handle case where lock file was removed during operation - if isinstance(e, FileNotFoundError) or 'No such file or directory' in str(e): - logger.warning(f'Semaphore lock file disappeared during release, ignoring: {e}') - else: - # Log other OS errors but don't raise - we already completed the operation - logger.error(f'Error releasing semaphore: {e}') - - return wrapper - - return decorator + if trace_exc.__cause__: + _filter(trace_exc.__cause__) + if trace_exc.__context__: + _filter(trace_exc.__context__) + + _filter(trace_exc) + return ''.join(trace_exc.format()) + + +__all__ = [ + 'run_with_timeout', + 'cancel_and_await', + '_run_with_slow_monitor', + 'log_filtered_traceback', + 'CleanShutdownQueue', + 'QueueShutDown', + 'extract_basemodel_generic_arg', + 'time_execution', +] diff --git a/bubus/jsonschema.py b/bubus/jsonschema.py new file mode 100644 index 0000000..37de83e --- /dev/null +++ b/bubus/jsonschema.py @@ -0,0 +1,397 @@ +import inspect +from collections.abc import Callable, Iterator, Mapping, Sequence +from typing import Any, TypeAlias, cast + +from pydantic import BaseModel, Field, TypeAdapter, create_model + +_SCHEMA_TYPE_REGISTRY: tuple[tuple[str, type[Any], str], ...] 
= ( + ('string', str, 'string'), + ('integer', int, 'number'), # note both integer and number are mapped to the same JSON Schema type + ('number', float, 'number'), + ('boolean', bool, 'boolean'), + ('object', dict, 'object'), + ('array', list, 'array'), + ('null', type(None), 'null'), +) + +TYPE_MAPPING: dict[str, type[Any]] = {schema_type: python_type for schema_type, python_type, _ in _SCHEMA_TYPE_REGISTRY} + +CONSTRAINT_MAPPING: dict[str, str] = { + 'minimum': 'ge', + 'maximum': 'le', + 'exclusiveMinimum': 'gt', + 'exclusiveMaximum': 'lt', + 'inclusiveMinimum': 'ge', + 'inclusiveMaximum': 'le', + 'minItems': 'min_length', + 'maxItems': 'max_length', +} + +_NON_PRIMITIVE_SCHEMA_TYPES = {'object', 'array'} + +PRIMITIVE_TYPE_MAPPING: dict[str, type[Any]] = { + schema_type: python_type + for schema_type, python_type, _ in _SCHEMA_TYPE_REGISTRY + if schema_type not in _NON_PRIMITIVE_SCHEMA_TYPES +} + +IDENTIFIER_NORMALIZATION: dict[str, str] = {schema_type: identifier for schema_type, _, identifier in _SCHEMA_TYPE_REGISTRY} + +JSON_SCHEMA_DRAFT = 'https://json-schema.org/draft/2020-12/schema' +_TYPE_ADAPTER_CACHE: dict[Any, TypeAdapter[Any]] = {} + +FieldDefinition: TypeAlias = Any | tuple[Any, Any] + + +def _get_cached_type_adapter(result_type: Any) -> TypeAdapter[Any]: + """Return a cached TypeAdapter for hashable result types.""" + try: + cached = _TYPE_ADAPTER_CACHE.get(result_type) + except TypeError: + return TypeAdapter(result_type) + if cached is not None: + return cached + adapter = TypeAdapter(result_type) + _TYPE_ADAPTER_CACHE[result_type] = adapter + return adapter + + +def _as_string_key_dict(value: object) -> dict[str, Any] | None: + """Return a dict view with only string keys, otherwise None.""" + if not isinstance(value, Mapping): + return None + value_mapping = cast(Mapping[object, Any], value) + normalized: dict[str, Any] = {} + for raw_key, raw_value in value_mapping.items(): + if isinstance(raw_key, str): + normalized[raw_key] = raw_value + 
return normalized + + +def _as_non_string_sequence(value: object) -> Sequence[Any] | None: + if isinstance(value, Sequence) and not isinstance(value, (str, bytes, bytearray)): + return cast(Sequence[Any], value) + return None + + +def _iter_string_key_dicts(value: object) -> Iterator[dict[str, Any]]: + sequence_values = _as_non_string_sequence(value) + if sequence_values is None: + return + for candidate_raw in sequence_values: + candidate = _as_string_key_dict(candidate_raw) + if candidate is not None: + yield candidate + + +def _extract_non_null_json_schema_type(schema: Mapping[str, Any]) -> str | None: + raw_type = schema.get('type') + if isinstance(raw_type, str): + return raw_type + + raw_type_values = _as_non_string_sequence(raw_type) + if raw_type_values is not None: + non_null_types = [item for item in raw_type_values if isinstance(item, str) and item != 'null'] + if len(non_null_types) == 1: + return non_null_types[0] + + return None + + +def _json_schema_allows_null(schema: Mapping[str, Any]) -> bool: + raw_type = schema.get('type') + if raw_type == 'null': + return True + raw_type_values = _as_non_string_sequence(raw_type) + if raw_type_values is not None: + if any(item == 'null' for item in raw_type_values): + return True + + for candidate in _iter_string_key_dicts(schema.get('anyOf')): + if candidate.get('type') == 'null': + return True + return False + + +def _nullable_type(resolved_type: Any, *, nullable: bool) -> Any: + if not nullable or resolved_type is type(None): + return resolved_type + return resolved_type | None + + +def normalize_result_dict(value: Any) -> dict[str, Any]: + """Return a dict with only string keys from an arbitrary mapping-like value.""" + return _as_string_key_dict(value) or {} + + +def _json_schema_primitive_type(schema: dict[str, Any]) -> type[Any] | None: + """Map simple JSON Schema primitive types to Python runtime types.""" + schema_type = _extract_non_null_json_schema_type(schema) + return 
PRIMITIVE_TYPE_MAPPING.get(schema_type) if schema_type is not None else None + + +def _json_schema_identifier(schema: dict[str, Any]) -> str | None: + schema_type = _extract_non_null_json_schema_type(schema) + return IDENTIFIER_NORMALIZATION.get(schema_type) if schema_type is not None else None + + +def get_field_params_from_field_schema(field_schema: dict[str, Any]) -> dict[str, Any]: + """Gets Pydantic field parameters from a JSON schema field.""" + field_params: dict[str, Any] = {} + for constraint, constraint_value in CONSTRAINT_MAPPING.items(): + if constraint in field_schema: + field_params[constraint_value] = field_schema[constraint] + if 'description' in field_schema: + field_params['description'] = field_schema['description'] + if 'default' in field_schema: + field_params['default'] = field_schema['default'] + return field_params + + +def _json_schema_ref_name(schema: Mapping[str, Any]) -> str | None: + raw_ref = schema.get('$ref') + if raw_ref is None: + return None + reference = str(raw_ref).strip() + if not reference: + return None + return reference.split('/')[-1] + + +def _build_model_fields_from_schema( + schema: Mapping[str, Any], + *, + resolve_field_type: Callable[[dict[str, Any]], Any], +) -> dict[str, FieldDefinition]: + fields: dict[str, FieldDefinition] = {} + properties = _as_string_key_dict(schema.get('properties')) + if properties is None: + return fields + required_raw = schema.get('required') + required_fields: set[str] = set() + required_values = _as_non_string_sequence(required_raw) + if required_values is not None: + required_fields = {name for name in required_values if isinstance(name, str)} + + for field_name, field_schema_raw in properties.items(): + field_schema = _as_string_key_dict(field_schema_raw) + if field_schema is None: + continue + field_type = resolve_field_type(field_schema) + field_params = get_field_params_from_field_schema(field_schema=field_schema) + field_name_str = str(field_name) + is_required = field_name_str in 
required_fields + has_default = 'default' in field_params + if not is_required and not has_default: + relaxed_type = _nullable_type(field_type, nullable=True) + fields[field_name_str] = (relaxed_type, Field(default=None, **field_params)) + else: + fields[field_name_str] = (field_type, Field(**field_params)) + + return fields + + +def _create_dynamic_model( + *, + model_name: str, + model_schema: Mapping[str, Any], + fields: Mapping[str, FieldDefinition], +) -> type[BaseModel]: + field_definitions: dict[str, Any] = dict(fields) + return create_model( + model_name, + __doc__=str(model_schema.get('description', '')), + **field_definitions, + ) + + +def pydantic_model_from_json_schema(result_type: Any) -> Any: + """Reconstruct runtime types from JSON Schema when possible.""" + if not isinstance(result_type, dict): + return result_type + normalized_schema = normalize_result_dict(result_type) + definitions = _as_string_key_dict(normalized_schema.get('$defs')) or {} + models: dict[str, type[BaseModel]] = {} + model_build_stack: set[str] = set() + + def _combine_union_types(resolved_types: list[Any], *, nullable: bool) -> Any: + if not resolved_types: + return _nullable_type(Any, nullable=nullable) + combined = resolved_types[0] + for candidate_type in resolved_types[1:]: + combined = combined | candidate_type + return _nullable_type(combined, nullable=nullable) + + def _resolve_ref_model(model_reference: str) -> Any: + if model_reference in models: + return models[model_reference] + if model_reference in model_build_stack: + return Any + model_schema_raw = definitions.get(model_reference) + model_schema = _as_string_key_dict(model_schema_raw) + if model_schema is None: + return Any + + model_build_stack.add(model_reference) + try: + dynamic_model = _create_dynamic_model( + model_name=model_reference, + model_schema=model_schema, + fields=_build_model_fields_from_schema( + model_schema, + resolve_field_type=_resolve_schema, + ), + ) + models[model_reference] = 
dynamic_model + return dynamic_model + finally: + model_build_stack.remove(model_reference) + + def _resolve_array_schema(schema: dict[str, Any], *, nullable: bool) -> Any: + prefix_items_raw = schema.get('prefixItems') + prefix_items = _as_non_string_sequence(prefix_items_raw) + if prefix_items is not None: + tuple_items = [_resolve_schema(item) for item in prefix_items] + if tuple_items: + resolved_tuple = tuple.__class_getitem__(tuple(tuple_items)) + return _nullable_type(resolved_tuple, nullable=nullable) + + items_schema = _as_string_key_dict(schema.get('items')) + if items_schema is None: + return _nullable_type(list[Any], nullable=nullable) + item_type = _resolve_schema(items_schema) + if schema.get('uniqueItems') is True: + return _nullable_type(set[item_type], nullable=nullable) + return _nullable_type(list[item_type], nullable=nullable) + + def _resolve_object_schema(schema: dict[str, Any], *, nullable: bool) -> Any: + properties = _as_string_key_dict(schema.get('properties')) + if properties: + dynamic_model = _create_dynamic_model( + model_name=str(schema.get('title', 'InlineObject')), + model_schema=schema, + fields=_build_model_fields_from_schema( + schema, + resolve_field_type=_resolve_schema, + ), + ) + return _nullable_type(dynamic_model, nullable=nullable) + + additional_properties = _as_string_key_dict(schema.get('additionalProperties')) + if additional_properties is not None: + value_type = _resolve_schema(additional_properties) + return _nullable_type(dict[str, value_type], nullable=nullable) + return _nullable_type(dict[str, Any], nullable=nullable) + + def _resolve_schema(schema_raw: Any) -> Any: + schema = normalize_result_dict(schema_raw) + if not schema: + return Any + + allows_null = _json_schema_allows_null(schema) + model_reference = _json_schema_ref_name(schema) + if model_reference is not None: + return _nullable_type(_resolve_ref_model(model_reference), nullable=allows_null) + + primitive_type = _json_schema_primitive_type(schema) + 
if primitive_type is not None: + return _nullable_type(primitive_type, nullable=allows_null) + + any_of_candidates = _as_non_string_sequence(schema.get('anyOf')) + if any_of_candidates is not None: + resolved_types: list[Any] = [] + includes_null = allows_null + for candidate in _iter_string_key_dicts(any_of_candidates): + if candidate.get('type') == 'null': + includes_null = True + continue + resolved_types.append(_resolve_schema(candidate)) + return _combine_union_types(resolved_types, nullable=includes_null) + + schema_type = _extract_non_null_json_schema_type(schema) + if schema_type == 'null': + return type(None) + if schema_type == 'array': + return _resolve_array_schema(schema, nullable=allows_null) + if schema_type == 'object': + return _resolve_object_schema(schema, nullable=allows_null) + if isinstance(schema_type, str) and schema_type in TYPE_MAPPING: + return _nullable_type(TYPE_MAPPING[schema_type], nullable=allows_null) + return _nullable_type(Any, nullable=allows_null) + + for model_name in definitions: + _resolve_ref_model(model_name) + return _resolve_schema(normalized_schema) + + +def pydantic_model_to_json_schema(result_type: Any) -> dict[str, Any] | None: + """Best-effort conversion of a Python result schema/type into JSON Schema.""" + if result_type is None: + return None + if isinstance(result_type, dict): + schema = dict(cast(dict[str, Any], result_type)) + schema.setdefault('$schema', JSON_SCHEMA_DRAFT) + return schema + if isinstance(result_type, str): + return None + + try: + if inspect.isclass(result_type) and issubclass(result_type, BaseModel): + schema = result_type.model_json_schema() + schema.setdefault('$schema', JSON_SCHEMA_DRAFT) + return schema + except TypeError: + pass + + try: + schema = TypeAdapter(result_type).json_schema() + normalized_schema = normalize_result_dict(schema) + normalized_schema.setdefault('$schema', JSON_SCHEMA_DRAFT) + return normalized_schema + except Exception: + return None + + +def 
result_type_identifier_from_schema(result_type: Any) -> str | None: + if result_type is None: + return None + if isinstance(result_type, str): + return result_type + if isinstance(result_type, dict): + return _json_schema_identifier(normalize_result_dict(result_type)) + + if result_type is str: + return 'string' + if result_type in (int, float): + return 'number' + if result_type is bool: + return 'boolean' + + derived_schema = pydantic_model_to_json_schema(result_type) + if isinstance(derived_schema, dict): + return _json_schema_identifier(derived_schema) + return None + + +def validate_result_against_type(result_type: Any, result: Any) -> Any: + if result_type is None: + return result + + if isinstance(result_type, dict): + result_type = pydantic_model_from_json_schema(result_type) + + if inspect.isclass(result_type) and issubclass(result_type, BaseModel): + return result_type.model_validate(result) + + adapter = _get_cached_type_adapter(result_type) + return adapter.validate_python(result) + + +__all__ = [ + 'get_field_params_from_field_schema', + 'normalize_result_dict', + 'pydantic_model_from_json_schema', + 'pydantic_model_to_json_schema', + 'result_type_identifier_from_schema', + 'validate_result_against_type', +] diff --git a/bubus/lock_manager.py b/bubus/lock_manager.py new file mode 100644 index 0000000..695fdd9 --- /dev/null +++ b/bubus/lock_manager.py @@ -0,0 +1,231 @@ +import asyncio +import contextvars +from contextlib import asynccontextmanager, contextmanager +from contextvars import ContextVar +from typing import TYPE_CHECKING, Any, Protocol, TypeVar + +from bubus.base_event import BaseEvent, EventConcurrencyMode, EventHandlerConcurrencyMode, EventResult + +if TYPE_CHECKING: + from bubus.event_bus import EventBus + +T_EventResultType = TypeVar('T_EventResultType') + + +class LockManagerProtocol(Protocol): + """Minimal lock API required by EventBus runtime execution paths.""" + + def get_lock_for_event(self, bus: 'EventBus', event: 
BaseEvent[T_EventResultType]) -> 'ReentrantLock | None': + """Return the concrete event-level lock object or ``None`` for parallel mode.""" + ... + + def get_lock_for_event_handler( + self, + bus: 'EventBus', + event: BaseEvent[T_EventResultType], + event_result: EventResult[T_EventResultType], + ) -> 'ReentrantLock | None': + """Return the concrete handler-level lock object or ``None`` for parallel mode.""" + ... + + def _run_with_event_lock(self, bus: 'EventBus', event: BaseEvent[T_EventResultType]) -> Any: + """Context manager for event-level lock scope.""" + ... + + def _run_with_handler_lock( + self, bus: 'EventBus', event: BaseEvent[T_EventResultType], event_result: EventResult[T_EventResultType] + ) -> Any: + """Context manager for per-handler lock scope.""" + ... + + def _run_with_handler_dispatch_context(self, bus: 'EventBus', event: BaseEvent[T_EventResultType]) -> Any: + """Context manager that mirrors held event-lock state into dispatch context.""" + ... + + +class ReentrantLock: + """Context-aware re-entrant lock over an asyncio semaphore. + + Lifecycle: + 1. `__aenter__` acquires the semaphore when this context does not already hold + the lock id. + 2. Nested entries in the same context only bump the local depth counter. + 3. `__aexit__` decrements depth and releases semaphore at depth zero. + """ + + # Context variable storing lock-id -> re-entrant depth for the current async context. 
+ _held_lock_depths: ContextVar[dict[int, int]] = ContextVar('held_lock_depths', default={}) + + def __init__(self): + self._semaphore: asyncio.Semaphore | None = None + self._loop: asyncio.AbstractEventLoop | None = None + self._lock_id = id(self) + + def _get_semaphore(self) -> asyncio.Semaphore: + """Get or create the semaphore for the current event loop.""" + current_loop = asyncio.get_running_loop() + if self._semaphore is None or self._loop != current_loop: + # Create new semaphore for this event loop + self._semaphore = asyncio.Semaphore(1) + self._loop = current_loop + return self._semaphore + + def _depth(self) -> int: + return ReentrantLock._held_lock_depths.get().get(self._lock_id, 0) + + def _set_depth(self, depth: int) -> None: + current = ReentrantLock._held_lock_depths.get() + updated = dict(current) + if depth <= 0: + updated.pop(self._lock_id, None) + else: + updated[self._lock_id] = depth + ReentrantLock._held_lock_depths.set(updated) + + def mark_held_in_current_context(self) -> contextvars.Token[dict[int, int]]: + """Temporarily mark this lock as already held in the current context. + + Used when a handler runs in a copied dispatch context and needs re-entrant + lock behavior to match the parent processing context. 
+ """ + current = ReentrantLock._held_lock_depths.get() + updated = dict(current) + updated[self._lock_id] = updated.get(self._lock_id, 0) + 1 + return ReentrantLock._held_lock_depths.set(updated) + + @staticmethod + def reset_context_mark(token: contextvars.Token[dict[int, int]]) -> None: + """Undo a prior `mark_held_in_current_context` update.""" + ReentrantLock._held_lock_depths.reset(token) + + async def __aenter__(self): + depth = self._depth() + if depth > 0: + self._set_depth(depth + 1) + return self + + # Acquire the lock + await self._get_semaphore().acquire() + self._set_depth(1) + return self + + async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any) -> None: + depth = self._depth() + if depth <= 0: + return + + next_depth = depth - 1 + self._set_depth(next_depth) + if next_depth == 0: + self._get_semaphore().release() + + def locked(self) -> bool: + """Check if the lock is currently held.""" + # If semaphore doesn't exist yet or is from a different loop, it's not locked + try: + current_loop = asyncio.get_running_loop() + if self._semaphore is None or self._loop != current_loop: + return False + return self._semaphore.locked() + except RuntimeError: + # No running loop, can't check + return False + + +class LockManager: + """Centralized lock/semaphore policy for event and handler execution. + + This manager owns lock resolution and all lock mutations. `EventBus` and + handlers should use only these APIs instead of touching lock objects directly. + """ + + def get_lock_for_event(self, bus: 'EventBus', event: BaseEvent[T_EventResultType]) -> ReentrantLock | None: + """Resolve the event-level lock for one event execution. + + Lifecycle: + - Called before processing an event (runloop, step, queue-jump). + - Returns `None` for `'parallel'`, so no lock is acquired. + - Returns the shared class lock for `'global-serial'`. + - Returns `bus.event_bus_serial_lock` for `'bus-serial'`. 
+ """ + resolved = event.event_concurrency or bus.event_concurrency + if resolved == EventConcurrencyMode.PARALLEL: + return None + if resolved == EventConcurrencyMode.GLOBAL_SERIAL: + return bus.event_global_serial_lock + return bus.event_bus_serial_lock + + def get_lock_for_event_handler( + self, + bus: 'EventBus', + event: BaseEvent[T_EventResultType], + event_result: EventResult[T_EventResultType], + ) -> ReentrantLock | None: + """Resolve the per-event handler lock for one handler execution. + + Lifecycle: + - Called inside `EventBus._run_handler` before running a handler. + - Returns `None` for `'parallel'` handler mode. + - Returns and lazily initializes the event handler lock for `'serial'`. + """ + del event_result # reserved for future mode-specific rules + resolved = event.event_handler_concurrency or bus.event_handler_concurrency + if resolved == EventHandlerConcurrencyMode.PARALLEL: + return None + current_lock = event._get_handler_lock() # pyright: ignore[reportPrivateUsage] + if current_lock is None: + current_lock = ReentrantLock() + event._set_handler_lock(current_lock) # pyright: ignore[reportPrivateUsage] + return current_lock + + @asynccontextmanager + async def _run_with_event_lock(self, bus: 'EventBus', event: BaseEvent[T_EventResultType]): + """Acquire/release the resolved event lock around event processing. + + Lifecycle: + - Wraps event processing in runloop and manual `step()`. + - No-op for `'parallel'` events. + """ + lock = self.get_lock_for_event(bus, event) + if lock is None: + yield + return + async with lock: + yield + + @asynccontextmanager + async def _run_with_handler_lock( + self, bus: 'EventBus', event: BaseEvent[T_EventResultType], event_result: EventResult[T_EventResultType] + ): + """Acquire/release the resolved per-event handler lock around one handler run. + + Lifecycle: + - Used directly inside `EventResult.run_handler(...)`. + - No-op for `'parallel'` handler mode. 
+ """ + lock = self.get_lock_for_event_handler(bus, event, event_result) + if lock is None: + yield + return + async with lock: + yield + + @contextmanager + def _run_with_handler_dispatch_context(self, bus: 'EventBus', event: BaseEvent[T_EventResultType]): + """Mirror parent event-lock ownership into the current copied context. + + Lifecycle: + - Used only by `EventResult.run_handler` when running handlers inside a copied + dispatch context (`context=dispatch_context`). + - Marks the resolved event lock as held in this context without acquiring + the semaphore, enabling safe re-entry for awaited child events. + """ + lock = self.get_lock_for_event(bus, event) + if lock is None: + yield + return + token = lock.mark_held_in_current_context() + try: + yield + finally: + ReentrantLock.reset_context_mark(token) diff --git a/bubus/logging.py b/bubus/logging.py index b1b3814..42a8bc6 100644 --- a/bubus/logging.py +++ b/bubus/logging.py @@ -1,21 +1,26 @@ """Helper functions for logging event trees and formatting""" import asyncio +import logging import math from collections import defaultdict from datetime import UTC, datetime -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, Any, cast if TYPE_CHECKING: - from bubus.models import BaseEvent, EventResult - from bubus.service import EventBus + from bubus.base_event import BaseEvent, EventResult + from bubus.event_bus import EventBus -def format_timestamp(dt: datetime | None) -> str: - """Format a datetime for display""" +def format_timestamp(dt: str | datetime | None) -> str: + """Format an ISO datetime string (or datetime) for display.""" if dt is None: return 'N/A' - return dt.strftime('%H:%M:%S.%f')[:-3] # Show time with milliseconds + if isinstance(dt, str): + parsed = datetime.fromisoformat(dt) + else: + parsed = dt + return parsed.strftime('%H:%M:%S.%f')[:-3] def format_result_value(value: Any) -> str: @@ -27,9 +32,11 @@ def format_result_value(value: Any) -> str: if isinstance(value, (str, int, 
float, bool)): return repr(value) if isinstance(value, dict): - return f'dict({len(value)} items)' # type: ignore[arg-type] + value_dict = cast(dict[Any, Any], value) + return f'dict({len(value_dict)} items)' if isinstance(value, list): - return f'list({len(value)} items)' # type: ignore[arg-type] + value_list = cast(list[Any], value) + return f'list({len(value_list)} items)' return f'{type(value).__name__}(...)' @@ -37,27 +44,27 @@ def log_event_tree( event: 'BaseEvent[Any]', indent: str = '', is_last: bool = True, - child_events_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, + event_children_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, ) -> str: - from bubus.models import logger + from bubus.base_event import logger """Print this event and its results with proper tree formatting""" # Determine the connector connector = '└── ' if is_last else 'β”œβ”€β”€ ' # Print this event's line - status_icon = 'βœ…' if event.event_status == 'completed' else 'πŸƒ' if event.event_status == 'started' else '⏳' - # Format timing info timing_str = f'[{format_timestamp(event.event_created_at)}' if event.event_completed_at and event.event_created_at: - duration = (event.event_completed_at - event.event_created_at).total_seconds() + completed_dt = datetime.fromisoformat(event.event_completed_at) + created_dt = datetime.fromisoformat(event.event_created_at) + duration = (completed_dt - created_dt).total_seconds() timing_str += f' ({duration:.3f}s)' timing_str += ']' lines: list[str] = [] - event_line = f'{indent}{connector}{status_icon} {event.event_type}#{event.event_id[-4:]} {timing_str}' + event_line = f'{indent}{connector}{event.event_type}#{event.event_id[-4:]} {timing_str}' logger.warning(event_line) lines.append(event_line) @@ -70,12 +77,12 @@ def log_event_tree( # Print each result if event.event_results: - results_sorted = sorted(event.event_results.items(), key=lambda x: x[1].started_at or datetime.min.replace(tzinfo=UTC)) + 
results_sorted = sorted(event.event_results.items(), key=lambda x: x[1].started_at or '') # Calculate which is the last item considering both results and unmapped children - unmapped_children: list['BaseEvent[Any]'] = [] - if child_events_by_parent: - all_children = child_events_by_parent.get(event.event_id, []) + unmapped_children: list[BaseEvent[Any]] = [] + if event_children_by_parent: + all_children = event_children_by_parent.get(event.event_id, []) for child in all_children: # Will be printed later if not already printed by a handler if child.event_id not in [c.event_id for r in event.event_results.values() for c in r.event_children]: @@ -85,31 +92,31 @@ def log_event_tree( for i, (_handler_id, result) in enumerate(results_sorted): is_last_item = i == total_items - 1 - lines.append(log_eventresult_tree(result, new_indent, is_last_item, child_events_by_parent)) + lines.append(log_event_result_tree(result, new_indent, is_last_item, event_children_by_parent)) # Track child events printed by this result for child in result.event_children: printed_child_ids.add(child.event_id) # Print unmapped children (those not printed by any handler) - if child_events_by_parent: - children = child_events_by_parent.get(event.event_id, []) + if event_children_by_parent: + children = event_children_by_parent.get(event.event_id, []) for i, child in enumerate(children): if child.event_id not in printed_child_ids: is_last_child = i == len(children) - 1 - lines.append(log_event_tree(child, new_indent, is_last_child, child_events_by_parent)) + lines.append(log_event_tree(child, new_indent, is_last_child, event_children_by_parent)) return '\n'.join(lines) -def log_eventresult_tree( +def log_event_result_tree( result: 'EventResult[Any]', indent: str = '', is_last: bool = True, - child_events_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, + event_children_by_parent: dict[str | None, list['BaseEvent[Any]']] | None = None, ) -> str: """Print this result and its child 
events with proper tree formatting""" - from bubus.models import logger + from bubus.base_event import logger # Determine the connector connector = '└── ' if is_last else 'β”œβ”€β”€ ' @@ -126,7 +133,7 @@ def log_eventresult_tree( ) # Format handler name with bus info - handler_display = f'{result.eventbus_name}.{result.handler_name}#{result.handler_id[-4:]}' + handler_display = f'{result.eventbus_label}.{result.handler.label}' # Format the result line result_line = f'{indent}{connector}{result_icon} {handler_display}' @@ -135,7 +142,9 @@ def log_eventresult_tree( if result.started_at: result_line += f' [{format_timestamp(result.started_at)}' if result.completed_at: - duration = (result.completed_at - result.started_at).total_seconds() + completed_dt = datetime.fromisoformat(result.completed_at) + started_dt = datetime.fromisoformat(result.started_at) + duration = (completed_dt - started_dt).total_seconds() result_line += f' ({duration:.3f}s)' result_line += ']' @@ -158,7 +167,7 @@ def log_eventresult_tree( if result.event_children: for i, child in enumerate(result.event_children): is_last_child = i == len(result.event_children) - 1 - lines.append(log_event_tree(child, new_indent, is_last_child, child_events_by_parent)) + lines.append(log_event_tree(child, new_indent, is_last_child, event_children_by_parent)) return '\n'.join(lines) @@ -166,10 +175,10 @@ def log_eventresult_tree( def log_eventbus_tree(eventbus: 'EventBus') -> str: """Print a nice pretty formatted tree view of all events in the history including their results and child events recursively""" - from bubus.models import logger + from bubus.base_event import logger # Build a mapping of parent_id to child events - parent_to_children: dict[str | None, list['BaseEvent[Any]']] = defaultdict(list) + parent_to_children: dict[str | None, list[BaseEvent[Any]]] = defaultdict(list) for event in eventbus.event_history.values(): parent_to_children[event.event_parent_id].append(event) @@ -211,17 +220,22 @@ def 
log_eventbus_tree(eventbus: 'EventBus') -> str: def log_timeout_tree(event: 'BaseEvent[Any]', timed_out_result: 'EventResult[Any]') -> None: """Log detailed timeout information showing the event tree and which handler timed out""" - from bubus.models import logger + from bubus.base_event import logger + from bubus.event_bus import EventBus + + if not logger.isEnabledFor(logging.WARNING): + return now = datetime.now(UTC) # Find the root event by walking up the parent chain root_event = event - eventbus = event.event_bus - while root_event.event_parent_id: + visited_parent_ids: set[str] = set() + while root_event.event_parent_id and root_event.event_parent_id not in visited_parent_ids: + visited_parent_ids.add(root_event.event_parent_id) parent_found = False # Search for parent in all EventBus instances - for bus in list(eventbus.all_instances): + for bus in list(EventBus.all_instances): if root_event.event_parent_id in bus.event_history: root_event = bus.event_history[root_event.event_parent_id] parent_found = True @@ -237,7 +251,7 @@ def log_timeout_tree(event: 'BaseEvent[Any]', timed_out_result: 'EventResult[Any logger.warning('=' * 80) logger.warning( - f'⏱️ TIMEOUT ERROR - Handling took more than {event.event_timeout}s for {timed_out_result.eventbus_name}.{timed_out_result.handler_name}({event})' + f'⏱️ TIMEOUT ERROR - Handling took more than {event.event_timeout}s for {timed_out_result.eventbus_label}.{timed_out_result.handler_name}({event})' ) logger.warning('=' * 80) @@ -246,8 +260,8 @@ def print_handler_line( handler_name: str, event_id_suffix: str, status: str = 'pending', - started_at: datetime | None = None, - completed_at: datetime | None = None, + started_at: str | None = None, + completed_at: str | None = None, timeout: float | None = None, is_expired: bool = False, is_interrupted: bool = False, @@ -280,7 +294,9 @@ def print_handler_line( # Col 5-10: timing info max_time = timeout or 0 if started_at: - elapsed_time = ((completed_at or now) - 
started_at).total_seconds() + started_at_dt = datetime.fromisoformat(started_at) + completed_at_dt = datetime.fromisoformat(completed_at) if completed_at else now + elapsed_time = (completed_at_dt - started_at_dt).total_seconds() if is_expired or (elapsed_time >= max_time): col5_timing_icon = 'βŒ›οΈ' @@ -340,7 +356,8 @@ def print_event_tree(evt: 'BaseEvent[Any]', indent: str = ''): or evt.event_created_at ) now = datetime.now(UTC) - elapsed = round((now - event_start_time).total_seconds()) + event_start_time_dt = datetime.fromisoformat(event_start_time) + elapsed = round((now - event_start_time_dt).total_seconds()) # Event line formatted with proper columns # Col 1: indent, Col 2: icon (πŸ“£), Col 3: description @@ -415,27 +432,27 @@ def print_event_tree(evt: 'BaseEvent[Any]', indent: str = ''): # After showing all handlers that ran, show any registered handlers that never started # This is for handlers that were registered but didn't get to run due to timeouts - from bubus.models import get_handler_id, get_handler_name - # Find which EventBus contains this event event_bus = None - for bus in list(eventbus.all_instances): + for bus in list(EventBus.all_instances): if evt.event_id in bus.event_history: event_bus = bus break - # Get all registered handlers for this event type - if event_bus and hasattr(event_bus, 'handlers') and evt.event_type in event_bus.handlers: - registered_handlers = event_bus.handlers[evt.event_type] + # Get all registered handlers that could match this event_type. 
+ if event_bus is not None: + indexed_ids = list(event_bus.handlers_by_key.get(evt.event_type, [])) + list(event_bus.handlers_by_key.get('*', [])) - for handler in registered_handlers: - handler_id = get_handler_id(handler, event_bus) + for handler_id in indexed_ids: + entry = event_bus.handlers.get(handler_id) + if entry is None: + continue # Check if this handler already ran (has an EventResult) if handler_id not in evt.event_results: # This handler was registered but never started - use helper to format print_handler_line( handler_indent=handler_indent, - handler_name=get_handler_name(handler), + handler_name=entry.handler_name, event_id_suffix=evt.event_id[-4:], status='pending', # Will show πŸ”² icon started_at=None, diff --git a/bubus/middlewares.py b/bubus/middlewares.py new file mode 100644 index 0000000..9582692 --- /dev/null +++ b/bubus/middlewares.py @@ -0,0 +1,565 @@ +"""Reusable EventBus middleware helpers.""" + +from __future__ import annotations + +import asyncio +import importlib +import logging +import sqlite3 +import threading +from pathlib import Path +from typing import TYPE_CHECKING, Any, Self + +from bubus.base_event import BaseEvent, EventResult, EventStatus +from bubus.event_handler import EventHandler +from bubus.logging import log_eventbus_tree + +if TYPE_CHECKING: + from bubus.event_bus import EventBus + +__all__ = [ + 'EventBusMiddleware', + 'OtelTracingMiddleware', + 'BusHandlerRegisteredEvent', + 'BusHandlerUnregisteredEvent', + 'WALEventBusMiddleware', + 'LoggerEventBusMiddleware', + 'SQLiteHistoryMirrorMiddleware', + 'AutoErrorEventMiddleware', + 'AutoReturnEventMiddleware', + 'AutoHandlerChangeEventMiddleware', +] + +logger = logging.getLogger('bubus.middleware') +_SYNTHETIC_EVENT_SUFFIXES = ('ErrorEvent', 'ResultEvent') + + +class EventBusMiddleware: + """Hookable lifecycle interface for observing or extending EventBus execution. 
+ + Hooks: + on_event_change(eventbus, event, status): Called on event state transitions + on_event_result_change(eventbus, event, event_result, status): Called on EventResult lifecycle transitions + on_bus_handlers_change(eventbus, handler, registered): Called when handlers are added/removed via on()/off() + + Status values for these hooks are only: + EventStatus.PENDING, EventStatus.STARTED, EventStatus.COMPLETED. + Handler failures are surfaced via ``event_result.status == 'error'`` and ``event_result.error`` + when ``status`` is ``EventStatus.COMPLETED``. + """ + + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + """Called on event state transitions (pending, started, completed).""" + + async def on_event_result_change( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + status: EventStatus, + ) -> None: + """Called on EventResult lifecycle transitions (pending, started, completed). + + Note: ``status`` never equals ``'error'``. Check ``event_result.status`` + and ``event_result.error`` on the completed callback to detect failures. + """ + + async def on_bus_handlers_change(self, eventbus: EventBus, handler: EventHandler, registered: bool) -> None: + """Called when handlers are added (registered=True) or removed (registered=False).""" + + +class OtelTracingMiddleware(EventBusMiddleware): + """Emit OpenTelemetry spans for events/handlers. 
+ + Setup example (with optional Sentry export): + + ```python + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import BatchSpanProcessor + import sentry_sdk + + provider = TracerProvider() + # provider.add_span_processor(BatchSpanProcessor(...your OTLP exporter...)) + # provider.add_span_processor(sentry_sdk.integrations.opentelemetry.SentrySpanProcessor()) # optional + trace.set_tracer_provider(provider) + + bus = EventBus(middlewares=[OtelTracingMiddleware()]) + ``` + """ + + def __init__(self, tracer: Any | None = None, trace_api: Any | None = None): + self._trace_api = trace_api + self._status_cls = None + self._status_code = None + if self._trace_api is None: + try: + self._trace_api = importlib.import_module('opentelemetry.trace') + except Exception: + self._trace_api = None + if tracer is None: + if self._trace_api is None: + raise RuntimeError( + 'OtelTracingMiddleware requires "opentelemetry-api". Install it with: pip install opentelemetry-api' + ) + tracer = self._trace_api.get_tracer('bubus.middleware.otel') + try: + status_mod = importlib.import_module('opentelemetry.trace.status') + self._status_cls = getattr(status_mod, 'Status', None) + self._status_code = getattr(status_mod, 'StatusCode', None) + except Exception: + pass + if tracer is None: + raise ImportError('OpenTelemetry tracer unavailable') + self._tracer = tracer + self._event_spans: dict[tuple[str, str], Any] = {} + self._handler_spans: dict[tuple[str, str, str], Any] = {} + + @staticmethod + def _event_key(eventbus: EventBus, event: BaseEvent[Any]) -> tuple[str, str]: + return (eventbus.id, event.event_id) + + @staticmethod + def _handler_key(eventbus: EventBus, event: BaseEvent[Any], event_result: EventResult[Any]) -> tuple[str, str, str]: + return (eventbus.id, event.event_id, event_result.handler_id) + + def _start_span(self, name: str, parent_span: Any | None = None) -> Any: + if parent_span is not None and 
self._trace_api is not None: + try: + return self._tracer.start_span(name, context=self._trace_api.set_span_in_context(parent_span)) + except Exception: + pass + return self._tracer.start_span(name) + + def _find_parent_span(self, event: BaseEvent[Any]) -> Any | None: + if not event.event_parent_id: + return None + from bubus.event_bus import EventBus + + for bus in list(EventBus.all_instances): + if not bus or event.event_parent_id not in bus.event_history: + continue + parent_event = bus.event_history[event.event_parent_id] + for parent_result in parent_event.event_results.values(): + if any(child.event_id == event.event_id for child in parent_result.event_children): + parent_handler_span = self._handler_spans.get((bus.id, parent_event.event_id, parent_result.handler_id)) + if parent_handler_span is not None: + return parent_handler_span + return self._event_spans.get((bus.id, parent_event.event_id)) + return None + + def _ensure_event_span(self, eventbus: EventBus, event: BaseEvent[Any]) -> Any: + key = self._event_key(eventbus, event) + existing = self._event_spans.get(key) + if existing is not None: + return existing + span = self._start_span(f'bubus.event.{event.event_type}', parent_span=self._find_parent_span(event)) + span.set_attribute('bubus.kind', 'event') + span.set_attribute('bubus.event_id', event.event_id) + span.set_attribute('bubus.event_type', event.event_type) + span.set_attribute('bubus.bus_id', eventbus.id) + span.set_attribute('bubus.bus_name', eventbus.label) + if event.event_parent_id: + span.set_attribute('bubus.event_parent_id', event.event_parent_id) + self._event_spans[key] = span + return span + + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + if status == EventStatus.STARTED: + self._ensure_event_span(eventbus, event) + return + if status == EventStatus.COMPLETED: + key = self._event_key(eventbus, event) + span = self._event_spans.pop(key, None) + if span is None: + span = 
self._ensure_event_span(eventbus, event) + self._event_spans.pop(key, None) + span.end() + + async def on_event_result_change( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + status: EventStatus, + ) -> None: + key = self._handler_key(eventbus, event, event_result) + if status == EventStatus.STARTED: + if key in self._handler_spans: + return + parent_event_span = self._ensure_event_span(eventbus, event) + span = self._start_span(f'bubus.handler.{event_result.handler_name}', parent_span=parent_event_span) + span.set_attribute('bubus.kind', 'handler') + span.set_attribute('bubus.event_id', event.event_id) + span.set_attribute('bubus.event_type', event.event_type) + span.set_attribute('bubus.handler_id', event_result.handler_id) + span.set_attribute('bubus.handler_name', event_result.handler_name) + span.set_attribute('bubus.bus_id', eventbus.id) + span.set_attribute('bubus.bus_name', eventbus.label) + self._handler_spans[key] = span + return + if status != EventStatus.COMPLETED: + return + span = self._handler_spans.pop(key, None) + if span is None: + return + error = event_result.error + if error is not None: + span.record_exception(error) + if self._status_cls and self._status_code and hasattr(span, 'set_status'): + span.set_status(self._status_cls(self._status_code.ERROR, str(error))) + span.end() + + +class BusHandlerRegisteredEvent(BaseEvent): + """Auto event emitted when a handler is added with EventBus.on().""" + + handler: EventHandler + + +class BusHandlerUnregisteredEvent(BaseEvent): + """Auto event emitted when a handler is removed with EventBus.off().""" + + handler: EventHandler + + +class AutoErrorEvent(BaseEvent): + """Auto event payload used by AutoErrorEventMiddleware.""" + + error: Any + error_type: str + + +class AutoReturnEvent(BaseEvent): + """Auto event payload used by AutoReturnEventMiddleware.""" + + data: Any + + +class AutoErrorEventMiddleware(EventBusMiddleware): + """Use in 
`EventBus(middlewares=[...])` to emit `{OriginalEventType}ErrorEvent` on handler failures.""" + + async def on_event_result_change( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + status: EventStatus, + ) -> None: + if status != EventStatus.COMPLETED or event_result.error is None or event.event_type.endswith(_SYNTHETIC_EVENT_SUFFIXES): + return + try: + eventbus.emit( + AutoErrorEvent( + event_type=f'{event.event_type}ErrorEvent', + error=event_result.error, + error_type=type(event_result.error).__name__, + ) + ) + except Exception as exc: # pragma: no cover + logger.exception('❌ %s Failed to emit auto error event for %s: %s', eventbus, event.event_id, exc) + + +class AutoReturnEventMiddleware(EventBusMiddleware): + """Use in `EventBus(middlewares=[...])` to emit `{OriginalEventType}ResultEvent` for non-None returns.""" + + async def on_event_result_change( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + status: EventStatus, + ) -> None: + result_value = event_result.result + if ( + status != EventStatus.COMPLETED + or event_result.error is not None + or result_value is None + or isinstance(result_value, BaseEvent) + or event.event_type.endswith(_SYNTHETIC_EVENT_SUFFIXES) + ): + return + try: + eventbus.emit(AutoReturnEvent(event_type=f'{event.event_type}ResultEvent', data=result_value)) + except Exception as exc: # pragma: no cover + logger.exception('❌ %s Failed to emit auto result event for %s: %s', eventbus, event.event_id, exc) + + +class AutoHandlerChangeEventMiddleware(EventBusMiddleware): + """Use in `EventBus(middlewares=[...])` to emit handler metadata events on .on() and .off().""" + + async def on_bus_handlers_change(self, eventbus: EventBus, handler: EventHandler, registered: bool) -> None: + try: + handler_snapshot = handler.model_copy(deep=False) + if registered: + eventbus.emit(BusHandlerRegisteredEvent(handler=handler_snapshot)) + else: + 
eventbus.emit(BusHandlerUnregisteredEvent(handler=handler_snapshot)) + except Exception as exc: # pragma: no cover + logger.exception( + '❌ %s Failed to emit auto handler change event for handler %s: %s(%r)', + eventbus, + handler.id, + type(exc).__name__, + exc, + ) + + +class WALEventBusMiddleware(EventBusMiddleware): + """Persist completed events to a JSONL write-ahead log.""" + + def __init__(self, wal_path: Path | str): + self.wal_path = Path(wal_path) + self.wal_path.parent.mkdir(parents=True, exist_ok=True) + self._lock = threading.Lock() + + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + if status != EventStatus.COMPLETED: + return + try: + event_json = event.model_dump_json() + await asyncio.to_thread(self._write_line, event_json + '\n') + except Exception as exc: # pragma: no cover + logger.exception('❌ %s Failed to save event %s to WAL: %s', eventbus, event.event_id, exc) + + def _write_line(self, line: str) -> None: + with self._lock: + with self.wal_path.open('a', encoding='utf-8') as fp: + fp.write(line) + + +class LoggerEventBusMiddleware(EventBusMiddleware): + """Log completed events to stdout and optionally to a file.""" + + def __init__(self, log_path: Path | str | None = None): + self.log_path = Path(log_path) if log_path is not None else None + if self.log_path is not None: + self.log_path.parent.mkdir(parents=True, exist_ok=True) + + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + if status != EventStatus.COMPLETED: + return + + summary = event.event_log_safe_summary() + logger.info('βœ… %s completed event %s', eventbus, summary) + line = f'[{eventbus.label}] {summary}\n' + + if self.log_path is not None: + await asyncio.to_thread(self._write_line, line) + print(line.rstrip('\n'), flush=True) + + if logger.isEnabledFor(logging.DEBUG): + log_eventbus_tree(eventbus) + + def _write_line(self, line: str) -> None: + assert 
self.log_path is not None + with self.log_path.open('a', encoding='utf-8') as fp: + fp.write(line) + + +class SQLiteHistoryMirrorMiddleware(EventBusMiddleware): + """Mirror event and handler snapshots into append-only SQLite tables.""" + + def __init__(self, db_path: Path | str): + self.db_path = Path(db_path) + self.db_path.parent.mkdir(parents=True, exist_ok=True) + + self._lock = threading.RLock() + self._conn = sqlite3.connect(self.db_path, check_same_thread=False, isolation_level=None) + self._closed = False + self._init_db() + + def close(self) -> None: + """Close the SQLite connection; safe to call multiple times.""" + with self._lock: + if self._closed: + return + self._conn.close() + self._closed = True + + def __enter__(self) -> Self: + return self + + def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> bool: + self.close() + return False + + def __del__(self): + try: + self.close() + except Exception: + pass + + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + event_json = event.model_dump_json() + await asyncio.to_thread( + self._insert_event_snapshot, + eventbus, + event.event_id, + event.event_type, + str(event.event_status), + str(status), + event_json, + ) + + async def on_event_result_change( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + status: EventStatus, + ) -> None: + error_repr = repr(event_result.error) if event_result.error is not None else None + result_repr: str | None = None + if event_result.result is not None and event_result.error is None: + try: + result_repr = repr(event_result.result) + except Exception: + result_repr = '' + + try: + event_result_json = event_result.model_dump_json() + except Exception: + event_result_json = None + + await asyncio.to_thread( + self._insert_event_result_snapshot, + event_result.id, + event_result.event_id, + event_result.handler_id, + event_result.handler_name, + eventbus.id, + eventbus.label, 
+ event.event_type, + event_result.status, + str(status), + result_repr, + error_repr, + event_result_json, + ) + + def _init_db(self) -> None: + with self._lock: + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS events_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_id TEXT NOT NULL, + event_type TEXT NOT NULL, + event_status TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + phase TEXT, + event_json TEXT NOT NULL, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS event_results_log ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + event_result_id TEXT NOT NULL, + event_id TEXT NOT NULL, + handler_id TEXT NOT NULL, + handler_name TEXT NOT NULL, + eventbus_id TEXT NOT NULL, + eventbus_name TEXT NOT NULL, + event_type TEXT NOT NULL, + status TEXT NOT NULL, + phase TEXT, + result_repr TEXT, + error_repr TEXT, + event_result_json TEXT, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ + ) + self._conn.execute('PRAGMA journal_mode=WAL') + self._conn.execute('PRAGMA synchronous=NORMAL') + + def _insert_event_snapshot( + self, + eventbus: EventBus, + event_id: str, + event_type: str, + event_status: str, + phase: str | None, + event_json: str, + ) -> None: + with self._lock: + self._conn.execute( + """ + INSERT INTO events_log ( + event_id, + event_type, + event_status, + eventbus_id, + eventbus_name, + phase, + event_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?) 
+ """, + ( + event_id, + event_type, + event_status, + eventbus.id, + eventbus.label, + phase, + event_json, + ), + ) + self._conn.commit() + + def _insert_event_result_snapshot( + self, + event_result_id: str, + event_id: str, + handler_id: str, + handler_name: str, + eventbus_id: str, + eventbus_name: str, + event_type: str, + status: str, + phase: str | None, + result_repr: str | None, + error_repr: str | None, + event_result_json: str | None, + ) -> None: + with self._lock: + self._conn.execute( + """ + INSERT INTO event_results_log ( + event_result_id, + event_id, + handler_id, + handler_name, + eventbus_id, + eventbus_name, + event_type, + status, + phase, + result_repr, + error_repr, + event_result_json + ) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, + ( + event_result_id, + event_id, + handler_id, + handler_name, + eventbus_id, + eventbus_name, + event_type, + status, + phase, + result_repr, + error_repr, + event_result_json, + ), + ) + self._conn.commit() diff --git a/bubus/models.py b/bubus/models.py deleted file mode 100644 index 4079e49..0000000 --- a/bubus/models.py +++ /dev/null @@ -1,972 +0,0 @@ -import asyncio -import inspect -import logging -import os -from collections.abc import Awaitable, Callable, Generator -from datetime import UTC, datetime -from typing import TYPE_CHECKING, Annotated, Any, ClassVar, Generic, Literal, Protocol, Self, TypeAlias, cast, runtime_checkable -from uuid import UUID - -from pydantic import ( - AfterValidator, - BaseModel, - ConfigDict, - Field, - PrivateAttr, - TypeAdapter, - field_serializer, - model_validator, -) -from typing_extensions import TypeVar # needed to get TypeVar(default=...) 
above python 3.11 -from uuid_extensions import uuid7str - -if TYPE_CHECKING: - from bubus.service import EventBus - - -logger = logging.getLogger('bubus') - -BUBUS_LOGGING_LEVEL = os.getenv('BUBUS_LOGGING_LEVEL', 'WARNING').upper() # WARNING normally, otherwise DEBUG when testing -LIBRARY_VERSION = os.getenv('LIBRARY_VERSION', '1.0.0') - -logger.setLevel(BUBUS_LOGGING_LEVEL) - - -def validate_event_name(s: str) -> str: - assert str(s).isidentifier() and not str(s).startswith('_'), f'Invalid event name: {s}' - return str(s) - - -def validate_python_id_str(s: str) -> str: - assert str(s).replace('.', '').isdigit(), f'Invalid Python ID: {s}' - return str(s) - - -def validate_uuid_str(s: str) -> str: - uuid = UUID(str(s)) - return str(uuid) - - -UUIDStr: TypeAlias = Annotated[str, AfterValidator(validate_uuid_str)] -PythonIdStr: TypeAlias = Annotated[str, AfterValidator(validate_python_id_str)] -PythonIdentifierStr: TypeAlias = Annotated[str, AfterValidator(validate_event_name)] -T_EventResultType = TypeVar('T_EventResultType', bound=Any, default=None) -# TypeVar for BaseEvent and its subclasses -# We use contravariant=True because if a handler accepts BaseEvent, -# it can also handle any subclass of BaseEvent -T_Event = TypeVar('T_Event', bound='BaseEvent[Any]', contravariant=True, default='BaseEvent[Any]') - -# For protocols with __func__ attributes, we need an invariant TypeVar -T_EventInvariant = TypeVar('T_EventInvariant', bound='BaseEvent[Any]', default='BaseEvent[Any]') - -# For handlers, we need to be flexible about the signature since: -# 1. Functions take just the event: handler(event) -# 2. Methods take self + event: handler(self, event) -# 3. Classmethods take cls + event: handler(cls, event) -# 4. 
Handlers can accept BaseEvent subclasses (contravariance) -# -# Python's type system doesn't handle this well, so we define specific protocols - - -@runtime_checkable -class EventHandlerFunc(Protocol[T_Event]): - """Protocol for sync event handler functions""" - - def __call__(self, event: T_Event, /) -> Any: ... - - -@runtime_checkable -class AsyncEventHandlerFunc(Protocol[T_Event]): - """Protocol for async event handler functions""" - - async def __call__(self, event: T_Event, /) -> Any: ... - - -@runtime_checkable -class EventHandlerMethod(Protocol[T_Event]): - """Protocol for instance method event handlers""" - - def __call__(self, self_: Any, event: T_Event, /) -> Any: ... - - __self__: Any - __name__: str - - -@runtime_checkable -class AsyncEventHandlerMethod(Protocol[T_Event]): - """Protocol for async instance method event handlers""" - - async def __call__(self, self_: Any, event: T_Event, /) -> Any: ... - - __self__: Any - __name__: str - - -@runtime_checkable -class EventHandlerClassMethod(Protocol[T_EventInvariant]): - """Protocol for class method event handlers""" - - def __call__(self, cls: type[Any], event: T_EventInvariant, /) -> Any: ... - - __self__: type[Any] - __name__: str - __func__: Callable[[type[Any], T_EventInvariant], Any] - - -@runtime_checkable -class AsyncEventHandlerClassMethod(Protocol[T_EventInvariant]): - """Protocol for async class method event handlers""" - - async def __call__(self, cls: type[Any], event: T_EventInvariant, /) -> Any: ... 
- - __self__: type[Any] - __name__: str - __func__: Callable[[type[Any], T_EventInvariant], Awaitable[Any]] - - -# Event handlers can be sync/async functions, methods, class methods, or coroutines -# The protocols are parameterized with BaseEvent but due to contravariance, -# they also accept handlers that take any BaseEvent subclass -EventHandler: TypeAlias = ( - EventHandlerFunc['BaseEvent[Any]'] - | AsyncEventHandlerFunc['BaseEvent[Any]'] - | EventHandlerMethod['BaseEvent[Any]'] - | AsyncEventHandlerMethod['BaseEvent[Any]'] - | EventHandlerClassMethod['BaseEvent[Any]'] - | AsyncEventHandlerClassMethod['BaseEvent[Any]'] - # | Callable[['BaseEvent'], Any] # Simple sync callable - # | Callable[['BaseEvent'], Awaitable[Any]] # Simple async callable - # | Coroutine[Any, Any, Any] # Direct coroutine -) - -# ContravariantEventHandler is needed to allow handlers to accept any BaseEvent subclass in some signatures -ContravariantEventHandler: TypeAlias = ( - EventHandlerFunc[T_Event] # cannot be BaseEvent or type checker will complain - | AsyncEventHandlerFunc['BaseEvent[Any]'] - | EventHandlerMethod['BaseEvent[Any]'] - | AsyncEventHandlerMethod[T_Event] # cannot be 'BaseEvent' or type checker will complain - | EventHandlerClassMethod['BaseEvent[Any]'] - | AsyncEventHandlerClassMethod['BaseEvent[Any]'] -) - -EventResultFilter = Callable[['EventResult[Any]'], bool] - - -def get_handler_name(handler: ContravariantEventHandler[T_Event]) -> str: - assert hasattr(handler, '__name__'), f'Handler {handler} has no __name__ attribute!' 
- if inspect.ismethod(handler): - return f'{type(handler.__self__).__name__}.{handler.__name__}' - elif callable(handler): - return f'{handler.__module__}.{handler.__name__}' # type: ignore - else: - raise ValueError(f'Invalid handler: {handler} {type(handler)}, expected a function, coroutine, or method') - - -def get_handler_id(handler: EventHandler, eventbus: Any = None) -> str: - """Generate a unique handler ID based on the bus and handler instance.""" - if eventbus is None: - return str(id(handler)) - return f'{id(eventbus)}.{id(handler)}' - - -def _extract_basemodel_generic_arg(cls: type) -> Any: - """ - Extract T_EventResultType Generic arg from BaseModel[T_EventResultType] subclasses using pydantic generic metadata. - Needed because pydantic messes with the mro and obscures the Generic from the bases list. - https://github.com/pydantic/pydantic/issues/8410 - """ - # Direct check first for speed - most subclasses will have it directly - if hasattr(cls, '__pydantic_generic_metadata__'): - metadata: dict[str, Any] = cls.__pydantic_generic_metadata__ # type: ignore - origin = metadata.get('origin') # type: ignore - args: tuple[Any, ...] = metadata.get('args') # type: ignore - if origin is BaseEvent and args and len(args) > 0: # type: ignore - return args[0] - - # Only check MRO if direct check failed - # Skip first element (cls itself) since we already checked it - for parent in cls.__mro__[1:]: - if hasattr(parent, '__pydantic_generic_metadata__'): - metadata = parent.__pydantic_generic_metadata__ # type: ignore - # Check if this is a parameterized BaseEvent - origin = metadata.get('origin') # type: ignore - args: tuple[Any, ...] = metadata.get('args') # type: ignore - if origin is BaseEvent and args and len(args) > 0: # type: ignore - return args[0] - - return None - - -class BaseEvent(BaseModel, Generic[T_EventResultType]): - """ - The base model used for all Events that flow through the EventBus system. 
- """ - - model_config = ConfigDict( - extra='allow', - arbitrary_types_allowed=True, - validate_assignment=True, - validate_default=True, - revalidate_instances='always', - ) - - # Class-level cache for auto-extracted event_result_type - _event_result_type_cache: ClassVar[Any | None] = None - - event_type: PythonIdentifierStr = Field(default='UndefinedEvent', description='Event type name', max_length=64) - event_schema: str = Field( - default=f'UndefinedEvent@{LIBRARY_VERSION}', - description='Event schema version in format ClassName@version', - max_length=250, - ) # long because it can include long function names / module paths - event_timeout: float | None = Field(default=300.0, description='Timeout in seconds for event to finish processing') - event_result_type: Any = Field( - default=None, description='Type to cast/validate handler return values (e.g. int, str, bytes, BaseModel subclass)' - ) - - @field_serializer('event_result_type') - def event_result_type_serializer(self, value: Any) -> str | None: - """Serialize event_result_type to a string representation""" - if value is None: - return None - # Use str() to get full representation: 'int', 'str', 'list[int]', etc. 
- return str(value) - - # Runtime metadata - event_id: UUIDStr = Field(default_factory=uuid7str, max_length=36) - event_path: list[PythonIdentifierStr] = Field(default_factory=list, description='Path tracking for event routing') - event_parent_id: UUIDStr | None = Field( - default=None, description='ID of the parent event that triggered this event', max_length=36 - ) - - # Completion tracking fields - event_created_at: datetime = Field( - default_factory=lambda: datetime.now(UTC), - description='Timestamp when event was first dispatched to an EventBus aka marked pending', - ) - event_processed_at: datetime | None = Field( - default=None, - description='Timestamp when event was first processed by any handler', - ) - - event_results: dict[PythonIdStr, 'EventResult[T_EventResultType]'] = Field( - default_factory=dict, exclude=True - ) # Results indexed by str(id(handler_func)) - - # Completion signal - _event_completed_signal: asyncio.Event | None = PrivateAttr(default=None) - - def __hash__(self) -> int: - """Make events hashable using their unique event_id""" - return hash(self.event_id) - - def __str__(self) -> str: - """BaseEvent#ab12⏳""" - icon = ( - '⏳' - if self.event_status == 'pending' - else 'βœ…' - if self.event_status == 'completed' - else '❌' - if self.event_status == 'error' - else 'πŸƒ' - ) - # AuthBus≫DataBusβ–Ά AuthLoginEvent#ab12 ⏳ - return f'{"≫".join(self.event_path[1:] or "?")}β–Ά {self.event_type}#{self.event_id[-4:]} {icon}' - - def __await__(self) -> Generator[Self, Any, Any]: - """Wait for event to complete and return self""" - - # long descriptive name here really helps make traceback easier to follow - async def wait_for_handlers_to_complete_then_return_event(): - assert self.event_completed_signal is not None - - # If we're inside a handler and this event isn't complete yet, - # we need to process it immediately to avoid deadlock - from bubus.service import EventBus, holds_global_lock, inside_handler_context - - if not 
self.event_completed_signal.is_set() and inside_handler_context.get() and holds_global_lock.get(): - # We're inside a handler and hold the global lock - # Process events until this one completes - - # logger.debug(f'__await__ for {self} - inside handler context, processing child events') - - # Keep processing events from all buses until this event is complete - max_iterations = 1000 # Prevent infinite loops - iterations = 0 - - try: - while not self.event_completed_signal.is_set() and iterations < max_iterations: - iterations += 1 - processed_any = False - - # Process any queued events on all buses - # Create a list copy to avoid "Set changed size during iteration" error - for bus in list(EventBus.all_instances): - if not bus or not bus.event_queue: - continue - - # Process one event from this bus if available - try: - if bus.event_queue.qsize() > 0: - event = bus.event_queue.get_nowait() - await bus.process_event(event) - bus.event_queue.task_done() - processed_any = True - # Check if the event we're waiting for is now complete - if self.event_completed_signal.is_set(): - break - except asyncio.QueueEmpty: - pass - - # Break out of the loop if event completed after processing - if self.event_completed_signal.is_set(): - break - - if not processed_any: - # No events to process, yield control and check for cancellation - try: - await asyncio.sleep(0) - except asyncio.CancelledError: - raise - except asyncio.CancelledError: - # Handler was cancelled due to timeout, exit cleanly - logger.debug(f'Polling loop cancelled for {self}') - raise - - if iterations >= max_iterations: - # logger.error(f'Max iterations reached while waiting for {self}') - pass - else: - # Not in handler context - wait for the event to complete normally - await self.event_completed_signal.wait() - - # Check if any handlers had errors and raise the first one - # for result in self.event_results.values(): - # if result.error: - # raise result.error - - # Return the completed event without raising 
errors - # Errors should only be raised when explicitly requested via event_result() methods - return self - - return wait_for_handlers_to_complete_then_return_event().__await__() - - @model_validator(mode='before') - @classmethod - def _set_event_type_from_class_name(cls, data: dict[str, Any]) -> dict[str, Any]: - """Automatically set event_type to the class name if not provided""" - is_class_default_unchanged = cls.model_fields['event_type'].default == 'UndefinedEvent' - is_event_type_not_provided = 'event_type' not in data or data['event_type'] == 'UndefinedEvent' - if is_class_default_unchanged and is_event_type_not_provided: - data['event_type'] = cls.__name__ - return data - - @model_validator(mode='before') - @classmethod - def _set_event_schema_from_class_name(cls, data: dict[str, Any]) -> dict[str, Any]: - """Append the library version number to the event schema so we know what version was used to create any JSON dump""" - is_class_default_unchanged = cls.model_fields['event_schema'].default == f'UndefinedEvent@{LIBRARY_VERSION}' - is_event_schema_not_provided = 'event_schema' not in data or data['event_schema'] == f'UndefinedEvent@{LIBRARY_VERSION}' - if is_class_default_unchanged and is_event_schema_not_provided: - data['event_schema'] = f'{cls.__module__}.{cls.__qualname__}@{LIBRARY_VERSION}' - return data - - @model_validator(mode='before') - @classmethod - def _set_event_result_type_from_generic_arg(cls, data: dict[str, Any]) -> dict[str, Any]: - """Automatically set event_result_type from Generic type parameter if not explicitly provided.""" - if not isinstance(data, dict): # type: ignore - return data - - # Fast path: if event_result_type is already in the data, skip all checks - if 'event_result_type' in data: - return data - - # Check if class explicitly defines event_result_type in model_fields - # This handles cases where user explicitly sets event_result_type in class definition - if 'event_result_type' in cls.model_fields: - field = 
cls.model_fields['event_result_type'] - if field.default is not None and field.default != BaseEvent.model_fields['event_result_type'].default: - # Explicitly set, use the default value - data['event_result_type'] = field.default - return data - - # Fast path: check if class has cached the result type - if cls._event_result_type_cache is not None: - data['event_result_type'] = cls._event_result_type_cache - return data - - # Extract the generic type from BaseEvent[T] - extracted_type = _extract_basemodel_generic_arg(cls) - - # Cache the result on the class - cls._event_result_type_cache = extracted_type - - # Set the type if we successfully resolved it - if extracted_type is not None: - data['event_result_type'] = extracted_type - - return cast(dict[str, Any], data) # type: ignore - - @property - def event_completed_signal(self) -> asyncio.Event | None: - """Lazily create asyncio.Event when accessed""" - if self._event_completed_signal is None: - try: - asyncio.get_running_loop() - self._event_completed_signal = asyncio.Event() - except RuntimeError: - pass # Keep it None if no event loop - return self._event_completed_signal - - @property - def event_status(self) -> str: - return 'completed' if self.event_completed_at else 'started' if self.event_started_at else 'pending' - - @property - def event_children(self) -> list['BaseEvent[Any]']: - """Get all child events dispatched from within this event's handlers""" - children: list[BaseEvent[Any]] = [] - for event_result in self.event_results.values(): - children.extend(event_result.event_children) - return children - - @property - def event_started_at(self) -> datetime | None: - """Timestamp when event first started being processed by any handler""" - started_times = [result.started_at for result in self.event_results.values() if result.started_at is not None] - # If no handlers but event was processed, use the processed timestamp - if not started_times and self.event_processed_at: - return self.event_processed_at - 
return min(started_times) if started_times else None - - @property - def event_completed_at(self) -> datetime | None: - """Timestamp when event was completed by all handlers""" - # If no handlers at all but event was processed, use the processed timestamp - if not self.event_results and self.event_processed_at: - return self.event_processed_at - - # All handlers must be done (completed or error) - all_done = all(result.status in ('completed', 'error') for result in self.event_results.values()) - if not all_done: - return None - - # Return the latest completion time - completed_times = [result.completed_at for result in self.event_results.values() if result.completed_at is not None] - return max(completed_times) if completed_times else self.event_processed_at - - @staticmethod - def _event_result_is_truthy(event_result: 'EventResult[T_EventResultType]') -> bool: - if event_result.status != 'completed': - return False - if event_result.result is None: - return False - if isinstance(event_result.result, BaseException) or event_result.error: - return False - if isinstance( - event_result.result, BaseEvent - ): # omit if result is a BaseEvent, it's a forwarded event not an actual return value - return False - return True - - async def event_results_filtered( - self, - timeout: float | None = None, - include: EventResultFilter = _event_result_is_truthy, - raise_if_any: bool = True, - raise_if_none: bool = True, - ) -> 'dict[PythonIdStr, EventResult[T_EventResultType]]': - """Get all results filtered by the include function""" - - # wait for all handlers to finish processing - assert self.event_completed_signal is not None, 'EventResult cannot be awaited outside of an async context' - await asyncio.wait_for(self.event_completed_signal.wait(), timeout=timeout or self.event_timeout) - - # Wait for each result to complete, but don't raise errors yet - for event_result in self.event_results.values(): - try: - await event_result - except Exception: - # Ignore exceptions here - 
we'll handle them based on raise_if_any below - pass - - event_results: dict[PythonIdStr, EventResult[T_EventResultType]] = { - handler_key: event_result for handler_key, event_result in self.event_results.items() - } - included_results: dict[PythonIdStr, EventResult[T_EventResultType]] = { - handler_key: event_result for handler_key, event_result in event_results.items() if include(event_result) - } - error_results: dict[PythonIdStr, EventResult[T_EventResultType]] = { - handler_key: event_result - for handler_key, event_result in event_results.items() - if event_result.error or isinstance(event_result.result, BaseException) - } - - if raise_if_any and error_results: - failing_handler, failing_result = list(error_results.items())[0] # throw first error - original_error = failing_result.error or cast(Any, failing_result.result) - - # Log the handler context information instead of wrapping the exception - logger.debug(f'Event handler {failing_handler}({self}) returned an error -> {original_error}') - - # Re-raise the original exception to preserve its type and structured data - if isinstance(original_error, BaseException): - raise original_error - else: - # Fallback for non-exception errors (shouldn't happen in practice) - raise Exception(str(original_error)) - - if raise_if_none and not included_results: - raise ValueError( - f'Expected at least one handler to return a non-None result, but none did! 
{self} -> {self.event_results}' - ) - - event_results_by_handler_id: dict[PythonIdStr, EventResult[T_EventResultType]] = { - handler_key: result for handler_key, result in included_results.items() - } - for event_result in event_results_by_handler_id.values(): - assert event_result.result is not None, f'EventResult {event_result} has no result' - - return event_results_by_handler_id - - async def event_results_by_handler_id( - self, - timeout: float | None = None, - include: EventResultFilter = _event_result_is_truthy, - raise_if_any: bool = True, - raise_if_none: bool = True, - ) -> dict[PythonIdStr, T_EventResultType | None]: - """Get all raw result values organized by handler id {handler1_id: handler1_result, handler2_id: handler2_result, ...}""" - included_results = await self.event_results_filtered( - timeout=timeout, include=include, raise_if_any=raise_if_any, raise_if_none=raise_if_none - ) - return { - handler_id: cast(T_EventResultType | None, event_result.result) - for handler_id, event_result in included_results.items() - } - - async def event_results_by_handler_name( - self, - timeout: float | None = None, - include: EventResultFilter = _event_result_is_truthy, - raise_if_any: bool = True, - raise_if_none: bool = True, - ) -> dict[PythonIdentifierStr, T_EventResultType | None]: - """Get all raw result values organized by handler name {handler1_name: handler1_result, handler2_name: handler2_result, ...}""" - included_results = await self.event_results_filtered( - timeout=timeout, include=include, raise_if_any=raise_if_any, raise_if_none=raise_if_none - ) - return { - event_result.handler_name: cast(T_EventResultType | None, event_result.result) - for event_result in included_results.values() - } - - async def event_result( - self, - timeout: float | None = None, - include: EventResultFilter = _event_result_is_truthy, - raise_if_any: bool = True, - raise_if_none: bool = True, - ) -> T_EventResultType | None: - """Get the first non-None result from the 
event handlers""" - valid_results = await self.event_results_filtered( - timeout=timeout, include=include, raise_if_any=raise_if_any, raise_if_none=raise_if_none - ) - results = list(valid_results.values()) - return cast(T_EventResultType | None, results[0].result) if results else None - - async def event_results_list( - self, - timeout: float | None = None, - include: EventResultFilter = _event_result_is_truthy, - raise_if_any: bool = True, - raise_if_none: bool = True, - ) -> list[T_EventResultType | None]: - """Get all result values in a list [handler1_result, handler2_result, ...]""" - valid_results = await self.event_results_filtered( - timeout=timeout, include=include, raise_if_any=raise_if_any, raise_if_none=raise_if_none - ) - return [cast(T_EventResultType | None, event_result.result) for event_result in valid_results.values()] - - async def event_results_flat_dict( - self, - timeout: float | None = None, - include: EventResultFilter = _event_result_is_truthy, - raise_if_any: bool = True, - raise_if_none: bool = False, - raise_if_conflicts: bool = True, - ) -> dict[str, Any]: - """Assuming all handlers return dicts, merge all the returned dicts into a single flat dict {**handler1_result, **handler2_result, ...}""" - - valid_results = await self.event_results_filtered( - timeout=timeout, - include=lambda event_result: isinstance(event_result.result, dict) and include(event_result), - raise_if_any=raise_if_any, - raise_if_none=raise_if_none, - ) - - merged_results: dict[str, Any] = {} - for event_result in valid_results.values(): - if not event_result.result: - continue - - # check for event results trampling each other / conflicting - overlapping_keys: set[str] = merged_results.keys() & event_result.result.keys() # type: ignore - if raise_if_conflicts and overlapping_keys: # type: ignore - raise ValueError( - f'Event handler {event_result.handler_name} returned a dict with keys that would overwrite values from previous handlers: {overlapping_keys} (pass 
raise_if_conflicts=False to merge with last-handler-wins)' - ) # type: ignore - - merged_results.update( - event_result.result # pyright: ignore[reportUnknownArgumentType, reportUnknownMemberType] - ) # update the merged dict with the contents of the result dict - return merged_results - - async def event_results_flat_list( - self, - timeout: float | None = None, - include: EventResultFilter = _event_result_is_truthy, - raise_if_any: bool = True, - raise_if_none: bool = True, - ) -> list[Any]: - """Assuming all handlers return lists, merge all the returned lists into a single flat list [*handler1_result, *handler2_result, ...]""" - valid_results = await self.event_results_filtered( - timeout=timeout, - include=lambda event_result: isinstance(event_result.result, list) and include(event_result), - raise_if_any=raise_if_any, - raise_if_none=raise_if_none, - ) - merged_results: list[T_EventResultType | None] = [] - for event_result in valid_results.values(): - merged_results.extend( - cast(list[T_EventResultType | None], event_result.result) - ) # append the contents of the list to the merged list - return merged_results - - def event_result_update( - self, handler: EventHandler, eventbus: 'EventBus | None' = None, **kwargs: Any - ) -> 'EventResult[T_EventResultType]': - """Create or update an EventResult for a handler""" - - from bubus.service import EventBus - - assert eventbus is None or isinstance(eventbus, EventBus) - if eventbus is None and handler and inspect.ismethod(handler) and isinstance(handler.__self__, EventBus): - eventbus = handler.__self__ - - handler_name: str = get_handler_name(handler) if handler else 'unknown_handler' - eventbus_id: PythonIdStr = str(id(eventbus) if eventbus is not None else '000000000000') - eventbus_name: PythonIdentifierStr = str(eventbus and eventbus.name or 'EventBus') - - # Use bus+handler combination for unique ID - handler_id: PythonIdStr = get_handler_id(handler, eventbus) - - # Get or create EventResult - if handler_id 
not in self.event_results: - self.event_results[handler_id] = cast( - EventResult[T_EventResultType], - EventResult( - event_id=self.event_id, - handler_id=handler_id, - handler_name=handler_name, - eventbus_id=eventbus_id, - eventbus_name=eventbus_name, - status=kwargs.get('status', 'pending'), - timeout=self.event_timeout, - result_type=self.event_result_type, - ), - ) - # logger.debug(f'Created EventResult for handler {handler_id}: {handler and get_handler_name(handler)}') - - # Update the EventResult with provided kwargs - self.event_results[handler_id].update(**kwargs) - # logger.debug( - # f'Updated EventResult for handler {handler_id}: status={self.event_results[handler_id].status}, total_results={len(self.event_results)}' - # ) - # Don't mark complete here - let the EventBus do it after all handlers are done - return self.event_results[handler_id] - - def event_mark_complete_if_all_handlers_completed(self) -> None: - """Check if all handlers are done and signal completion""" - if self.event_completed_signal and not self.event_completed_signal.is_set(): - # If there are no results at all, the event is complete - if not self.event_results: - if hasattr(self, 'event_processed_at'): - self.event_processed_at = datetime.now(UTC) - self.event_completed_signal.set() - return - - # Check if all handler results are done - all_handlers_done = all(result.status in ('completed', 'error') for result in self.event_results.values()) - if not all_handlers_done: - # logger.debug( - # f'Event {self} not complete - waiting for handlers: {[r for r in self.event_results.values() if r.status not in ("completed", "error")]}' - # ) - return - - # Recursively check if all child events are also complete - if not self.event_are_all_children_complete(): - # incomplete_children = [c for c in self.event_children if c.event_status != 'completed'] - # logger.debug( - # f'Event {self} not complete - waiting for {len(incomplete_children)} child events: {incomplete_children}' - # ) - return 
- - # All handlers and all child events are done - if hasattr(self, 'event_processed_at'): - self.event_processed_at = datetime.now(UTC) - # logger.debug(f'Event {self} marking complete - all handlers and children done') - self.event_completed_signal.set() - - def event_are_all_children_complete(self, _visited: set[str] | None = None) -> bool: - """Recursively check if all child events and their descendants are complete""" - if _visited is None: - _visited = set() - - # Prevent infinite recursion on circular references - if self.event_id in _visited: - return True - _visited.add(self.event_id) - - for child_event in self.event_children: - if child_event.event_status != 'completed': - logger.debug(f'Event {self} has incomplete child {child_event}') - return False - # Recursively check child's children - if not child_event.event_are_all_children_complete(_visited): - return False - return True - - def event_cancel_pending_child_processing(self, error: BaseException) -> None: - """Cancel any pending child events that were dispatched during handler execution""" - if not isinstance(error, asyncio.CancelledError): - error = asyncio.CancelledError( - f'Cancelled pending handler as a result of parent error {error}' - ) # keep the word "pending" in the error, checked by print_handler_line() - for child_event in self.event_children: - for result in child_event.event_results.values(): - if result.status == 'pending': - # print('CANCELLING CHILD HANDLER', result, 'due to', error) - result.update(error=error) - child_event.event_cancel_pending_child_processing(error) - - def event_log_safe_summary(self) -> dict[str, Any]: - """only event metadata without contents, avoid potentially sensitive event contents in logs""" - return {k: v for k, v in self.model_dump(mode='json').items() if k.startswith('event_') and 'results' not in k} - - def event_log_tree( - self, - indent: str = '', - is_last: bool = True, - child_events_by_parent: 'dict[str | None, list[BaseEvent[Any]]] | None' = 
None, - ) -> None: - """Print this event and its results with proper tree formatting""" - from bubus.logging import log_event_tree - - log_event_tree(self, indent, is_last, child_events_by_parent) - - @property - def event_bus(self) -> 'EventBus': - """Get the EventBus that is currently processing this event""" - from bubus.service import EventBus, inside_handler_context - - if not inside_handler_context.get(): - raise AttributeError('event_bus property can only be accessed from within an event handler') - - # The event_path contains all buses this event has passed through - # The last one in the path is the one currently processing - if not self.event_path: - raise RuntimeError('Event has no event_path - was it dispatched?') - - current_bus_name = self.event_path[-1] - - # Find the bus by name - # Create a list copy to avoid "Set changed size during iteration" error - for bus in list(EventBus.all_instances): - if bus and hasattr(bus, 'name') and bus.name == current_bus_name: - return bus - - raise RuntimeError(f'Could not find active EventBus named {current_bus_name}') - - -def attr_name_allowed(key: str) -> bool: - return key in pydantic_builtin_attrs or key in event_builtin_attrs or key.startswith('_') - - -# PSA: All BaseEvent buil-in attrs and methods must be prefixed with "event_" in order to avoid clashing with data contents (which share a namespace with the metadata) -# This is the same approach Pydantic uses for their special `model_*` attrs (and BaseEvent is also a pydantic model, so model_ prefixes are reserved too) -# resist the urge to nest the event data in an inner object unless absolutely necessary, flat simplifies most of the code and makes it easier to read JSON logs with less nesting -pydantic_builtin_attrs = dir(BaseModel) -event_builtin_attrs = {key for key in dir(BaseEvent) if key.startswith('event_')} -illegal_attrs = {key for key in dir(BaseEvent) if not attr_name_allowed(key)} -assert not illegal_attrs, ( - 'All BaseEvent attrs and methods 
must be prefixed with "event_" in order to avoid clashing ' - 'with BaseEvent subclass fields used to store event contents (which share a namespace with the event_ metadata). ' - f'not allowed: {illegal_attrs}' -) - - -class EventResult(BaseModel, Generic[T_EventResultType]): - """Individual result from a single handler""" - - model_config = ConfigDict( - extra='forbid', - arbitrary_types_allowed=True, - validate_assignment=False, # Disable to allow flexible result types - validation handled in update() - validate_default=True, - revalidate_instances='always', - ) - - # Automatically set fields, setup at Event init and updated by the EventBus.execute_handler() calling event_result.update(...) - id: UUIDStr = Field(default_factory=uuid7str) - status: Literal['pending', 'started', 'completed', 'error'] = 'pending' - event_id: UUIDStr - handler_id: PythonIdStr - handler_name: str - result_type: Any | type[T_EventResultType] | None = None - eventbus_id: PythonIdStr - eventbus_name: PythonIdentifierStr - timeout: float | None = None - started_at: datetime | None = None - - # Result fields, updated by the EventBus.execute_handler() calling event_result.update(...) 
- result: T_EventResultType | BaseEvent[Any] | None = None - error: BaseException | None = None - completed_at: datetime | None = None - - # Completion signal - _handler_completed_signal: asyncio.Event | None = PrivateAttr(default=None) - - # any child events that were emitted during handler execution are captured automatically and stored here to track hierarchy - # note about why this is BaseEvent[Any] instead of a more specific type: - # unfortunately we cant determine child event types statically / it's not worth it to force child event types to be defined at compile-time - # so we just allow handlers to emit any BaseEvent subclass/instances with any result types - # in theory it's possible to define the entire event tree hierarchy at compile-time with something like ParentEvent[ChildEvent[GrandchildEvent[FinalResultValueType]]], - # it's not worth the complexity headache it would incur on users of the library though, - # and it would significantly reduce runtime flexibility, e.g. you couldn't define and dispatch arbitrary server-provided event types at runtime - event_children: list['BaseEvent[Any]'] = Field(default_factory=list) # pyright: ignore[reportUnknownVariableType] - - @property - def handler_completed_signal(self) -> asyncio.Event | None: - """Lazily create asyncio.Event when accessed""" - if self._handler_completed_signal is None: - try: - asyncio.get_running_loop() - self._handler_completed_signal = asyncio.Event() - except RuntimeError: - pass # Keep it None if no event loop - return self._handler_completed_signal - - def __str__(self) -> str: - handler_qualname = f'{self.eventbus_name}.{self.handler_name}' - return f'{handler_qualname}() -> {self.result or self.error or "..."} ({self.status})' - - def __repr__(self) -> str: - icon = 'πŸƒ' if self.status == 'pending' else 'βœ…' if self.status == 'completed' else '❌' - return f'{self.handler_name}#{self.handler_id[-4:]}() {icon}' - - def __await__(self) -> Generator[Self, Any, T_EventResultType | 
BaseEvent[Any] | None]: - """ - Wait for this result to complete and return the result or raise error. - Does not execute the handler itself, only waits for it to be marked completed by the EventBus. - EventBus triggers handlers and calls event_result.update() to mark them as started or completed. - """ - - async def wait_for_handler_to_complete_and_return_result() -> T_EventResultType | BaseEvent[Any] | None: - assert self.handler_completed_signal is not None, 'EventResult cannot be awaited outside of an async context' - - try: - await asyncio.wait_for(self.handler_completed_signal.wait(), timeout=self.timeout) - except TimeoutError: - # self.handler_completed_signal.clear() - raise TimeoutError( - f'Event handler {self.eventbus_name}.{self.handler_name}(#{self.event_id[-4:]}) timed out after {self.timeout}s' - ) - - if self.status == 'error' and self.error: - raise self.error if isinstance(self.error, BaseException) else Exception(self.error) # pyright: ignore[reportUnnecessaryIsInstance] - - return self.result - - # do not re-raise exceptions here for now, just return the event in all cases and let the caller handle checking event.error or event.result - - return wait_for_handler_to_complete_and_return_result().__await__() - - def update(self, **kwargs: Any) -> Self: - """Update the EventResult with provided kwargs, called by EventBus during handler execution.""" - - # fix common mistake of returning an exception object instead of marking the event result as an error result - if 'result' in kwargs and isinstance(kwargs['result'], BaseException): - logger.warning( - f'β„Ή Event handler {self.handler_name} returned an exception object, auto-converting to EventResult(result=None, status="error", error={kwargs["result"]})' - ) - kwargs['error'] = kwargs['result'] - kwargs['status'] = 'error' - kwargs['result'] = None - - if 'result' in kwargs: - result: Any = kwargs['result'] - self.status = 'completed' - if self.result_type is not None and result is not None: - # 
Always allow BaseEvent results without validation - # This is needed for event forwarding patterns like bus1.on('*', bus2.dispatch) - if isinstance(result, BaseEvent): - self.result = cast(T_EventResultType, result) - else: - # cast the return value to the expected type using TypeAdapter - try: - if issubclass(self.result_type, BaseModel): - # if expected result type is a pydantic model, validate it with pydantic - validated_result = self.result_type.model_validate(result) - else: - # cast the return value to the expected type e.g. int(result) / str(result) / list(result) / etc. - ResultType = TypeAdapter(self.result_type) - validated_result = ResultType.validate_python(result) - - # Normal assignment works, make sure validate_assignment=False otherwise pydantic will attempt to re-validate it a second time - self.result = cast(T_EventResultType, validated_result) - - except Exception as cast_error: - self.error = ValueError( - f'Event handler returned a value that did not match expected event_result_type: {self.result_type.__name__}({result}) -> {type(cast_error).__name__}: {cast_error}' - ) - self.result = None - self.status = 'error' - else: - # No result_type specified or result is None - assign directly - self.result = cast(T_EventResultType, result) - - if 'error' in kwargs: - assert isinstance(kwargs['error'], (BaseException, str)), ( - f'Invalid error type: {type(kwargs["error"]).__name__} {kwargs["error"]}' - ) - self.error = kwargs['error'] if isinstance(kwargs['error'], BaseException) else Exception(kwargs['error']) # pyright: ignore[reportUnnecessaryIsInstance] - self.status = 'error' - - if 'status' in kwargs: - assert kwargs['status'] in ('pending', 'started', 'completed', 'error'), f'Invalid status: {kwargs["status"]}' - self.status = kwargs['status'] - - if self.status != 'pending' and not self.started_at: - self.started_at = datetime.now(UTC) - - if self.status in ('completed', 'error') and not self.completed_at: - self.completed_at = 
datetime.now(UTC) - if self.handler_completed_signal: - self.handler_completed_signal.set() - return self - - def log_tree( - self, indent: str = '', is_last: bool = True, child_events_by_parent: dict[str | None, list[BaseEvent[Any]]] | None = None - ) -> None: - """Print this result and its child events with proper tree formatting""" - from bubus.logging import log_eventresult_tree - - log_eventresult_tree(self, indent, is_last, child_events_by_parent) - - -# Resolve forward references -BaseEvent.model_rebuild() -EventResult.model_rebuild() diff --git a/bubus/package-lock.json b/bubus/package-lock.json deleted file mode 100644 index 0966feb..0000000 --- a/bubus/package-lock.json +++ /dev/null @@ -1,63 +0,0 @@ -{ - "name": "bubus", - "version": "0.1.0", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "bubus", - "version": "0.1.0", - "license": "MIT", - "dependencies": { - "uuidv7": "^1.0.0" - }, - "devDependencies": { - "@types/node": "^20.10.0", - "typescript": "^5.3.0" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/@types/node": { - "version": "20.19.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.1.tgz", - "integrity": "sha512-jJD50LtlD2dodAEO653i3YF04NWak6jN3ky+Ri3Em3mGR39/glWiboM/IePaRbgwSfqM1TpGXfAg8ohn/4dTgA==", - "dev": true, - "license": "MIT", - "dependencies": { - "undici-types": "~6.21.0" - } - }, - "node_modules/typescript": { - "version": "5.8.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", - "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici-types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": 
"sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/uuidv7": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/uuidv7/-/uuidv7-1.0.2.tgz", - "integrity": "sha512-8JQkH4ooXnm1JCIhqTMbtmdnYEn6oKukBxHn1Ic9878jMkL7daTI7anTExfY18VRCX7tcdn5quzvCb6EWrR8PA==", - "license": "Apache-2.0", - "bin": { - "uuidv7": "cli.js" - } - } - } -} diff --git a/bubus/retry.py b/bubus/retry.py new file mode 100644 index 0000000..5768c98 --- /dev/null +++ b/bubus/retry.py @@ -0,0 +1,561 @@ +import asyncio +import logging +import re +import tempfile +import threading +import time +from collections.abc import Callable, Coroutine +from functools import wraps +from pathlib import Path +from types import ModuleType +from typing import Any, Literal, ParamSpec, TypeVar, cast + +import portalocker + +# Silence portalocker debug messages +portalocker_logger = logging.getLogger('portalocker.utils') +portalocker_logger.setLevel(logging.WARNING) + +# Silence root level portalocker logs too +portalocker_root_logger = logging.getLogger('portalocker') +portalocker_root_logger.setLevel(logging.WARNING) + +psutil: ModuleType | None +try: + import psutil as _psutil +except ImportError: + psutil = None +else: + psutil = _psutil + +PSUTIL_AVAILABLE: bool = psutil is not None + + +logger = logging.getLogger(__name__) + + +T = TypeVar('T') +P = ParamSpec('P') +RetryErrorMatcher = type[Exception] | re.Pattern[str] +RetryOnErrors = list[RetryErrorMatcher] | tuple[RetryErrorMatcher, ...] 
+ +# Global semaphore registry for retry decorator +GLOBAL_RETRY_SEMAPHORES: dict[str, asyncio.Semaphore] = {} +GLOBAL_RETRY_SEMAPHORE_LOCK = threading.Lock() + +# Multiprocess semaphore support +MULTIPROCESS_SEMAPHORE_DIR = Path(tempfile.gettempdir()) / 'browser_use_semaphores' +MULTIPROCESS_SEMAPHORE_DIR.mkdir(exist_ok=True) + +# Global multiprocess semaphore registry +# Multiprocess semaphores are not cached due to internal state issues causing "Already locked" errors +MULTIPROCESS_SEMAPHORE_LOCK = threading.Lock() + +# Global overload detection state +_last_overload_check = 0.0 +_overload_check_interval = 5.0 # Check every 5 seconds +_active_retry_operations = 0 +_active_operations_lock = threading.Lock() + + +def _check_system_overload() -> tuple[bool, str]: + """Check if system is overloaded and return (is_overloaded, reason)""" + if not PSUTIL_AVAILABLE: + return False, '' + + assert psutil is not None + try: + # Get system stats + cpu_percent = psutil.cpu_percent(interval=0.1) + memory = psutil.virtual_memory() + + # Check thresholds + reasons: list[str] = [] + is_overloaded = False + + if cpu_percent > 85: + is_overloaded = True + reasons.append(f'CPU: {cpu_percent:.1f}%') + + if memory.percent > 85: + is_overloaded = True + reasons.append(f'Memory: {memory.percent:.1f}%') + + # Check number of concurrent operations + with _active_operations_lock: + if _active_retry_operations > 30: + is_overloaded = True + reasons.append(f'Active operations: {_active_retry_operations}') + + return is_overloaded, ', '.join(reasons) + except Exception: + return False, '' + + +def _get_semaphore_key( + base_name: str, + semaphore_scope: Literal['multiprocess', 'global', 'class', 'instance'], + args: tuple[Any, ...], +) -> str: + """Determine the semaphore key based on scope.""" + if semaphore_scope == 'multiprocess': + return base_name + elif semaphore_scope == 'global': + return base_name + elif semaphore_scope == 'class' and args and hasattr(args[0], '__class__'): + 
class_name = args[0].__class__.__name__ + return f'{class_name}.{base_name}' + elif semaphore_scope == 'instance' and args: + instance_id = id(args[0]) + return f'{instance_id}.{base_name}' + else: + # Fallback to global if we can't determine scope + return base_name + + +def _get_or_create_semaphore( + sem_key: str, + semaphore_limit: int, + semaphore_scope: Literal['multiprocess', 'global', 'class', 'instance'], +) -> Any: + """Get or create a semaphore based on scope.""" + if semaphore_scope == 'multiprocess': + # Don't cache multiprocess semaphores - they have internal state issues + # Create a new instance each time to avoid "Already locked" errors + with MULTIPROCESS_SEMAPHORE_LOCK: + # Ensure the directory exists (it might have been cleaned up in cloud environments) + MULTIPROCESS_SEMAPHORE_DIR.mkdir(exist_ok=True, parents=True) + + # Clean up any stale lock files before creating semaphore + lock_pattern = f'{sem_key}.*.lock' + for lock_file in MULTIPROCESS_SEMAPHORE_DIR.glob(lock_pattern): + try: + # Try to remove lock files older than 5 minutes + if lock_file.stat().st_mtime < time.time() - 300: + lock_file.unlink(missing_ok=True) + except Exception: + pass # Ignore errors when cleaning up + + # Use a more aggressive timeout for lock acquisition + try: + semaphore = portalocker.utils.NamedBoundedSemaphore( + maximum=semaphore_limit, + name=sem_key, + directory=str(MULTIPROCESS_SEMAPHORE_DIR), + timeout=0.1, # Very short timeout for internal lock acquisition + ) + return semaphore + except FileNotFoundError as e: + # In some cloud environments, the lock file creation might fail + # Try once more after ensuring directory exists + logger.warning(f'Lock file creation failed: {e}. 
Retrying after ensuring directory exists.') + MULTIPROCESS_SEMAPHORE_DIR.mkdir(exist_ok=True, parents=True) + + # Create a fallback asyncio semaphore instead of multiprocess + logger.warning(f'Falling back to asyncio semaphore for {sem_key} due to filesystem issues') + with GLOBAL_RETRY_SEMAPHORE_LOCK: + fallback_key = f'multiprocess_fallback_{sem_key}' + if fallback_key not in GLOBAL_RETRY_SEMAPHORES: + GLOBAL_RETRY_SEMAPHORES[fallback_key] = asyncio.Semaphore(semaphore_limit) + return GLOBAL_RETRY_SEMAPHORES[fallback_key] + else: + with GLOBAL_RETRY_SEMAPHORE_LOCK: + if sem_key not in GLOBAL_RETRY_SEMAPHORES: + GLOBAL_RETRY_SEMAPHORES[sem_key] = asyncio.Semaphore(semaphore_limit) + return GLOBAL_RETRY_SEMAPHORES[sem_key] + + +def _calculate_semaphore_timeout( + semaphore_timeout: float | None, + timeout: float | None, + semaphore_limit: int, +) -> float | None: + """Calculate the timeout for semaphore acquisition.""" + if semaphore_timeout is not None: + return semaphore_timeout + if timeout is None: + return None + # Default aligns with TS: timeout * max(1, semaphore_limit - 1) + return timeout * max(1, semaphore_limit - 1) + + +def _callable_name(func: Callable[..., Any]) -> str: + """Return a stable name for logs even for callable instances.""" + return getattr(func, '__name__', func.__class__.__name__) + + +def _resolve_semaphore_name( + func_name: str, + semaphore_name: str | Callable[..., str] | None, + args: tuple[Any, ...], +) -> str: + """Resolve semaphore name from a static name or call-time getter.""" + base_name: str | Any + if callable(semaphore_name): + base_name = semaphore_name(*args) + else: + base_name = semaphore_name if semaphore_name is not None else func_name + return str(base_name) + + +def _matches_retry_on_error(error: Exception, retry_on_errors: RetryOnErrors | None) -> bool: + """Return True when an error matches any configured retry matcher.""" + if not retry_on_errors: + return True + + error_text = f'{error.__class__.__name__}: 
{error}' + for matcher in retry_on_errors: + if isinstance(matcher, re.Pattern): + if matcher.search(error_text): + return True + continue + if isinstance(error, matcher): + return True + + return False + + +async def _acquire_multiprocess_semaphore( + semaphore: Any, + sem_timeout: float | None, + sem_key: str, + semaphore_lax: bool, + semaphore_limit: int, + timeout: float | None, +) -> tuple[bool, Any]: + """Acquire a multiprocess semaphore with retries and exponential backoff.""" + start_time = time.time() + retry_delay = 0.1 # Start with 100ms + backoff_factor = 2.0 + max_single_attempt = 1.0 # Max time for a single acquire attempt + recreate_attempts = 0 + max_recreate_attempts = 3 + has_timeout = sem_timeout is not None and sem_timeout > 0 + + while True: + try: + # Calculate remaining time (when configured) + elapsed = time.time() - start_time + remaining_time: float | None = (sem_timeout - elapsed) if has_timeout and sem_timeout is not None else None + if remaining_time is not None and remaining_time <= 0: + break + + # Use bounded one-second acquire loops so we can recover from transient lock file errors. 
+ attempt_timeout = min(remaining_time, max_single_attempt) if remaining_time is not None else max_single_attempt + + # Use a temporary thread to run the blocking operation + multiprocess_lock = await asyncio.to_thread( + lambda: semaphore.acquire(timeout=attempt_timeout, check_interval=0.1, fail_when_locked=False) + ) + if multiprocess_lock: + return True, multiprocess_lock + + # If we didn't get the lock, wait before retrying + if remaining_time is None or remaining_time > retry_delay: + await asyncio.sleep(retry_delay) + retry_delay = min(retry_delay * backoff_factor, 1.0) # Cap at 1 second + + except (FileNotFoundError, OSError) as e: + # Handle case where lock file disappears + if isinstance(e, FileNotFoundError) or 'No such file or directory' in str(e): + recreate_attempts += 1 + if recreate_attempts <= max_recreate_attempts: + logger.warning( + f'Semaphore lock file disappeared for "{sem_key}". Attempting to recreate (attempt {recreate_attempts}/{max_recreate_attempts})...' + ) + + # Ensure directory exists + with MULTIPROCESS_SEMAPHORE_LOCK: + MULTIPROCESS_SEMAPHORE_DIR.mkdir(exist_ok=True, parents=True) + + # Try to recreate the semaphore + try: + semaphore = await asyncio.to_thread( + lambda: portalocker.utils.NamedBoundedSemaphore( + maximum=semaphore_limit, + name=sem_key, + directory=str(MULTIPROCESS_SEMAPHORE_DIR), + timeout=0.1, + ) + ) + # Continue with the new semaphore + continue + except Exception as recreate_error: + logger.error(f'Failed to recreate semaphore: {recreate_error}') + # If recreation fails and we're in lax mode, return without lock + if semaphore_lax: + logger.warning(f'Failed to recreate semaphore "{sem_key}", proceeding without concurrency limit') + return False, None + raise + else: + # Max recreate attempts exceeded + if semaphore_lax: + logger.warning( + f'Max semaphore recreation attempts exceeded for "{sem_key}", proceeding without concurrency limit' + ) + return False, None + raise + else: + # Other OS errors + raise + + 
except (AssertionError, Exception) as e: + # Handle "Already locked" error by skipping this attempt + if 'Already locked' in str(e) or isinstance(e, AssertionError): + # Lock file might be stale from a previous process crash + # Wait before retrying + elapsed = time.time() - start_time + remaining_time = (sem_timeout - elapsed) if has_timeout and sem_timeout is not None else None + if remaining_time is None or remaining_time > retry_delay: + await asyncio.sleep(retry_delay) + retry_delay = min(retry_delay * backoff_factor, 1.0) + continue + elif 'Could not acquire' not in str(e) and not isinstance(e, TimeoutError): + raise + + # Timeout reached + if not semaphore_lax: + timeout_str = f', timeout={timeout}s per operation' if timeout is not None else '' + raise TimeoutError( + f'Failed to acquire multiprocess semaphore "{sem_key}" within {sem_timeout}s (limit={semaphore_limit}{timeout_str})' + ) + logger.warning( + f'Failed to acquire multiprocess semaphore "{sem_key}" after {sem_timeout:.1f}s, proceeding without concurrency limit' + ) + return False, None + + +async def _acquire_asyncio_semaphore( + semaphore: asyncio.Semaphore, + sem_timeout: float | None, + sem_key: str, + semaphore_lax: bool, + semaphore_limit: int, + timeout: float | None, + sem_start: float, +) -> bool: + """Acquire an asyncio semaphore.""" + if sem_timeout is None or sem_timeout <= 0: + await semaphore.acquire() + return True + + try: + async with asyncio.timeout(sem_timeout): + await semaphore.acquire() + return True + except TimeoutError: + sem_wait_time = time.time() - sem_start + if not semaphore_lax: + timeout_str = f', timeout={timeout}s per operation' if timeout is not None else '' + raise TimeoutError( + f'Failed to acquire semaphore "{sem_key}" within {sem_timeout}s (limit={semaphore_limit}{timeout_str})' + ) + logger.warning( + f'Failed to acquire semaphore "{sem_key}" after {sem_wait_time:.1f}s, proceeding without concurrency limit' + ) + return False + + +async def 
_execute_with_retries( + func: Callable[P, Coroutine[Any, Any, T]], + args: tuple[Any, ...], + kwargs: dict[str, Any], + max_attempts: int, + timeout: float | None, + retry_after: float, + retry_backoff_factor: float, + retry_on_errors: RetryOnErrors | None, + start_time: float, + sem_start: float, + semaphore_limit: int | None, +) -> T: + """Execute the function with retry logic.""" + func_name = _callable_name(func) + func_runner = cast(Callable[..., Coroutine[Any, Any, T]], func) + for attempt in range(1, max_attempts + 1): + try: + # Execute with per-attempt timeout + if timeout is not None and timeout > 0: + async with asyncio.timeout(timeout): + return await func_runner(*args, **kwargs) + return await func_runner(*args, **kwargs) + + except Exception as e: + # Check if we should retry this exception + if not _matches_retry_on_error(e, retry_on_errors): + raise + + if attempt < max_attempts: + # Calculate wait time with backoff + current_wait = retry_after * (retry_backoff_factor ** (attempt - 1)) + + # Only log warning on the final retry attempt (second-to-last overall attempt) + if attempt == max_attempts - 1: + logger.warning( + f'{func_name} failed (attempt {attempt}/{max_attempts}): ' + f'{type(e).__name__}: {e}. Waiting {current_wait:.1f}s before retry...' + ) + if current_wait > 0: + await asyncio.sleep(current_wait) + else: + # Final failure + total_time = time.time() - start_time + sem_wait = time.time() - sem_start - total_time if semaphore_limit else 0 + sem_str = f'Semaphore wait: {sem_wait:.1f}s. ' if sem_wait > 0 else '' + logger.error( + f'{func_name} failed after {max_attempts} attempts over {total_time:.1f}s. 
' + f'{sem_str}Final error: {type(e).__name__}: {e}' + ) + raise + + # This should never be reached, but satisfies type checker + raise RuntimeError('Unexpected state in retry logic') + + +def _track_active_operations(increment: bool = True) -> None: + """Track active retry operations.""" + global _active_retry_operations + with _active_operations_lock: + if increment: + _active_retry_operations += 1 + else: + _active_retry_operations = max(0, _active_retry_operations - 1) + + +def _check_system_overload_if_needed() -> None: + """Check for system overload if enough time has passed since last check.""" + global _last_overload_check + current_time = time.time() + if current_time - _last_overload_check > _overload_check_interval: + _last_overload_check = current_time + is_overloaded, reason = _check_system_overload() + if is_overloaded: + logger.warning(f'⚠️ System overload detected: {reason}. Consider reducing concurrent operations to prevent hanging.') + + +def retry( + retry_after: float = 0, + max_attempts: int = 1, + timeout: float | None = None, + retry_on_errors: RetryOnErrors | None = None, + retry_backoff_factor: float = 1.0, + semaphore_limit: int | None = None, + semaphore_name: str | Callable[..., str] | None = None, + semaphore_lax: bool = True, + semaphore_scope: Literal['multiprocess', 'global', 'class', 'instance'] = 'global', + semaphore_timeout: float | None = None, +): + """ + Retry decorator with semaphore support for async functions. 
+ + Args: + retry_after: Seconds to wait between retries + max_attempts: Total attempts including the initial call (1 = no retries) + timeout: Per-attempt timeout in seconds (`None` = no per-attempt timeout) + retry_on_errors: Error matchers to retry on (Exception subclasses or compiled regexes) + retry_backoff_factor: Multiplier for retry delay after each attempt (1.0 = no backoff) + semaphore_limit: Max concurrent executions (creates semaphore if needed) + semaphore_name: Name for semaphore (defaults to function name), or callable receiving function args + semaphore_lax: If True, continue without semaphore on acquisition failure + semaphore_scope: Scope for semaphore sharing: + - 'global': All calls share one semaphore (default) + - 'class': All instances of a class share one semaphore + - 'instance': Each instance gets its own semaphore + - 'multiprocess': All processes on the machine share one semaphore + semaphore_timeout: Max time to wait for semaphore acquisition + (`None` => `timeout * max(1, limit - 1)` when timeout is set, else unbounded) + + Example: + @retry(retry_after=3, max_attempts=3, timeout=5, semaphore_limit=3, semaphore_scope='instance') + async def some_function(self, ...): + # Limited to 5s per attempt, up to 3 total attempts + # Max 3 concurrent executions per instance + + Notes: + - semaphore acquisition happens once at start time, it is not retried + - semaphore_timeout is only used if semaphore_limit is set. + - if semaphore_timeout is set to 0, it waits forever for a semaphore slot. + - if semaphore_timeout is None and timeout is None, semaphore acquisition wait is unbounded. 
+ """ + + def decorator(func: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, Coroutine[Any, Any, T]]: + func_name = _callable_name(func) + effective_max_attempts = max(1, max_attempts) + effective_retry_after = max(0, retry_after) + effective_semaphore_limit = semaphore_limit if semaphore_limit is not None and semaphore_limit > 0 else None + + @wraps(func) + async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: + # Initialize semaphore-related variables + semaphore: Any = None + semaphore_acquired = False + multiprocess_lock: Any = None + sem_start = time.time() + + # Handle semaphore if specified + if effective_semaphore_limit is not None: + # Get semaphore key and create/retrieve semaphore + base_name = _resolve_semaphore_name(func_name, semaphore_name, tuple(args)) + sem_key = _get_semaphore_key(base_name, semaphore_scope, tuple(args)) + semaphore = _get_or_create_semaphore(sem_key, effective_semaphore_limit, semaphore_scope) + + # Calculate timeout for semaphore acquisition + sem_timeout = _calculate_semaphore_timeout(semaphore_timeout, timeout, effective_semaphore_limit) + + # Acquire semaphore based on type + if semaphore_scope == 'multiprocess': + semaphore_acquired, multiprocess_lock = await _acquire_multiprocess_semaphore( + semaphore, sem_timeout, sem_key, semaphore_lax, effective_semaphore_limit, timeout + ) + else: + semaphore_acquired = await _acquire_asyncio_semaphore( + semaphore, sem_timeout, sem_key, semaphore_lax, effective_semaphore_limit, timeout, sem_start + ) + + # Track active operations and check system overload + _track_active_operations(increment=True) + _check_system_overload_if_needed() + + # Execute function with retries + start_time = time.time() + try: + return await _execute_with_retries( + func, + tuple(args), + dict(kwargs), + effective_max_attempts, + timeout, + effective_retry_after, + retry_backoff_factor, + retry_on_errors, + start_time, + sem_start, + effective_semaphore_limit, + ) + finally: + # Clean up: decrement 
active operations and release semaphore + _track_active_operations(increment=False) + + if semaphore_acquired and semaphore: + try: + if semaphore_scope == 'multiprocess' and multiprocess_lock: + await asyncio.to_thread(lambda: multiprocess_lock.release()) + elif semaphore: + semaphore.release() + except (FileNotFoundError, OSError) as e: + # Handle case where lock file was removed during operation + if isinstance(e, FileNotFoundError) or 'No such file or directory' in str(e): + logger.warning(f'Semaphore lock file disappeared during release, ignoring: {e}') + else: + # Log other OS errors but don't raise - we already completed the operation + logger.error(f'Error releasing semaphore: {e}') + + return wrapper + + return decorator + + +__all__ = [ + 'MULTIPROCESS_SEMAPHORE_DIR', + 'retry', +] diff --git a/bubus/service.py b/bubus/service.py deleted file mode 100644 index 72f652e..0000000 --- a/bubus/service.py +++ /dev/null @@ -1,1505 +0,0 @@ -import asyncio -import contextvars -import inspect -import logging -import traceback -import warnings -import weakref -from collections import defaultdict, deque -from collections.abc import Callable -from contextvars import ContextVar -from pathlib import Path -from typing import Any, Literal, TypeVar, cast, overload - -import anyio # pyright: ignore[reportMissingImports] -from uuid_extensions import uuid7str # pyright: ignore[reportMissingImports, reportUnknownVariableType] - -uuid7str: Callable[[], str] = uuid7str # pyright: ignore - -from bubus.models import ( - BUBUS_LOGGING_LEVEL, - AsyncEventHandlerClassMethod, - AsyncEventHandlerFunc, - AsyncEventHandlerMethod, - BaseEvent, - ContravariantEventHandler, - EventHandler, - EventHandlerClassMethod, - EventHandlerFunc, - EventHandlerMethod, - PythonIdentifierStr, - PythonIdStr, - T_Event, - T_EventResultType, - UUIDStr, - get_handler_id, - get_handler_name, -) - -logger = logging.getLogger('bubus') -logger.setLevel(BUBUS_LOGGING_LEVEL) - - -# Define our own QueueShutDown 
exception -class QueueShutDown(Exception): - """Raised when putting on to or getting from a shut-down Queue.""" - - pass - - -QueueEntryType = TypeVar('QueueEntryType', bound='BaseEvent[Any]') -T_ExpectedEvent = TypeVar('T_ExpectedEvent', bound='BaseEvent[Any]') - -EventPatternType = PythonIdentifierStr | Literal['*'] | type['BaseEvent[Any]'] - - -class CleanShutdownQueue(asyncio.Queue[QueueEntryType]): - """asyncio.Queue subclass that handles shutdown cleanly without warnings.""" - - _is_shutdown: bool = False - _getters: deque[asyncio.Future[QueueEntryType]] - _putters: deque[asyncio.Future[QueueEntryType]] - - def shutdown(self, immediate: bool = True): - """Shutdown the queue and clean up all pending futures.""" - self._is_shutdown = True - - # Cancel all waiting getters without triggering warnings - while self._getters: - getter = self._getters.popleft() - if not getter.done(): - # Set exception instead of cancelling to avoid "Event loop is closed" errors - getter.set_exception(QueueShutDown()) - - # Cancel all waiting putters - while self._putters: - putter = self._putters.popleft() - if not putter.done(): - putter.set_exception(QueueShutDown()) - - async def get(self) -> QueueEntryType: - """Remove and return an item from the queue, with shutdown support.""" - while self.empty(): - if self._is_shutdown: - raise QueueShutDown - - getter: asyncio.Future[QueueEntryType] = self._get_loop().create_future() # type: ignore - assert isinstance(getter, asyncio.Future) - self._getters.append(getter) # type: ignore[arg-type] - try: - await getter - except: - # Clean up the getter if we're cancelled - getter.cancel() # Just in case getter is not done yet. 
- try: - self._getters.remove(getter) # type: ignore[arg-type] - except ValueError: - pass - # Re-raise the exception - raise - - return self.get_nowait() - - async def put(self, item: QueueEntryType) -> None: - """Put an item into the queue, with shutdown support.""" - while self.full(): - if self._is_shutdown: - raise QueueShutDown - - putter: asyncio.Future[QueueEntryType] = self._get_loop().create_future() # type: ignore - assert isinstance(putter, asyncio.Future) - self._putters.append(putter) # type: ignore[arg-type] - try: - await putter - except: - putter.cancel() # Just in case putter is not done yet. - try: - self._putters.remove(putter) # type: ignore[arg-type] - except ValueError: - pass - raise - - return self.put_nowait(item) - - def put_nowait(self, item: QueueEntryType) -> None: - """Put an item into the queue without blocking, with shutdown support.""" - if self._is_shutdown: - raise QueueShutDown - return super().put_nowait(item) - - def get_nowait(self) -> QueueEntryType: - """Remove and return an item if one is immediately available, with shutdown support.""" - if self._is_shutdown and self.empty(): - raise QueueShutDown - return super().get_nowait() - - -# Context variable to track the current event being processed (for setting event_parent_id from inside a child event) -_current_event_context: ContextVar['BaseEvent[Any] | None'] = ContextVar('current_event', default=None) -# Context variable to track if we're inside a handler (for nested event detection) -inside_handler_context: ContextVar[bool] = ContextVar('inside_handler', default=False) -# Context variable to track if we hold the global lock (for re-entrancy across tasks) -holds_global_lock: ContextVar[bool] = ContextVar('holds_global_lock', default=False) -# Context variable to track the current handler ID (for tracking child events) -_current_handler_id_context: ContextVar[str | None] = ContextVar('current_handler_id', default=None) - - -class ReentrantLock: - """A re-entrant lock that 
works across different asyncio tasks using ContextVar.""" - - def __init__(self): - self._semaphore: asyncio.Semaphore | None = None - self._depth = 0 # Track re-entrance depth - self._loop: asyncio.AbstractEventLoop | None = None - - def _get_semaphore(self) -> asyncio.Semaphore: - """Get or create the semaphore for the current event loop.""" - current_loop = asyncio.get_running_loop() - if self._semaphore is None or self._loop != current_loop: - # Create new semaphore for this event loop - self._semaphore = asyncio.Semaphore(1) - self._loop = current_loop - return self._semaphore - - async def __aenter__(self): - if holds_global_lock.get(): - # We already hold the lock in this context, increment depth - self._depth += 1 - return self - - # Acquire the lock - await self._get_semaphore().acquire() - holds_global_lock.set(True) - self._depth = 1 - return self - - async def __aexit__(self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any) -> None: - if not holds_global_lock.get(): - # We don't hold the lock, nothing to do - return - - self._depth -= 1 - if self._depth == 0: - # Last exit, release the lock - holds_global_lock.set(False) - self._get_semaphore().release() - - def locked(self) -> bool: - """Check if the lock is currently held.""" - # If semaphore doesn't exist yet or is from a different loop, it's not locked - try: - current_loop = asyncio.get_running_loop() - if self._semaphore is None or self._loop != current_loop: - return False - return self._semaphore.locked() - except RuntimeError: - # No running loop, can't check - return False - - -# Global re-entrant lock shared by all EventBus instances -_global_eventbus_lock: ReentrantLock | None = None - - -def _get_global_lock() -> ReentrantLock: - """Get or create the global EventBus lock.""" - global _global_eventbus_lock - if _global_eventbus_lock is None: - _global_eventbus_lock = ReentrantLock() - return _global_eventbus_lock - - -def _log_pretty_path(path: Path | str | 
None) -> str: - """Pretty-print a path, shorten home dir to ~ and cwd to .""" - - if not path or not str(path).strip(): - return '' # always falsy in -> falsy out so it can be used in ternaries - - # dont print anything thats not a path - if not isinstance(path, (str, Path)): # type: ignore - # no other types are safe to just str(path) and log to terminal unless we know what they are - # e.g. what if we get storage_date=dict | Path and the dict version could contain real cookies - return f'<{type(path).__name__}>' - - # replace home dir and cwd with ~ and . - pretty_path = str(path).replace(str(Path.home()), '~').replace(str(Path.cwd().resolve()), '.') - - # wrap in quotes if it contains spaces - if pretty_path.strip() and ' ' in pretty_path: - pretty_path = f'"{pretty_path}"' - - return pretty_path - - -def _log_filtered_traceback(exc: BaseException) -> str: - trace_exc = traceback.TracebackException.from_exception(exc, capture_locals=False) - - def _filter(_: traceback.TracebackException): - trace_exc.stack = traceback.StackSummary.from_list( - [f for f in trace_exc.stack if 'asyncio/tasks.py' not in f.filename and 'lib/python' not in f.filename] - ) - if trace_exc.__cause__: - _filter(trace_exc.__cause__) - if trace_exc.__context__: - _filter(trace_exc.__context__) - - _filter(trace_exc) - return ''.join(trace_exc.format()) - - -class EventBus: - """ - Async event bus with write-ahead logging and guaranteed FIFO processing. 
- - Features: - - Enqueue events synchronously, await their results using 'await Event()' - - FIFP Write-ahead logging with UUIDs and timestamps, - - Serial event processing, parallel handler execution per event - """ - - # Track all EventBus instances (using weakrefs to allow garbage collection) - all_instances: weakref.WeakSet['EventBus'] = weakref.WeakSet() - - # Class Attributes - name: PythonIdentifierStr = 'EventBus' - parallel_handlers: bool = False - wal_path: Path | None = None - - # Runtime State - id: UUIDStr = '00000000-0000-0000-0000-000000000000' - handlers: dict[PythonIdStr, list[ContravariantEventHandler['BaseEvent[Any]']]] # collected by .on(, ) - event_queue: CleanShutdownQueue['BaseEvent[Any]'] | None - event_history: dict[UUIDStr, 'BaseEvent[Any]'] # collected by .dispatch() - - _is_running: bool = False - _runloop_task: asyncio.Task[None] | None = None - _on_idle: asyncio.Event | None = None - - def __init__( - self, - name: PythonIdentifierStr | None = None, - wal_path: Path | str | None = None, - parallel_handlers: bool = False, - max_history_size: int | None = 50, # Keep only 50 events in history - ): - self.id = uuid7str() - self.name = name or f'{self.__class__.__name__}_{self.id[-8:]}' - assert self.name.isidentifier(), f'EventBus name must be a unique identifier string, got: {self.name}' - - # Force garbage collection to clean up any dead EventBus instances in the WeakSet - # gc.collect() # Commented out - this is expensive and causes 5s delays when creating many EventBus instances - - # Check for name uniqueness among existing instances - # We'll collect potential conflicts and check if they're still alive - original_name = self.name - conflicting_buses: list[EventBus] = [] - - for existing_bus in list(EventBus.all_instances): # Make a list copy to avoid modification during iteration - if existing_bus is not self and existing_bus.name == self.name: - # Try to trigger collection of just this object by checking if it's collectable - # 
First, temporarily remove from WeakSet to see if that was the only reference - EventBus.all_instances.discard(existing_bus) - - # Check if the object is still reachable by creating a new weak reference - # If the object only existed in the WeakSet, it should be unreachable now - try: - # Try to access an attribute to see if the object is still valid - _ = existing_bus.name # This will work if object is still alive - - # Object is still alive with real references, restore to WeakSet - EventBus.all_instances.add(existing_bus) - conflicting_buses.append(existing_bus) - except Exception: - # Object was garbage collected or is invalid (e.g., AttributeError), that's fine - # Don't re-add to WeakSet, let it stay removed - pass - - # If we found conflicting buses, auto-generate a unique suffix - if conflicting_buses: - # Generate a unique suffix using the last 8 chars of a UUID - unique_suffix = uuid7str()[-8:] - self.name = f'{original_name}_{unique_suffix}' - - warnings.warn( - f'⚠️ EventBus with name "{original_name}" already exists. ' - f'Auto-generated unique name: "{self.name}" to avoid conflicts. 
' - f'Consider using unique names or stop(clear=True) on unused buses.', - UserWarning, - stacklevel=2, - ) - - self.event_queue = None - self.event_history = {} - self.handlers = defaultdict(list) - self.parallel_handlers = parallel_handlers - self.wal_path = Path(wal_path) if wal_path else None - self._on_idle = None - - # Memory leak prevention settings - self.max_history_size = max_history_size - - # Register this instance - EventBus.all_instances.add(self) - - # Instead of registering as normal event handlers, - # these special handlers are just called manually at the end of step - # self.on('*', self._default_log_handler) - # self.on('*', self._default_wal_handler) - - def __del__(self): - """Auto-cleanup on garbage collection""" - # Most cleanup should have been done by the event loop close hook - # This is just a fallback for any remaining cleanup - - # Signal the run loop to stop - self._is_running = False - - # Our custom queue handles cleanup properly in shutdown() - # No need for manual cleanup here - - # Check total memory usage across all EventBus instances - try: - self._check_total_memory_usage() - except Exception: - # Don't let memory check errors prevent cleanup - pass - - def __str__(self) -> str: - icon = '🟒' if self._is_running else 'πŸ”΄' - return f'{self.name}{icon}(⏳ {len(self.events_pending or [])} | ▢️ {len(self.events_started or [])} | βœ… {len(self.events_completed or [])} ➑️ {len(self.handlers)} πŸ‘‚)' - - def __repr__(self) -> str: - return str(self) - - @property - def events_pending(self) -> list['BaseEvent[Any]']: - """Get events that haven't started processing yet (does not include events that have not even finished dispatching yet in self.event_queue)""" - return [ - event for event in self.event_history.values() if event.event_started_at is None and event.event_completed_at is None - ] - - @property - def events_started(self) -> list['BaseEvent[Any]']: - """Get events currently being processed""" - return [event for event in 
self.event_history.values() if event.event_started_at and not event.event_completed_at] - - @property - def events_completed(self) -> list['BaseEvent[Any]']: - """Get events that have completed processing""" - return [event for event in self.event_history.values() if event.event_completed_at is not None] - - # Overloads for typed event patterns with specific handler signatures - # Order matters - more specific types must come before general ones - - # 1. EventHandlerFunc[T_Event] - sync function taking event - @overload - def on(self, event_pattern: EventPatternType, handler: EventHandlerFunc[T_Event]) -> None: ... - - # 2. AsyncEventHandlerFunc[T_Event] - async function taking event - @overload - def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerFunc[T_Event]) -> None: ... - - # 3. EventHandlerMethod[T_Event] - sync method taking self and event - @overload - def on(self, event_pattern: EventPatternType, handler: EventHandlerMethod[T_Event]) -> None: ... - - # 4. AsyncEventHandlerMethod[T_Event] - async method taking self and event - @overload - def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerMethod[T_Event]) -> None: ... - - # 5. EventHandlerClassMethod[BaseEvent] - sync classmethod taking cls and event - @overload - def on(self, event_pattern: EventPatternType, handler: EventHandlerClassMethod['BaseEvent[Any]']) -> None: ... - - # 6. AsyncEventHandlerClassMethod[BaseEvent] - async classmethod taking cls and event - @overload - def on(self, event_pattern: EventPatternType, handler: AsyncEventHandlerClassMethod['BaseEvent[Any]']) -> None: ... - - # I dont think this is needed, but leaving it here for now - # 9. Coroutine[Any, Any, Any] - direct coroutine - # @overload # type: ignore[reportUnknownReturnType] - # def on(self, event_pattern: EventPatternType, handler: Coroutine[Any, Any, Any]) -> None: ... 
- - def on( - self, - event_pattern: EventPatternType, - handler: ( # TypeAlias with args doesnt work on overloaded signature, has to be defined inline - EventHandlerFunc[T_Event] - | AsyncEventHandlerFunc['BaseEvent[Any]'] - | EventHandlerMethod[T_Event] - | AsyncEventHandlerMethod['BaseEvent[Any]'] - | EventHandlerClassMethod['BaseEvent[Any]'] - | AsyncEventHandlerClassMethod['BaseEvent[Any]'] - ), - ) -> None: - """ - Subscribe to events matching a pattern, event type name, or event model class. - Use event_pattern='*' to subscribe to all events. Handler can be sync or async function or method. - - Examples: - eventbus.on('TaskStartedEvent', handler) # Specific event type - eventbus.on(TaskStartedEvent, handler) # Event model class - eventbus.on('*', handler) # Subscribe to all events - eventbus.on('*', other_eventbus.dispatch) # Forward all events to another EventBus - - Note: When forwarding events between buses, all handler results are automatically - flattened into the original event's results, so EventResults sees all handlers - from all buses as a single flat collection. 
- """ - assert isinstance(event_pattern, str) or issubclass(event_pattern, BaseEvent), ( - f'Invalid event pattern: {event_pattern}, must be a string event type or subclass of BaseEvent' - ) - assert inspect.isfunction(handler) or inspect.ismethod(handler) or inspect.iscoroutinefunction(handler), ( - f'Invalid handler: {handler}, must be a sync or async function or method' - ) - - # Determine event key - event_key: str - if event_pattern == '*': - event_key = '*' - elif isinstance(event_pattern, type) and issubclass(event_pattern, BaseEvent): # pyright: ignore[reportUnnecessaryIsInstance] - event_key = event_pattern.__name__ # pyright: ignore[reportUnknownMemberType, reportUnknownVariableType] - else: - event_key = str(event_pattern) - - # Ensure event_key is definitely a string at this point - assert isinstance(event_key, str) - - # Check for duplicate handler names - new_handler_name = get_handler_name(handler) - existing_registered_handlers = [get_handler_name(h) for h in self.handlers.get(event_key, [])] # pyright: ignore[reportUnknownArgumentType] - - if new_handler_name in existing_registered_handlers: - warnings.warn( - f"⚠️ {self} Handler {new_handler_name} already registered for event '{event_key}'. " - f'This may cause ambiguous results when using name-based access. ' - f'Consider using unique function names.', - UserWarning, - stacklevel=2, - ) - - # Register handler - self.handlers[event_key].append(handler) # type: ignore - logger.debug(f'πŸ‘‚ {self}.on({event_key}, {get_handler_name(handler)}) Registered event handler') - - def dispatch(self, event: T_ExpectedEvent) -> T_ExpectedEvent: - """ - Enqueue an event for processing and immediately return an Event(status='pending') version (synchronous). - You can await the returned Event(status='pending') object to block until it is done being executed aka Event(status='completed'), - or you can interact with the unawaited Event(status='pending') before its handlers have finished. 
- - (The first EventBus.dispatch() call will auto-start a bus's async _run_loop() if it's not already running) - - >>> completed_event = await eventbus.dispatch(SomeEvent()) - # 1. enqueues the event synchronously - # 2. returns an awaitable SomeEvent() with pending results in .event_results - # 3. awaits the SomeEvent() which waits until all pending results are complete and returns the completed SomeEvent() - - >>> result_value = await eventbus.dispatch(SomeEvent()).event_result() - # 1. enqueues the event synchronously - # 2. returns a pending SomeEvent() with pending results in .event_results - # 3. awaiting .event_result() waits until all pending results are complete, and returns the raw result value of the first one - """ - - try: - asyncio.get_running_loop() - except RuntimeError: - raise RuntimeError(f'{self}.dispatch() called but no event loop is running! Event not queued: {event.event_type}') - - assert event.event_id, 'Missing event.event_id: UUIDStr = uuid7str()' - assert event.event_created_at, 'Missing event.event_created_at: datetime = datetime.now(UTC)' - assert event.event_type and event.event_type.isidentifier(), 'Missing event.event_type: str' - assert event.event_schema and '@' in event.event_schema, 'Missing event.event_schema: str (with @version)' - - # Automatically set event_parent_id from context if not already set - if event.event_parent_id is None: - current_event: 'BaseEvent[Any] | None' = _current_event_context.get() - if current_event is not None: - event.event_parent_id = current_event.event_id - - # Track child events - if we're inside a handler, add this event to the handler's event_children list - # Only track if this is a NEW event (not forwarding an existing event) - current_handler_id = _current_handler_id_context.get() - if current_handler_id is not None and inside_handler_context.get(): - current_event = _current_event_context.get() - if current_event is not None and current_handler_id in current_event.event_results: - # Only 
add as child if it's a different event (not forwarding the same event) - if event.event_id != current_event.event_id: - current_event.event_results[current_handler_id].event_children.append(event) - - # Add this EventBus to the event_path if not already there - if self.name not in event.event_path: - # preserve identity of the original object instead of creating a new one, so that the original object remains awaitable to get the result - # NOT: event = event.model_copy(update={'event_path': event.event_path + [self.name]}) - event.event_path.append(self.name) - else: - logger.debug( - f'⚠️ {self}.dispatch({event.event_type}) - Bus already in path, not adding again. Path: {event.event_path}' - ) - - assert event.event_path, 'Missing event.event_path: list[str] (with at least the origin function name recorded in it)' - assert all(entry.isidentifier() for entry in event.event_path), ( - f'Event.event_path must be a list of valid EventBus names, got: {event.event_path}' - ) - - # Check hard limit on total pending events (queue + in-progress) - # Only enforce if we have memory limits set - if self.max_history_size is not None: - queue_size = self.event_queue.qsize() if self.event_queue else 0 - pending_in_history = sum(1 for e in self.event_history.values() if e.event_status in ('pending', 'started')) - total_pending = queue_size + pending_in_history - - if total_pending >= 100: - raise RuntimeError( - f'EventBus at capacity: {total_pending} pending events (100 max). ' - f'Queue: {queue_size}, Processing: {pending_in_history}. ' - f'Cannot accept new events until some complete.' 
- ) - - # Auto-start if needed - self._start() - - # Put event in queue synchronously using put_nowait - if self.event_queue: - try: - self.event_queue.put_nowait(event) - # Only add to history after successfully queuing - self.event_history[event.event_id] = event - logger.info( - f'πŸ—£οΈ {self}.dispatch({event.event_type}) ➑️ {event.event_type}#{event.event_id[-4:]} (#{self.event_queue.qsize()} {event.event_status})' - ) - except asyncio.QueueFull: - # Don't add to history if we can't queue it - logger.error( - f'⚠️ {self} Event queue is full! Dropping event and aborting {event.event_type}:\n{event.model_dump_json()}' # pyright: ignore[reportUnknownMemberType] - ) - raise # could also block indefinitely until queue has space, but dont drop silently or delete events - else: - logger.warning(f'⚠️ {self}.dispatch() called but event_queue is None! Event not queued: {event.event_type}') - - # Note: We do NOT pre-create EventResults here anymore. - # EventResults are created only when handlers actually start executing. - # This avoids "orphaned" pending results for handlers that get filtered out later. - - # Clean up if over the limit - if self.max_history_size and len(self.event_history) > self.max_history_size: - self.cleanup_event_history() - - return event - - @overload - async def expect( - self, - event_type: type[T_ExpectedEvent], - include: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]' | T_ExpectedEvent], bool] = lambda _: True, # deprecated, alias for include - timeout: float | None = None, - ) -> T_ExpectedEvent: ... 
- - @overload - async def expect( - self, - event_type: PythonIdentifierStr, - include: Callable[['BaseEvent[Any]'], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]'], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]'], bool] = lambda _: True, # deprecated, alias for include - timeout: float | None = None, - ) -> 'BaseEvent[Any]': ... - - async def expect( - self, - event_type: PythonIdentifierStr | type[T_ExpectedEvent], - include: Callable[['BaseEvent[Any]'], bool] = lambda _: True, - exclude: Callable[['BaseEvent[Any]'], bool] = lambda _: False, - predicate: Callable[['BaseEvent[Any]'], bool] = lambda _: True, # deprecated, alias for include - timeout: float | None = None, - ) -> 'BaseEvent[Any]' | T_ExpectedEvent: - """ - Wait for an event matching the given type/pattern with optional filters. - - Args: - event_type: The event type string or model class to wait for - include: Filter function that must return True for the event to match (default: lambda e: True) - exclude: Filter function that must return False for the event to match (default: lambda e: False) - predicate: Deprecated name, alias for include (default: lambda e: True) - timeout: Maximum time to wait in seconds as a float (None = wait forever) - - Returns: - The first matching event - - Raises: - asyncio.TimeoutError: If timeout is reached before a matching event - - Example: - # Wait for any response event - response = await eventbus.expect('ResponseEvent', timeout=30) - - # Wait for specific response with include filter - response = await eventbus.expect( - 'ResponseEvent', - include=lambda e: e.request_id == my_request_id, - timeout=30 - ) - - # Wait for response excluding certain types - response = await eventbus.expect( - 'ResponseEvent', - exclude=lambda e: e.error_code is not None, - timeout=30 - ) - """ - future: asyncio.Future['BaseEvent[Any]'] = asyncio.Future() - - # Handle backwards compatibility: merge predicate into include - if predicate is not None: # 
type: ignore[conditionAlwaysTrue] - original_include = include - include = lambda e, orig=original_include, pred=predicate: orig(e) and pred(e) - - def notify_expect_handler(event: 'BaseEvent[Any]') -> None: - """Handler that resolves the future when a matching event is found""" - if not future.done() and include(event) and not exclude(event): - future.set_result(event) - - # make debugging otherwise ephemeral async expect handlers easier by including some metadata in the stacktrace func names - current_frame = inspect.currentframe() - assert current_frame - notify_expect_handler.__name__ = f'{self}.expect({event_type}, timeout={timeout})@{_log_pretty_path(current_frame.f_code.co_filename)}:{current_frame.f_lineno}' # add file and line number to the name - - # Register temporary listener that watches for matching events and triggers the expect handler - self.on(event_type, notify_expect_handler) - - try: - # Wait for the future with optional timeout - if timeout is not None: - return await asyncio.wait_for(future, timeout=timeout) - else: - return await future - finally: - # Clean up handler - event_key: str = event_type.__name__ if isinstance(event_type, type) else str(event_type) # pyright: ignore[reportUnknownMemberType, reportPartialTypeErrors] - if event_key in self.handlers and notify_expect_handler in self.handlers[event_key]: - self.handlers[event_key].remove(notify_expect_handler) - - def _start(self) -> None: - """Start the event bus if not already running""" - if not self._is_running: - try: - loop = asyncio.get_running_loop() - - # Hook into the event loop's close method to cleanup before it closes - # this is necessary to silence "RuntimeError: no running event loop" and "event loop is closed" errors on shutdown - if not hasattr(loop, '_eventbus_close_hooked'): - original_close = loop.close - registered_eventbuses: weakref.WeakSet[EventBus] = weakref.WeakSet() - - def close_with_cleanup() -> None: - # Clean up all registered EventBuses before closing 
the loop - for eventbus in list(registered_eventbuses): - try: - # Stop the eventbus while loop is still running - if eventbus._is_running: - eventbus._is_running = False - - # Shutdown the queue properly - our custom queue will handle cleanup - if eventbus.event_queue: - eventbus.event_queue.shutdown(immediate=True) - - if eventbus._runloop_task and not eventbus._runloop_task.done(): - # Suppress warning before cancelling - if hasattr(eventbus._runloop_task, '_log_destroy_pending'): - eventbus._runloop_task._log_destroy_pending = False # type: ignore - eventbus._runloop_task.cancel() - except Exception: - pass - - # Now close the loop - original_close() - - loop.close = close_with_cleanup - loop._eventbus_close_hooked = True # type: ignore - loop._eventbus_instances = registered_eventbuses # type: ignore - - # Register this EventBus instance in the WeakSet of all EventBuses on the loop - if hasattr(loop, '_eventbus_instances'): - loop._eventbus_instances.add(self) # type: ignore - - # Create async objects if needed - if self.event_queue is None: - # Set queue size based on whether we have limits - queue_size = 50 if self.max_history_size is not None else 0 # 0 = unlimited - self.event_queue = CleanShutdownQueue['BaseEvent[Any]'](maxsize=queue_size) - self._on_idle = asyncio.Event() - self._on_idle.clear() # Start in a busy state unless we confirm queue is empty by running step() at least once - - # Create and start the run loop task - self._runloop_task = loop.create_task(self._run_loop(), name=f'{self}._run_loop') - self._is_running = True - except RuntimeError: - # No event loop - will start when one becomes available - pass - - async def stop(self, timeout: float | None = None, clear: bool = False) -> None: - """Stop the event bus, optionally waiting for events to complete - - Args: - timeout: Maximum time to wait for pending events to complete - clear: If True, clear event history and remove from global tracking to free memory - """ - if not self._is_running: 
- return - - # Wait for completion if timeout specified and > 0 - # timeout=0 means "don't wait", so skip the wait entirely - if timeout is not None and timeout > 0: - try: - await self.wait_until_idle(timeout=timeout) - except TimeoutError: - pass - - queue_size = self.event_queue.qsize() if self.event_queue else 0 - if queue_size or self.events_pending or self.events_started: - logger.debug( - f'⚠️ {self} stopping with pending events: Pending {len(self.events_pending) + queue_size} | Started {len(self.events_started)} | Completed {len(self.events_completed)}\n' - f'PENDING={str(self.events_pending)[:500]}\nSTARTED={str(self.events_started)[:500]}' - ) - - # Signal shutdown - self._is_running = False - - # Shutdown the queue to unblock any pending get() operations - if self.event_queue: - self.event_queue.shutdown() - - # print('STOPPING', self.event_history) - - # Wait for the run loop task to finish / force-cancel it if it's hanging - if self._runloop_task and not self._runloop_task.done(): - await asyncio.wait({self._runloop_task}, timeout=0.1) - try: - self._runloop_task.cancel() - except Exception: - pass - - # Clear references - self._runloop_task = None - if self._on_idle: - self._on_idle.set() - - # Clear event history and handlers if requested (for memory cleanup) - if clear: - self.event_history.clear() - self.handlers.clear() - # Remove from global instance tracking - if self in EventBus.all_instances: - EventBus.all_instances.discard(self) - - # Remove from event loop's tracking if present - try: - loop = asyncio.get_running_loop() - if hasattr(loop, '_eventbus_instances'): - loop._eventbus_instances.discard(self) # type: ignore - except RuntimeError: - # No running loop, that's fine - pass - - logger.debug(f'🧹 {self} cleared event history and removed from global tracking') - - logger.debug(f'πŸ›‘ {self} shut down gracefully' if timeout is not None else f'πŸ›‘ {self} killed') - - # Check total memory usage across all instances - try: - 
self._check_total_memory_usage() - except Exception: - # Don't let memory check errors prevent shutdown - pass - - async def wait_until_idle(self, timeout: float | None = None) -> None: - """Wait until the event bus is idle (no events being processed and all handlers completed)""" - - self._start() - assert self._on_idle and self.event_queue, 'EventBus._start() must be called before wait_until_idle() is reached' - - start_time = asyncio.get_event_loop().time() - remaining_timeout = timeout - - try: - # First wait for the queue to be empty - join_task = asyncio.create_task(self.event_queue.join()) - await asyncio.wait_for(join_task, timeout=remaining_timeout) - - # Update remaining timeout - if timeout is not None: - elapsed = asyncio.get_event_loop().time() - start_time - remaining_timeout = max(0, timeout - elapsed) - - # Wait for idle state - idle_task = asyncio.create_task(self._on_idle.wait()) - await asyncio.wait_for(idle_task, timeout=remaining_timeout) - - # Critical: Ensure the runloop has settled by yielding control - # This allows the runloop to complete any in-flight operations - # and prevents race conditions with event_history access - await asyncio.sleep(0) # Yield to event loop - - # Double-check we're truly idle - if new events came in, wait again - while not self._on_idle.is_set() or self.events_started or self.events_pending: - if timeout is not None: - elapsed = asyncio.get_event_loop().time() - start_time - remaining_timeout = max(0, timeout - elapsed) - if remaining_timeout <= 0: - raise TimeoutError() - - # Clear and wait again - self._on_idle.clear() - idle_task = asyncio.create_task(self._on_idle.wait()) - await asyncio.wait_for(idle_task, timeout=remaining_timeout) - await asyncio.sleep(0) # Yield again - - except TimeoutError: - logger.warning( - f'βŒ›οΈ {self} Timeout waiting for event bus to be idle after {timeout}s (processing: {len(self.events_started)})' - ) - - async def _run_loop(self) -> None: - """Main event processing loop""" - 
try: - while self._is_running: - try: - _processed_event = await self.step() - # Check if we should set idle state after processing - if self._on_idle and self.event_queue: - if not (self.events_pending or self.events_started or self.event_queue.qsize()): - self._on_idle.set() - except QueueShutDown: - # Queue was shut down, exit cleanly - break - except RuntimeError as e: - # Event loop is closing - if 'Event loop is closed' in str(e) or 'no running event loop' in str(e): - break - else: - logger.exception(f'❌ {self} Runtime error in event loop: {type(e).__name__} {e}', exc_info=True) - # Continue running even if there's an error - except Exception as e: - logger.exception(f'❌ {self} Error in event loop: {type(e).__name__} {e}', exc_info=True) - # Continue running even if there's an error - except asyncio.CancelledError: - # Task was cancelled, clean exit - # logger.debug(f'πŸ›‘ {self} Event loop task cancelled') - pass - finally: - # Don't call stop() here as it might create new tasks - self._is_running = False - - async def _get_next_event(self, wait_for_timeout: float = 0.1) -> 'BaseEvent[Any] | None': - """Get the next event from the queue""" - - assert self._on_idle and self.event_queue, 'EventBus._start() must be called before _get_next_event()' - if not self._is_running: - return None - - try: - # Create a task for queue.get() so we can cancel it cleanly - get_next_queued_event = asyncio.create_task(self.event_queue.get()) - if hasattr(get_next_queued_event, '_log_destroy_pending'): - get_next_queued_event._log_destroy_pending = False # type: ignore # Suppress warnings on this task in case of cleanup - - # Wait for next event with timeout - has_next_event, _pending = await asyncio.wait({get_next_queued_event}, timeout=wait_for_timeout) - if has_next_event: - # Check if we're still running before returning the event - if not self._is_running: - get_next_queued_event.cancel() - return None - return await get_next_queued_event # await to actually resolve it to 
the next event - else: - # Get task timed out, cancel it cleanly to suppress warnings - get_next_queued_event.cancel() - - # Check if we're idle, if so, set the idle flag - if not (self.events_pending or self.events_started or self.event_queue.qsize()): - self._on_idle.set() - return None - - except (asyncio.CancelledError, RuntimeError, QueueShutDown): - # Clean cancellation during shutdown or queue was shut down - return None - - async def step( - self, event: 'BaseEvent[Any] | None' = None, timeout: float | None = None, wait_for_timeout: float = 0.1 - ) -> 'BaseEvent[Any] | None': - """Process a single event from the queue""" - assert self._on_idle and self.event_queue, 'EventBus._start() must be called before step()' - - # Track if we got the event from the queue - from_queue = False - - # Wait for next event with timeout to periodically check idle state - if event is None: - event = await self._get_next_event(wait_for_timeout=wait_for_timeout) - from_queue = True - if event is None: - return None - - logger.debug(f'πŸƒ {self}.step({event}) STARTING') - - # Clear idle state when we get an event - self._on_idle.clear() - - # Always acquire the global lock (it's re-entrant across tasks) - async with _get_global_lock(): - # Process the event - await self.process_event(event, timeout=timeout) - - # Mark task as done only if we got it from the queue - if from_queue: - self.event_queue.task_done() - - logger.debug(f'βœ… {self}.step({event}) COMPLETE') - return event - - async def process_event(self, event: 'BaseEvent[Any]', timeout: float | None = None) -> None: - """Process a single event (assumes lock is already held)""" - # Get applicable handlers - applicable_handlers = self._get_applicable_handlers(event) - - # Create pending EventResults for all applicable handlers before execution - # This ensures the event knows it has handlers and won't mark itself complete prematurely - for handler_id, handler in applicable_handlers.items(): - if handler_id not in 
event.event_results: - event.event_result_update( - handler=handler, eventbus=self, status='pending', timeout=timeout or event.event_timeout - ) - - # Execute handlers - await self._execute_handlers(event, handlers=applicable_handlers, timeout=timeout) - - await self._default_log_handler(event) - await self._default_wal_handler(event) - - # Mark event as complete if all handlers are done - event.event_mark_complete_if_all_handlers_completed() - - # After processing this event, check if any parent events can now be marked complete - # We do this by walking up the parent chain - current = event - checked_ids: set[str] = set() - - while current.event_parent_id and current.event_parent_id not in checked_ids: - checked_ids.add(current.event_parent_id) - - # Find parent event in any bus's history - parent_event = None - # Create a list copy to avoid "Set changed size during iteration" error - for bus in list(EventBus.all_instances): - if bus and current.event_parent_id in bus.event_history: - parent_event = bus.event_history[current.event_parent_id] - break - - if not parent_event: - break - - # Check if parent can be marked complete - if parent_event.event_completed_signal and not parent_event.event_completed_signal.is_set(): - parent_event.event_mark_complete_if_all_handlers_completed() - - # Move up the chain - current = parent_event - - # Clean up excess events to prevent memory leaks - if self.max_history_size: - self.cleanup_event_history() - - def _get_applicable_handlers(self, event: 'BaseEvent[Any]') -> dict[str, EventHandler]: - """Get all handlers that should process the given event, filtering out those that would create loops""" - applicable_handlers: list[EventHandler] = [] - - # Add event-type-specific handlers - applicable_handlers.extend(self.handlers.get(event.event_type, [])) - - # Add wildcard handlers (handlers registered for '*') - applicable_handlers.extend(self.handlers.get('*', [])) - - # Filter out handlers that would create loops and build 
id->handler mapping - # Use handler id as key to preserve all handlers even with duplicate names - filtered_handlers: dict[PythonIdStr, EventHandler] = {} - for handler in applicable_handlers: - if self._would_create_loop(event, handler): - continue - else: - handler_id = get_handler_id(handler, self) - filtered_handlers[handler_id] = handler - # logger.debug(f' Found handler {get_handler_name(handler)}#{handler_id[-4:]}()') - - return filtered_handlers - - async def _execute_handlers( - self, event: 'BaseEvent[Any]', handlers: dict[PythonIdStr, EventHandler] | None = None, timeout: float | None = None - ) -> None: - """Execute all handlers for an event in parallel""" - applicable_handlers = handlers if (handlers is not None) else self._get_applicable_handlers(event) - if not applicable_handlers: - event.event_mark_complete_if_all_handlers_completed() # mark event completed immediately if it has no handlers - return - - # Execute all handlers in parallel - if self.parallel_handlers: - handler_tasks: dict[PythonIdStr, tuple[asyncio.Task[Any], EventHandler]] = {} - # Copy the current context to ensure context vars are propagated - context = contextvars.copy_context() - for handler_id, handler in applicable_handlers.items(): - task = asyncio.create_task( - self.execute_handler(event, handler, timeout=timeout), - name=f'{self}.execute_handler({event}, {get_handler_name(handler)})', - context=context, - ) - handler_tasks[handler_id] = (task, handler) - - # Wait for all handlers to complete - for handler_id, (task, handler) in handler_tasks.items(): - try: - await task - except Exception: - # Error already logged and recorded in execute_handler - pass - else: - # otherwise, execute handlers serially, wait until each one completes before moving on to the next - for handler_id, handler in applicable_handlers.items(): - try: - await self.execute_handler(event, handler, timeout=timeout) - except Exception as e: - # Error already logged and recorded in execute_handler - 
logger.debug( - f'❌ {self} Handler {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}) failed with {type(e).__name__}: {e}' - ) - pass - - # print('FINSIHED EXECUTING ALL HANDLERS') - - async def execute_handler( - self, event: 'BaseEvent[T_EventResultType]', handler: EventHandler, timeout: float | None = None - ) -> Any: - """Safely execute a single handler with deadlock detection""" - - # Check if this handler has already been executed for this event - handler_id = get_handler_id(handler, self) - - logger.debug(f' ↳ {self}.execute_handler({event}, handler={get_handler_name(handler)}#{handler_id[-4:]})') - if handler_id in event.event_results: - existing_result = event.event_results[handler_id] - if existing_result.started_at is not None: - raise RuntimeError( - f'Handler {get_handler_name(handler)}#{handler_id[-4:]} has already been executed for event {event.event_id}. ' - f'Previous execution started at {existing_result.started_at}' - ) - - # Mark handler as started - event_result = event.event_result_update( - handler=handler, eventbus=self, status='started', timeout=timeout or event.event_timeout - ) - - # Set the current event in context so child events can reference it - token = _current_event_context.set(event) - # Mark that we're inside a handler - handler_token = inside_handler_context.set(True) - # Set the current handler ID so child events can be tracked - handler_id_token = _current_handler_id_context.set(handler_id) - - # Create a task to monitor for potential deadlock / slow handlers - async def deadlock_monitor(): - await asyncio.sleep(15.0) - logger.warning( - f'⚠️ {self} handler {get_handler_name(handler)}() has been running for >15s on event. 
Possible slow processing or deadlock.\n' - '(handler could be trying to await its own result or could be blocked by another async task).\n' - f'{get_handler_name(handler)}({event})' - ) - - monitor_task = asyncio.create_task( - deadlock_monitor(), name=f'{self}.deadlock_monitor({event}, {get_handler_name(handler)}#{handler_id[-4:]})' - ) - - handler_task = None - try: - if inspect.iscoroutinefunction(handler): - # Create a task for the handler so we can properly cancel it on timeout - handler_task = asyncio.create_task(handler(event)) # type: ignore - # This allows us to process child events when the handler awaits them - result_value: Any = await asyncio.wait_for(handler_task, timeout=event_result.timeout) - elif inspect.isfunction(handler) or inspect.ismethod(handler): - # If handler function is sync function, run it directly in the main thread - # This blocks but ensures we have access to the event loop, dont run it in a subthread! - result_value: Any = handler(event) - - # If the sync handler returned a BaseEvent (from dispatch), DON'T await it - # For forwarding handlers like bus.on('*', other_bus.dispatch), the handler - # has already queued the event on the target bus. The event will be tracked - # as a child event automatically. - if isinstance(result_value, BaseEvent): - logger.debug( - f'Handler {get_handler_name(handler)} returned BaseEvent, not awaiting to avoid circular dependency' - ) - else: - raise ValueError(f'Handler {get_handler_name(handler)} must be a sync or async function, got: {type(handler)}') - - logger.debug( - f' ↳ Handler {get_handler_name(handler)}#{handler_id[-4:]} returned: {type(result_value).__name__} {str(result_value)[:26]}...' 
# pyright: ignore - ) - # Cancel the monitor task since handler completed successfully - monitor_task.cancel() - - # Record successful result - event.event_result_update(handler=handler, eventbus=self, result=result_value) - if handler_id in event.event_results: - # logger.debug( - # f' ↳ Updated result for {get_handler_name(handler)}#{handler_id[-4:]}: {event.event_results[handler_id].status}' - # ) - pass - else: - logger.error(f' ↳ ERROR: Result not found for {get_handler_name(handler)}#{handler_id[-4:]} after update!') - return cast(T_EventResultType, result_value) - - except asyncio.CancelledError as e: - # Cancel the monitor task on timeout too - monitor_task.cancel() - - # Create a RuntimeError for timeout - # TODO: figure out why it breaks when we try to switch to InterruptedError instead of asyncio.CancelledError - handler_interrupted_error = asyncio.CancelledError( - f'Event handler {get_handler_name(handler)}#{handler_id[-4:]}({event}) was interrupted because of a parent timeout' - ) - event.event_result_update(handler=handler, eventbus=self, error=handler_interrupted_error) - - # import ipdb; ipdb.set_trace() - raise handler_interrupted_error from e - - except TimeoutError as e: - # Cancel the monitor task on timeout too - monitor_task.cancel() - - # Create a RuntimeError for timeout - children = ( - f' and interrupted any processing of {len(event.event_children)} child events' if event.event_children else '' - ) - handler_timeout_error = TimeoutError( - f'Event handler {get_handler_name(handler)}#{handler_id[-4:]}({event}) timed out after {event_result.timeout}s{children}' - ) - event.event_result_update(handler=handler, eventbus=self, error=handler_timeout_error) - event.event_cancel_pending_child_processing(handler_timeout_error) - - from bubus.logging import log_timeout_tree - - log_timeout_tree(event, event_result) - # import ipdb; ipdb.set_trace() - raise handler_timeout_error from e - except Exception as e: - # Cancel the monitor task on error 
too - monitor_task.cancel() - - # Record error - event.event_result_update(handler=handler, eventbus=self, error=e) - - red = '\033[91m' - reset = '\033[0m' - logger.error( - f'❌ {self} Error in event handler {get_handler_name(handler)}({event}) -> \n{red}{type(e).__name__}({e}){reset}\n{_log_filtered_traceback(e)}', - ) - raise - finally: - # Reset context - _current_event_context.reset(token) - inside_handler_context.reset(handler_token) - _current_handler_id_context.reset(handler_id_token) - - # Ensure handler task is cancelled if it's still running - if handler_task and not handler_task.done(): - handler_task.cancel() - try: - await asyncio.wait_for(handler_task, timeout=0.1) - except (asyncio.CancelledError, TimeoutError): - pass # Expected when we cancel the task - - # Ensure monitor task is cancelled - try: - if not monitor_task.done(): - monitor_task.cancel() - await monitor_task - except asyncio.CancelledError: - pass # Expected when we cancel the monitor - except Exception as e: - # logger.debug(f"❌ {self} Handler monitor task cleanup error for {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}): {type(e).__name__}: {e}") - pass - - def _would_create_loop(self, event: 'BaseEvent[Any]', handler: EventHandler) -> bool: - """Check if calling this handler would create a loop""" - - assert inspect.isfunction(handler) or inspect.iscoroutinefunction(handler) or inspect.ismethod(handler), ( - f'Handler {get_handler_name(handler)} must be a sync or async function, got: {type(handler)}' - ) - - # First check: If handler is another EventBus.dispatch method, check if we're forwarding to another bus that it's already been processed by - if hasattr(handler, '__self__') and isinstance(handler.__self__, EventBus) and handler.__name__ == 'dispatch': # pyright: ignore[reportFunctionMemberAccess] # type: ignore - target_bus = handler.__self__ # pyright: ignore[reportFunctionMemberAccess] # type: ignore - if target_bus.name in event.event_path: - logger.debug( - 
f'⚠️ {self} handler {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}) skipped to prevent infinite forwarding loop with {target_bus.name}' - ) - return True - - # Second check: Check if there's already a result (pending or completed) for this handler on THIS bus - # We use a combination of bus ID and handler ID to allow the same handler function - # to run on different buses (important for forwarding) - handler_id = get_handler_id(handler, self) - if handler_id in event.event_results: - existing_result = event.event_results[handler_id] - if existing_result.status == 'pending' or existing_result.status == 'started': - logger.debug( - f'⚠️ {self} handler {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}) is already {existing_result.status} for event {event.event_id} (preventing recursive call)' - ) - return True - elif existing_result.completed_at is not None: - logger.debug( - f'⚠️ {self} handler {get_handler_name(handler)}#{str(id(handler))[-4:]}({event}) already completed @ {existing_result.completed_at} for event {event.event_id} (will not re-run)' - ) - return True - - # Third check: For non-forwarding handlers, check recursion depth - # Forwarding handlers (EventBus.dispatch) are allowed to forward at any depth - is_forwarding_handler = ( - inspect.ismethod(handler) and isinstance(handler.__self__, EventBus) and handler.__name__ == 'dispatch' - ) - - if not is_forwarding_handler: - # Only check recursion for regular handlers, not forwarding - recursion_depth = self._handler_dispatched_ancestor(event, handler_id) - if recursion_depth > 2: - raise RuntimeError( - f'Infinite loop detected: Handler {get_handler_name(handler)}#{str(id(handler))[-4:]} ' - f'has recursively processed {recursion_depth} levels of events. 
' - f'Current event: {event}, Handler: {handler_id}' - ) - elif recursion_depth == 2: - logger.warning( - f'⚠️ {self} handler {get_handler_name(handler)}#{str(id(handler))[-4:]} ' - f'at maximum recursion depth (2 levels) - next level will raise exception' - ) - - return False - - def _handler_dispatched_ancestor( - self, event: 'BaseEvent[Any]', handler_id: str, visited: set[str] | None = None, depth: int = 0 - ) -> int: - """Check how many times this handler appears in the ancestry chain. Returns the depth count.""" - # Prevent infinite recursion in case of circular parent references - if visited is None: - visited = set() - if event.event_id in visited: - return depth - visited.add(event.event_id) - - # If this event has no parent, it's a root event - no ancestry to check - if not event.event_parent_id: - return depth - - # Find parent event in any bus's history - parent_event = None - # Create a list copy to avoid "Set changed size during iteration" error - for bus in list(EventBus.all_instances): - if event.event_parent_id in bus.event_history: - parent_event = bus.event_history[event.event_parent_id] - break - - if not parent_event: - return depth - - # Check if this handler processed the parent event - if handler_id in parent_event.event_results: - result = parent_event.event_results[handler_id] - if result.status in ('pending', 'started', 'completed'): - # This handler processed the parent event, increment depth - depth += 1 - - # Recursively check the parent's ancestry - return self._handler_dispatched_ancestor(parent_event, handler_id, visited, depth) - - async def _default_log_handler(self, event: 'BaseEvent[Any]') -> None: - """Default handler that logs all events""" - # logger.debug( - # f'βœ… {self} completed: {event} -> {list(event.event_results.values()) or ''}' - # ) - pass - - async def _default_wal_handler(self, event: 'BaseEvent[Any]') -> None: - """Persist completed event to WAL file as JSONL""" - - if not self.wal_path: - return None - - try: 
- event_json = event.model_dump_json() # pyright: ignore[reportUnknownMemberType] - self.wal_path.parent.mkdir(parents=True, exist_ok=True) - async with await anyio.open_file(self.wal_path, 'a', encoding='utf-8') as f: # pyright: ignore[reportUnknownMemberType] - await f.write(event_json + '\n') # pyright: ignore[reportUnknownMemberType] - except Exception as e: - logger.error(f'❌ {self} Failed to save event {event.event_id} to WAL file: {type(e).__name__} {e}\n{event}') - - def cleanup_excess_events(self) -> int: - """ - Clean up excess events from event_history based on max_history_size. - - Returns: - Number of events removed from history - """ - if not self.max_history_size or len(self.event_history) <= self.max_history_size: - return 0 - - # Sort events by creation time (oldest first) - sorted_events = sorted(self.event_history.items(), key=lambda x: x[1].event_created_at.timestamp()) - - # Remove oldest events to get down to max_history_size - events_to_remove = sorted_events[: -self.max_history_size] - event_ids_to_remove = [event_id for event_id, _ in events_to_remove] - - for event_id in event_ids_to_remove: - del self.event_history[event_id] - - if event_ids_to_remove: - logger.debug(f'🧹 {self} Cleaned up {len(event_ids_to_remove)} excess events from history') - - return len(event_ids_to_remove) - - def cleanup_event_history(self) -> int: - """ - Clean up event history to maintain max_history_size limit. - Prioritizes keeping pending/started events over completed ones. 
- - Returns: - Total number of events removed from history - """ - if not self.max_history_size or len(self.event_history) <= self.max_history_size: - return 0 - - # Separate events by status - pending_events: list[tuple[str, 'BaseEvent[Any]']] = [] - started_events: list[tuple[str, 'BaseEvent[Any]']] = [] - completed_events: list[tuple[str, 'BaseEvent[Any]']] = [] - - for event_id, event in self.event_history.items(): - if event.event_status == 'pending': - pending_events.append((event_id, event)) - elif event.event_status == 'started': - started_events.append((event_id, event)) - else: # completed or error - completed_events.append((event_id, event)) - - # Sort completed events by creation time (oldest first) - completed_events.sort(key=lambda x: x[1].event_created_at.timestamp()) # pyright: ignore[reportUnknownMemberType, reportUnknownLambdaType] - - # Calculate how many to remove - total_events = len(self.event_history) - events_to_remove_count = total_events - self.max_history_size - - events_to_remove: list[str] = [] - - # First remove completed events (oldest first) - if completed_events and events_to_remove_count > 0: - remove_from_completed = min(len(completed_events), events_to_remove_count) - events_to_remove.extend([event_id for event_id, _ in completed_events[:remove_from_completed]]) - events_to_remove_count -= remove_from_completed - - # If still need to remove more, remove oldest started events - if events_to_remove_count > 0 and started_events: - started_events.sort(key=lambda x: x[1].event_created_at.timestamp()) # pyright: ignore[reportUnknownMemberType, reportUnknownLambdaType] - remove_from_started = min(len(started_events), events_to_remove_count) - events_to_remove.extend([event_id for event_id, _ in started_events[:remove_from_started]]) - events_to_remove_count -= remove_from_started - - # If still need to remove more, remove oldest pending events - if events_to_remove_count > 0 and pending_events: - pending_events.sort(key=lambda x: 
x[1].event_created_at.timestamp()) # pyright: ignore[reportUnknownMemberType, reportUnknownLambdaType] - events_to_remove.extend([event_id for event_id, _ in pending_events[:events_to_remove_count]]) - - # Remove the events - for event_id in events_to_remove: - del self.event_history[event_id] - - if events_to_remove: - logger.debug( - f'🧹 {self} Cleaned up {len(events_to_remove)} events from history (kept {len(self.event_history)}/{self.max_history_size})' - ) - - return len(events_to_remove) - - def log_tree(self) -> str: - """Print a nice pretty formatted tree view of all events in the history including their results and child events recursively""" - from bubus.logging import log_eventbus_tree - - return log_eventbus_tree(self) - - def _check_total_memory_usage(self) -> None: - """Check total memory usage across all EventBus instances and warn if >50MB""" - import sys - - total_bytes = 0 - bus_details: list[tuple[str, int, int, int]] = [] - - # Iterate through all EventBus instances - # Create a list copy to avoid "Set changed size during iteration" error - for bus in list(EventBus.all_instances): - try: - bus_bytes = 0 - - # Count events in history - for event in bus.event_history.values(): - bus_bytes += sys.getsizeof(event) - # Also count the event's data - if hasattr(event, '__dict__'): - for attr_value in event.__dict__.values(): - if isinstance(attr_value, (str, bytes, list, dict)): - bus_bytes += sys.getsizeof(attr_value) # pyright: ignore[reportUnknownArgumentType] - - # Count events in queue - if bus.event_queue: - # Access internal queue storage - if hasattr(bus.event_queue, '_queue'): - queue: deque[BaseEvent] = bus.event_queue._queue # type: ignore[attr-defined] - for event in queue: # pyright: ignore[reportUnknownVariableType] - bus_bytes += sys.getsizeof(event) # pyright: ignore[reportUnknownArgumentType] - if hasattr(event, '__dict__'): # pyright: ignore[reportUnknownArgumentType] - for attr_value in event.__dict__.values(): # pyright: 
ignore[reportUnknownMemberType, reportUnknownVariableType] - if isinstance(attr_value, (str, bytes, list, dict)): - bus_bytes += sys.getsizeof(attr_value) # pyright: ignore[reportUnknownArgumentType] - - total_bytes += bus_bytes - bus_details.append( - (bus.name, bus_bytes, len(bus.event_history), bus.event_queue.qsize() if bus.event_queue else 0) - ) - except Exception: - # Skip buses that can't be measured - continue - - total_mb = total_bytes / (1024 * 1024) - - if total_mb > 50: - # Build detailed breakdown - details: list[str] = [] - for name, bytes_used, history_size, queue_size in sorted(bus_details, key=lambda x: x[1], reverse=True): # pyright: ignore[reportUnknownLambdaType] - mb = bytes_used / (1024 * 1024) - if mb > 0.1: # Only show buses using >0.1MB - details.append(f' - {name}: {mb:.1f}MB (history={history_size}, queue={queue_size})') - - warning_msg = ( - f'\n⚠️ WARNING: Total EventBus memory usage is {total_mb:.1f}MB (>50MB limit)\n' - f'Active EventBus instances: {len(EventBus.all_instances)}\n' - ) - if details: - warning_msg += 'Memory breakdown:\n' + '\n'.join(details[:5]) # Show top 5 - if len(details) > 5: - warning_msg += f'\n ... and {len(details) - 5} more' - - warning_msg += '\nConsider:\n' - warning_msg += ' - Reducing max_history_size\n' - warning_msg += ' - Clearing completed EventBus instances with stop(clear=True)\n' - warning_msg += ' - Reducing event payload sizes\n' - - logger.warning(warning_msg) diff --git a/docs/api/baseevent.mdx b/docs/api/baseevent.mdx new file mode 100644 index 0000000..24dbae7 --- /dev/null +++ b/docs/api/baseevent.mdx @@ -0,0 +1,284 @@ +--- +title: BaseEvent +description: BaseEvent schema fields, lifecycle, and result helpers. +--- + +`BaseEvent` is the typed payload + runtime state object that flows through the bus. + +Use subclassing (Python) or `BaseEvent.extend(...)` (TypeScript) to define event payload fields. 
+ +## Defining events + + + + +```python +from bubus import BaseEvent + +class FooCreateEvent(BaseEvent[str]): + id: str | None = None + name: str + age: int +``` + + + + +```ts +import { BaseEvent } from 'bubus' +import { z } from 'zod' + +const FooCreateEvent = BaseEvent.extend('FooCreateEvent', { + id: z.string().nullable().optional(), + name: z.string(), + age: z.number(), + event_result_type: z.string(), +}) +``` + + + + +Queue semantics are split in both runtimes: +- Immediate path: `await event` (Python) / `await event.done()` (TypeScript) +- Queue-order path: `await event.event_completed()` (Python) / `await event.eventCompleted()` (TypeScript) + +## Core metadata fields + +Common event metadata fields available in both runtimes: + +- `event_id`: unique UUIDv7 +- `event_type`: event name/type key +- `event_version`: payload version marker +- `event_result_type`: expected handler return schema/type +- `event_timeout`: per-event timeout override (`None`/`null` means use the current processing bus default) +- `event_handler_timeout`: per-handler timeout cap override +- `event_handler_slow_timeout`: per-handler slow warning threshold +- `event_concurrency`: event scheduling mode override (`None`/`null` means use the current processing bus default) +- `event_handler_concurrency`: handler scheduling mode override (`None`/`null` means use the current processing bus default) +- `event_handler_completion`: handler completion strategy override (`None`/`null` means use the current processing bus default) + +## Runtime fields + +- `event_status`: pending/started/completed +- `event_created_at`, `event_started_at`, `event_completed_at` +- `event_started_at` / `event_completed_at` are `None` (Python) / `null` (TypeScript) until set +- `event_parent_id` and `event_emitted_by_handler_id` are `None` / `null` when unset +- `event_path`: buses traversed +- `event_results`: per-handler result entries +- Child-event tracking (`event_children`/descendants) + +## Completion model 
+ +Events are returned in pending state from `emit()`, then complete asynchronously. + + + + +```python +pending = bus.emit(MyEvent()) +completed = await pending +completed_in_queue_order = await pending.event_completed() +value = await completed.event_result() +``` + + + + +```ts +const pending = bus.emit(MyEvent({})) +const completed = await pending.done() +const completed_in_queue_order = await pending.eventCompleted() +const value = completed.event_result +``` + + + + +## Result access helpers + +### `first()` + + + + +```python +value = await event.first() +# equivalent: await event.event_result(...) with first-completion mode +``` + + + + +```ts +const value = await event.first() +``` + + + + +### All results + + + + +```python +items = await event.event_results_list() +by_handler = {handler_id: result.result for handler_id, result in event.event_results.items()} +``` + + + + +```ts +const items = await event.eventResultsList() +const filtered = await event.eventResultsList((result) => typeof result === 'string', { + raise_if_any: false, + raise_if_none: true, +}) +const first = event.event_result +const errors = event.event_errors +``` + + + + +### Per-handler result entries + +You can create/update a specific `EventResult` entry for a handler (useful for controlled seeding/rehydration flows). + + + + +```python +pending = event.event_result_update(handler=handler_entry, eventbus=bus, status='pending') +pending.update(status='completed', result='seeded') +``` + + + + +```ts +const pending = event.eventResultUpdate(handler_entry, { eventbus: bus, status: 'pending' }) +pending.update({ status: 'completed', result: 'seeded' }) +``` + + + + +## Resetting an event + +You can create a fresh pending copy for re-emit. + + + + +```python +fresh = event.event_reset() +``` + + + + +```ts +const fresh = event.eventReset() +``` + + + + +## Serialization + +Events are JSON-serializable in both implementations for bridge and cross-runtime workflows. 
+ + + + +```python +payload = event.model_dump(mode='json') +print(payload) +# { +# "event_id": "0190...", +# "event_type": "CreateUserEvent", +# "event_status": "pending", +# "event_result_type": {"type": "object", "...": "..."}, +# "email": "someuser@example.com", +# "...": "..." +# } + +restored = type(event).model_validate(payload) +``` + + + + +```ts +const payload = event.toJSON() +console.log(payload) +// { +// event_id: '0190...', +// event_type: 'CreateUserEvent', +// event_status: 'pending', +// event_result_type: { type: 'object', ... }, +// email: 'someuser@example.com', +// ... +// } + +const restored = BaseEvent.fromJSON(payload) +``` + + + + +## Notes + +- Reserved names are validated in both runtimes: + - `bus` and `first` are reserved runtime APIs and cannot be provided as payload fields. + - Unknown `event_*` fields are rejected. + - Known built-in `event_*` fields (for example `event_timeout`) can still be intentionally overridden in event definitions. +- `model_*` is also reserved: + - Python: unknown `model_*` fields are rejected, but valid Pydantic namespace overrides (for example `model_config`) are allowed. + - TypeScript: any `model_*` field is rejected. +- `event_result_type` drives handler return validation in both runtimes. +- Parent-child tracking is automatic when events are emitted from handlers. 
+ +## Reserved Fields + + + + +```python +from pydantic import ConfigDict +from bubus import BaseEvent + +class AllowedEvent(BaseEvent[None]): + event_timeout: float | None = 30 # allowed built-in event_* override + model_config = ConfigDict(extra='allow') # allowed Pydantic model_* override + +# rejected: unknown reserved prefixes +class InvalidEvent(BaseEvent[None]): + event_some_field_we_dont_recognize: int = 1 # raises + model_something_random: int = 2 # raises +``` + + + + +```ts +import { BaseEvent } from 'bubus' +import { z } from 'zod' + +const AllowedEvent = BaseEvent.extend('AllowedEvent', { + event_timeout: 30, // allowed built-in event_* override + payload: z.string(), +}) + +// rejected: unknown event_* and all model_* +BaseEvent.extend('InvalidEvent', { + event_some_field_we_dont_recognize: 1, // throws + model_something_random: 2, // throws +}) +``` + + + diff --git a/docs/api/eventbus.mdx b/docs/api/eventbus.mdx new file mode 100644 index 0000000..d04375f --- /dev/null +++ b/docs/api/eventbus.mdx @@ -0,0 +1,389 @@ +--- +title: EventBus +description: EventBus constructors, configuration, and core methods. +--- + +`EventBus` is the central runtime for handler registration, event emit, history lookup, and lifecycle control. 
+ +## `EventBus(...)` + + + + +```python +EventBus( + name: str | None = None, + event_concurrency: Literal['global-serial', 'bus-serial', 'parallel'] | str | None = None, + event_handler_concurrency: Literal['serial', 'parallel'] | str = 'serial', + event_handler_completion: Literal['all', 'first'] | str = 'all', + max_history_size: int | None = 100, + max_history_drop: bool = False, + event_timeout: float | None = 60.0, + event_slow_timeout: float | None = 300.0, + event_handler_slow_timeout: float | None = 30.0, + event_handler_detect_file_paths: bool = True, + middlewares: Sequence[EventBusMiddleware] | None = None, +) +``` + + + + +```ts +new EventBus(name?: string, options?: { + id?: string + max_history_size?: number | null + max_history_drop?: boolean + event_concurrency?: 'global-serial' | 'bus-serial' | 'parallel' | null + event_timeout?: number | null + event_slow_timeout?: number | null + event_handler_concurrency?: 'serial' | 'parallel' | null + event_handler_completion?: 'all' | 'first' + event_handler_slow_timeout?: number | null + event_handler_detect_file_paths?: boolean + middlewares?: Array EventBusMiddleware> +}) +``` + + + + +### Shared configuration semantics + +| Option | Description | +| --- | --- | +| `name` | Human-readable bus name used in logs/labels. | +| `event_concurrency` | Event scheduling policy across queue processing (`global-serial`, `bus-serial`, `parallel`). | +| `event_handler_concurrency` | How handlers for one event execute (`serial` vs `parallel`). | +| `event_handler_completion` | Completion mode (`all` waits for all handlers, `first` resolves on first successful result). | +| `event_timeout` | Default outer timeout budget for event/handler execution. | +| `event_slow_timeout` | Slow-event warning threshold. | +| `event_handler_slow_timeout` | Slow-handler warning threshold. | +| `event_handler_detect_file_paths` | Whether to capture source path metadata for handlers. 
| +| `max_history_size` | Maximum retained history (`null` = unbounded, `0` = keep only in-flight). | +| `max_history_drop` | If `true`, drop oldest history entries when full; if `false`, reject new emits at limit. | +| `middlewares` | Ordered middleware instances (or middleware classes/constructors) that receive lifecycle hooks. | + +Defaults are resolved at processing time on each bus, not copied onto the event at emit. +When event fields are unset (`None`/`null`), the current processing bus applies its own defaults. + +## Runtime state + +Both implementations expose equivalent runtime state: + +- Bus identity: `id`, `name`, `label` +- Registered handlers and indexes +- Event history and pending queue +- In-flight tracking +- Locking/concurrency runtime objects + +## `on(...)` + +Registers a handler for an event key (`EventClass`, event type string, or `'*'`). + + + + +```python +bus.on(UserEvent, handler) +bus.on('UserEvent', handler) +bus.on('*', wildcard_handler) +``` + + + + +```ts +bus.on(UserEvent, handler) +bus.on('UserEvent', handler) +bus.on('*', wildcardHandler) +``` + + + + +## `off(...)` + +Unregisters handlers by event key, handler function/reference, or handler id. + + + + +```python +bus.off(UserEvent, handler) +bus.off(UserEvent) # remove all handlers for UserEvent +bus.off('*') # remove all wildcard handlers +``` + + + + +```ts +bus.off(UserEvent, handler) +bus.off(UserEvent) +bus.off('*') +``` + + + + +## `emit(...)` + +`emit(...)` enqueues synchronously and returns the pending event immediately. + + + + +```python +event = bus.emit(MyEvent(data='x')) +result = await event.event_result() +``` + + + + +```ts +const event = bus.emit(MyEvent({ data: 'x' })) +const result = await event.first() +``` + + + + +## `find(...)` + +`find(...)` supports history lookup, optional future waiting, predicate filtering, and parent/child scoping. 
+ + + + +```python +event = await bus.find(ResponseEvent) # history lookup by default +future = await bus.find(ResponseEvent, past=False, future=5) +child = await bus.find(ChildEvent, child_of=parent_event, future=5) +``` + + + + +```ts +const event = await bus.find(ResponseEvent) +const future = await bus.find(ResponseEvent, { past: false, future: 5 }) +const child = await bus.find(ChildEvent, { child_of: parentEvent, future: 5 }) +``` + + + + +## Lifecycle helpers + +### Wait for idle + + + + +```python +await bus.wait_until_idle() +await bus.wait_until_idle(timeout=5) +``` + + + + +```ts +await bus.waitUntilIdle() +await bus.waitUntilIdle(5) +``` + + + + +### Parent/child relationship checks + + + + +```python +bus.event_is_child_of(child_event, parent_event) +bus.event_is_parent_of(parent_event, child_event) +``` + + + + +```ts +bus.eventIsChildOf(childEvent, parentEvent) +bus.eventIsParentOf(parentEvent, childEvent) +``` + + + + +### Execution pipeline + +Both runtimes use the same layered model, expressed with runtime-native wrappers. + +- Event scope lock +- Event-level timeout and slow monitor +- Per-handler lock +- Handler-level timeout and slow monitor +- Handler execution context scope +- Error normalization and completion callbacks + + + + +```python +# EventBus.step(...) +async with self.locks._run_with_event_lock(self, event): + await self._process_event(event, timeout=timeout) + +# EventBus.process_event(...) +async with asyncio.timeout(resolved_event_timeout): + async with with_slow_monitor(self._create_slow_event_warning_timer(event)): + await event._run_handlers( + eventbus=self, + handlers=self.get_handlers_for_event(event), + timeout=resolved_event_timeout, + ) + +# EventResult.run_handler(...) 
+async with eventbus.locks._run_with_handler_lock(eventbus, event, event_result): + with eventbus._run_with_handler_dispatch_context(event, event_result.handler_id): + async with event_result._run_with_timeout(event): + async with with_slow_monitor(handler_slow_monitor): + await event_result._call_handler(event, handler, dispatch_context) +``` + + + + +```ts +// EventBus.processEvent(...) +await this.locks._runWithEventLock( + event, + () => + this._runHandlersWithTimeout(event, pending_entries, resolved_event_timeout, () => + _runWithSlowMonitor(event._createSlowEventWarningTimer(), () => scoped_event._runHandlers(pending_entries)) + ), + options +) + +// BaseEvent._runHandlers(...) +await this.bus.locks._runWithHandlerLock(original, this.bus.event_handler_concurrency, async (handler_lock) => { + await entry.runHandler(handler_lock) +}) + +// EventResult.runHandler(...) +await this.bus.locks._runWithHandlerDispatchContext(this, async () => { + await _runWithAsyncContext(event._getDispatchContext() ?? null, async () => { + const handler_result = await _runWithTimeout( + this.handler_timeout, + () => this._createHandlerTimeoutError(event), + () => + _runWithSlowMonitor(slow_handler_warning_timer, () => + _runWithAbortMonitor(() => this.handler.handler(handler_event), abort_signal) + ) + ) + this._finalizeHandlerResult(event, handler_result) + }) +}) +``` + + + + +### Serialization and teardown + +Both runtimes can serialize the entire bus state (config, handlers metadata, event history, pending queue), restore it, re-attach handler callables, and continue processing. 
+ + + + +```python +from bubus import BaseEvent, EventBus + +class ResumeTickEvent(BaseEvent[None]): + pass + +# 1) Serialize full bus state +print(bus.model_dump_json(indent=2)) +# { +# "id": "018f...", +# "name": "SerializableBus", +# "handlers": { +# "018f...": {"id": "018f...", "handler_name": "app.handlers.on_user_created", "event_pattern": "UserCreatedEvent"} +# }, +# "event_history": { +# "0190...": {"event_id": "0190...", "event_type": "UserCreatedEvent", "...": "..."} +# }, +# "pending_event_queue": ["0190..."] +# } + +# 2) Rehydrate state +bus = EventBus.validate(bus.model_dump_json()) + +# 3) Re-link runtime callables by handler id +bus.handlers.get("018f...").handler = on_user_created +bus.handlers.get("018g...").handler = on_user_deleted +bus.handlers.get("018h...").handler = on_user_updated + +# 4) Resume processing (emit starts the runloop and drains restored pending events) +bus.emit(ResumeTickEvent()) +await bus.wait_until_idle() + +# 5) Teardown when done +await bus.stop(timeout=1.0) +await bus.stop(clear=True) +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' + +const ResumeTickEvent = BaseEvent.extend('ResumeTickEvent', {}) + +// 1) Serialize full bus state +console.log(bus.toJSON()) +// { +// id: '018f...', +// name: 'SerializableBus', +// handlers: { +// '018f...': { id: '018f...', handler_name: 'onUserCreated', event_pattern: 'UserCreatedEvent' } +// }, +// event_history: { +// '0190...': { event_id: '0190...', event_type: 'UserCreatedEvent', ... 
} +// }, +// pending_event_queue: ['0190...'] +// } + +// 2) Rehydrate state +bus = EventBus.fromJSON(bus.toJSON()) + +// 3) Re-link runtime callables by handler id +bus.handlers.get('018f...')!.handler = onUserCreated +bus.handlers.get('018g...')!.handler = onUserDeleted +bus.handlers.get('018h...')!.handler = onUserUpdated + +// 4) Resume processing (emit starts the runloop and drains restored pending events) +bus.emit(ResumeTickEvent({})) +await bus.waitUntilIdle() + +// 5) Teardown when done +bus.destroy() +``` + + + + +## Timeout and precedence + +Shared precedence model: + +1. Handler override +2. Event override +3. Bus default + +Effective handler timeout is capped by event timeout when both are set. diff --git a/docs/api/eventbusmiddleware.mdx b/docs/api/eventbusmiddleware.mdx new file mode 100644 index 0000000..9a252fd --- /dev/null +++ b/docs/api/eventbusmiddleware.mdx @@ -0,0 +1,176 @@ +--- +title: EventBusMiddleware +description: Base middleware interface for EventBus lifecycle hooks. +--- + +`EventBusMiddleware` defines the middleware hook contract used by `EventBus` in both runtimes. + +## Interface + + + + +```python +from typing import Any +from bubus import BaseEvent, EventBus, EventHandler, EventResult, EventStatus + +class EventBusMiddleware: + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status: EventStatus) -> None: + ... + + async def on_event_result_change( + self, + eventbus: EventBus, + event: BaseEvent[Any], + event_result: EventResult[Any], + status: EventStatus, + ) -> None: + ... + + async def on_bus_handlers_change(self, eventbus: EventBus, handler: EventHandler, registered: bool) -> None: + ... 
+``` + + + + +```ts +import type { BaseEvent, EventBus, EventHandler, EventResult, EventStatus } from 'bubus' + +export interface EventBusMiddleware { + onEventChange?(eventbus: EventBus, event: BaseEvent, status: EventStatus): void | Promise<void> + onEventResultChange?( + eventbus: EventBus, + event: BaseEvent, + event_result: EventResult, + status: EventStatus + ): void | Promise<void> + onBusHandlersChange?(eventbus: EventBus, handler: EventHandler, registered: boolean): void | Promise<void> +} +``` + + + + +## Setup with EventBus + + + + +```python +from bubus import EventBus +from bubus.middlewares import EventBusMiddleware + +class AnalyticsMiddleware(EventBusMiddleware): + pass + +bus = EventBus('AppBus', middlewares=[AnalyticsMiddleware]) +``` + + + + +```ts +import { EventBus, type EventBusMiddleware } from 'bubus' + +class AnalyticsMiddleware implements EventBusMiddleware {} + +const bus = new EventBus('AppBus', { middlewares: [AnalyticsMiddleware] }) +``` + + + + +## Lifecycle behavior + +- `on_event_change` / `onEventChange` runs on event lifecycle transitions. +- `on_event_result_change` / `onEventResultChange` runs on handler-result lifecycle transitions. +- `on_bus_handlers_change` / `onBusHandlersChange` runs when handlers are added/removed. +- Hook `status` values are only `pending`, `started`, and `completed`. +- Handler failures are represented on `event_result.status == 'error'` and `event_result.error` when the hook status is `completed`. 
+ +## Custom middleware example + + + + +```python +from bubus import EventBus, EventStatus +from bubus.middlewares import EventBusMiddleware + +class AnalyticsMiddleware(EventBusMiddleware): + async def on_event_result_change(self, eventbus, event, event_result, status): + if status != EventStatus.COMPLETED: + return + if event_result.status == 'error': + return + print(event.event_type, event_result.handler_name) + + async def on_bus_handlers_change(self, eventbus, handler, registered): + action = 'registered' if registered else 'unregistered' + print(eventbus.label, handler.id, action) + +bus = EventBus('AppBus', middlewares=[AnalyticsMiddleware]) +``` + + + + +```ts +import { + BaseEvent, + EventBus, + type EventHandler, + type EventBusMiddleware, + type EventResult, + type EventStatus, +} from 'bubus' + +class AnalyticsMiddleware implements EventBusMiddleware { + async onEventResultChange( + eventbus: EventBus, + event: BaseEvent, + event_result: EventResult, + status: EventStatus + ): Promise<void> { + if (status !== 'completed') { + return + } + if (event_result.status === 'error') { + return + } + console.log(event.event_type, event_result.handler_name) + } + + async onBusHandlersChange(eventbus: EventBus, handler: EventHandler, registered: boolean): Promise<void> { + const action = registered ? 'registered' : 'unregistered' + console.log(eventbus.label, handler.id, action) + } +} + +const bus = new EventBus('AppBus', { middlewares: [AnalyticsMiddleware] }) +``` + + + + +## Built-in implementations + + + + +- `OtelTracingMiddleware` +- `AutoErrorEventMiddleware` +- `AutoReturnEventMiddleware` +- `AutoHandlerChangeEventMiddleware` +- `WALEventBusMiddleware` +- `LoggerEventBusMiddleware` +- `SQLiteHistoryMirrorMiddleware` + + + + +TypeScript currently exports the middleware interface but no built-in middleware implementations. 
+ + + diff --git a/docs/api/eventhandler.mdx b/docs/api/eventhandler.mdx new file mode 100644 index 0000000..b88d581 --- /dev/null +++ b/docs/api/eventhandler.mdx @@ -0,0 +1,86 @@ +--- +title: EventHandler +description: EventHandler metadata and registration records. +--- + +`EventHandler` is the serializable metadata record for a registered handler function. + +You receive handler entries from `bus.on(...)`, can remove them with `bus.off(...)`, and see them in handler-change middleware/hooks. + +## Common fields + +- `id`: stable handler id +- `handler_name`: callable/function name +- `handler_file_path`: source path metadata (`str | None` in Python, `string | null` in TypeScript) +- `handler_timeout`: optional per-handler timeout override +- `handler_slow_timeout`: optional slow-warning override +- `handler_registered_at` +- `event_pattern`: subscribed key (`EventType` or `'*'`) +- `eventbus_name`, `eventbus_id` + +## `bus.on(...)` and `bus.off(...)` + + + + +```python +entry = bus.on(MyEvent, handler) +bus.off(MyEvent, entry) +``` + + + + +```ts +const entry = bus.on(MyEvent, handler) +bus.off(MyEvent, entry) +``` + + + + +## Serialization + + + + +```python +payload = entry.model_dump(mode='json', exclude={'handler'}) +print(payload) +# { +# "id": "018f...", +# "handler_name": "app.handlers.on_user_created", +# "event_pattern": "UserCreatedEvent", +# "eventbus_name": "AuthBus", +# "eventbus_id": "018f...", +# "...": "..." +# } + +restored = EventHandler.from_json_dict(payload, handler=real_handler) +``` + + + + +```ts +const payload = entry.toJSON() +console.log(payload) +// { +// id: '018f...', +// handler_name: 'onUserCreated', +// event_pattern: 'UserCreatedEvent', +// eventbus_name: 'AuthBus', +// eventbus_id: '018f...', +// ... +// } + +const restored = EventHandler.fromJSON(payload, realHandler) +``` + + + + +## Notes + +- Function bodies are not serialized. +- Rehydration restores metadata; execution behavior requires re-binding a real callable. 
diff --git a/docs/api/eventhistory.mdx b/docs/api/eventhistory.mdx new file mode 100644 index 0000000..c634cda --- /dev/null +++ b/docs/api/eventhistory.mdx @@ -0,0 +1,122 @@ +--- +title: EventHistory +description: EventHistory ordered store API for lookup, retention, and trimming. +--- + +`EventHistory` is the ordered event store owned by each `EventBus` as `bus.event_history`. + +It is the canonical history backend in both runtimes and exposes mapping-style access (`{event_id: event}`), lookup (`find`), and retention controls. + +## Common fields + +- `max_history_size`: max retained events (`None`/`null` means unbounded, `0` keeps only in-flight visibility) +- `max_history_drop`: whether to trim oldest entries when over limit +- Ordered mapping keyed by `event_id` + +## Mapping interface + +`EventHistory` behaves like an ordered mapping in both runtimes. + + + + +```python +from bubus import EventHistory + +history = EventHistory(max_history_size=100, max_history_drop=True) +history[event.event_id] = event + +exists = event.event_id in history +loaded = history.get(event.event_id) +count = len(history) + +for event_id, item in history.items(): + print(event_id, item.event_type) +``` + + + + +```ts +import { EventHistory } from 'bubus' + +const history = new EventHistory({ max_history_size: 100, max_history_drop: true }) +history.set(event.event_id, event) + +const exists = history.has(event.event_id) +const loaded = history.get(event.event_id) +const count = history.size + +for (const [event_id, item] of history) { + console.log(event_id, item.event_type) +} +``` + + + + +## Accessor methods + +Both runtimes expose explicit accessors in addition to mapping methods. 
+ + + + +- `add_event(event)` +- `get_event(event_id)` +- `remove_event(event_id)` +- `has_event(event_id)` +- `find(...)` +- `trim_event_history(...)` +- `cleanup_excess_events(...)` + + + + +- `addEvent(event)` +- `getEvent(event_id)` +- `removeEvent(event_id)` +- `hasEvent(event_id)` +- `find(...)` +- `trimEventHistory(...)` +- `cleanupExcessEvents(...)` + + + + +## `find(...)` + +`EventHistory.find(...)` supports: + +- event pattern (`'EventType'`, class, or `'*'`) +- `where` predicate filtering +- field equality filters (`**event_fields` in Python, options object fields in TypeScript) +- `past` / `future` windows +- `child_of` constraints + +`EventBus.find(...)` delegates to `event_history.find(...)`. Future waiter ownership remains on `EventBus`; the bus injects the callback used for future waits. + +## Retention and trim methods + +- `trim_event_history` / `trimEventHistory` applies configured retention policy. +- `cleanup_excess_events` / `cleanupExcessEvents` performs direct overage cleanup. +- `on_remove` callbacks let the caller run cleanup side effects when history entries are removed. + +## Constructor options + + + + +```python +history = EventHistory(max_history_size=100, max_history_drop=False) +``` + + + + +```ts +const history = new EventHistory({ max_history_size: 100, max_history_drop: false }) +``` + + + diff --git a/docs/api/eventresult.mdx b/docs/api/eventresult.mdx new file mode 100644 index 0000000..5223d15 --- /dev/null +++ b/docs/api/eventresult.mdx @@ -0,0 +1,113 @@ +--- +title: EventResult +description: EventResult fields, status, and handler execution results. +--- + +Each handler execution for an event produces one `EventResult`. + +You usually access results through `event.event_results` (or high-level event helper methods), but this page documents the underlying object. 
+ +## Common fields + +- `id`: unique result id +- `status`: `pending | started | completed | error` +- `result`: handler return value (typed by event result schema/type) +- `error`: captured exception/error when handler fails +- `started_at`, `completed_at` (`None`/`null` until the handler starts/completes) +- `event_children`: child events emitted from inside this handler execution +- Handler metadata (`handler_id`, `handler_name`, `handler_file_path`, bus label/id/name) + +## Await semantics + +Awaiting an `EventResult` resolves to handler return value or raises captured failure. + + + + +```python +entry = event.event_results[some_handler_id] +value = await entry +``` + + + + +```ts +const [, entry] = Array.from(event.event_results.entries())[0] +const value = entry.result +``` + + + + +## Scope stack ordering + +`EventResult` execution is intentionally layered as nested scopes/wrappers. + +These are flow references for execution order, not public API method documentation. + + + + +- `eventbus.locks._run_with_handler_lock(...)` +- `eventbus._run_with_handler_dispatch_context(...)` +- `event_result._run_with_timeout(...)` +- `_run_with_slow_monitor(...)` +- `event_result._call_handler(...)` + + + + +- `_runWithHandlerLock(...)` (acquired before `runHandler(...)`) +- `_runWithHandlerDispatchContext(...)` +- `_runWithAsyncContext(...)` +- `_runWithTimeout(...)` +- `_runWithSlowMonitor(...)` +- `_runWithAbortMonitor(...)` around handler invocation + + + + +## Serialization + + + + +```python +payload = entry.model_dump(mode='json') +print(payload) +# { +# "id": "0190...", +# "status": "completed", +# "event_id": "018f...", +# "handler_id": "018g...", +# "handler_name": "on_user_created", +# "result": {"user_id": "u_123"}, +# "error": None, +# "...": "..." 
+# } + +restored = EventResult.model_validate(payload) +``` + + + + +```ts +const payload = entry.toJSON() +console.log(payload) +// { +// id: '0190...', +// status: 'completed', +// event_id: '018f...', +// handler: { id: '018g...', handler_name: 'onUserCreated', ... }, +// result: { user_id: 'u_123' }, +// error: undefined, +// ... +// } + +const restored = EventResult.fromJSON(event, payload) +``` + + + diff --git a/docs/api/retry.mdx b/docs/api/retry.mdx new file mode 100644 index 0000000..4a939a4 --- /dev/null +++ b/docs/api/retry.mdx @@ -0,0 +1,156 @@ +--- +title: retry +description: Retry decorator/higher-order wrapper for async functions and handlers. +--- + +`retry` adds per-attempt timeout, retry/backoff, and optional semaphore-based concurrency control around async functions (including bus handlers). + +## Signature + + + + +```python +def retry( + retry_after: float = 0, + max_attempts: int = 1, + timeout: float | None = None, + retry_on_errors: list[type[Exception] | re.Pattern[str]] | tuple[type[Exception] | re.Pattern[str], ...] 
| None = None, + retry_backoff_factor: float = 1.0, + semaphore_limit: int | None = None, + semaphore_name: str | Callable[..., str] | None = None, + semaphore_lax: bool = True, + semaphore_scope: Literal['multiprocess', 'global', 'class', 'instance'] = 'global', + semaphore_timeout: float | None = None, +) -> Callable[[Callable[P, Coroutine[Any, Any, T]]], Callable[P, Coroutine[Any, Any, T]]] +``` + + + + +```ts +retry({ + max_attempts?: number, // default: 1 + retry_after?: number, // default: 0 (seconds) + retry_backoff_factor?: number, // default: 1.0 + retry_on_errors?: Array<(new (...args) => Error) | RegExp | string>, // default: retry any error + timeout?: number | null, // default: no per-attempt timeout + semaphore_limit?: number | null, // default: no semaphore limit + semaphore_name?: string | ((...args: any[]) => string) | null, // default: function name + semaphore_lax?: boolean, // default: true + semaphore_scope?: 'global' | 'class' | 'instance', // default: 'global' + semaphore_timeout?: number | null, // default: derived when timeout + limit are set +}) +``` + + + + +## Options + +| Option | Description | +| --- | --- | +| `max_attempts` | Total attempts including the first call (`1` disables retries). | +| `retry_after` | Base delay between retries, in seconds. | +| `retry_backoff_factor` | Delay multiplier applied after each failed attempt. | +| `retry_on_errors` | Optional matcher list to restrict which errors are retried. | +| `timeout` | Per-attempt timeout in seconds (`None`/`undefined` means no per-attempt timeout). | +| `semaphore_limit` | Max concurrent executions sharing the same semaphore. | +| `semaphore_name` | Semaphore key (string or function deriving a key from call args). | +| `semaphore_scope` | Semaphore sharing scope (`global`, `class`, `instance`; Python also supports `multiprocess`). | +| `semaphore_timeout` | Max wait time for semaphore acquisition before timeout/lax fallback. 
| +| `semaphore_lax` | If true, continue execution without semaphore limit when acquisition times out. | + +## Example: Inline wrapper + + + + +```python +from bubus import EventBus, BaseEvent +from bubus.retry import retry + +class FetchEvent(BaseEvent[dict]): + url: str + +bus = EventBus('AppBus') + +async def fetch_with_retry(event: FetchEvent) -> dict: + return await fetch_json(event.url) + +bus.on( + FetchEvent, + retry(max_attempts=3, retry_after=1, timeout=5)(fetch_with_retry), +) +``` + + + + +```ts +import { BaseEvent, EventBus, retry } from 'bubus' +import { z } from 'zod' + +const FetchEvent = BaseEvent.extend('FetchEvent', { + url: z.string(), + event_result_type: z.record(z.string(), z.unknown()), +}) + +const bus = new EventBus('AppBus') + +bus.on( + FetchEvent, + retry({ max_attempts: 3, retry_after: 1, timeout: 5 })(async (event) => { + return await fetchJson(event.url) + }) +) +``` + + + + +## Example: Decorated class method + + + + +```python +from bubus.retry import retry + +class ApiService: + @retry(max_attempts=4, retry_after=1, timeout=10, semaphore_limit=2, semaphore_scope='class') + async def get_user(self, user_id: str) -> dict: + return await call_remote_api(user_id) +``` + + + + +```ts +import { retry } from 'bubus' + +class ApiService { + @retry({ max_attempts: 4, retry_after: 1, timeout: 10, semaphore_limit: 2, semaphore_scope: 'class' }) + async getUser(userId: string): Promise<Record<string, unknown>> { + return await callRemoteApi(userId) + } +} +``` + + + + +## Behavior + +- Semaphore acquisition happens once per call, then all retry attempts run within that acquired slot. +- Backoff delay per retry is: `retry_after * retry_backoff_factor^(attempt - 1)`. +- Retries stop immediately when the thrown error does not match `retry_on_errors`. +- Bus/event timeouts act as outer execution budgets; `retry.timeout` is per-attempt. + +## Runtime differences + +- Python supports semaphore scope `multiprocess` in addition to `global`, `class`, and `instance`. 
+- TypeScript supports `global`, `class`, and `instance`, and uses async-context re-entrancy tracking in Node/Bun to avoid same-semaphore nested deadlocks. +- `retry_on_errors` matching differs slightly: + - Python: exception classes or compiled regex patterns (matched against `"ErrorClass: message"`). + - TypeScript: error constructors, error-name strings, or regex patterns. diff --git a/docs/concurrency/backpressure.mdx b/docs/concurrency/backpressure.mdx new file mode 100644 index 0000000..1007fb1 --- /dev/null +++ b/docs/concurrency/backpressure.mdx @@ -0,0 +1,145 @@ +--- +title: Backpressure +description: How emit, queueing, and history limits interact under high event volume. +--- + +Backpressure in `bubus` is history-policy based, not queue-capacity based. + +- `emit()` enqueues synchronously and returns immediately. +- Pending queues are unbounded in both runtimes. +- Overload behavior is controlled by `max_history_size` + `max_history_drop`. + +## 1) If I emit 1,000,000 events, will errors be raised? + +### Error conditions + +| Runtime | Condition | What is raised | +| --- | --- | --- | +| Python | `emit()` called with no running event loop | `RuntimeError` (`emit() called but no event loop is running`) | +| Python | `max_history_size > 0`, `max_history_drop=False`, and history already at limit | `RuntimeError` (`history limit reached`) | +| TypeScript | `emit()` with `max_history_size > 0`, `max_history_drop=false`, and history already at limit | `Error` (message contains `history limit reached`) | +| Both | Process runs out of memory under extreme load | Runtime/VM OOM failure (not a bus-specific exception type) | + +In normal operation, queue-capacity errors are not the backpressure mechanism. + +`max_history_size=0` is a special case in both runtimes: it does not trigger history-limit rejection, and instead keeps only in-flight visibility. + +With `max_history_drop=true`, `emit()` does not reject on history size. 
Under sustained overload, old uncompleted entries can be dropped and a warning is logged. + +### Reject vs drop behavior + + + + +```python +from bubus import EventBus + +# Reject new emits once history reaches N +reject_bus = EventBus(max_history_size=10_000, max_history_drop=False) + +# Never reject on history size; trim oldest history entries instead +drop_bus = EventBus(max_history_size=10_000, max_history_drop=True) +``` + + + + +```ts +import { EventBus } from 'bubus' + +const rejectBus = new EventBus('RejectBus', { max_history_size: 10_000, max_history_drop: false }) +const dropBus = new EventBus('DropBus', { max_history_size: 10_000, max_history_drop: true }) +``` + + + + +## 2) If 1,000,000 events complete, how many are kept? + +Let `N = max_history_size`. + +| Setting | Events retained after bus becomes idle | Notes | +| --- | --- | --- | +| `N = None` / `null` | All completed events (so up to 1,000,000) | History is unbounded. | +| `N > 0`, `max_history_drop = false` | Up to `N` | New emits are rejected once history reaches `N`. | +| `N > 0`, `max_history_drop = true` | Bounded to `N` at steady state | Oldest history entries are removed first. | +| `N = 0` | `0` completed events retained | Only pending/in-flight visibility is kept; completed entries are dropped. | + +Python nuance: in heavy bursts with `max_history_drop=True`, cleanup is amortized, so history can temporarily exceed `N` before converging back to `<= N`. + +For the broader retention model, see [Event History Store](../features/event-history-store). + +## 3) How RAM usage scales + +At a high level, memory grows with: + +- pending queue depth, +- retained history size, +- per-event handler/result payload size. + +A practical model is: + +`RAM ~= O(pending_event_queue) + O(event_history) + O(event_results and payloads)` + +### Measured slopes from perf suites + +- Python README matrix reports scenario-dependent peak RSS slopes between about `0.025kb/event` and `8.024kb/event`. 
+- TypeScript README matrix reports scenario/runtime-dependent peak RSS slopes between about `0.1kb/event` and `7.9kb/event`. +- TypeScript README notes those `kb/event` values are measured during active processing with history aggressively bounded (`max_history_size=1` in perf harnesses). + +Use those numbers as throughput-era slope indicators, not exact long-term retention multipliers for your payloads. + +Operationally: + +- bounded history (`N` finite) keeps steady-state memory bounded by queue depth + `N`, +- unbounded history (`N=None/null`) makes retained RAM grow roughly linearly with total completed events. + +## 4) Queue vs history lifecycle (exact behavior) + +Events do not "move from queue to history." They are added to history at `emit()` time, and can exist in both structures while pending. + +### Python timeline (`emit`) + +1. Validate pressure policy. +2. Enqueue into `pending_event_queue`. +3. Add same event object to `event_history`. +4. Runloop dequeues event (`queue.get()`), then executes handlers. +5. Event remains in `event_history` as `pending` -> `started` -> `completed` unless trimmed/removed by history policy. + +### TypeScript timeline (`emit`) + +1. Validate pressure policy. +2. Add event to `event_history`. +3. Apply `trimHistory()`. +4. Push event into `pending_event_queue`. +5. Runloop shifts from queue and executes handlers. +6. Event remains in `event_history` unless trimmed/removed by policy. + +So yes: + +- an event can be in both `pending_event_queue` and `event_history` at the same time, +- `event_history` can contain pending events (not only started/completed events). 
+ +## Observe both structures directly + + + + +```python +event = bus.emit(MyEvent()) +pending_count = bus.pending_event_queue.qsize() if bus.pending_event_queue else 0 +history_count = len(bus.event_history) +print('pending_event_queue=', pending_count, 'event_history=', history_count) +# pending_event_queue= 1 event_history= 1 +``` + + + + +```ts +const event = bus.emit(MyEvent({})) +console.log('pending_event_queue=', bus.pending_event_queue.length, 'event_history=', bus.event_history.size) +``` + + + diff --git a/docs/concurrency/events-bus-serial.mdx b/docs/concurrency/events-bus-serial.mdx new file mode 100644 index 0000000..865df4a --- /dev/null +++ b/docs/concurrency/events-bus-serial.mdx @@ -0,0 +1,117 @@ +--- +title: "Events: bus-serial" +description: Process one event at a time per bus, while allowing overlap across buses. +--- + +`bus-serial` enforces one active event per bus, while different buses can process events simultaneously. + +Companion runnable example: +- [`examples/concurrency_options.py`](https://github.com/pirate/bbus/blob/main/examples/concurrency_options.py) +- [`bubus-ts/examples/concurrency_options.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/concurrency_options.ts) + +## Lifecycle impact + +1. Events enqueue per bus in FIFO order. +2. Each bus holds its own event lock. +3. A busy bus does not block other buses. +4. Queue-jump child events can preempt that same bus queue when awaited in-handler. 
+ +## Execution order example + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class WorkEvent(BaseEvent): + order: int + source: str + +bus_a = EventBus('BusSerialA', event_concurrency='bus-serial') +bus_b = EventBus('BusSerialB', event_concurrency='bus-serial') + +starts_a: list[int] = [] +starts_b: list[int] = [] +in_flight_global = 0 +max_in_flight_global = 0 + +async def handler_a(event: WorkEvent) -> None: + global in_flight_global, max_in_flight_global + in_flight_global += 1 + max_in_flight_global = max(max_in_flight_global, in_flight_global) + starts_a.append(event.order) + await asyncio.sleep(0.01) + in_flight_global -= 1 + +async def handler_b(event: WorkEvent) -> None: + global in_flight_global, max_in_flight_global + in_flight_global += 1 + max_in_flight_global = max(max_in_flight_global, in_flight_global) + starts_b.append(event.order) + await asyncio.sleep(0.01) + in_flight_global -= 1 + +bus_a.on(WorkEvent, handler_a) +bus_b.on(WorkEvent, handler_b) + +for i in range(4): + bus_a.emit(WorkEvent(order=i, source='a')) + bus_b.emit(WorkEvent(order=i, source='b')) + +await bus_a.wait_until_idle() +await bus_b.wait_until_idle() + +assert starts_a == [0, 1, 2, 3] +assert starts_b == [0, 1, 2, 3] +assert max_in_flight_global >= 2 +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const WorkEvent = BaseEvent.extend('WorkEvent', { + order: z.number(), + source: z.string(), +}) + +const busA = new EventBus('BusSerialA', { event_concurrency: 'bus-serial' }) +const busB = new EventBus('BusSerialB', { event_concurrency: 'bus-serial' }) + +const startsA: number[] = [] +const startsB: number[] = [] + +busA.on(WorkEvent, async (event) => { + startsA.push(event.order) + await new Promise((resolve) => setTimeout(resolve, 2)) +}) + +busB.on(WorkEvent, async (event) => { + startsB.push(event.order) + await new Promise((resolve) => setTimeout(resolve, 2)) +}) + +for (let i = 0; i < 4; i += 1) { + 
busA.emit(WorkEvent({ order: i, source: 'a' })) + busB.emit(WorkEvent({ order: i, source: 'b' })) +} + +await busA.waitUntilIdle() +await busB.waitUntilIdle() + +if (JSON.stringify(startsA) !== JSON.stringify([0, 1, 2, 3])) throw new Error('bus A FIFO failed') +if (JSON.stringify(startsB) !== JSON.stringify([0, 1, 2, 3])) throw new Error('bus B FIFO failed') +``` + + + + +## Notes + +- This is typically the best default for multi-bus systems. +- It preserves local determinism while retaining cross-bus throughput. diff --git a/docs/concurrency/events-global-serial.mdx b/docs/concurrency/events-global-serial.mdx new file mode 100644 index 0000000..d0b9bac --- /dev/null +++ b/docs/concurrency/events-global-serial.mdx @@ -0,0 +1,110 @@ +--- +title: "Events: global-serial" +description: Process only one event at a time across all buses. +--- + +`global-serial` enforces a single global event-processing slot across all `EventBus` instances. +Note: that global lock is scoped to the `EventBus` class; if you need separate global lock domains, subclass `EventBus`. + +Companion runnable example: +- [`examples/concurrency_options.py`](https://github.com/pirate/bbus/blob/main/examples/concurrency_options.py) +- [`bubus-ts/examples/concurrency_options.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/concurrency_options.ts) + +## Lifecycle impact + +1. An emitted event is queued on its target bus as usual. +2. Before handler execution starts, the bus acquires the shared global event lock. +3. While one event is running anywhere, other buses wait. +4. Handler-level concurrency still applies inside that one active event. 
+ +## Execution order example + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class SerialEvent(BaseEvent): + order: int + source: str + +bus_a = EventBus('GlobalSerialA', event_concurrency='global-serial') +bus_b = EventBus('GlobalSerialB', event_concurrency='global-serial') + +in_flight = 0 +max_in_flight = 0 +starts: list[str] = [] + +async def handler(event: SerialEvent) -> None: + global in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + starts.append(f'{event.source}:{event.order}') + await asyncio.sleep(0.01) + in_flight -= 1 + +bus_a.on(SerialEvent, handler) +bus_b.on(SerialEvent, handler) + +for i in range(3): + bus_a.emit(SerialEvent(order=i, source='a')) + bus_b.emit(SerialEvent(order=i, source='b')) + +await bus_a.wait_until_idle() +await bus_b.wait_until_idle() + +assert max_in_flight == 1 +assert [s for s in starts if s.startswith('a:')] == ['a:0', 'a:1', 'a:2'] +assert [s for s in starts if s.startswith('b:')] == ['b:0', 'b:1', 'b:2'] +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const SerialEvent = BaseEvent.extend('SerialEvent', { + order: z.number(), + source: z.string(), +}) + +const busA = new EventBus('GlobalSerialA', { event_concurrency: 'global-serial' }) +const busB = new EventBus('GlobalSerialB', { event_concurrency: 'global-serial' }) + +let inFlight = 0 +let maxInFlight = 0 +const starts: string[] = [] + +const handler = async (event: InstanceType<typeof SerialEvent>) => { + inFlight += 1 + maxInFlight = Math.max(maxInFlight, inFlight) + starts.push(`${event.source}:${event.order}`) + await new Promise((resolve) => setTimeout(resolve, 10)) + inFlight -= 1 +} + +busA.on(SerialEvent, handler) +busB.on(SerialEvent, handler) + +for (let i = 0; i < 3; i += 1) { + busA.emit(SerialEvent({ order: i, source: 'a' })) + busB.emit(SerialEvent({ order: i, source: 'b' })) +} + +await busA.waitUntilIdle() +await busB.waitUntilIdle() + +if (maxInFlight !== 1) 
throw new Error('expected global serialization') +``` + + + + +## Notes + +- This mode is strongest for determinism across distributed in-process bus topologies. +- Queue-jump behavior (`await event` inside handlers) still applies, but it does so under the same global lock. diff --git a/docs/concurrency/events-parallel.mdx b/docs/concurrency/events-parallel.mdx new file mode 100644 index 0000000..2ae4277 --- /dev/null +++ b/docs/concurrency/events-parallel.mdx @@ -0,0 +1,102 @@ +--- +title: "Events: parallel" +description: Allow multiple events to execute concurrently on the same bus. +--- + +`parallel` removes event-level serialization for a bus, so multiple events can be in-flight simultaneously. + +Companion runnable example: +- [`examples/concurrency_options.py`](https://github.com/pirate/bbus/blob/main/examples/concurrency_options.py) +- [`bubus-ts/examples/concurrency_options.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/concurrency_options.ts) + +## Lifecycle impact + +1. Events still enqueue and are tracked in history. +2. The bus does not gate execution with an event semaphore. +3. Handler-level concurrency rules still apply within each event. +4. Ordering guarantees become weaker under load because events can overlap. 
+ +## Execution order example + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class ParallelEvent(BaseEvent): + order: int + +bus = EventBus('ParallelEventBus', event_concurrency='parallel', event_handler_concurrency='parallel') + +in_flight = 0 +max_in_flight = 0 +release = asyncio.Event() + +async def handler(_: ParallelEvent) -> None: + global in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + await release.wait() + await asyncio.sleep(0.01) + in_flight -= 1 + +bus.on(ParallelEvent, handler) + +bus.emit(ParallelEvent(order=0)) +bus.emit(ParallelEvent(order=1)) + +await asyncio.sleep(0) +release.set() +await bus.wait_until_idle() + +assert max_in_flight >= 2 +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const ParallelEvent = BaseEvent.extend('ParallelEvent', { order: z.number() }) + +const bus = new EventBus('ParallelEventBus', { + event_concurrency: 'parallel', + event_handler_concurrency: 'parallel', +}) + +let inFlight = 0 +let maxInFlight = 0 +let release!: () => void +const gate = new Promise<void>((resolve) => { + release = resolve +}) + +bus.on(ParallelEvent, async () => { + inFlight += 1 + maxInFlight = Math.max(maxInFlight, inFlight) + await gate + await new Promise((resolve) => setTimeout(resolve, 10)) + inFlight -= 1 +}) + +bus.emit(ParallelEvent({ order: 0 })) +bus.emit(ParallelEvent({ order: 1 })) + +await new Promise((resolve) => setTimeout(resolve, 0)) +release() +await bus.waitUntilIdle() + +if (maxInFlight < 2) throw new Error('expected overlapping events') +``` + + + + +## Notes + +- Use when throughput matters more than deterministic event ordering. +- Combine with idempotent handlers and explicit external coordination when needed. 
diff --git a/docs/concurrency/handler-completion-all.mdx b/docs/concurrency/handler-completion-all.mdx new file mode 100644 index 0000000..8806fd5 --- /dev/null +++ b/docs/concurrency/handler-completion-all.mdx @@ -0,0 +1,100 @@ +--- +title: "Handler Completion: all" +description: Wait for every matching handler before an event completes. +--- + +`all` is the default handler completion mode. The event completes only after every matching handler reaches a terminal state. + +## Lifecycle impact + +1. All matching handlers are allowed to run. +2. A successful early handler does not short-circuit the event. +3. Event completion waits for every handler to finish, fail, or time out. +4. Result collection includes all successful non-`None` / non-`undefined` return values. + +## Execution order example + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class CompletionEvent(BaseEvent[str]): + pass + +bus = EventBus( + 'CompletionAllBus', + event_handler_concurrency='parallel', + event_handler_completion='all', +) + +seen: list[str] = [] + +async def fast_handler(_: CompletionEvent) -> str: + await asyncio.sleep(0.01) + seen.append('fast') + return 'fast' + +async def slow_handler(_: CompletionEvent) -> str: + await asyncio.sleep(0.05) + seen.append('slow') + return 'slow' + +bus.on(CompletionEvent, fast_handler) +bus.on(CompletionEvent, slow_handler) + +event = bus.emit(CompletionEvent()) +await event + +assert set(seen) == {'fast', 'slow'} +results = await event.event_results_list(raise_if_any=False, raise_if_none=False) +assert set(results) == {'fast', 'slow'} +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const CompletionEvent = BaseEvent.extend('CompletionEvent', { event_result_type: z.string() }) +const bus = new EventBus('CompletionAllBus', { + event_handler_concurrency: 'parallel', + event_handler_completion: 'all', +}) + +const seen: string[] = [] +const delay = (ms: number) => new 
Promise((resolve) => setTimeout(resolve, ms)) + +bus.on(CompletionEvent, async () => { + await delay(10) + seen.push('fast') + return 'fast' +}) + +bus.on(CompletionEvent, async () => { + await delay(50) + seen.push('slow') + return 'slow' +}) + +const event = bus.emit(CompletionEvent({})) +await event.done() +const results = await event.eventResultsList({ raise_if_any: false, raise_if_none: false }) + +if (seen.length !== 2) throw new Error('expected all handlers to run') +if (!results.includes('fast') || !results.includes('slow')) { + throw new Error('expected both handler return values') +} +``` + + + + +## Notes + +- `all` is best when multiple handlers contribute required side effects. +- Handler scheduling (`serial` vs `parallel`) changes overlap, but not the fact that all handlers must settle. diff --git a/docs/concurrency/handler-completion-first.mdx b/docs/concurrency/handler-completion-first.mdx new file mode 100644 index 0000000..5537a63 --- /dev/null +++ b/docs/concurrency/handler-completion-first.mdx @@ -0,0 +1,103 @@ +--- +title: "Handler Completion: first" +description: Complete an event on the first successful handler result. +--- + +`first` short-circuits event completion once the first successful non-`None` / non-`undefined` result is available. + +## Lifecycle impact + +1. The first successful result wins (`None`/`undefined` and errors do not win). +2. In `serial` handler mode, remaining handlers are skipped once a winner appears. +3. In `parallel` handler mode, in-flight losers are cancelled or aborted. +4. Event completion resolves as soon as a winner is found (or all handlers fail). 
+ +## Execution order example + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class CompletionEvent(BaseEvent[str]): + pass + +bus = EventBus( + 'CompletionFirstBus', + event_handler_concurrency='parallel', + event_handler_completion='first', +) + +state = {'slow_started': False, 'slow_cancelled': False} + +async def fast_handler(_: CompletionEvent) -> str: + await asyncio.sleep(0.01) + return 'winner' + +async def slow_handler(_: CompletionEvent) -> str: + state['slow_started'] = True + try: + await asyncio.sleep(0.5) + return 'slow' + except asyncio.CancelledError: + state['slow_cancelled'] = True + raise + +bus.on(CompletionEvent, slow_handler) +bus.on(CompletionEvent, fast_handler) + +event = bus.emit(CompletionEvent()) +await event + +value = await event.event_result(raise_if_any=False, raise_if_none=False) +assert value == 'winner' +assert state['slow_started'] is True +assert state['slow_cancelled'] is True +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const CompletionEvent = BaseEvent.extend('CompletionEvent', { event_result_type: z.string() }) +const bus = new EventBus('CompletionFirstBus', { + event_handler_concurrency: 'parallel', + event_handler_completion: 'first', +}) + +let slowStarted = false +let slowCompleted = false +const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)) + +bus.on(CompletionEvent, async () => { + slowStarted = true + await delay(500) + slowCompleted = true + return 'slow' +}) + +bus.on(CompletionEvent, async () => { + await delay(10) + return 'winner' +}) + +const event = bus.emit(CompletionEvent({})) +await event.done() + +if (event.event_result !== 'winner') throw new Error('expected first winner result') +if (!slowStarted) throw new Error('expected slow handler to start') +if (slowCompleted) throw new Error('slow handler should not complete before event resolves') +``` + + + + +## Notes + +- This mode is useful for fallback 
chains and race-to-first-response patterns. +- `await event.first()` also forces this mode for that event at call time. diff --git a/docs/concurrency/handlers-parallel.mdx b/docs/concurrency/handlers-parallel.mdx new file mode 100644 index 0000000..0a7d1af --- /dev/null +++ b/docs/concurrency/handlers-parallel.mdx @@ -0,0 +1,95 @@ +--- +title: "Handlers: parallel" +description: Run handlers for one event concurrently. +--- + +`parallel` allows multiple handlers for the same event to run at the same time. + +Companion runnable example: +- [`examples/concurrency_options.py`](https://github.com/pirate/bbus/blob/main/examples/concurrency_options.py) +- [`bubus-ts/examples/concurrency_options.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/concurrency_options.ts) + +## Lifecycle impact + +1. Event starts processing. +2. All applicable handlers are scheduled concurrently. +3. Event completion waits based on completion mode (`all` or `first`). +4. Per-handler timeout/error behavior remains independent per handler. 
+ +## Execution order example + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class HandlerEvent(BaseEvent): + pass + +bus = EventBus('ParallelHandlerBus', event_handler_concurrency='parallel') + +in_flight = 0 +max_in_flight = 0 +release = asyncio.Event() + +async def tracked(_: HandlerEvent) -> None: + global in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + await release.wait() + in_flight -= 1 + +bus.on(HandlerEvent, tracked) +bus.on(HandlerEvent, tracked) + +event = bus.emit(HandlerEvent()) +await asyncio.sleep(0) +release.set() +await event + +assert max_in_flight >= 2 +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' + +const HandlerEvent = BaseEvent.extend('HandlerEvent', {}) +const bus = new EventBus('ParallelHandlerBus', { event_handler_concurrency: 'parallel' }) + +let inFlight = 0 +let maxInFlight = 0 +let release!: () => void +const gate = new Promise<void>((resolve) => { + release = resolve +}) + +const tracked = async () => { + inFlight += 1 + maxInFlight = Math.max(maxInFlight, inFlight) + await gate + inFlight -= 1 +} + +bus.on(HandlerEvent, tracked) +bus.on(HandlerEvent, tracked) + +const event = bus.emit(HandlerEvent({})) +await new Promise((resolve) => setTimeout(resolve, 0)) +release() +await event.done() + +if (maxInFlight < 2) throw new Error('expected overlapping handlers') +``` + + + + +## Notes + +- Best for independent I/O-bound handlers where overlap reduces total latency. +- If handlers mutate shared resources, add explicit synchronization. diff --git a/docs/concurrency/handlers-serial.mdx b/docs/concurrency/handlers-serial.mdx new file mode 100644 index 0000000..34aa644 --- /dev/null +++ b/docs/concurrency/handlers-serial.mdx @@ -0,0 +1,87 @@ +--- +title: "Handlers: serial" +description: Run handlers one at a time per event, in registration order. +--- + +`serial` executes handlers for a single event sequentially. 
+ +Companion runnable example: +- [`examples/concurrency_options.py`](https://github.com/pirate/bbus/blob/main/examples/concurrency_options.py) +- [`bubus-ts/examples/concurrency_options.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/concurrency_options.ts) + +## Lifecycle impact + +1. Event starts processing. +2. Handler A runs to completion (or failure/timeout). +3. Handler B starts afterward, then C, and so on. +4. Event completion waits for the serial chain (or completion-mode short-circuit rules). + +## Execution order example + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class HandlerEvent(BaseEvent): + pass + +bus = EventBus('SerialHandlerBus', event_handler_concurrency='serial') +log: list[str] = [] + +async def h1(_: HandlerEvent) -> None: + log.append('h1_start') + await asyncio.sleep(0.01) + log.append('h1_end') + +async def h2(_: HandlerEvent) -> None: + log.append('h2_start') + await asyncio.sleep(0.01) + log.append('h2_end') + +bus.on(HandlerEvent, h1) +bus.on(HandlerEvent, h2) + +await bus.emit(HandlerEvent()) + +assert log == ['h1_start', 'h1_end', 'h2_start', 'h2_end'] +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' + +const HandlerEvent = BaseEvent.extend('HandlerEvent', {}) +const bus = new EventBus('SerialHandlerBus', { event_handler_concurrency: 'serial' }) +const log: string[] = [] + +bus.on(HandlerEvent, async () => { + log.push('h1_start') + await new Promise((resolve) => setTimeout(resolve, 10)) + log.push('h1_end') +}) + +bus.on(HandlerEvent, async () => { + log.push('h2_start') + await new Promise((resolve) => setTimeout(resolve, 10)) + log.push('h2_end') +}) + +await bus.emit(HandlerEvent({})).done() + +if (JSON.stringify(log) !== JSON.stringify(['h1_start', 'h1_end', 'h2_start', 'h2_end'])) { + throw new Error('expected serial handler execution order') +} +``` + + + + +## Notes + +- Best when handlers share mutable state or require strict ordering. 
+- Execution remains predictable but may increase per-event latency. diff --git a/docs/concurrency/immediate-execution.mdx b/docs/concurrency/immediate-execution.mdx new file mode 100644 index 0000000..13aa52f --- /dev/null +++ b/docs/concurrency/immediate-execution.mdx @@ -0,0 +1,182 @@ +--- +title: Immediate Execution (RPC-style) +description: Queue-jump behavior for awaiting child events inside handlers. +--- + +Immediate execution lets a handler emit a child event and await it like a direct async function call. + +When this happens inside a handler, the child event is processed immediately (queue-jump) instead of waiting behind unrelated queued events. + +Repository example files: +- [`examples/immediate_event_processing.py`](https://github.com/pirate/bbus/blob/main/examples/immediate_event_processing.py) +- [`bubus-ts/examples/immediate_event_processing.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/immediate_event_processing.ts) + +## Core pattern + + + + +```python +from bubus import BaseEvent, EventBus + +class ParentEvent(BaseEvent[str]): + pass + +class ChildEvent(BaseEvent[str]): + pass + +bus = EventBus('RpcBus') + +async def on_parent(event: ParentEvent) -> str: + assert event.event_bus is not None + child = event.event_bus.emit(ChildEvent()) + await child # queue-jump while still inside this handler + value = await child.event_result() + return f'parent got: {value}' + +async def on_child(_: ChildEvent) -> str: + return 'child response' + +bus.on(ParentEvent, on_parent) +bus.on(ChildEvent, on_child) +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const ParentEvent = BaseEvent.extend('ParentEvent', { event_result_type: z.string() }) +const ChildEvent = BaseEvent.extend('ChildEvent', { event_result_type: z.string() }) + +const bus = new EventBus('RpcBus') + +bus.on(ParentEvent, async (event) => { + const child = event.event_bus!.emit(ChildEvent({})) + await child.done() // queue-jump while 
still inside this handler + return `parent got: ${child.event_result}` +}) + +bus.on(ChildEvent, async () => 'child response') +``` + + + + +## Execution order example + +In this pattern, sibling work can already be queued, but the awaited child still runs first. + + + + +```python +from bubus import BaseEvent, EventBus + +class ParentEvent(BaseEvent): + pass + +class ChildEvent(BaseEvent): + pass + +class SiblingEvent(BaseEvent): + pass + +bus = EventBus('OrderBus', event_concurrency='bus-serial', event_handler_concurrency='serial') +order: list[str] = [] + +async def on_parent(event: ParentEvent) -> None: + assert event.event_bus is not None + order.append('parent_start') + event.event_bus.emit(SiblingEvent()) + child = event.event_bus.emit(ChildEvent()) + await child + order.append('parent_end') + +async def on_child(_: ChildEvent) -> None: + order.append('child') + +async def on_sibling(_: SiblingEvent) -> None: + order.append('sibling') + +bus.on(ParentEvent, on_parent) +bus.on(ChildEvent, on_child) +bus.on(SiblingEvent, on_sibling) + +await bus.emit(ParentEvent()) +await bus.wait_until_idle() + +assert order.index('child') < order.index('parent_end') +assert order.index('parent_end') < order.index('sibling') +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' + +const ParentEvent = BaseEvent.extend('ParentEvent', {}) +const ChildEvent = BaseEvent.extend('ChildEvent', {}) +const SiblingEvent = BaseEvent.extend('SiblingEvent', {}) + +const bus = new EventBus('OrderBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', +}) +const order: string[] = [] + +bus.on(ParentEvent, async (event) => { + order.push('parent_start') + event.event_bus!.emit(SiblingEvent({})) + const child = event.event_bus!.emit(ChildEvent({})) + await child.done() + order.push('parent_end') +}) + +bus.on(ChildEvent, async () => { + order.push('child') +}) + +bus.on(SiblingEvent, async () => { + order.push('sibling') +}) + +await 
bus.emit(ParentEvent({})).done() +await bus.waitUntilIdle() + +if (!(order.indexOf('child') < order.indexOf('parent_end'))) throw new Error('child should finish before parent resumes') +if (!(order.indexOf('parent_end') < order.indexOf('sibling'))) throw new Error('sibling should run after parent ends') +``` + + + + +## Interaction with concurrency modes + +- `event_concurrency = global-serial`: queue-jump still works, but all buses still share one global event slot. +- `event_concurrency = bus-serial`: queue-jump preempts that bus queue; other buses can continue processing independently. +- `event_concurrency = parallel`: events may already overlap; queue-jump still reduces parent latency for awaited child calls. +- `event_handler_concurrency = serial`: parent temporarily yields execution so child handlers can run without deadlock. +- `event_handler_concurrency = parallel`: child handlers can overlap with other handlers for the same event. +- `event_handler_completion = first`: winner semantics can cancel loser handlers and their in-flight child work. + +## Notes + +- In Python, `await child_event` inside a handler is the immediate path. +- In Python, `await child_event.event_completed()` keeps normal queue order (non-queue-jump wait). +- In TypeScript, use `await child_event.done()`. +- In TypeScript, `await child_event.eventCompleted()` keeps normal queue order (non-queue-jump wait). 
+ +## Related pages + +- [Events: global-serial](./events-global-serial) +- [Events: bus-serial](./events-bus-serial) +- [Events: parallel](./events-parallel) +- [Handlers: serial](./handlers-serial) +- [Handlers: parallel](./handlers-parallel) +- [Handler Completion: all](./handler-completion-all) +- [Handler Completion: first](./handler-completion-first) +- [BaseEvent](../api/baseevent) diff --git a/docs/concurrency/timeouts.mdx b/docs/concurrency/timeouts.mdx new file mode 100644 index 0000000..52a84d2 --- /dev/null +++ b/docs/concurrency/timeouts.mdx @@ -0,0 +1,245 @@ +--- +title: Timeout Enforcement +description: Configure execution deadlines and slow-warning thresholds at bus, event, and handler levels. +--- + +Timeout controls operate at three levels: + +- Bus defaults (resolved on each bus at processing time when event-level values are unset) +- Per-event overrides (applies to one emitted event instance) +- Per-handler overrides (applies to one handler registration) + +Repository example files: +- [`examples/concurrency_options.py`](https://github.com/pirate/bbus/blob/main/examples/concurrency_options.py) +- [`bubus-ts/examples/concurrency_options.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/concurrency_options.ts) +- [`examples/log_tree_demo.py`](https://github.com/pirate/bbus/blob/main/examples/log_tree_demo.py) +- [`bubus-ts/examples/log_tree_demo.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/log_tree_demo.ts) + +## Timeout types + +### 1) Event timeout (`event_timeout`) + +The outer execution budget for an event. This also acts as an upper cap for each handler run for that event. + +### 2) Handler timeout (`event_handler_timeout` / `handler_timeout`) + +A handler-specific timeout budget. The effective handler timeout is resolved from handler -> event -> bus, then capped by `event_timeout` when both are set. 
+ +### 3) Slow-warning thresholds (`event_slow_timeout`, `event_handler_slow_timeout`, `handler_slow_timeout`) + +These emit warnings when work is taking longer than expected: + +- `event_slow_timeout`: warns when event processing is still running past the threshold. +- `event_handler_slow_timeout` / `handler_slow_timeout`: warns when a handler run is still running past the threshold. + +Slow thresholds are warnings, not forced cancellation. + +## Where to set each value + +| Level | Execution timeout fields | Slow-warning fields | +| --- | --- | --- | +| Bus | `event_timeout` | `event_slow_timeout`, `event_handler_slow_timeout` | +| Event | `event_timeout`, `event_handler_timeout` | `event_slow_timeout`, `event_handler_slow_timeout` | +| Handler | `handler_timeout` | `handler_slow_timeout` | + +## Bus-level defaults + +Set default budgets and warning thresholds once when creating a bus. + + + + +```python +from bubus import EventBus + +bus = EventBus( + 'TimeoutBus', + event_timeout=30.0, + event_slow_timeout=10.0, + event_handler_slow_timeout=3.0, +) +``` + + + + +```ts +import { EventBus } from 'bubus' + +const bus = new EventBus('TimeoutBus', { + event_timeout: 30, + event_slow_timeout: 10, + event_handler_slow_timeout: 3, +}) +``` + + + + +## Event-level overrides + +Set per-event values when emitting an event instance. 
+ + + + +```python +from bubus import BaseEvent + +class WorkEvent(BaseEvent): + pass + +event = bus.emit( + WorkEvent( + event_timeout=8.0, + event_handler_timeout=2.0, + event_slow_timeout=4.0, + event_handler_slow_timeout=1.0, + ) +) +``` + + + + +```ts +import { BaseEvent } from 'bubus' + +const WorkEvent = BaseEvent.extend('WorkEvent', {}) + +const event = bus.emit( + WorkEvent({ + event_timeout: 8, + event_handler_timeout: 2, + event_slow_timeout: 4, + event_handler_slow_timeout: 1, + }) +) +``` + + + + +## Handler-level overrides + +Set per-handler timeout and slow-warning overrides at registration time (or by updating the returned handler metadata). + + + + +```python +entry = bus.on(WorkEvent, slow_handler) +entry.handler_timeout = 1.5 +entry.handler_slow_timeout = 0.5 +``` + + + + +```ts +bus.on(WorkEvent, slowHandler, { + handler_timeout: 1.5, + handler_slow_timeout: 0.5, +}) +``` + + + + +## Precedence rules + +### Effective handler timeout + +1. Resolve handler timeout source: + - `handler_timeout` (handler level) + - else `event_handler_timeout` (event level) + - else bus `event_timeout` +2. Apply event cap: + - effective timeout is `min(resolved_handler_timeout, event_timeout)` when both are set + - if one is unset, the other value is used + - if both are unset, no timeout is enforced + +Resolution happens at processing time on each bus. +For forwarded events, an unset timeout/concurrency field uses the target bus defaults. + +### Effective handler slow-warning threshold + +Resolved in this order: + +1. `handler_slow_timeout` +2. `event_handler_slow_timeout` +3. `event_slow_timeout` +4. bus `event_handler_slow_timeout` +5. bus `event_slow_timeout` + +### Effective event slow-warning threshold + +Resolved in this order: + +1. `event_slow_timeout` +2. bus `event_slow_timeout` + +## Execution scope ordering + +Timeout and slow-monitor behavior is implemented as stacked runtime scopes. The ordering matters: + + + + +```python +# EventBus.step(...) 
+async with self.locks._run_with_event_lock(self, event): + await self._process_event(event, timeout=timeout) + +# EventBus.process_event(...) +async with asyncio.timeout(resolved_event_timeout): + async with with_slow_monitor(self._create_slow_event_warning_timer(event)): + await event._run_handlers(eventbus=self, handlers=applicable_handlers, timeout=resolved_event_timeout) + +# EventResult.run_handler(...) +async with eventbus.locks._run_with_handler_lock(eventbus, event, event_result): + with eventbus._run_with_handler_dispatch_context(event, event_result.handler_id): + async with event_result._run_with_timeout(event): + async with with_slow_monitor(handler_slow_monitor): + await event_result._call_handler(...) +``` + + + + +```ts +// EventBus.processEvent(...) +await this.locks._runWithEventLock(event, () => + this._runHandlersWithTimeout(event, pending_entries, resolved_event_timeout, () => + _runWithSlowMonitor(event._createSlowEventWarningTimer(), () => + scoped_event._runHandlers(pending_entries) + ) + ) +) + +// BaseEvent._runHandlers(...) + EventResult.runHandler(...) +await this.bus.locks._runWithHandlerLock(original, this.bus.event_handler_concurrency, async (handler_lock) => { + await entry.runHandler(handler_lock) +}) + +await this.bus.locks._runWithHandlerDispatchContext(this, async () => { + await _runWithAsyncContext(event._getDispatchContext() ?? null, async () => { + await _runWithTimeout(this.handler_timeout, () => this._createHandlerTimeoutError(event), () => + _runWithSlowMonitor(slow_handler_warning_timer, () => + _runWithAbortMonitor(() => this.handler.handler(handler_event), abort_signal) + ) + ) + }) +}) +``` + + + + +Event-level timeout finalization keeps cancellation semantics explicit: +- pending handlers -> cancelled +- started handlers -> aborted + +## Note on retry + +Bus/event timeouts are outer budgets. If you need per-attempt limits for retried handlers, use the `retry` decorator's `timeout` option. 
diff --git a/docs/docs.json b/docs/docs.json new file mode 100644 index 0000000..59f3d97 --- /dev/null +++ b/docs/docs.json @@ -0,0 +1,143 @@ +{ + "$schema": "https://mintlify.com/docs.json", + "name": "bubus", + "theme": "almond", + "favicon": "/favicon.svg", + "colors": { + "primary": "#440f75", + "light": "#725dc0", + "dark": "#24053b" + }, + "navigation": { + "tabs": [ + { + "tab": "Documentation", + "groups": [ + { + "group": "Getting Started", + "pages": [ + "index", + "quickstart" + ] + }, + { + "group": "Features", + "pages": [ + "features/typed-events", + "features/return-value-handling", + "features/fifo-processing", + "features/parent-child-tracking", + "features/forwarding-between-buses", + "features/event-pattern-matching", + "features/async-sync-handlers", + "features/event-history-store", + "features/find-events", + "features/event-debouncing", + "features/context-propagation" + ] + }, + { + "group": "API Reference", + "pages": [ + "api/eventbus", + "api/eventhistory", + "api/baseevent", + "api/eventresult", + "api/eventhandler", + "api/eventbusmiddleware", + "api/retry" + ] + }, + { + "group": "Concurrency Control", + "pages": [ + "concurrency/immediate-execution", + "concurrency/events-global-serial", + "concurrency/events-bus-serial", + "concurrency/events-parallel", + "concurrency/handlers-serial", + "concurrency/handlers-parallel", + "concurrency/handler-completion-all", + "concurrency/handler-completion-first", + "concurrency/timeouts", + "concurrency/backpressure", + "api/retry" + ] + }, + { + "group": "Integrations", + "pages": [ + { + "group": "Middlewares", + "expanded": true, + "pages": [ + "integrations/middlewares", + "integrations/middleware-otel-tracing", + "integrations/middleware-auto-error", + "integrations/middleware-auto-return", + "integrations/middleware-auto-handler-change", + "integrations/middleware-wal", + "integrations/middleware-logger", + "integrations/middleware-sqlite-history-mirror" + ] + }, + { + "group": "Bridges", + 
"expanded": true, + "pages": [ + "integrations/bridges", + "integrations/bridge-http", + "integrations/bridge-socket", + "integrations/bridge-redis", + "integrations/bridge-nats", + "integrations/bridge-postgres", + "integrations/bridge-jsonl", + "integrations/bridge-sqlite" + ] + } + ] + }, + { + "group": "Further Reading", + "pages": [ + "operations/performance", + "operations/supported-runtimes", + "operations/development", + "further-reading/events-suck", + "further-reading/similar-projects" + ] + } + ] + } + ] + }, + "description": "Fast strongly-typed Python + Typescript event bus library.", + "background": { + "decoration": "grid" + }, + "footer": { + "socials": { + "github": "https://github.com/pirate/bbus", + "x": "https://x.com/theSquashSH" + } + }, + "navbar": { + "links": [ + { + "label": "Github", + "href": "https://github.com/pirate/bbus" + }, + { + "label": "NPM", + "href": "https://www.npmjs.com/package/bubus" + }, + { + "label": "PyPI", + "href": "https://pypi.org/project/bubus/" + } + ] + }, + "seo": { + "indexing": "all" + } +} diff --git a/docs/favicon.svg b/docs/favicon.svg new file mode 100644 index 0000000..76cdc35 --- /dev/null +++ b/docs/favicon.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/docs/features/async-sync-handlers.mdx b/docs/features/async-sync-handlers.mdx new file mode 100644 index 0000000..55f1284 --- /dev/null +++ b/docs/features/async-sync-handlers.mdx @@ -0,0 +1,126 @@ +--- +title: Async and Sync Handlers +description: Mix sync and async handlers across functions and method styles. +--- + +Both runtimes support registering sync and async handlers together. 
+ +Supported handler shapes: + +- bare functions (sync or async) +- static methods (sync or async) +- class-level methods (Python `@classmethod`; in TypeScript, class-level handlers are `static` methods) +- instance methods (sync or async) + + + + +```python +import asyncio +from bubus import EventBus, BaseEvent + +class WorkEvent(BaseEvent[str]): + task_id: str + +def bare_sync(event: WorkEvent) -> str: + return f'bare-sync:{event.task_id}' + +async def bare_async(event: WorkEvent) -> str: + await asyncio.sleep(0.01) + return f'bare-async:{event.task_id}' + +class HandlerSet: + def __init__(self, prefix: str) -> None: + self.prefix = prefix + + @staticmethod + def static_sync(event: WorkEvent) -> str: + return f'static-sync:{event.task_id}' + + @staticmethod + async def static_async(event: WorkEvent) -> str: + await asyncio.sleep(0.01) + return f'static-async:{event.task_id}' + + @classmethod + def class_sync(cls, event: WorkEvent) -> str: + return f'{cls.__name__}-class-sync:{event.task_id}' + + @classmethod + async def class_async(cls, event: WorkEvent) -> str: + await asyncio.sleep(0.01) + return f'{cls.__name__}-class-async:{event.task_id}' + + def instance_sync(self, event: WorkEvent) -> str: + return f'{self.prefix}-instance-sync:{event.task_id}' + + async def instance_async(self, event: WorkEvent) -> str: + await asyncio.sleep(0.01) + return f'{self.prefix}-instance-async:{event.task_id}' + +bus = EventBus('AppBus') +handlers = HandlerSet(prefix='svc') + +bus.on(WorkEvent, bare_sync) +bus.on(WorkEvent, bare_async) +bus.on(WorkEvent, HandlerSet.static_sync) +bus.on(WorkEvent, HandlerSet.static_async) +bus.on(WorkEvent, HandlerSet.class_sync) +bus.on(WorkEvent, HandlerSet.class_async) +bus.on(WorkEvent, handlers.instance_sync) +bus.on(WorkEvent, handlers.instance_async) +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const WorkEvent = BaseEvent.extend('WorkEvent', { + task_id: z.string(), +}) + +const bus = new 
EventBus('AppBus') + +const bareSync = (event: InstanceType<typeof WorkEvent>) => `bare-sync:${event.task_id}` +const bareAsync = async (event: InstanceType<typeof WorkEvent>) => { + await new Promise((resolve) => setTimeout(resolve, 10)) + return `bare-async:${event.task_id}` +} + +class HandlerSet { + constructor(private prefix: string) {} + + static staticSync(event: InstanceType<typeof WorkEvent>) { + return `static-sync:${event.task_id}` + } + + static async staticAsync(event: InstanceType<typeof WorkEvent>) { + await new Promise((resolve) => setTimeout(resolve, 10)) + return `static-async:${event.task_id}` + } + + instanceSync(event: InstanceType<typeof WorkEvent>) { + return `${this.prefix}-instance-sync:${event.task_id}` + } + + async instanceAsync(event: InstanceType<typeof WorkEvent>) { + await new Promise((resolve) => setTimeout(resolve, 10)) + return `${this.prefix}-instance-async:${event.task_id}` + } +} + +const handlers = new HandlerSet('svc') + +bus.on(WorkEvent, bareSync) +bus.on(WorkEvent, bareAsync) +bus.on(WorkEvent, HandlerSet.staticSync) +bus.on(WorkEvent, HandlerSet.staticAsync) +bus.on(WorkEvent, handlers.instanceSync.bind(handlers)) +bus.on(WorkEvent, handlers.instanceAsync.bind(handlers)) +``` + + + diff --git a/docs/features/context-propagation.mdx b/docs/features/context-propagation.mdx new file mode 100644 index 0000000..8513335 --- /dev/null +++ b/docs/features/context-propagation.mdx @@ -0,0 +1,110 @@ +--- +title: Context Propagation +description: Carry request-scoped context through emit and handler execution (ContextVars / AsyncLocalStorage). +--- + +Context propagation means values you set at request entry (like `request_id`, `user_id`, trace/span context) are still available inside event handlers later in the async call chain. + +This is commonly used in: + +- web servers (FastAPI, Fastify, Express/Nest adapters) +- observability and distributed tracing (OpenTelemetry) +- structured logging/correlation IDs + +## What this maps to per runtime + +- Python uses `ContextVars` (`contextvars.ContextVar`). 
+- TypeScript (Node/Bun) uses `AsyncLocalStorage`. + +Bubus captures ambient context at `emit(...)` time and restores it when handlers execute, so handler code sees the same request-local values. + +## Why this matters + +Without propagation, handler code often loses request-local state after async boundaries and queue scheduling. +With propagation, event handlers can log/trace as if they were still running in the original request scope. + + + + +```python +from contextvars import ContextVar +from bubus import EventBus, BaseEvent + +request_id: ContextVar[str] = ContextVar('request_id', default='') + +class RequestEvent(BaseEvent): + pass + +bus = EventBus('AppBus') + +async def handler(_: RequestEvent) -> None: + print(request_id.get()) + # req-123 + +bus.on(RequestEvent, handler) +request_id.set('req-123') +await bus.emit(RequestEvent()) +``` + + + + +```ts +import { AsyncLocalStorage } from 'node:async_hooks' +import { BaseEvent, EventBus } from 'bubus' + +const requestContext = new AsyncLocalStorage<{ requestId: string }>() +const RequestEvent = BaseEvent.extend('RequestEvent', {}) +const bus = new EventBus('AppBus') + +bus.on(RequestEvent, () => { + console.log(requestContext.getStore()?.requestId) +}) + +await requestContext.run({ requestId: 'req-123' }, async () => { + await bus.emit(RequestEvent({})).done() +}) +``` + + + + +## Web server style examples + +These patterns are typical in frameworks where each incoming request gets a request-local context object. 
+ + + + +```python +# FastAPI-style shape (conceptual) +request_id.set(incoming_request.headers.get('x-request-id', 'generated-id')) +await bus.emit(RequestEvent()) +# handlers can still read request_id.get() +``` + + + + +```ts +// Fastify-style shape (conceptual) +await requestContext.run({ requestId: req.id }, async () => { + await bus.emit(RequestEvent({})).done() +}) +// handlers can still read requestContext.getStore() +``` + + + + +## Browser runtime note + +`AsyncLocalStorage` is a Node/Bun API and is not available in browser runtimes. + +In browsers: + +- Bubus still works normally for events. +- ambient async context propagation via `AsyncLocalStorage` is not available. +- pass correlation/tracing fields explicitly in event payloads when you need that metadata. + +See [Supported Runtimes](../operations/supported-runtimes) for runtime compatibility details. diff --git a/docs/features/event-debouncing.mdx b/docs/features/event-debouncing.mdx new file mode 100644 index 0000000..9ef4ffc --- /dev/null +++ b/docs/features/event-debouncing.mdx @@ -0,0 +1,194 @@ +--- +title: Event Debouncing +description: Deduplicate expensive event work using find(..., past/future) patterns. +--- + +Debouncing is most useful when events trigger expensive work: + +- screenshots or browser automation +- external API calls +- LLM/tool runs +- heavyweight DB/file operations + +Instead of starting duplicate work every time, reuse: + +- a recent matching event (`past` window), or +- a matching event that is about to be emitted by another caller (`future` wait), or +- both (history-first, then short future wait, then emit). + +Debouncing in Bubus is built from `find(...)` + conditional `emit(...)`. + +## Debounce building blocks + +- `past`: search recent history (`true`/`false`/seconds) +- `future`: optionally wait for a matching future emit (`true`/`false`/seconds) +- `where` / event-field filters: scope matching to the same "work key" (url, account_id, document_id, etc.) 
+ +See [Find Events](./find-events) for full option semantics. + +## Pattern 1: Reuse recent completed work (history-only) + +Use when "fresh enough" cached results are acceptable. + + + + +```python +existing = await bus.find( + ScreenshotEvent, + where=lambda e: e.url == url, + past=10, # look back 10s + future=False, # do not wait +) + +event = existing or bus.emit(ScreenshotEvent(url=url)) +await event +result = await event.event_result() +``` + + + + +```ts +const existing = await bus.find( + ScreenshotEvent, + (event) => event.url === url, + { past: 10, future: false } +) + +const event = existing ?? bus.emit(ScreenshotEvent({ url })) +await event.done() +const result = event.event_result +``` + + + + +## Pattern 2: Coalesce concurrent callers (future-only) + +Use when many callers may request the same expensive action at the same time. + +Caller A emits first. Caller B waits briefly for that same event instead of emitting a duplicate. + + + + +```python +in_flight = await bus.find( + ScreenshotEvent, + where=lambda e: e.url == url, + past=False, # skip history + future=2, # wait up to 2s for another caller to emit +) + +event = in_flight or bus.emit(ScreenshotEvent(url=url)) +await event +result = await event.event_result() +``` + + + + +```ts +const inFlight = await bus.find( + ScreenshotEvent, + (event) => event.url === url, + { past: false, future: 2 } +) + +const event = inFlight ?? bus.emit(ScreenshotEvent({ url })) +await event.done() +const result = event.event_result +``` + + + + +## Pattern 3: Hybrid debounce (past + short future + emit) + +This is the most practical default for expensive endpoints. + +1. Reuse recent match. +2. If none, wait briefly for someone else to emit. +3. If still none, emit new work. 
+ + + + +```python +event = ( + await bus.find(ScreenshotEvent, where=lambda e: e.url == url, past=10, future=False) + or await bus.find(ScreenshotEvent, where=lambda e: e.url == url, past=False, future=2) + or bus.emit(ScreenshotEvent(url=url)) +) + +await event +result = await event.event_result() +``` + + + + +```ts +const event = + (await bus.find(ScreenshotEvent, (e) => e.url === url, { past: 10, future: false })) ?? + (await bus.find(ScreenshotEvent, (e) => e.url === url, { past: false, future: 2 })) ?? + bus.emit(ScreenshotEvent({ url })) + +await event.done() +const result = event.event_result +``` + + + + +## Pattern 4: Keyed helper for repeated use + +Wrap the debounce logic once and reuse it for all expensive keyed actions. + + + + +```python +async def emit_debounced_screenshot(url: str): + event = ( + await bus.find(ScreenshotEvent, where=lambda e: e.url == url, past=15, future=False) + or await bus.find(ScreenshotEvent, where=lambda e: e.url == url, past=False, future=3) + or bus.emit(ScreenshotEvent(url=url)) + ) + await event + return await event.event_result() +``` + + + + +```ts +const emitDebouncedScreenshot = async (url: string) => { + const event = + (await bus.find(ScreenshotEvent, (e) => e.url === url, { past: 15, future: false })) ?? + (await bus.find(ScreenshotEvent, (e) => e.url === url, { past: false, future: 3 })) ?? + bus.emit(ScreenshotEvent({ url })) + + await event.done() + return event.event_result +} +``` + + + + +## Important behavior notes + +- `find(...)` resolves when an event is emitted, not when handlers finish. +- Always await completion after selecting a debounced event: + - Python: `await event`, then `await event.event_result()` + - TypeScript: `await event.done()`, then `event.event_result` +- Debouncing scope depends on your match key (`where` / event-field filters). + Use the narrowest key that represents "same work." +- Debouncing depends on retained history. 
If history is aggressively trimmed, your `past` window can become less effective. + +See also: + +- [Find Events](./find-events) +- [Event History Store](./event-history-store) diff --git a/docs/features/event-history-store.mdx b/docs/features/event-history-store.mdx new file mode 100644 index 0000000..43a03aa --- /dev/null +++ b/docs/features/event-history-store.mdx @@ -0,0 +1,113 @@ +--- +title: Event History Store +description: Understand queue vs history behavior and how retention settings trim old events. +--- + +Both runtimes expose two related (but different) runtime stores: + +- `pending_event_queue`: events accepted by the bus but not yet started by the runloop +- `event_history`: events the bus knows about (pending, started, and completed until trimmed) + +If you were looking for `pending_events_queue`, the runtime field is `pending_event_queue` in both Python and TypeScript. + +## What each store is for + +| Store | Purpose | Typical contents | +| --- | --- | --- | +| `pending_event_queue` | Scheduling buffer | events waiting their turn to start | +| `event_history` | Observability + lookup | recent pending/started/completed events, bounded by history settings | + +The key difference: queue is "what still needs to start", history is "what this bus has seen". + +## Retention config options + +| Option | Meaning | +| --- | --- | +| `max_history_size` | Max number of events retained in `event_history` (`null`/`None` means unbounded, `0` means keep only in-flight visibility). | +| `max_history_drop` | If `true`, accept new events and trim oldest history entries when over limit. If `false`, reject new events at the limit (for `max_history_size > 0`). | + +## Event lifecycle: queue -> history -> trim + +1. Emit: + - Event is accepted. + - Event is added to `event_history`. + - Event is enqueued into `pending_event_queue`. +2. Runloop begins processing: + - Event is removed from `pending_event_queue`. + - Event stays in `event_history` while handlers run. +3. 
Completion: + - Event is marked completed. + - Event may remain in `event_history` or be dropped based on retention settings. +4. Trimming: + - `max_history_size` and `max_history_drop` determine whether old history is removed or new emits are rejected. + +## Trimming behavior by mode + +- `max_history_size = None/null`: no automatic history limit. +- `max_history_size = 0`: completed events are removed immediately; only pending/in-flight visibility remains. +- `max_history_size > 0` and `max_history_drop = false`: bus rejects new emits once history reaches the limit. +- `max_history_size > 0` and `max_history_drop = true`: bus trims oldest history entries (prefers completed first; can drop uncompleted entries under extreme pressure). + +Both runtimes follow this policy. Internally, trim timing is implementation-specific (eager vs amortized cleanup), but externally the semantics above are the contract to rely on. + +## Common configurations + + + + +```python +from bubus import EventBus + +bounded_drop = EventBus(max_history_size=100, max_history_drop=True) +bounded_reject = EventBus(max_history_size=100, max_history_drop=False) +unbounded = EventBus(max_history_size=None) +in_flight_only = EventBus(max_history_size=0) +``` + + + + +```ts +import { EventBus } from 'bubus' + +const boundedDrop = new EventBus('BoundedDropBus', { max_history_size: 100, max_history_drop: true }) +const boundedReject = new EventBus('BoundedRejectBus', { max_history_size: 100, max_history_drop: false }) +const unbounded = new EventBus('UnboundedBus', { max_history_size: null }) +const inFlightOnly = new EventBus('InFlightBus', { max_history_size: 0 }) +``` + + + + +## Inspecting queue vs history at runtime + + + + +```python +event = bus.emit(MyEvent()) +pending_count = bus.pending_event_queue.qsize() if bus.pending_event_queue else 0 +history_count = len(bus.event_history) +print('pending_event_queue=', pending_count, 'event_history=', history_count) +# pending_event_queue= 1 
event_history= 1 + +await event +pending_after = bus.pending_event_queue.qsize() if bus.pending_event_queue else 0 +history_after = len(bus.event_history) +print('after completion -> pending_event_queue=', pending_after, 'event_history=', history_after) +# after completion -> pending_event_queue= 0 event_history= 1 +``` + + + + +```ts +const event = bus.emit(MyEvent({})) +console.log('pending_event_queue=', bus.pending_event_queue.length, 'event_history=', bus.event_history.size) + +await event.done() +console.log('after completion -> pending_event_queue=', bus.pending_event_queue.length, 'event_history=', bus.event_history.size) +``` + + + diff --git a/docs/features/event-pattern-matching.mdx b/docs/features/event-pattern-matching.mdx new file mode 100644 index 0000000..e5c9c70 --- /dev/null +++ b/docs/features/event-pattern-matching.mdx @@ -0,0 +1,123 @@ +--- +title: Event Pattern Matching +description: Use classes, strings, or wildcards for both handler registration and event lookup. 
+--- + +Event patterns are shared across both APIs: + +- `bus.on(pattern, handler)` for subscriptions +- `bus.find(pattern, ...)` for history/future lookup + +Both accept the same pattern forms: + +- event class +- string event type name +- `'*'` wildcard (match everything) + +## Supported pattern forms + +| Pattern | Matches | Best for | +| --- | --- | --- | +| Event class (`UserActionEvent`) | One concrete event type | Strong typing end-to-end | +| String (`'UserActionEvent'`) | Events by type name | Dynamic routing/config-driven keys | +| `'*'` | All event types | Global observers, logging, bridges | + +## `.on(...)` and `.find(...)` use the same pattern model + +Use whichever operation you need, with the same pattern key: + +- subscribe: `bus.on(UserActionEvent, handler)` +- find by class: `await bus.find(UserActionEvent)` +- find by string: `await bus.find('UserActionEvent')` +- wildcard subscribe/find: `bus.on('*', ...)`, `await bus.find('*', ...)` + +## Examples + + + + +```python +from typing import Any +from bubus import BaseEvent, EventBus + +class UserActionEvent(BaseEvent[str]): + action: str + +bus = EventBus('AppBus') + +async def on_typed(event: UserActionEvent) -> str: + # event is strongly typed here + return f'action:{event.action}' + +def on_by_name(event: BaseEvent[Any]) -> None: + # string patterns are looser; payload fields are not statically known + print('by-name', event.event_type, getattr(event, 'action', None)) + # by-name UserActionEvent click + +def on_any(event: BaseEvent[Any]) -> None: + print('wildcard', event.event_type) + # wildcard UserActionEvent + +bus.on(UserActionEvent, on_typed) +bus.on('UserActionEvent', on_by_name) +bus.on('*', on_any) + +await bus.emit(UserActionEvent(action='click')).event_result() + +typed_match = await bus.find(UserActionEvent) # UserActionEvent | None +named_match = await bus.find('UserActionEvent') # BaseEvent[Any] | None +wildcard_match = await bus.find('*', future=5) # BaseEvent[Any] | None +``` + + 
+ + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const UserActionEvent = BaseEvent.extend('UserActionEvent', { + action: z.string(), + event_result_type: z.string(), +}) + +const bus = new EventBus('AppBus') + +bus.on(UserActionEvent, (event) => { + // event is strongly typed here + return `action:${event.action}` +}) + +bus.on('UserActionEvent', (event) => { + // string patterns are looser; event is BaseEvent-like at compile time + console.log('by-name', event.event_type) + return undefined +}) + +bus.on('*', (event) => { + console.log('wildcard', event.event_type) + return undefined +}) + +const typedMatch = await bus.find(UserActionEvent) // InstanceType | null +const namedMatch = await bus.find('UserActionEvent') // BaseEvent | null +const wildcardMatch = await bus.find('*', { future: 5 }) // BaseEvent | null +``` + + + + +## Why event classes are preferred for typing + +Event classes preserve the most useful static typing: + +- handler input shape is specific (payload fields are known) +- event result typing stays aligned with `event_result_type` / generic result type +- `.find(EventClass)` returns the specific event type + +String keys and `'*'` are intentionally looser: + +- Python: treat as `BaseEvent[Any]` +- TypeScript: typed as base `BaseEvent`/unknown-oriented handler return checks + +Use string/wildcard patterns when you need dynamic behavior. Use classes whenever you want strict payload/result type hints through handlers and lookups. diff --git a/docs/features/fifo-processing.mdx b/docs/features/fifo-processing.mdx new file mode 100644 index 0000000..408c63f --- /dev/null +++ b/docs/features/fifo-processing.mdx @@ -0,0 +1,221 @@ +--- +title: FIFO Event Processing +description: Process queued events in deterministic first-in-first-out order. +--- + +Using the default options out-of-the-box, all events and handlers on a bus process in strict serial order to make execution order predictable and consistency easy. 
+ +This is the default behavior because: + +- `event_concurrency='bus-serial'` +- `event_handler_concurrency='serial'` +- `event_handler_completion='all'` + +On a single bus, that means event `N+1` never starts before event `N` is complete, even if event `N+1` handlers are "faster". + +As you scale, you can tune these guarantees. See [Concurrency Control](../concurrency/immediate-execution) in the sidebar for all modes and tradeoffs. + +## Variable handler runtimes still stay FIFO + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class JobEvent(BaseEvent): + order: int + delay_s: float + +bus = EventBus('FifoBus') +started_order: list[int] = [] +completed_order: list[int] = [] + +async def on_job(event: JobEvent) -> None: + started_order.append(event.order) + await asyncio.sleep(event.delay_s) + completed_order.append(event.order) + +bus.on(JobEvent, on_job) + +emitted = [ + bus.emit(JobEvent(order=0, delay_s=0.030)), + bus.emit(JobEvent(order=1, delay_s=0.001)), + bus.emit(JobEvent(order=2, delay_s=0.020)), +] + +await bus.wait_until_idle() + +print(started_order) +# [0, 1, 2] +print(completed_order) +# [0, 1, 2] +print([event.event_started_at is not None for event in emitted]) +# [True, True, True] +print([event.event_completed_at is not None for event in emitted]) +# [True, True, True] +print(emitted[0].event_started_at <= emitted[1].event_started_at <= emitted[2].event_started_at) +# True +print(emitted[0].event_completed_at <= emitted[1].event_completed_at <= emitted[2].event_completed_at) +# True +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const JobEvent = BaseEvent.extend('JobEvent', { + order: z.number(), + delay_ms: z.number(), +}) + +const bus = new EventBus('FifoBus') +const startedOrder: number[] = [] +const completedOrder: number[] = [] + +bus.on(JobEvent, async (event) => { + startedOrder.push(event.order) + await new Promise((resolve) => setTimeout(resolve, event.delay_ms)) + 
completedOrder.push(event.order) +}) + +const emitted = [ + bus.emit(JobEvent({ order: 0, delay_ms: 30 })), + bus.emit(JobEvent({ order: 1, delay_ms: 1 })), + bus.emit(JobEvent({ order: 2, delay_ms: 20 })), +] + +await bus.waitUntilIdle() + +console.log(startedOrder) +// [0, 1, 2] +console.log(completedOrder) +// [0, 1, 2] +console.log(emitted.map((event) => Boolean(event.event_started_at))) +// [true, true, true] +console.log(emitted.map((event) => Boolean(event.event_completed_at))) +// [true, true, true] +console.log( + Date.parse(emitted[0].event_started_at!) <= + Date.parse(emitted[1].event_started_at!) && + Date.parse(emitted[1].event_started_at!) <= Date.parse(emitted[2].event_started_at!) +) +// true +console.log( + Date.parse(emitted[0].event_completed_at!) <= + Date.parse(emitted[1].event_completed_at!) && + Date.parse(emitted[1].event_completed_at!) <= Date.parse(emitted[2].event_completed_at!) +) +// true +``` + + + + +## Ambiguous case: slow then fast still runs serially + +Even if you emit a slow event and then a fast event right after, the fast one does not overtake on the same bus under defaults. 
+ + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class SlowEvent(BaseEvent): + name: str + +class FastEvent(BaseEvent): + name: str + +bus = EventBus('FifoBus') +trace: list[str] = [] + +async def on_slow(event: SlowEvent) -> None: + trace.append(f'start:{event.event_type}:{event.name}') + await asyncio.sleep(0.040) + trace.append(f'end:{event.event_type}:{event.name}') + +async def on_fast(event: FastEvent) -> None: + trace.append(f'start:{event.event_type}:{event.name}') + await asyncio.sleep(0.001) + trace.append(f'end:{event.event_type}:{event.name}') + +bus.on(SlowEvent, on_slow) +bus.on(FastEvent, on_fast) + +slow = bus.emit(SlowEvent(name='slow-a')) +fast = bus.emit(FastEvent(name='fast-b')) +await bus.wait_until_idle() + +print(trace) +# ['start:SlowEvent:slow-a', 'end:SlowEvent:slow-a', 'start:FastEvent:fast-b', 'end:FastEvent:fast-b'] +print(slow.event_completed_at <= fast.event_started_at) +# True +tree_lines = [ + line for line in bus.log_tree().splitlines() + if 'SlowEvent#' in line or 'FastEvent#' in line +] +print(tree_lines) +# ['β”œβ”€β”€ SlowEvent#6aa1 [14:09:10.120 (0.040s)]', '└── FastEvent#6aa2 [14:09:10.161 (0.001s)]'] +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const SlowEvent = BaseEvent.extend('SlowEvent', { + name: z.string(), +}) + +const FastEvent = BaseEvent.extend('FastEvent', { + name: z.string(), +}) + +const bus = new EventBus('FifoBus') +const trace: string[] = [] + +bus.on(SlowEvent, async (event) => { + trace.push(`start:${event.event_type}:${event.name}`) + await new Promise((resolve) => setTimeout(resolve, 40)) + trace.push(`end:${event.event_type}:${event.name}`) +}) + +bus.on(FastEvent, async (event) => { + trace.push(`start:${event.event_type}:${event.name}`) + await new Promise((resolve) => setTimeout(resolve, 1)) + trace.push(`end:${event.event_type}:${event.name}`) +}) + +const slow = bus.emit(SlowEvent({ name: 'slow-a' })) +const fast = 
bus.emit(FastEvent({ name: 'fast-b' })) +await bus.waitUntilIdle() + +console.log(trace) +// ['start:SlowEvent:slow-a', 'end:SlowEvent:slow-a', 'start:FastEvent:fast-b', 'end:FastEvent:fast-b'] +console.log(Date.parse(slow.event_completed_at!) <= Date.parse(fast.event_started_at!)) +// true +const treeLines = bus + .logTree() + .split('\n') + .filter((line) => line.includes('SlowEvent#') || line.includes('FastEvent#')) +console.log(treeLines) +// ['β”œβ”€β”€ βœ… SlowEvent#6aa1 [14:09:10.120 (0.040s)]', '└── βœ… FastEvent#6aa2 [14:09:10.161 (0.001s)]'] +``` + + + + +## Important exception: awaited child events + +Inside a running handler, if you emit and await a child event, that child can queue-jump for RPC-style behavior. This is the intentional exception to plain FIFO queue order. + +See [Immediate Execution (RPC-style)](../concurrency/immediate-execution) for exact behavior and mode interactions. diff --git a/docs/features/find-events.mdx b/docs/features/find-events.mdx new file mode 100644 index 0000000..ab89afd --- /dev/null +++ b/docs/features/find-events.mdx @@ -0,0 +1,228 @@ +--- +title: Find Events +description: Query history and optionally wait for matching future events. +--- + +`find(...)` is the unified lookup API: search history, wait for future events, or combine both. + +## Interface + + + + +```python +await bus.find( + event_type, # Event class, event type string, or '*' + where: Callable[[BaseEvent], bool] | None = None, + child_of: BaseEvent | None = None, + past: bool | float | timedelta = True, + future: bool | float = False, + **event_fields, # equality filters (event_status='completed', request_id='abc', ...) +) +``` + + + + +```ts +await bus.find(event_pattern, options?) +await bus.find(event_pattern, where, options?) + +// options: +{ + past?: boolean | number // seconds when number + future?: boolean | number // seconds when number + child_of?: BaseEvent | null + [event_field: string]: unknown // equality filters, e.g. 
event_status: 'completed' +} +``` + + + + +## Option semantics + +- `past` + - `true`: search all history (default) + - `false`: skip history + - `number` (or `timedelta` in Python): search recent history window +- `future` + - `false`: do not wait (default) + - `true`: wait indefinitely + - `number`: wait up to N seconds +- `where`: predicate filter +- `child_of`: match only descendants of the given parent event +- `event_fields`: strict equality filters on event fields/metadata + +Default behavior when omitted is history-only lookup (`past=True`, `future=False`). + +## Common use cases + +### 1) History lookup only (non-blocking) + + + + +```python +existing = await bus.find(ResponseEvent) +``` + + + + +```ts +const existing = await bus.find(ResponseEvent) +``` + + + + +### 2) Wait only for future events + + + + +```python +future = await bus.find(ResponseEvent, past=False, future=5) +``` + + + + +```ts +const future = await bus.find(ResponseEvent, { past: false, future: 5 }) +``` + + + + +### 3) Check recent history, then keep waiting briefly + + + + +```python +match = await bus.find(ResponseEvent, past=5, future=5) +``` + + + + +```ts +const match = await bus.find(ResponseEvent, { past: 5, future: 5 }) +``` + + + + +### 4) Filter by fields + predicate + + + + +```python +match = await bus.find( + ResponseEvent, + where=lambda e: e.request_id == my_id, + event_status='completed', + future=5, +) +``` + + + + +```ts +const match = await bus.find( + ResponseEvent, + (event) => event.request_id === myId, + { event_status: 'completed', future: 5 } +) +``` + + + + +### 5) Wildcard lookup across all event types + + + + +```python +any_completed = await bus.find( + '*', + where=lambda e: e.event_type.endswith('ResultEvent'), + event_status='completed', + future=5, +) +``` + + + + +```ts +const anyCompleted = await bus.find( + '*', + (event) => event.event_type.endsWith('ResultEvent'), + { event_status: 'completed', future: 5 } +) +``` + + + + +### 6) Find descendants 
of a specific parent event + + + + +```python +parent_event = await bus.emit(NavigateToUrlEvent(url='https://example.com')) +child = await bus.find(TabCreatedEvent, child_of=parent_event, past=5) +``` + + + + +```ts +const parentEvent = await bus.emit(NavigateToUrlEvent({ url: 'https://example.com' })).done() +const child = await bus.find(TabCreatedEvent, { child_of: parentEvent, past: 5 }) +``` + + + + +### 7) Debounce expensive work + + + + +```python +event = ( + await bus.find(ScreenshotEvent, past=10, future=False) + or await bus.find(ScreenshotEvent, past=False, future=5) + or bus.emit(ScreenshotEvent()) +) +await event +``` + + + + +```ts +const event = + (await bus.find(ScreenshotEvent, { past: 10, future: false })) ?? + (await bus.find(ScreenshotEvent, { past: false, future: 5 })) ?? + bus.emit(ScreenshotEvent({})) +await event.done() +``` + + + + +## Important behavior + +- `find()` resolves when an event is emitted, not when handlers finish. +- To wait for handler completion, await the returned event (`await event` in Python, `await event.done()` in TypeScript). +- If no match is found (or `future` times out), `find()` returns `None` / `null`. +- If both `past` and `future` are `false`, it returns immediately with no match. diff --git a/docs/features/forwarding-between-buses.mdx b/docs/features/forwarding-between-buses.mdx new file mode 100644 index 0000000..724c7d6 --- /dev/null +++ b/docs/features/forwarding-between-buses.mdx @@ -0,0 +1,275 @@ +--- +title: Forwarding Between Buses +description: Compose multiple buses with automatic forwarding loop prevention. +--- + +You can forward events across multiple buses while preserving event path metadata and loop safety. 
+ +Repository example files: +- [`examples/forwarding_between_busses.py`](https://github.com/pirate/bbus/blob/main/examples/forwarding_between_busses.py) +- [`bubus-ts/examples/forwarding_between_busses.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/forwarding_between_busses.ts) + +## Why multiple buses are useful + +Multiple buses let you separate concerns and tune runtime behavior per boundary: + +- service-local bus for business logic with strict ordering and useful history +- transport/relay bus focused on throughput and forwarding (little or no history retention) +- specialized buses for domains that need different timeout or concurrency policies + +This is especially useful in microservice-style designs, where each component has different consistency and observability needs. + +## Example: service buses with different policies + +In this example: + +- `AuthBus` is strict and debuggable: `event_concurrency='bus-serial'`, `event_handler_concurrency='serial'`, `max_history_size=100` +- `RelayBus` is a transport forwarder: `event_concurrency='parallel'`, `max_history_size=0` +- `BillingBus` is another service bus with its own settings + + + + +```python +from bubus import BaseEvent, EventBus + +class UserCreatedEvent(BaseEvent[str]): + user_id: str + +class AuthService: + def __init__(self) -> None: + self.bus = EventBus( + 'AuthBus', + event_concurrency='bus-serial', + event_handler_concurrency='serial', + max_history_size=100, + ) + self.bus.on(UserCreatedEvent, self.on_user_created) + + async def on_user_created(self, event: UserCreatedEvent) -> str: + return f'auth-ok:{event.user_id}' + +class RelayService: + def __init__(self) -> None: + self.bus = EventBus( + 'RelayBus', + event_concurrency='parallel', + max_history_size=0, + ) + +class BillingService: + def __init__(self) -> None: + self.bus = EventBus( + 'BillingBus', + event_concurrency='bus-serial', + event_handler_concurrency='serial', + max_history_size=100, + ) + 
self.bus.on(UserCreatedEvent, self.on_user_created) + + async def on_user_created(self, event: UserCreatedEvent) -> str: + return f'billing-ok:{event.user_id}' + +auth = AuthService() +relay = RelayService() +billing = BillingService() + +auth.bus.on('*', relay.bus.emit) +relay.bus.on('*', billing.bus.emit) + +result = await auth.bus.emit(UserCreatedEvent(user_id='u-a8d1')).event_result() +print(result) +# 'auth-ok:u-a8d1' + +root = auth.bus.emit(UserCreatedEvent(user_id='u-a8d1')) +await root +print(root.event_path) +# ['AuthBus#a8d1', 'RelayBus#3f2c', 'BillingBus#b91e'] +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const UserCreatedEvent = BaseEvent.extend('UserCreatedEvent', { + user_id: z.string(), + event_result_type: z.string(), +}) + +class AuthService { + bus = new EventBus('AuthBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + max_history_size: 100, + }) + + constructor() { + this.bus.on(UserCreatedEvent, this.onUserCreated) + } + + onUserCreated = async (event: InstanceType) => `auth-ok:${event.user_id}` +} + +class RelayService { + bus = new EventBus('RelayBus', { + event_concurrency: 'parallel', + max_history_size: 0, + }) +} + +class BillingService { + bus = new EventBus('BillingBus', { + event_concurrency: 'bus-serial', + event_handler_concurrency: 'serial', + max_history_size: 100, + }) + + constructor() { + this.bus.on(UserCreatedEvent, this.onUserCreated) + } + + onUserCreated = async (event: InstanceType) => `billing-ok:${event.user_id}` +} + +const auth = new AuthService() +const relay = new RelayService() +const billing = new BillingService() + +auth.bus.on('*', relay.bus.emit) +relay.bus.on('*', billing.bus.emit) + +const event = auth.bus.emit(UserCreatedEvent({ user_id: 'u-a8d1' })) +await event.done() +console.log(event.event_result) +// 'auth-ok:u-a8d1' +console.log(event.event_path) +// ['AuthBus#a8d1', 'RelayBus#3f2c', 'BillingBus#b91e'] +``` + + + + +## 
Uni-directional and bi-directional forwarding + +Forwarding can be one-way or two-way depending on your topology. + +- Uni-directional: one producer bus forwards to one consumer bus. +- Bi-directional: both buses forward to each other (common for peer sync). + + + + +```python +left = EventBus('LeftBus') +right = EventBus('RightBus') + +# uni-directional +left.on('*', right.emit) + +# bi-directional (add reverse path) +right.on('*', left.emit) +``` + + + + +```ts +const left = new EventBus('LeftBus') +const right = new EventBus('RightBus') + +// uni-directional +left.on('*', right.emit) + +// bi-directional (add reverse path) +right.on('*', left.emit) +``` + + + + +Loop prevention still applies in both modes: if an event already visited a bus (tracked in `event_path`), forwarding back to that bus is a no-op and it is not re-processed there. + +## How loop prevention works (`event_path`) + +Loop prevention is automatic and based on `event_path`: + +1. Each bus appends its own label (for example `AuthBus#a8d1`) to `event_path` when it first sees an event. +2. When a forwarding handler points to another bus, that bus checks whether its label is already in `event_path`. +3. If yes, forwarding to that bus is skipped (no-op), so cycles terminate naturally. + +This means you can wire cyclic topologies without infinite forwarding loops. 
+ + + + +```python +from bubus import BaseEvent, EventBus + +class PingEvent(BaseEvent): + message: str + +bus_a = EventBus('BusA') +bus_b = EventBus('BusB') +bus_c = EventBus('BusC') + +# cycle: A -> B -> C -> A +bus_a.on('*', bus_b.emit) +bus_b.on('*', bus_c.emit) +bus_c.on('*', bus_a.emit) + +event = bus_a.emit(PingEvent(message='hello')) +await event + +print(event.event_path) +# ['BusA#a8d1', 'BusB#3f2c', 'BusC#b91e'] +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const PingEvent = BaseEvent.extend('PingEvent', { + message: z.string(), +}) + +const busA = new EventBus('BusA') +const busB = new EventBus('BusB') +const busC = new EventBus('BusC') + +// cycle: A -> B -> C -> A +busA.on('*', busB.emit) +busB.on('*', busC.emit) +busC.on('*', busA.emit) + +const event = busA.emit(PingEvent({ message: 'hello' })) +await event.done() + +console.log(event.event_path) +// ['BusA#a8d1', 'BusB#3f2c', 'BusC#b91e'] +``` + + + + +## Parent-child tracking across forwarded flows + +Parent-child tracking also works across forwarded flows: + +- if a forwarded event is handled on a downstream bus and that handler emits a child event, the child still links back to the parent via `event_parent_id` +- nested descendants emitted on downstream buses keep that lineage as they continue through forwarding +- this remains true for both queue-jumped children (`await child`) and normally queued children (emitted but not immediately awaited) + +See [Parent-Child Tracking](./parent-child-tracking) for a deeper walkthrough and tree-log example. +See [Immediate Execution (RPC-style)](../concurrency/immediate-execution) for queue-jump execution behavior. + +## Bridges are forwarding with transport + +Bridges are fundamentally the same forwarding pattern, but with serialization + remote transport in the middle. + +See [Bridges Overview](../integrations/bridges) for HTTP/Redis/NATS/Postgres/socket/file-backed bridge options and setup patterns. 
diff --git a/docs/features/parent-child-tracking.mdx b/docs/features/parent-child-tracking.mdx new file mode 100644 index 0000000..67a339d --- /dev/null +++ b/docs/features/parent-child-tracking.mdx @@ -0,0 +1,222 @@ +--- +title: Parent-Child Tracking +description: Trace nested event flows with automatic parent-child lineage and tree logs. +--- + +When a handler emits another event, Bubus automatically records lineage so you can understand call chains instead of guessing what triggered what. + +Repository example files: +- [`examples/parent_child_tracking.py`](https://github.com/pirate/bbus/blob/main/examples/parent_child_tracking.py) +- [`bubus-ts/examples/parent_child_tracking.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/parent_child_tracking.ts) +- [`examples/log_tree_demo.py`](https://github.com/pirate/bbus/blob/main/examples/log_tree_demo.py) +- [`bubus-ts/examples/log_tree_demo.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/log_tree_demo.ts) + +## What gets tracked + +- `event_parent_id`: points from child -> parent event +- `event_children`: aggregated list of children emitted by handler execution +- `event_emitted_by_handler_id`: which specific handler emitted the child + +This tracking works across nested chains (parent -> child -> grandchild) and is surfaced in event helpers and tree logs. + +## When links are created + +Parent-child links are recorded when you emit from inside a running handler context: + +- Python: `event.event_bus.emit(...)` +- TypeScript: `event.bus?.emit(...)` + +Using the event-scoped bus keeps ancestry metadata intact automatically. + +## Works across forwarded buses too + +Parent-child lineage is preserved even when the parent event has been forwarded between buses. 
+ +If a forwarded event is handled on another bus and that handler emits a child: + +- the child still gets `event_parent_id` set to the parent's `event_id` +- the child is linked under the emitting handler's `event_children` +- forwarding that child onward keeps the same lineage metadata + +Use the event-scoped bus in handlers (`event.event_bus` / `event.bus`) so the runtime can attach ancestry correctly. + +See also: [Forwarding Between Buses](./forwarding-between-buses) + +## Queue-jumped vs normally queued children + +Lineage tracking works in both execution styles: + +- Queue-jumped child events: + - emitted inside a handler and immediately awaited (`await child` / `await child.done()`) + - child may execute right away (RPC-style), but still gets normal parent linkage metadata +- Normally queued child events: + - emitted inside a handler but not immediately awaited + - child runs later via normal queue scheduling, and still keeps the same `event_parent_id` ancestry link + +In short: queue-jump changes *when* the child executes, not *whether* parent-child tracking is recorded. + +See [Immediate Execution (RPC-style)](../concurrency/immediate-execution) for queue-jump behavior details. 
+ +## Full example: checkout -> reserve/charge/receipt (+ fraud grandchild) + + + + +```python +from bubus import BaseEvent, EventBus + +class CheckoutEvent(BaseEvent[str]): + order_id: str + +class ReserveInventoryEvent(BaseEvent[str]): + order_id: str + +class ChargeCardEvent(BaseEvent[str]): + order_id: str + +class FraudCheckEvent(BaseEvent[str]): + order_id: str + +class SendReceiptEvent(BaseEvent[str]): + order_id: str + +bus = EventBus('TreeBus') + +async def on_checkout(event: CheckoutEvent) -> str: + reserve = event.event_bus.emit(ReserveInventoryEvent(order_id=event.order_id)) + await reserve + reserve_id = await reserve.event_result() + + charge = event.event_bus.emit(ChargeCardEvent(order_id=event.order_id)) + await charge + charge_id = await charge.event_result() + + receipt = event.event_bus.emit(SendReceiptEvent(order_id=event.order_id)) + await receipt + receipt_id = await receipt.event_result() + + return f'{reserve_id}|{charge_id}|{receipt_id}' + +async def on_reserve(event: ReserveInventoryEvent) -> str: + return f'reserve:{event.order_id}' + +async def on_charge(event: ChargeCardEvent) -> str: + fraud = event.event_bus.emit(FraudCheckEvent(order_id=event.order_id)) + await fraud + fraud_status = await fraud.event_result() + return f'charge:{event.order_id}:{fraud_status}' + +async def on_fraud(event: FraudCheckEvent) -> str: + return f'fraud-ok:{event.order_id}' + +async def on_receipt(event: SendReceiptEvent) -> str: + return f'receipt:{event.order_id}' + +bus.on(CheckoutEvent, on_checkout) +bus.on(ReserveInventoryEvent, on_reserve) +bus.on(ChargeCardEvent, on_charge) +bus.on(FraudCheckEvent, on_fraud) +bus.on(SendReceiptEvent, on_receipt) + +root = bus.emit(CheckoutEvent(order_id='ord-123')) +result = await root.event_result() +await bus.wait_until_idle() + +print(result) +print(bus.log_tree()) +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const CheckoutEvent = BaseEvent.extend('CheckoutEvent', { 
+ order_id: z.string(), + event_result_type: z.string(), +}) +const ReserveInventoryEvent = BaseEvent.extend('ReserveInventoryEvent', { + order_id: z.string(), + event_result_type: z.string(), +}) +const ChargeCardEvent = BaseEvent.extend('ChargeCardEvent', { + order_id: z.string(), + event_result_type: z.string(), +}) +const FraudCheckEvent = BaseEvent.extend('FraudCheckEvent', { + order_id: z.string(), + event_result_type: z.string(), +}) +const SendReceiptEvent = BaseEvent.extend('SendReceiptEvent', { + order_id: z.string(), + event_result_type: z.string(), +}) + +const bus = new EventBus('TreeBus') + +bus.on(CheckoutEvent, async (event) => { + const reserve = event.bus!.emit(ReserveInventoryEvent({ order_id: event.order_id })) + await reserve.done() + + const charge = event.bus!.emit(ChargeCardEvent({ order_id: event.order_id })) + await charge.done() + + const receipt = event.bus!.emit(SendReceiptEvent({ order_id: event.order_id })) + await receipt.done() + + return `${reserve.event_result}|${charge.event_result}|${receipt.event_result}` +}) + +bus.on(ReserveInventoryEvent, async (event) => `reserve:${event.order_id}`) +bus.on(ChargeCardEvent, async (event) => { + const fraud = event.bus!.emit(FraudCheckEvent({ order_id: event.order_id })) + await fraud.done() + return `charge:${event.order_id}:${fraud.event_result}` +}) + +bus.on(FraudCheckEvent, async (event) => `fraud-ok:${event.order_id}`) +bus.on(SendReceiptEvent, async (event) => `receipt:${event.order_id}`) + +const root = bus.emit(CheckoutEvent({ order_id: 'ord-123' })) +await root.done() +await bus.waitUntilIdle() + +console.log(root.event_result) +console.log(bus.logTree()) +``` + + + + +## Example tree output + +Captured from running the Python example above with `uv run` (IDs/timestamps vary run-to-run): + +```text +└── CheckoutEvent#b7c7 [10:10:54.522 (0.003s)] + └── βœ… TreeBus#ef2a.__main__.on_checkout#7a12 [10:10:54.522 (0.002s)] β†’ 
'reserve:ord-123|charge:ord-123:fraud-ok:ord-123|receipt:ord-123' + ├── ReserveInventoryEvent#ca2f [10:10:54.522 (0.000s)] + │ └── ✅ TreeBus#ef2a.__main__.on_reserve#1583 [10:10:54.522 (0.000s)] → 'reserve:ord-123' + ├── ChargeCardEvent#b746 [10:10:54.523 (0.001s)] + │ └── ✅ TreeBus#ef2a.__main__.on_charge#7d9c [10:10:54.523 (0.001s)] → 'charge:ord-123:fraud-ok:ord-123' + │ └── FraudCheckEvent#31e0 [10:10:54.523 (0.000s)] + │ └── ✅ TreeBus#ef2a.__main__.on_fraud#4c4e [10:10:54.523 (0.000s)] → 'fraud-ok:ord-123' + └── SendReceiptEvent#c399 [10:10:54.524 (0.000s)] + └── ✅ TreeBus#ef2a.__main__.on_receipt#de9f [10:10:54.524 (0.000s)] → 'receipt:ord-123' +``` + +## Why this helps in practice + +- Debugging: quickly see causality chains instead of inspecting raw logs line-by-line. +- Reliability: timeout/cancellation behavior can be reasoned about by ancestry. +- Querying: combine lineage with `find(..., child_of=...)` to isolate event families. + +## Related pages + +- [Immediate Execution (RPC-style)](../concurrency/immediate-execution) +- [Forwarding Between Buses](./forwarding-between-buses) +- [OtelTracingMiddleware](../integrations/middleware-otel-tracing) +- [Find Events](./find-events) +- [BaseEvent](../api/baseevent) diff --git a/docs/features/return-value-handling.mdx b/docs/features/return-value-handling.mdx new file mode 100644 index 0000000..2ed9cf3 --- /dev/null +++ b/docs/features/return-value-handling.mdx @@ -0,0 +1,192 @@ +--- +title: Return Value Handling +description: Define typed handler returns and collect results from one emitted event. +--- + +Handler return values are captured in `EventResult` records and can be consumed as a single value or aggregated across handlers. 
+ +Repository example files: +- [`examples/simple.py`](https://github.com/pirate/bbus/blob/main/examples/simple.py) +- [`bubus-ts/examples/simple.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/simple.ts) + +## Typed return values + +Use the event result type to enforce return typing across handlers. + + + + +```python +from bubus import BaseEvent, EventBus + +class DoMathEvent(BaseEvent[int]): + a: int + b: int + +def add(event: DoMathEvent) -> int: + return event.a + event.b + +bus = EventBus('AppBus') +bus.on(DoMathEvent, add) + +event = await bus.emit(DoMathEvent(a=2, b=3)) +result = await event.event_result() +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const DoMathEvent = BaseEvent.extend('DoMathEvent', { + a: z.number(), + b: z.number(), + event_result_type: z.number(), +}) + +const bus = new EventBus('AppBus') +bus.on(DoMathEvent, (event) => event.a + event.b) + +const event = bus.emit(DoMathEvent({ a: 2, b: 3 })) +await event.done() +const result = event.event_result +``` + + + + +## Aggregating multiple handler results + +When multiple handlers respond to the same event, collect all results. 
+ + + + +```python +from bubus import BaseEvent, EventBus + +class GetConfigEvent(BaseEvent[dict]): + pass + +async def user_config(_: GetConfigEvent) -> dict: + return {'debug': True, 'port': 8080} + +async def system_config(_: GetConfigEvent) -> dict: + return {'debug': False, 'timeout': 30} + +bus = EventBus('AppBus') +bus.on(GetConfigEvent, user_config) +bus.on(GetConfigEvent, system_config) + +event = await bus.emit(GetConfigEvent()) +values = await event.event_results_list(raise_if_any=False, raise_if_none=False) +# [ +# {'debug': True, 'port': 8080}, +# {'debug': False, 'timeout': 30} +# ] + +merged_config = {key: val for config in values for key, val in config.items()} +# {'debug': False, 'port': 8080, 'timeout': 30} +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const GetConfigEvent = BaseEvent.extend('GetConfigEvent', { + event_result_type: z.record(z.string(), z.unknown()), +}) + +const bus = new EventBus('AppBus') +bus.on(GetConfigEvent, async () => ({ debug: true, port: 8080 })) +bus.on(GetConfigEvent, async () => ({ debug: false, timeout: 30 })) + +const event = bus.emit(GetConfigEvent({})) +const values = await event.eventResultsList({ raise_if_any: false, raise_if_none: false }) +// [ +// {debug: true, port: 8080}, +// {debug: false, timeout: 30} +// ] +const merged_config = values.reduce((acc, value) => { + if (value && typeof value === 'object' && !Array.isArray(value)) { + Object.assign(acc, value) + } + return acc +}, {}) +// {debug: false, port: 8080, timeout: 30} +``` + + + + +## `event_results_list` / `eventResultsList` options + + + + +```python +event_results_list( + timeout: float | None = None, + include: EventResultFilter = ..., + raise_if_any: bool = True, + raise_if_none: bool = True, +) -> list[Any] + +# examples +await event.event_results_list(raise_if_any=False, raise_if_none=False) +await event.event_results_list(include=lambda event_result: isinstance(event_result.result, dict), 
raise_if_any=False) +await event.event_results_list(timeout=0.25) +``` + + + + +```ts +eventResultsList( + include?: (result: EventResultType | undefined, event_result: EventResult) => boolean, + options?: { + timeout?: number | null + include?: (result: EventResultType | undefined, event_result: EventResult) => boolean + raise_if_any?: boolean + raise_if_none?: boolean + } +): Promise<Array<EventResultType | undefined>> + +// examples +await event.eventResultsList({ raise_if_any: false, raise_if_none: false }) +await event.eventResultsList((result) => typeof result === 'object', { raise_if_any: false }) +await event.eventResultsList({ timeout: 0.25 }) +``` + + + + +- `raise_if_any`: raise if any handler ended with an error. +- `raise_if_none`: raise only when no handlers returned a valid value after filtering; it does not raise just because one handler returned `None`/`null`/`undefined`. +- Default filtering in both runtimes includes only completed, successful, non-empty scalar/object/list values (and excludes forwarded `BaseEvent` returns). + +## Per-handler inspection + +Both implementations keep per-handler result metadata in addition to `event_results_list` / `eventResultsList`. + + + + +```python +by_name = {result.handler_name: result.result for result in event.event_results.values()} +``` + + + + +```ts +const byHandler = event.event_results +``` + + + diff --git a/docs/features/typed-events.mdx b/docs/features/typed-events.mdx new file mode 100644 index 0000000..8885c24 --- /dev/null +++ b/docs/features/typed-events.mdx @@ -0,0 +1,44 @@ +--- +title: Type-Safe Events +description: Define validated event payloads and event result types. +--- + +Events are strongly typed and validated in both runtimes. 
+ +Repository example files: +- [`examples/simple.py`](https://github.com/pirate/bbus/blob/main/examples/simple.py) +- [`bubus-ts/examples/simple.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/simple.ts) + + + + +```python +from typing import Any +from bubus import BaseEvent + +class OrderCreatedEvent(BaseEvent[dict[str, Any]]): + order_id: str + customer_id: str + total_amount: float +``` + + + + +```ts +import { BaseEvent } from 'bubus' +import { z } from 'zod' + +const OrderCreatedEvent = BaseEvent.extend('OrderCreatedEvent', { + order_id: z.string(), + customer_id: z.string(), + total_amount: z.number(), + event_result_type: z.object({ ok: z.boolean() }), +}) +``` + + + + +- Python payload validation is powered by Pydantic models. +- TypeScript payload and result validation is powered by Zod schemas. diff --git a/docs/further-reading/events-suck.mdx b/docs/further-reading/events-suck.mdx new file mode 100644 index 0000000..ad5f5aa --- /dev/null +++ b/docs/further-reading/events-suck.mdx @@ -0,0 +1,307 @@ +--- +title: "Don't Like Events?" +description: Practical patterns for teams who like events in theory but dislike event-driven DX in practice. +--- + +If you like events in theory but hate the day-to-day developer experience, this page is for you. + +Common pain points: + +- calling boilerplate (`emit` + await completion + unwrap result) for every request +- eventual consistency anxiety ("will my response event arrive?") +- duplicating signatures across schemas, handlers, and implementation functions + +The goal here is to keep event architecture benefits without forcing painful calling patterns. + +## 1) Pain: painful calling interface boilerplate + +You usually end up writing verbose call sites repeatedly. + +`events_suck.wrap(...)` gives you a method-shaped client API (`client.create(...)`) while still routing through events. 
+ + + + +```python +# Without wrap: valid, but noisy at every call site +event = bus.emit(CreateUserEvent(name='bob', age=45)) +user_id = await event.event_result() +all_values = await event.event_results_list(raise_if_any=False, raise_if_none=False) + +# With wrap: looks like normal async function calls +SDKClient = events_suck.wrap('SDKClient', {'create': CreateUserEvent, 'update': UpdateUserEvent}) +client = SDKClient(bus=bus) +user_id = await client.create(name='bob', age=45, nickname='bobby') +updated = await client.update(id=user_id, age=46, source='sync') +``` + + + + +```ts +// Without wrap: valid, but noisy at every call site +const event = bus.emit(CreateUserEvent({ name: 'bob', age: 45 })) +await event.done() +const user_id = event.event_result +const all_values = await event.eventResultsList({ raise_if_any: false, raise_if_none: false }) + +// With wrap: looks like normal async function calls +const SDKClient = events_suck.wrap('SDKClient', { create: CreateUserEvent, update: UpdateUserEvent }) +const client = new SDKClient(bus) +const id = await client.create({ name: 'bob', age: 45 }, { nickname: 'bobby' }) +const updated = await client.update({ id: id ?? 
'fallback-id', age: 46 }, { source: 'sync' }) +``` + + + + +### Minimal end-to-end `wrap(...)` wiring + + + + +```python +from bubus import BaseEvent, EventBus, events_suck + +class CreateUserEvent(BaseEvent[str]): + name: str + age: int + +class UpdateUserEvent(BaseEvent[bool]): + id: str + age: int | None = None + +class UserService: + def __init__(self) -> None: + self.users: dict[str, dict[str, int | str]] = {} + + async def on_create(self, event: CreateUserEvent) -> str: + user_id = f'user-{event.age}' + self.users[user_id] = {'id': user_id, 'name': event.name, 'age': event.age} + return user_id + + async def on_update(self, event: UpdateUserEvent) -> bool: + if event.id not in self.users: + return False + if event.age is not None: + self.users[event.id]['age'] = event.age + return True + +bus = EventBus('SDKBus') +service = UserService() + +bus.on(CreateUserEvent, service.on_create) +bus.on(UpdateUserEvent, service.on_update) + +SDKClient = events_suck.wrap('SDKClient', { + 'create': CreateUserEvent, + 'update': UpdateUserEvent, +}) +client = SDKClient(bus=bus) + +user_id = await client.create(name='bob', age=45, nickname='bobby') +updated = await client.update(id=user_id, age=46, source='sync') +``` + + + + +```ts +import { BaseEvent, EventBus, events_suck } from 'bubus' +import { z } from 'zod' + +const CreateUserEvent = BaseEvent.extend('CreateUserEvent', { + name: z.string(), + age: z.number(), + event_result_type: z.string(), +}) +const UpdateUserEvent = BaseEvent.extend('UpdateUserEvent', { + id: z.string(), + age: z.number().nullable().optional(), + event_result_type: z.boolean(), +}) + +type UserRecord = { id: string; name: string; age: number } +const users = new Map() + +const onCreate = async (event: InstanceType) => { + const user_id = `user-${event.age}` + users.set(user_id, { id: user_id, name: event.name, age: event.age }) + return user_id +} + +const onUpdate = async (event: InstanceType) => { + const existing = users.get(event.id) + if 
(!existing) return false + if (event.age !== undefined && event.age !== null) existing.age = event.age + users.set(event.id, existing) + return true +} + +const bus = new EventBus('SDKBus') +bus.on(CreateUserEvent, onCreate) +bus.on(UpdateUserEvent, onUpdate) + +const SDKClient = events_suck.wrap('SDKClient', { + create: CreateUserEvent, + update: UpdateUserEvent, +}) +const client = new SDKClient(bus) + +const user_id = await client.create({ name: 'bob', age: 45 }, { nickname: 'bobby' }) +const updated = await client.update({ id: user_id ?? 'fallback-id', age: 46 }, { source: 'sync' }) +``` + + + + +Related docs: + +- [Return Value Handling](../features/return-value-handling) +- [BaseEvent API](../api/baseevent) + +## 2) Pain: eventual consistency headaches + +If your mental model is "I called something, I need a result now," pure fire-and-forget event flows can feel stressful. + +Two patterns reduce that stress: + +- request/response on one bus with direct return values (`event_result` / `first()`) +- immediate execution for nested calls inside handlers (RPC-style queue-jump) + +These patterns feel function-like for in-process flows. If you later move a step across process/network boundaries (bridges), treat that edge as eventually consistent again. 
+ +Immediate execution docs: [Immediate Execution (RPC-style)](../concurrency/immediate-execution) + +### Nested request/response with immediate execution + + + + +```python +class CheckoutEvent(BaseEvent[str]): + order_id: str + +class ChargeCardEvent(BaseEvent[str]): + order_id: str + +async def on_checkout(event: CheckoutEvent) -> str: + child = event.event_bus.emit(ChargeCardEvent(order_id=event.order_id)) + await child # immediate path while inside handler + receipt_id = await child.event_result() + return receipt_id + +async def on_charge(event: ChargeCardEvent) -> str: + return f'receipt-{event.order_id}' +``` + + + + +```ts +const CheckoutEvent = BaseEvent.extend('CheckoutEvent', { + order_id: z.string(), + event_result_type: z.string(), +}) +const ChargeCardEvent = BaseEvent.extend('ChargeCardEvent', { + order_id: z.string(), + event_result_type: z.string(), +}) + +bus.on(CheckoutEvent, async (event) => { + const child = event.bus!.emit(ChargeCardEvent({ order_id: event.order_id })) + await child.done() // immediate path while inside handler + return child.event_result ?? 'missing-receipt' +}) + +bus.on(ChargeCardEvent, async (event) => `receipt-${event.order_id}`) +``` + + + + +Related docs: + +- [Immediate Execution (RPC-style)](../concurrency/immediate-execution) +- [Timeout Enforcement](../concurrency/timeouts) +- [retry](../api/retry) + +## 3) Pain: defining signatures multiple times + +You can keep one source of truth for payload shapes and reuse it in implementation code. + +### Python: `@validate_call` + `make_events(...)` + `make_handler(...)` + +Use implementation function signatures as the source of truth, then generate event classes from them. 
+ +```python +from bubus import EventBus, events_suck +from pydantic import validate_call + +@validate_call +def create_user(id: str | None, name: str, age: int) -> str: + return f'{name}-{age}' + +@validate_call +def update_user(id: str, age: int | None = None, **extra) -> bool: + return True + +events = events_suck.make_events({ + 'UserCreateEvent': create_user, + 'UserUpdateEvent': update_user, +}) + +bus = EventBus('LegacyBus') +bus.on(events.UserCreateEvent, events_suck.make_handler(create_user)) +bus.on(events.UserUpdateEvent, events_suck.make_handler(update_user)) + +UserClient = events_suck.wrap('UserClient', {'create': events.UserCreateEvent, 'update': events.UserUpdateEvent}) +client = UserClient(bus=bus) +``` + +### TypeScript: `zod` schema + `z.infer` shared with implementation + +Keep the schema as the source of truth, infer implementation input types from it, and reuse the same shape in `BaseEvent.extend(...)`. + +```ts +import { BaseEvent, EventBus, events_suck } from 'bubus' +import { z } from 'zod' + +const CreateUserInputSchema = z.object({ + id: z.string().nullable().optional(), + name: z.string(), + age: z.number(), +}) +type CreateUserInput = z.infer<typeof CreateUserInputSchema> + +const UserCreateEvent = BaseEvent.extend('UserCreateEvent', { + ...CreateUserInputSchema.shape, + event_result_type: z.string(), +}) + +const bus = new EventBus('LegacyBus') +const create_user = async (input: CreateUserInput): Promise<string> => `${input.name}-${input.age}` + +bus.on(UserCreateEvent, ({ id, name, age }) => create_user({ id, name, age })) + +const UserClient = events_suck.wrap('UserClient', { + create: UserCreateEvent, +}) + +const client = new UserClient(bus) + +const id = await client.create({ id: null, name: 'bob', age: 45 }) +``` + +Related docs: + +- [Typed Events](../features/typed-events) +- [BaseEvent API](../api/baseevent) + +## Migration playbook + +1. Start with `wrap(...)` to clean up call-site boilerplate first. +2. 
Use immediate execution patterns where you need function-call-like request/response behavior. +3. Consolidate types with `@validate_call` (Python) or `z.infer` (TypeScript) to avoid signature drift. +4. Add timeouts/retry policies where needed, instead of forcing eventual-consistency semantics everywhere. + +You do not need to choose between clean DX and events. You can keep method-shaped APIs and adopt event internals incrementally. diff --git a/docs/further-reading/similar-projects.mdx b/docs/further-reading/similar-projects.mdx new file mode 100644 index 0000000..9fc5cb7 --- /dev/null +++ b/docs/further-reading/similar-projects.mdx @@ -0,0 +1,41 @@ +--- +title: Similar Projects +description: Similar projects and licensing details. +--- + +- https://www.cosmicpython.com/book/chapter_08_events_and_message_bus.html#message_bus_diagram ⭐️ +- https://developer.mozilla.org/en-US/docs/Web/API/EventTarget ⭐️ +- https://github.com/sindresorhus/emittery ⭐️ (equivalent for JS), https://github.com/EventEmitter2/EventEmitter2, https://github.com/vitaly-t/sub-events +- https://github.com/pytest-dev/pluggy ⭐️ +- https://github.com/teamhide/fastapi-event ⭐️ +- https://github.com/ethereum/lahja ⭐️ +- https://github.com/enricostara/eventure ⭐️ +- https://github.com/akhundMurad/diator ⭐️ +- https://github.com/n89nanda/pyeventbus +- https://github.com/iunary/aioemit +- https://github.com/dboslee/evently +- https://github.com/faust-streaming/faust +- https://github.com/ArcletProject/Letoderea +- https://github.com/seanpar203/event-bus +- https://github.com/n89nanda/pyeventbus +- https://github.com/nicolaszein/py-async-bus +- https://github.com/AngusWG/simple-event-bus +- https://www.joeltok.com/posts/2021-03-building-an-event-bus-in-python/ + +## Distributed Event Queues + +- [NATS](https://nats.io/) +- [Kafka](https://kafka.apache.org/) +- [RQ](https://python-rq.org/) +- [Celery](https://docs.celeryq.dev/) +- [Dramatiq](https://dramatiq.io/) +- [Huey](https://huey.readthedocs.io/) 
+- [RabbitMQ](https://www.rabbitmq.com/) + +--- + + +> [🧠 DeepWiki Docs](https://deepwiki.com/pirate/bbus) +> imageimage + +This project is licensed under the MIT License. diff --git a/docs/index.mdx b/docs/index.mdx new file mode 100644 index 0000000..9d51072 --- /dev/null +++ b/docs/index.mdx @@ -0,0 +1,87 @@ +--- +title: Overview +description: Unified docs for bubus Python and TypeScript implementations. +--- + +## `bubus`: Production-ready multi-language event bus + +bubus logo + +[![DeepWiki: Python](https://img.shields.io/badge/DeepWiki-bbus%2FPython-yellow.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACwAAAAyCAYAAAAnWDnqAAAAAXNSR0IArs4c6QAAA05JREFUaEPtmUtyEzEQhtWTQyQLHNak2AB7ZnyXZMEjXMGeK/AIi+QuHrMnbChYY7MIh8g01fJoopFb0uhhEqqcbWTp06/uv1saEDv4O3n3dV60RfP947Mm9/SQc0ICFQgzfc4CYZoTPAswgSJCCUJUnAAoRHOAUOcATwbmVLWdGoH//PB8mnKqScAhsD0kYP3j/Yt5LPQe2KvcXmGvRHcDnpxfL2zOYJ1mFwrryWTz0advv1Ut4CJgf5uhDuDj5eUcAUoahrdY/56ebRWeraTjMt/00Sh3UDtjgHtQNHwcRGOC98BJEAEymycmYcWwOprTgcB6VZ5JK5TAJ+fXGLBm3FDAmn6oPPjR4rKCAoJCal2eAiQp2x0vxTPB3ALO2CRkwmDy5WohzBDwSEFKRwPbknEggCPB/imwrycgxX2NzoMCHhPkDwqYMr9tRcP5qNrMZHkVnOjRMWwLCcr8ohBVb1OMjxLwGCvjTikrsBOiA6fNyCrm8V1rP93iVPpwaE+gO0SsWmPiXB+jikdf6SizrT5qKasx5j8ABbHpFTx+vFXp9EnYQmLx02h1QTTrl6eDqxLnGjporxl3NL3agEvXdT0WmEost648sQOYAeJS9Q7bfUVoMGnjo4AZdUMQku50McDcMWcBPvr0SzbTAFDfvJqwLzgxwATnCgnp4wDl6Aa+Ax283gghmj+vj7feE2KBBRMW3FzOpLOADl0Isb5587h/U4gGvkt5v60Z1VLG8BhYjbzRwyQZemwAd6cCR5/XFWLYZRIMpX39AR0tjaGGiGzLVyhse5C9RKC6ai42ppWPKiBagOvaYk8lO7DajerabOZP46Lby5wKjw1HCRx7p9sVMOWGzb/vA1hwiWc6jm3MvQDTogQkiqIhJV0nBQBTU+3okKCFDy9WwferkHjtxib7t3xIUQtHxnIwtx4mpg26/HfwVNVDb4oI9RHmx5WGelRVlrtiw43zboCLaxv46AZeB3IlTkwouebTr1y2NjSpHz68WNFjHvupy3q8TFn3Hos2IAk4Ju5dCo8B3wP7VPr/FGaKiG+T+v+TQqIrOqMTL1VdWV1DdmcbO8KXBz6esmYWYKPwDL5b5FA1a0hwapHiom0r/cKaoqr+27/XcrS5UwSMbQAAAABJRU5ErkJggg==)](https://deepwiki.com/pirate/bbus) [![PyPI - Version](https://img.shields.io/pypi/v/bubus)](https://pypi.org/project/bubus/) [![NPM 
Version](https://img.shields.io/npm/v/bubus)](https://www.npmjs.com/package/bubus) [![GitHub License](https://img.shields.io/github/license/pirate/bbus)](https://github.com/pirate/bbus) + +Bubus is an in-memory event bus for async Python and TypeScript (Node and browser environments), built for predictable event-driven workflows with strong typing and consistent cross-language behavior. + +Core strengths: + +- Typed event payloads and typed handler return values +- Deterministic queue semantics with configurable concurrency +- Nested event lineage tracking (`event_parent_id` / `event_path`) +- Event forwarding, bridge transports, and middleware integration + +## Minimal usage + + + + +```python +from bubus import BaseEvent, EventBus + +class SomeEvent(BaseEvent): + some_data: int + +async def on_some_event(event: SomeEvent) -> None: + print(event.some_data) + # 132 + +bus = EventBus('MyBus') +bus.on(SomeEvent, on_some_event) + +await bus.emit(SomeEvent(some_data=132)) +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const SomeEvent = BaseEvent.extend('SomeEvent', { + some_data: z.number(), +}) + +const bus = new EventBus('MyBus') +bus.on(SomeEvent, async (event) => { + console.log(event.some_data) +}) + +await bus.emit(SomeEvent({ some_data: 132 })).done() +``` + + + + +See [Quickstart](./quickstart) for installation and first full example. 
+ +## Repository examples + +Runnable end-to-end examples (Python + TypeScript) live in the repo: + +- Quickstart basics: + - [`examples/simple.py`](https://github.com/pirate/bbus/blob/main/examples/simple.py) + - [`bubus-ts/examples/simple.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/simple.ts) +- Concurrency modes, overrides, and timeout behavior: + - [`examples/concurrency_options.py`](https://github.com/pirate/bbus/blob/main/examples/concurrency_options.py) + - [`bubus-ts/examples/concurrency_options.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/concurrency_options.ts) +- Immediate execution (queue-jump) behavior: + - [`examples/immediate_event_processing.py`](https://github.com/pirate/bbus/blob/main/examples/immediate_event_processing.py) + - [`bubus-ts/examples/immediate_event_processing.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/immediate_event_processing.ts) +- Forwarding between buses: + - [`examples/forwarding_between_busses.py`](https://github.com/pirate/bbus/blob/main/examples/forwarding_between_busses.py) + - [`bubus-ts/examples/forwarding_between_busses.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/forwarding_between_busses.ts) +- Parent-child lineage tracking: + - [`examples/parent_child_tracking.py`](https://github.com/pirate/bbus/blob/main/examples/parent_child_tracking.py) + - [`bubus-ts/examples/parent_child_tracking.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/parent_child_tracking.ts) +- Tree logs with nested timeout outcomes: + - [`examples/log_tree_demo.py`](https://github.com/pirate/bbus/blob/main/examples/log_tree_demo.py) + - [`bubus-ts/examples/log_tree_demo.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/log_tree_demo.ts) diff --git a/docs/integrations/bridge-http.mdx b/docs/integrations/bridge-http.mdx new file mode 100644 index 0000000..167e881 --- /dev/null +++ b/docs/integrations/bridge-http.mdx @@ -0,0 +1,86 @@ +--- +title: 
HTTPEventBridge +description: Forward events over HTTP(S) endpoints. +--- + +`HTTPEventBridge` forwards event JSON over HTTP and can optionally expose an inbound HTTP listener. + +## Constructor params + +- `send_to`: optional outbound endpoint (`http://` or `https://`) +- `listen_on`: optional inbound endpoint (`http://` only) +- `name`: optional bridge label + + + + +```python +from bubus import HTTPEventBridge + +bridge = HTTPEventBridge( + send_to='https://peer.example.com/bubus_events', + listen_on='http://0.0.0.0:8002/bubus_events', + name='HttpBridge', +) +``` + + + + +```ts +import { HTTPEventBridge } from 'bubus' + +const bridge = new HTTPEventBridge({ + send_to: 'https://peer.example.com/bubus_events', + listen_on: 'http://0.0.0.0:8002/bubus_events', + name: 'HttpBridge', +}) +``` + + + + +## Setup with a bus + + + + +```python +from bubus import EventBus, HTTPEventBridge + +bus = EventBus('AppBus') +bridge = HTTPEventBridge( + send_to='https://peer.example.com/bubus_events', + listen_on='http://0.0.0.0:8002/bubus_events', +) + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +```ts +import { EventBus, HTTPEventBridge } from 'bubus' + +const bus = new EventBus('AppBus') +const bridge = new HTTPEventBridge({ + send_to: 'https://peer.example.com/bubus_events', + listen_on: 'http://0.0.0.0:8002/bubus_events', +}) + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +## Behavior + +- `emit(...)` serializes an event and sends a `POST` request to `send_to`. +- `on(...)` registers handlers on the bridge's internal inbound bus and auto-starts the listener when needed. +- Inbound payloads are parsed back into `BaseEvent`, reset to pending state, then emitted on the internal bus. +- `close()` shuts down listener/server resources and the internal bus. +- In TypeScript, listener mode (`listen_on`) is supported in Node.js runtimes. 
diff --git a/docs/integrations/bridge-jsonl.mdx b/docs/integrations/bridge-jsonl.mdx new file mode 100644 index 0000000..426ebdd --- /dev/null +++ b/docs/integrations/bridge-jsonl.mdx @@ -0,0 +1,76 @@ +--- +title: JSONLEventBridge +description: Forward events through newline-delimited JSON files. +--- + +`JSONLEventBridge` appends one event JSON payload per line and tails the file for inbound events. + +## Constructor params + +- `path`: JSONL file path +- `poll_interval`: tail polling interval in seconds (default `0.25`) +- `name`: optional bridge label + + + + +```python +from bubus import JSONLEventBridge + +bridge = JSONLEventBridge( + '/tmp/bubus_events.jsonl', + poll_interval=0.25, + name='JsonlBridge', +) +``` + + + + +```ts +import { JSONLEventBridge } from 'bubus' + +const bridge = new JSONLEventBridge('/tmp/bubus_events.jsonl', 0.25, 'JsonlBridge') +``` + + + + +## Setup with a bus + + + + +```python +from bubus import EventBus, JSONLEventBridge + +bus = EventBus('AppBus') +bridge = JSONLEventBridge('/tmp/bubus_events.jsonl') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +```ts +import { EventBus, JSONLEventBridge } from 'bubus' + +const bus = new EventBus('AppBus') +const bridge = new JSONLEventBridge('/tmp/bubus_events.jsonl') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +## Behavior + +- `emit(...)` appends compact JSON payload + newline to the file. +- `on(...)` auto-starts a tail loop and registers inbound handlers. +- Start cursor is initialized at current EOF, so only newly appended lines are processed. +- Malformed lines are ignored; valid lines are parsed into events, reset, and emitted on the internal bus. +- Runtime note: TypeScript JSONL bridge is Node.js-only. 
diff --git a/docs/integrations/bridge-nats.mdx b/docs/integrations/bridge-nats.mdx new file mode 100644 index 0000000..a463452 --- /dev/null +++ b/docs/integrations/bridge-nats.mdx @@ -0,0 +1,111 @@ +--- +title: NATSEventBridge +description: Forward events over NATS subjects. +--- + +`NATSEventBridge` publishes events to a NATS subject and subscribes to the same subject for inbound forwarding. + +## Optional dependencies + + + + +Install the NATS extra (recommended): + +```bash +pip install "bubus[nats]" +``` + +Equivalent direct dependency install: + +```bash +pip install nats-py +``` + + + + +Install the NATS client package: + +```bash +npm install bubus nats +``` + +This bridge is Node.js-only. + + + + +## Constructor params + +- `server`: NATS server URL (for example `nats://localhost:4222`) +- `subject`: subject name used for publish/subscribe +- `name`: optional bridge label + + + + +```python +from bubus import NATSEventBridge + +bridge = NATSEventBridge( + 'nats://localhost:4222', + 'bubus_events', + name='NatsBridge', +) +``` + + + + +```ts +import { NATSEventBridge } from 'bubus' + +const bridge = new NATSEventBridge( + 'nats://localhost:4222', + 'bubus_events', + 'NatsBridge' +) +``` + + + + +## Setup with a bus + + + + +```python +from bubus import EventBus, NATSEventBridge + +bus = EventBus('AppBus') +bridge = NATSEventBridge('nats://localhost:4222', 'bubus_events') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +```ts +import { EventBus, NATSEventBridge } from 'bubus' + +const bus = new EventBus('AppBus') +const bridge = new NATSEventBridge('nats://localhost:4222', 'bubus_events') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +## Behavior + +- `emit(...)` publishes serialized event JSON bytes to the configured subject. +- `on(...)` registers inbound handlers and auto-starts subscription. +- Inbound messages are decoded, reset, and re-emitted on the internal bus. 
+- `close()` drains/closes NATS connections and stops the internal bus. +- Runtime requirements: Python needs `nats-py`, TypeScript needs `nats` and Node.js. diff --git a/docs/integrations/bridge-postgres.mdx b/docs/integrations/bridge-postgres.mdx new file mode 100644 index 0000000..7b93e4e --- /dev/null +++ b/docs/integrations/bridge-postgres.mdx @@ -0,0 +1,114 @@ +--- +title: PostgresEventBridge +description: Forward events using PostgreSQL LISTEN/NOTIFY plus table storage. +--- + +`PostgresEventBridge` stores event payloads in a Postgres table and uses `LISTEN/NOTIFY` for low-latency fanout. + +## Optional dependencies + + + + +Install the Postgres extra (recommended): + +```bash +pip install "bubus[postgres]" +``` + +Equivalent direct dependency install: + +```bash +pip install asyncpg +``` + + + + +Install the Postgres client package: + +```bash +npm install bubus pg +``` + +This bridge is Node.js-only. + + + + +## Constructor params + +- `table_url`: `postgresql://user:pass@host:5432/dbname[/tablename]?...` +- `channel`: optional notify/listen channel (defaults to `bubus_events`) +- `name`: optional bridge label + + + + +```python +from bubus import PostgresEventBridge + +bridge = PostgresEventBridge( + 'postgresql://user:pass@localhost:5432/mydb/bubus_events', + channel='bubus_events', + name='PgBridge', +) +``` + + + + +```ts +import { PostgresEventBridge } from 'bubus' + +const bridge = new PostgresEventBridge( + 'postgresql://user:pass@localhost:5432/mydb/bubus_events', + 'bubus_events', + 'PgBridge' +) +``` + + + + +## Setup with a bus + + + + +```python +from bubus import EventBus, PostgresEventBridge + +bus = EventBus('AppBus') +bridge = PostgresEventBridge('postgresql://user:pass@localhost:5432/mydb/bubus_events') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +```ts +import { EventBus, PostgresEventBridge } from 'bubus' + +const bus = new EventBus('AppBus') +const bridge = new 
PostgresEventBridge('postgresql://user:pass@localhost:5432/mydb/bubus_events') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +## Behavior + +- `emit(...)` upserts event payload data into the bridge table, then sends `NOTIFY` with the event id. +- `on(...)` registers inbound handlers and auto-starts listener startup. +- On notifications, the bridge fetches the full row payload, reconstructs an event, resets it, and emits locally. +- Columns are created on demand only for `event_*` fields. +- The full event JSON is stored in `event_payload` (not just non-`event_*` fields). +- `event_*` columns are also stored as flat mirrored fields for indexing/querying. +- Rehydration merges `event_payload` with `event_*` column values back into a flat event object. +- Runtime requirements: Python needs `asyncpg`, TypeScript needs `pg` and Node.js. diff --git a/docs/integrations/bridge-redis.mdx b/docs/integrations/bridge-redis.mdx new file mode 100644 index 0000000..4db600e --- /dev/null +++ b/docs/integrations/bridge-redis.mdx @@ -0,0 +1,110 @@ +--- +title: RedisEventBridge +description: Forward events via Redis pub/sub channels. +--- + +`RedisEventBridge` publishes event payloads to a Redis channel and subscribes for inbound events on the same channel. + +## Optional dependencies + + + + +Install the Redis extra (recommended): + +```bash +pip install "bubus[redis]" +``` + +Equivalent direct dependency install: + +```bash +pip install redis +``` + + + + +Install the Redis client package: + +```bash +npm install bubus ioredis +``` + +This bridge is Node.js-only. 
+ + + + +## Constructor params + +- `redis_url`: redis URL in the form `redis://user:pass@host:6379//` +- `channel`: optional channel override (defaults to URL channel segment or `bubus_events`) +- `name`: optional bridge label + + + + +```python +from bubus import RedisEventBridge + +bridge = RedisEventBridge( + 'redis://user:pass@localhost:6379/1/bubus_events', + name='RedisBridge', +) +``` + + + + +```ts +import { RedisEventBridge } from 'bubus' + +const bridge = new RedisEventBridge( + 'redis://user:pass@localhost:6379/1/bubus_events', + undefined, + 'RedisBridge' +) +``` + + + + +## Setup with a bus + + + + +```python +from bubus import EventBus, RedisEventBridge + +bus = EventBus('AppBus') +bridge = RedisEventBridge('redis://localhost:6379/0/bubus_events') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +```ts +import { EventBus, RedisEventBridge } from 'bubus' + +const bus = new EventBus('AppBus') +const bridge = new RedisEventBridge('redis://localhost:6379/0/bubus_events') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +## Behavior + +- `emit(...)` publishes serialized event JSON to the configured Redis channel. +- `on(...)` subscribes handlers for inbound messages and auto-starts the Redis subscriber. +- Incoming messages are parsed into events, reset, then emitted on the bridge's internal bus. +- `close()` unsubscribes and closes Redis clients. +- Runtime requirements: Python needs `redis` (`redis.asyncio`), TypeScript needs `ioredis` and Node.js. diff --git a/docs/integrations/bridge-socket.mdx b/docs/integrations/bridge-socket.mdx new file mode 100644 index 0000000..15116c4 --- /dev/null +++ b/docs/integrations/bridge-socket.mdx @@ -0,0 +1,71 @@ +--- +title: SocketEventBridge +description: Forward events through unix domain sockets. +--- + +`SocketEventBridge` uses a unix socket path for both send and listen directions. 
+ +## Constructor params + +- `path`: unix socket path (absolute path recommended) +- `name`: optional bridge label + + + + +```python +from bubus import SocketEventBridge + +bridge = SocketEventBridge('/tmp/bubus_events.sock', name='SocketBridge') +``` + + + + +```ts +import { SocketEventBridge } from 'bubus' + +const bridge = new SocketEventBridge('/tmp/bubus_events.sock', 'SocketBridge') +``` + + + + +## Setup with a bus + + + + +```python +from bubus import EventBus, SocketEventBridge + +bus = EventBus('AppBus') +bridge = SocketEventBridge('/tmp/bubus_events.sock') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +```ts +import { EventBus, SocketEventBridge } from 'bubus' + +const bus = new EventBus('AppBus') +const bridge = new SocketEventBridge('/tmp/bubus_events.sock') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +## Behavior + +- `emit(...)` writes newline-delimited event JSON frames to the unix socket. +- `on(...)` subscribes handlers on the inbound side and auto-starts the socket listener. +- Incoming frames are decoded into events, reset, then emitted on the internal bus. +- `close()` stops the socket server and tears down the internal bus. +- TypeScript socket bridges require Node.js runtime support for unix sockets. diff --git a/docs/integrations/bridge-sqlite.mdx b/docs/integrations/bridge-sqlite.mdx new file mode 100644 index 0000000..b1c93db --- /dev/null +++ b/docs/integrations/bridge-sqlite.mdx @@ -0,0 +1,82 @@ +--- +title: SQLiteEventBridge +description: Forward events through a local SQLite table with polling. +--- + +`SQLiteEventBridge` writes events into a SQLite table and polls for newly inserted rows. 
+ +## Constructor params + +- `path`: SQLite database file path +- `table`: table name (default `bubus_events`) +- `poll_interval`: polling interval in seconds (default `0.25`) +- `name`: optional bridge label + + + + +```python +from bubus import SQLiteEventBridge + +bridge = SQLiteEventBridge( + '/tmp/bubus_events.sqlite3', + table='bubus_events', + poll_interval=0.25, + name='SqliteBridge', +) +``` + + + + +```ts +import { SQLiteEventBridge } from 'bubus' + +const bridge = new SQLiteEventBridge('/tmp/bubus_events.sqlite3', 'bubus_events', 0.25, 'SqliteBridge') +``` + + + + +## Setup with a bus + + + + +```python +from bubus import EventBus, SQLiteEventBridge + +bus = EventBus('AppBus') +bridge = SQLiteEventBridge('/tmp/bubus_events.sqlite3') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +```ts +import { EventBus, SQLiteEventBridge } from 'bubus' + +const bus = new EventBus('AppBus') +const bridge = new SQLiteEventBridge('/tmp/bubus_events.sqlite3') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +## Behavior + +- `emit(...)` upserts event payload fields into the configured table. +- `on(...)` auto-starts polling and registers handlers on the internal bus. +- Columns are created on demand only for `event_*` fields. +- The full event JSON is stored in `event_payload` (not just non-`event_*` fields). +- `event_*` columns are also stored as flat mirrored fields for indexing/querying. +- Writes use SQLite JSON functions for `event_payload` (for example `json(?)`). +- Rehydration merges `event_payload` with `event_*` column values back into a flat event object. +- Rows are read in `(event_created_at, event_id)` order, converted back to events, reset, and emitted locally. +- Runtime notes: Python uses stdlib `sqlite3`; TypeScript requires Node.js with built-in `node:sqlite` (Node 22+). 
diff --git a/docs/integrations/bridges.mdx b/docs/integrations/bridges.mdx new file mode 100644 index 0000000..c867045 --- /dev/null +++ b/docs/integrations/bridges.mdx @@ -0,0 +1,53 @@ +--- +title: Overview +description: Transport bridges for forwarding events across files, sockets, and external services. +--- + +Bridges are optional connectors for forwarding serialized events between buses in different processes or machines. + +All bridges expose the same minimal surface: + +- `emit(...)` for outbound forwarding +- `on(...)` for inbound subscription +- `start()` and `close()` for lifecycle control + +## Quick setup + + + + +```python +from bubus import EventBus, RedisEventBridge + +bus = EventBus('AppBus') +bridge = RedisEventBridge('redis://localhost:6379/0/bubus_events') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +```ts +import { EventBus, RedisEventBridge } from 'bubus' + +const bus = new EventBus('AppBus') +const bridge = new RedisEventBridge('redis://localhost:6379/0/bubus_events') + +bus.on('*', bridge.emit) +bridge.on('*', bus.emit) +``` + + + + +## Bridge pages + +- [HTTPEventBridge](./bridge-http) +- [SocketEventBridge](./bridge-socket) +- [RedisEventBridge](./bridge-redis) +- [NATSEventBridge](./bridge-nats) +- [PostgresEventBridge](./bridge-postgres) +- [JSONLEventBridge](./bridge-jsonl) +- [SQLiteEventBridge](./bridge-sqlite) diff --git a/docs/integrations/middleware-auto-error.mdx b/docs/integrations/middleware-auto-error.mdx new file mode 100644 index 0000000..4346839 --- /dev/null +++ b/docs/integrations/middleware-auto-error.mdx @@ -0,0 +1,31 @@ +--- +title: AutoErrorEventMiddleware +description: Emit auto error events when handlers fail. +--- + +`AutoErrorEventMiddleware` emits `{OriginalEventType}ErrorEvent` when a handler completes with an error. + +## Constructor params + +None. 
+ +## Setup with EventBus + +```python +from bubus import EventBus +from bubus.middlewares import AutoErrorEventMiddleware + +bus = EventBus( + name='AppBus', + middlewares=[AutoErrorEventMiddleware()], +) +``` + +## Behavior + +- Runs on completed handler results. +- If a handler errored, emits a auto event with: + - `event_type`: `{OriginalEventType}ErrorEvent` + - `error`: original exception + - `error_type`: exception class name +- Skips source events ending in `ErrorEvent` or `ResultEvent` to prevent auto recursion. diff --git a/docs/integrations/middleware-auto-handler-change.mdx b/docs/integrations/middleware-auto-handler-change.mdx new file mode 100644 index 0000000..bd03c36 --- /dev/null +++ b/docs/integrations/middleware-auto-handler-change.mdx @@ -0,0 +1,29 @@ +--- +title: AutoHandlerChangeEventMiddleware +description: Emit auto events when handlers are registered/unregistered. +--- + +`AutoHandlerChangeEventMiddleware` emits metadata events whenever `EventBus.on(...)` or `EventBus.off(...)` changes handler registration. + +## Constructor params + +None. + +## Setup with EventBus + +```python +from bubus import EventBus +from bubus.middlewares import AutoHandlerChangeEventMiddleware + +bus = EventBus( + name='AppBus', + middlewares=[AutoHandlerChangeEventMiddleware()], +) +``` + +## Behavior + +- On registration, emits `BusHandlerRegisteredEvent(handler=...)`. +- On unregistration, emits `BusHandlerUnregisteredEvent(handler=...)`. +- Emits a deep-copied handler metadata object. +- Useful for auditing dynamic handler topology. diff --git a/docs/integrations/middleware-auto-return.mdx b/docs/integrations/middleware-auto-return.mdx new file mode 100644 index 0000000..0f0a223 --- /dev/null +++ b/docs/integrations/middleware-auto-return.mdx @@ -0,0 +1,32 @@ +--- +title: AutoReturnEventMiddleware +description: Emit auto result events for non-None handler returns. 
+--- + +`AutoReturnEventMiddleware` emits `{OriginalEventType}ResultEvent` for successful non-`None` handler return values. + +## Constructor params + +None. + +## Setup with EventBus + +```python +from bubus import EventBus +from bubus.middlewares import AutoReturnEventMiddleware + +bus = EventBus( + name='AppBus', + middlewares=[AutoReturnEventMiddleware()], +) +``` + +## Behavior + +- Runs on completed handler results. +- Emits auto result events only when: + - handler returned a non-`None` value + - handler did not error + - return value is not itself a `BaseEvent` +- Auto event payload uses `data=`. +- Skips source events ending in `ErrorEvent` or `ResultEvent`. diff --git a/docs/integrations/middleware-logger.mdx b/docs/integrations/middleware-logger.mdx new file mode 100644 index 0000000..eef71db --- /dev/null +++ b/docs/integrations/middleware-logger.mdx @@ -0,0 +1,29 @@ +--- +title: LoggerEventBusMiddleware +description: Log completed events to stdout and optional file. +--- + +`LoggerEventBusMiddleware` prints completed event summaries and can also write them to disk. + +## Constructor params + +- `log_path`: optional filesystem path for log output + +## Setup with EventBus + +```python +from bubus import EventBus +from bubus.middlewares import LoggerEventBusMiddleware + +bus = EventBus( + name='AppBus', + middlewares=[LoggerEventBusMiddleware('./events.log')], +) +``` + +## Behavior + +- Logs event summaries when events complete. +- Always prints to stdout. +- If `log_path` is provided, appends the same summary lines to the file. +- Creates parent directories for the log file automatically. diff --git a/docs/integrations/middleware-otel-tracing.mdx b/docs/integrations/middleware-otel-tracing.mdx new file mode 100644 index 0000000..cd88d69 --- /dev/null +++ b/docs/integrations/middleware-otel-tracing.mdx @@ -0,0 +1,63 @@ +--- +title: OtelTracingMiddleware +description: Emit OpenTelemetry spans for events and handlers. 
+--- + +`OtelTracingMiddleware` creates event and handler spans with parent-child linking. + +## Constructor params + +- `tracer`: optional explicit OpenTelemetry tracer instance +- `trace_api`: optional explicit `opentelemetry.trace` module + +## Setup with EventBus + +```python +from bubus import EventBus +from bubus.middlewares import OtelTracingMiddleware + +bus = EventBus( + name='AppBus', + middlewares=[OtelTracingMiddleware()], +) +``` + +## Setup with Sentry + +```python +import sentry_sdk +from opentelemetry import trace +from opentelemetry.sdk.trace import TracerProvider +from sentry_sdk.integrations.opentelemetry import SentrySpanProcessor + +from bubus import EventBus +from bubus.middlewares import OtelTracingMiddleware + +sentry_sdk.init( + dsn='https://@/', + traces_sample_rate=1.0, +) + +provider = TracerProvider() +provider.add_span_processor(SentrySpanProcessor()) +trace.set_tracer_provider(provider) + +bus = EventBus( + name='AppBus', + middlewares=[OtelTracingMiddleware()], +) +``` + +Install requirements: + +```bash +pip install sentry-sdk opentelemetry-api opentelemetry-sdk +``` + +## Behavior + +- Starts an event span when an event starts and ends it on completion. +- Starts one child span per handler execution. +- Records handler exceptions on handler spans. +- Links child events to parent handler spans where available. +- With Sentry OpenTelemetry integration enabled, these spans are exported to Sentry performance traces. diff --git a/docs/integrations/middleware-sqlite-history-mirror.mdx b/docs/integrations/middleware-sqlite-history-mirror.mdx new file mode 100644 index 0000000..8f06b7b --- /dev/null +++ b/docs/integrations/middleware-sqlite-history-mirror.mdx @@ -0,0 +1,29 @@ +--- +title: SQLiteHistoryMirrorMiddleware +description: Mirror event and handler snapshots into SQLite tables. +--- + +`SQLiteHistoryMirrorMiddleware` records event and handler-result snapshots into SQLite for queryable audit history. 
+ +## Constructor params + +- `db_path`: SQLite file path + +## Setup with EventBus + +```python +from bubus import EventBus +from bubus.middlewares import SQLiteHistoryMirrorMiddleware + +bus = EventBus( + name='AppBus', + middlewares=[SQLiteHistoryMirrorMiddleware('./events.sqlite3')], +) +``` + +## Behavior + +- Records event lifecycle snapshots into `events_log`. +- Records handler result snapshots into `event_results_log`. +- Stores serialized payload JSON plus key metadata (event id/type, handler id/name, phase/status). +- Uses WAL mode and thread-safe connection access for concurrent writes. diff --git a/docs/integrations/middleware-wal.mdx b/docs/integrations/middleware-wal.mdx new file mode 100644 index 0000000..e3d8e27 --- /dev/null +++ b/docs/integrations/middleware-wal.mdx @@ -0,0 +1,29 @@ +--- +title: WALEventBusMiddleware +description: Persist completed events to a JSONL write-ahead log. +--- + +`WALEventBusMiddleware` appends completed event snapshots to a JSONL file. + +## Constructor params + +- `wal_path`: filesystem path for append-only JSONL output + +## Setup with EventBus + +```python +from bubus import EventBus +from bubus.middlewares import WALEventBusMiddleware + +bus = EventBus( + name='AppBus', + middlewares=[WALEventBusMiddleware('./events.jsonl')], +) +``` + +## Behavior + +- Writes one JSON line per completed event. +- Uses internal locking for thread-safe file appends. +- Creates parent directories automatically. +- Intended for replay/debug/audit workflows. diff --git a/docs/integrations/middlewares.mdx b/docs/integrations/middlewares.mdx new file mode 100644 index 0000000..8355638 --- /dev/null +++ b/docs/integrations/middlewares.mdx @@ -0,0 +1,87 @@ +--- +title: Overview +description: Middleware integrations for EventBus lifecycle hooks in Python and TypeScript. +--- + +Middlewares can observe event lifecycle transitions, react to handler registration changes, and add cross-cutting behavior around event execution. 
+ +## Quick setup + + + + +```python +from bubus import EventBus +from bubus.middlewares import LoggerEventBusMiddleware, WALEventBusMiddleware + +bus = EventBus( + name='MyBus', + middlewares=[ + WALEventBusMiddleware('./events.jsonl'), + LoggerEventBusMiddleware('./events.log'), + ], +) +``` + + + + +```ts +import { BaseEvent, EventBus, type EventBusMiddleware, type EventStatus } from 'bubus' + +class LoggingMiddleware implements EventBusMiddleware { + async onEventChange(eventbus: EventBus, event: BaseEvent, status: EventStatus): Promise { + if (status === 'completed') { + console.log(`[${eventbus.label}] ${event.event_type}#${event.event_id.slice(-4)}`) + } + } +} + +const bus = new EventBus('MyBus', { + middlewares: [LoggingMiddleware], +}) +``` + + + + +## Middleware interface + +Use three optional hooks: + +- Event lifecycle: `on_event_change` / `onEventChange` +- Handler-result lifecycle: `on_event_result_change` / `onEventResultChange` +- Handler registration lifecycle: `on_bus_handlers_change` / `onBusHandlersChange` + +See [EventBusMiddleware](../api/eventbusmiddleware) for full signatures and custom middleware examples. + +## Lifecycle + +Middleware hooks receive lifecycle statuses in strict order: + +- Event hooks: `pending` -> `started` -> `completed` +- Event-result hooks: `pending` -> `started` -> `completed` +- Handler registration hooks: called when handlers are added or removed via `on(...)` and `off(...)` + +`status` passed to lifecycle hooks is never `error`. Handler failures are exposed on `event_result.status` and `event_result.error` during the `completed` callback. 
+ +## Built-in classes + + + + +- [OtelTracingMiddleware](./middleware-otel-tracing) +- [AutoErrorEventMiddleware](./middleware-auto-error) +- [AutoReturnEventMiddleware](./middleware-auto-return) +- [AutoHandlerChangeEventMiddleware](./middleware-auto-handler-change) +- [WALEventBusMiddleware](./middleware-wal) +- [LoggerEventBusMiddleware](./middleware-logger) +- [SQLiteHistoryMirrorMiddleware](./middleware-sqlite-history-mirror) + + + + +TypeScript currently exposes the middleware interface only. No built-in middleware classes are exported yet. + + + diff --git a/docs/operations/development.mdx b/docs/operations/development.mdx new file mode 100644 index 0000000..703be55 --- /dev/null +++ b/docs/operations/development.mdx @@ -0,0 +1,71 @@ +--- +title: Development +description: Local development workflows for both Python and TypeScript. +--- + + + + +Set up the python development environment using `uv`: + +```bash +git clone https://github.com/pirate/bbus && cd bbus + +# Create virtual environment with Python 3.12 +uv venv --python 3.12 + +# Activate virtual environment (varies by OS) +source .venv/bin/activate # On Unix/macOS +# or +.venv\Scripts\activate # On Windows + +# Install dependencies +uv sync --dev --all-extras +``` + +Recommended once per clone: + +```bash +prek install # install pre-commit hooks +prek run --all-files # run pre-commit hooks on all files manually +``` + +```bash +# Run linter & type checker +uv run ruff check --fix +uv run ruff format +uv run pyright + +# Run all tests +uv run pytest -vxs --full-trace tests/ + +# Run specific test file +uv run pytest tests/test_eventbus.py + +# Run Python perf test suite +uv run pytest tests/test_eventbus_performance.py -vxs + +# Run the entire lint+test+examples+perf suite for both python and ts +./test.sh +``` + +> For Bubus-TS development see the `bubus-ts/README.md` `# Development` section. 
+ + + + +```bash +git clone https://github.com/pirate/bbus bubus && cd bubus + +cd ./bubus-ts +pnpm install + +prek install # install pre-commit hooks +prek run --all-files # run pre-commit hooks on all files manually + +pnpm lint +pnpm test +``` + + + diff --git a/docs/operations/performance.mdx b/docs/operations/performance.mdx new file mode 100644 index 0000000..3fea001 --- /dev/null +++ b/docs/operations/performance.mdx @@ -0,0 +1,43 @@ +--- +title: Performance +description: Performance notes and benchmark snapshots. +--- + + + + +```bash +uv run pytest tests/test_eventbus_performance.py -vxs # run the performance test suite in python +``` + +| Runtime | 1 bus x 50k events x 1 handler | 500 buses x 100 events x 1 handler | 1 bus x 1 event x 50k parallel handlers | 1 bus x 50k events x 50k one-off handlers | Worst case (N buses x N events x N handlers) | +| ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | +| Python | `0.179ms/event`, `0.235kb/event` | `0.191ms/event`, `0.191kb/event` | `0.035ms/handler`, `8.164kb/handler` | `0.255ms/event`, `0.185kb/event` | `0.351ms/event`, `5.867kb/event` | + + + + +### Performance comparison (local run, per-event) + +Measured locally on an `Apple M4 Pro` with: + +- `pnpm run perf:node` (`node v22.21.1`) +- `pnpm run perf:bun` (`bun v1.3.9`) +- `pnpm run perf:deno` (`deno v2.6.8`) +- `pnpm run perf:browser` (`chrome v145.0.7632.6`) + +| Runtime | 1 bus x 50k events x 1 handler | 500 buses x 100 events x 1 handler | 1 bus x 1 event x 50k parallel handlers | 1 bus x 50k events x 50k one-off handlers | Worst case (N buses x N events x N handlers) | +| ------------------ | ------------------------------ | ----------------------------------- | --------------------------------------- | ----------------------------------------- | --------------------------------------------- | +| Node | `0.015ms/event`, `0.6kb/event` | `0.058ms/event`, `0.1kb/event` | 
`0.021ms/handler`, `3.8kb/handler` | `0.028ms/event`, `0.6kb/event` | `0.442ms/event`, `0.9kb/event` | +| Bun | `0.011ms/event`, `2.5kb/event` | `0.054ms/event`, `1.0kb/event` | `0.006ms/handler`, `4.5kb/handler` | `0.019ms/event`, `2.8kb/event` | `0.441ms/event`, `3.1kb/event` | +| Deno | `0.018ms/event`, `1.2kb/event` | `0.063ms/event`, `0.4kb/event` | `0.024ms/handler`, `3.1kb/handler` | `0.064ms/event`, `2.6kb/event` | `0.461ms/event`, `7.9kb/event` | +| Browser (Chromium) | `0.030ms/event` | `0.197ms/event` | `0.022ms/handler` | `0.022ms/event` | `1.566ms/event` | + +Notes: + +- `kb/event` is peak RSS delta per event during active processing (most representative of OS-visible RAM in Activity Monitor / Task Manager, with `EventBus.max_history_size=1`) +- In `1 bus x 1 event x 50k parallel handlers` stats are shown per-handler for clarity, `0.02ms/handler * 50k handlers ~= 1000ms` for the entire event +- Browser runtime does not expose memory usage directly, in practice memory performance in-browser is comparable to Node (they both use V8) + + + diff --git a/docs/operations/supported-runtimes.mdx b/docs/operations/supported-runtimes.mdx new file mode 100644 index 0000000..7c4f8a4 --- /dev/null +++ b/docs/operations/supported-runtimes.mdx @@ -0,0 +1,30 @@ +--- +title: Supported Runtimes +description: Runtime support details for Python and TypeScript. +--- + + + + +`bubus` supports Python `3.11+`. + +- CPython 3.11 and newer +- OS-independent package support + + + + +`bubus-ts` supports all major JS runtimes. 
+ +- Node.js (default development and test runtime) +- Browsers (ESM) +- Bun +- Deno + +### Browser support notes + +- The package output is ESM (`./dist/esm`) which is supported by all browsers [released after 2018](https://caniuse.com/?search=ESM) +- `AsyncLocalStorage` is preserved at emit time and used during handling when available (Node/Bun), otel/tracing context will work normally in those environments + + + diff --git a/docs/quickstart.mdx b/docs/quickstart.mdx new file mode 100644 index 0000000..2983871 --- /dev/null +++ b/docs/quickstart.mdx @@ -0,0 +1,89 @@ +--- +title: Quickstart +description: Get started quickly with bubus in Python or TypeScript. +--- + +Install bubus, define one typed event, register a handler, and emit the event. + +Repository example files: +- [`examples/simple.py`](https://github.com/pirate/bbus/blob/main/examples/simple.py) +- [`bubus-ts/examples/simple.ts`](https://github.com/pirate/bbus/blob/main/bubus-ts/examples/simple.ts) + +## Install + + + + +```bash +pip install bubus +``` + + + + +```bash +npm install bubus +``` + + + + +## First event + + + + +```python +import asyncio +from bubus import BaseEvent, EventBus + +class CreateUserEvent(BaseEvent[dict]): + email: str + +async def on_create_user(event: CreateUserEvent) -> dict: + user = await your_create_user_logic(event.email) + return {'user_id': user['id']} + +async def main() -> None: + bus = EventBus('MyAuthEventBus') + bus.on(CreateUserEvent, on_create_user) + + result = await bus.emit(CreateUserEvent(email='someuser@example.com')).event_result() + print(result) + # {'user_id': 'some-user-uuid'} + +asyncio.run(main()) +``` + + + + +```ts +import { BaseEvent, EventBus } from 'bubus' +import { z } from 'zod' + +const CreateUserEvent = BaseEvent.extend('CreateUserEvent', { + email: z.string(), + event_result_type: z.object({ user_id: z.string() }), +}) + +const bus = new EventBus('MyAuthEventBus') + +bus.on(CreateUserEvent, async (event) => { + const user = await 
yourCreateUserLogic(event.email) + return { user_id: user.id } +}) + +const event = bus.emit(CreateUserEvent({ email: 'someuser@example.com' })) +await event.done() +console.log(event.event_result) // { user_id: 'some-user-uuid' } +``` + + + + +## Next steps + +- Browse the [Features](./features/event-pattern-matching) section for behavior patterns. +- Use [API Reference](./api/eventbus) for signatures and options. +- See [Integrations](./integrations/bridges) for bridges and middleware. diff --git a/examples/concurrency_options.py b/examples/concurrency_options.py new file mode 100755 index 0000000..6366229 --- /dev/null +++ b/examples/concurrency_options.py @@ -0,0 +1,306 @@ +#!/usr/bin/env -S uv run python +"""Run: uv run python examples/concurrency_options.py""" + +import asyncio +import time +from typing import Literal + +from bubus import BaseEvent, EventBus, EventConcurrencyMode, EventHandlerConcurrencyMode + + +class WorkEvent(BaseEvent[None]): + lane: str + order: int + ms: int + + +class HandlerEvent(BaseEvent[None]): + label: str + + +class OverrideEvent(BaseEvent[None]): + label: str + order: int + ms: int + + +class TimeoutEvent(BaseEvent[str]): + ms: int + + +async def sleep_ms(ms: int) -> None: + await asyncio.sleep(ms / 1000.0) + + +def make_logger(section: str): + started_at = time.perf_counter() + + def log(message: str) -> None: + elapsed_ms = (time.perf_counter() - started_at) * 1000 + print(f'[{section}] +{elapsed_ms:.1f}ms {message}') + + return log + + +async def event_concurrency_demo() -> None: + global_log = make_logger('event:global-serial') + global_a = EventBus('GlobalSerialA', event_concurrency='global-serial', event_handler_concurrency='serial') + global_b = EventBus('GlobalSerialB', event_concurrency='global-serial', event_handler_concurrency='serial') + + try: + global_in_flight = 0 + global_max = 0 + + async def global_handler(event: WorkEvent) -> None: + nonlocal global_in_flight, global_max + global_in_flight += 1 + global_max = 
max(global_max, global_in_flight) + global_log(f'{event.lane}{event.order} start (global in-flight={global_in_flight})') + await sleep_ms(event.ms) + global_log(f'{event.lane}{event.order} end') + global_in_flight -= 1 + + global_a.on(WorkEvent, global_handler) + global_b.on(WorkEvent, global_handler) + + global_a.emit(WorkEvent(lane='A', order=0, ms=45)) + global_b.emit(WorkEvent(lane='B', order=0, ms=45)) + global_a.emit(WorkEvent(lane='A', order=1, ms=45)) + global_b.emit(WorkEvent(lane='B', order=1, ms=45)) + await asyncio.gather(global_a.wait_until_idle(), global_b.wait_until_idle()) + + global_log(f'max in-flight across both buses: {global_max} (expect 1 in global-serial)') + print('\n=== global_a.log_tree() ===') + print(global_a.log_tree()) + print('\n=== global_b.log_tree() ===') + print(global_b.log_tree()) + finally: + await global_a.stop(clear=True, timeout=0) + await global_b.stop(clear=True, timeout=0) + + bus_log = make_logger('event:bus-serial') + bus_a = EventBus('BusSerialA', event_concurrency='bus-serial', event_handler_concurrency='serial') + bus_b = EventBus('BusSerialB', event_concurrency='bus-serial', event_handler_concurrency='serial') + + try: + per_bus_in_flight: dict[str, int] = {'A': 0, 'B': 0} + per_bus_max: dict[str, int] = {'A': 0, 'B': 0} + mixed_global_in_flight = 0 + mixed_global_max = 0 + + async def bus_handler(event: WorkEvent) -> None: + nonlocal mixed_global_in_flight, mixed_global_max + lane = event.lane + mixed_global_in_flight += 1 + mixed_global_max = max(mixed_global_max, mixed_global_in_flight) + per_bus_in_flight[lane] += 1 + per_bus_max[lane] = max(per_bus_max[lane], per_bus_in_flight[lane]) + bus_log(f'{lane}{event.order} start (global={mixed_global_in_flight}, lane={per_bus_in_flight[lane]})') + await sleep_ms(event.ms) + bus_log(f'{lane}{event.order} end') + per_bus_in_flight[lane] -= 1 + mixed_global_in_flight -= 1 + + bus_a.on(WorkEvent, bus_handler) + bus_b.on(WorkEvent, bus_handler) + + 
bus_a.emit(WorkEvent(lane='A', order=0, ms=45)) + bus_b.emit(WorkEvent(lane='B', order=0, ms=45)) + bus_a.emit(WorkEvent(lane='A', order=1, ms=45)) + bus_b.emit(WorkEvent(lane='B', order=1, ms=45)) + await asyncio.gather(bus_a.wait_until_idle(), bus_b.wait_until_idle()) + + bus_log( + f'max in-flight global={mixed_global_max}, per-bus A={per_bus_max["A"]}, ' + f'B={per_bus_max["B"]} (expect global >= 2, per-bus = 1)' + ) + print('\n=== bus_a.log_tree() ===') + print(bus_a.log_tree()) + print('\n=== bus_b.log_tree() ===') + print(bus_b.log_tree()) + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +async def handler_concurrency_demo() -> None: + async def run_case(mode: Literal['serial', 'parallel']) -> None: + log = make_logger(f'handler:{mode}') + bus = EventBus(f'HandlerMode_{mode}', event_concurrency='parallel', event_handler_concurrency=mode) + + try: + in_flight = 0 + max_in_flight = 0 + + def make_handler(name: str, ms: int): + async def handler(event: HandlerEvent) -> None: + nonlocal in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + log(f'{event.label}:{name} start (handlers in-flight={in_flight})') + await sleep_ms(ms) + log(f'{event.label}:{name} end') + in_flight -= 1 + + return handler + + bus.on(HandlerEvent, make_handler('slow', 60)) + bus.on(HandlerEvent, make_handler('fast', 20)) + + event = bus.emit(HandlerEvent(label=mode)) + await event + await bus.wait_until_idle() + log(f'max handler overlap: {max_in_flight} (expect 1 for serial, >= 2 for parallel)') + print(f'\n=== {bus.name}.log_tree() ===') + print(bus.log_tree()) + finally: + await bus.stop(clear=True, timeout=0) + + await run_case('serial') + await run_case('parallel') + + +async def event_override_demo() -> None: + log = make_logger('override:precedence') + bus = EventBus('OverrideBus', event_concurrency='bus-serial', event_handler_concurrency='serial') + + try: + active_events: set[str] = set() + 
per_event_handlers: dict[str, int] = {} + active_handlers = 0 + max_handlers = 0 + max_events = 0 + + def reset_metrics() -> None: + nonlocal active_events, per_event_handlers, active_handlers, max_handlers, max_events + active_events = set() + per_event_handlers = {} + active_handlers = 0 + max_handlers = 0 + max_events = 0 + + def track_start(event: OverrideEvent, handler_name: str, label: str) -> None: + nonlocal active_handlers, max_handlers, max_events + active_handlers += 1 + max_handlers = max(max_handlers, active_handlers) + per_event_handlers[event.event_id] = per_event_handlers.get(event.event_id, 0) + 1 + active_events.add(event.event_id) + max_events = max(max_events, len(active_events)) + log(f'{label}:{event.order}:{handler_name} start (events={len(active_events)}, handlers={active_handlers})') + + def track_end(event: OverrideEvent, handler_name: str, label: str) -> None: + nonlocal active_handlers + active_handlers -= 1 + count = per_event_handlers.get(event.event_id, 1) - 1 + if count <= 0: + per_event_handlers.pop(event.event_id, None) + active_events.discard(event.event_id) + else: + per_event_handlers[event.event_id] = count + log(f'{label}:{event.order}:{handler_name} end') + + async def run_pair(label: str, use_override: bool) -> None: + reset_metrics() + + async def handler_a(event: OverrideEvent) -> None: + track_start(event, 'A', label) + await sleep_ms(event.ms) + track_end(event, 'A', label) + + async def handler_b(event: OverrideEvent) -> None: + track_start(event, 'B', label) + await sleep_ms(event.ms) + track_end(event, 'B', label) + + bus.off(OverrideEvent) + bus.on(OverrideEvent, handler_a) + bus.on(OverrideEvent, handler_b) + + if use_override: + bus.emit( + OverrideEvent( + label=label, + order=0, + ms=45, + event_concurrency=EventConcurrencyMode.PARALLEL, + event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL, + ) + ) + bus.emit( + OverrideEvent( + label=label, + order=1, + ms=45, + 
event_concurrency=EventConcurrencyMode.PARALLEL, + event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL, + ) + ) + else: + bus.emit(OverrideEvent(label=label, order=0, ms=45)) + bus.emit(OverrideEvent(label=label, order=1, ms=45)) + await bus.wait_until_idle() + log(f'{label} summary -> max events={max_events}, max handlers={max_handlers}') + + await run_pair('bus-defaults', use_override=False) + await run_pair('event-overrides', use_override=True) + + print('\n=== OverrideBus.log_tree() ===') + print(bus.log_tree()) + finally: + await bus.stop(clear=True, timeout=0) + + +async def handler_timeout_demo() -> None: + log = make_logger('timeout:handler-option') + bus = EventBus( + 'TimeoutBus', + event_concurrency='parallel', + event_handler_concurrency='parallel', + event_timeout=0.2, + ) + + try: + + async def slow_handler(event: TimeoutEvent) -> str: + log('slow handler start') + await sleep_ms(event.ms) + log('slow handler finished body (but may already be timed out)') + return 'slow' + + slow_entry = bus.on(TimeoutEvent, slow_handler) + slow_entry.handler_timeout = 0.03 + + async def fast_handler(_event: TimeoutEvent) -> str: + log('fast handler start') + await sleep_ms(10) + log('fast handler end') + return 'fast' + + fast_entry = bus.on(TimeoutEvent, fast_handler) + fast_entry.handler_timeout = 0.1 + + event = bus.emit(TimeoutEvent(ms=60, event_handler_timeout=0.5)) + await event + + slow_result = event.event_results.get(slow_entry.id) + handler_timed_out = slow_result is not None and isinstance(slow_result.error, TimeoutError) + log( + f'slow handler status={slow_result.status if slow_result else "missing"}, timeout_error={"yes" if handler_timed_out else "no"}' + ) + + await bus.wait_until_idle() + print('\n=== TimeoutBus.log_tree() ===') + print(bus.log_tree()) + finally: + await bus.stop(clear=True, timeout=0) + + +async def main() -> None: + await event_concurrency_demo() + await handler_concurrency_demo() + await event_override_demo() + await 
handler_timeout_demo() + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/forwarding_between_busses.py b/examples/forwarding_between_busses.py new file mode 100755 index 0000000..0e68b20 --- /dev/null +++ b/examples/forwarding_between_busses.py @@ -0,0 +1,89 @@ +#!/usr/bin/env -S uv run python +"""Run: uv run python examples/forwarding_between_busses.py""" + +import asyncio + +from bubus import BaseEvent, EventBus + + +class ForwardedEvent(BaseEvent[None]): + message: str + + +async def main() -> None: + bus_a = EventBus('BusA') + bus_b = EventBus('BusB') + bus_c = EventBus('BusC') + + try: + handle_counts = {'BusA': 0, 'BusB': 0, 'BusC': 0} + seen_event_ids = {'BusA': set[str](), 'BusB': set[str](), 'BusC': set[str]()} + + def on_a(event: ForwardedEvent) -> None: + handle_counts['BusA'] += 1 + seen_event_ids['BusA'].add(event.event_id) + print(f'[BusA] handled {event.event_id} (count={handle_counts["BusA"]})') + + def on_b(event: ForwardedEvent) -> None: + handle_counts['BusB'] += 1 + seen_event_ids['BusB'].add(event.event_id) + print(f'[BusB] handled {event.event_id} (count={handle_counts["BusB"]})') + + def on_c(event: ForwardedEvent) -> None: + handle_counts['BusC'] += 1 + seen_event_ids['BusC'].add(event.event_id) + print(f'[BusC] handled {event.event_id} (count={handle_counts["BusC"]})') + + bus_a.on(ForwardedEvent, on_a) + bus_b.on(ForwardedEvent, on_b) + bus_c.on(ForwardedEvent, on_c) + + # Ring forwarding: + # A -> B -> C -> A + bus_a.on('*', bus_b.emit) + bus_b.on('*', bus_c.emit) + bus_c.on('*', bus_a.emit) + + print('Dispatching ForwardedEvent on BusA with cyclic forwarding A -> B -> C -> A') + + event = bus_a.emit(ForwardedEvent(message='hello across 3 buses')) + await event + await asyncio.gather(bus_a.wait_until_idle(), bus_b.wait_until_idle(), bus_c.wait_until_idle()) + + path = event.event_path + total_handles = handle_counts['BusA'] + handle_counts['BusB'] + handle_counts['BusC'] + + print('\nFinal propagation summary:') + 
print(f'- event_id: {event.event_id}') + print(f'- event_path: {" -> ".join(path)}') + print(f'- handle counts: {handle_counts}') + print( + '- unique ids seen per bus: ' + f'A={len(seen_event_ids["BusA"])}, ' + f'B={len(seen_event_ids["BusB"])}, ' + f'C={len(seen_event_ids["BusC"])}' + ) + print(f'- total handles: {total_handles}') + + handled_once_per_bus = handle_counts['BusA'] == 1 and handle_counts['BusB'] == 1 and handle_counts['BusC'] == 1 + visited_three_buses = len(path) == 3 + + if handled_once_per_bus and visited_three_buses: + print('\nLoop prevention confirmed: each bus handled the event at most once.') + else: + print('\nUnexpected forwarding result. Check handlers/forwarding setup.') + + print('\n=== BusA log_tree() ===') + print(bus_a.log_tree()) + print('\n=== BusB log_tree() ===') + print(bus_b.log_tree()) + print('\n=== BusC log_tree() ===') + print(bus_c.log_tree()) + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + await bus_c.stop(clear=True, timeout=0) + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/immediate_event_processing.py b/examples/immediate_event_processing.py new file mode 100755 index 0000000..b8b8560 --- /dev/null +++ b/examples/immediate_event_processing.py @@ -0,0 +1,141 @@ +#!/usr/bin/env -S uv run python +"""Run: uv run python examples/immediate_event_processing.py""" + +import asyncio +from typing import Literal + +from bubus import BaseEvent, EventBus, EventConcurrencyMode + + +class ParentEvent(BaseEvent[None]): + mode: Literal['immediate', 'queued'] + + +class ChildEvent(BaseEvent[None]): + scenario: Literal['immediate', 'queued'] + + +class SiblingEvent(BaseEvent[None]): + scenario: Literal['immediate', 'queued'] + + +async def delay_ms(ms: int) -> None: + await asyncio.sleep(ms / 1000.0) + + +async def main() -> None: + bus_a = EventBus( + name='QueueJumpDemoA', + event_concurrency='bus-serial', + event_handler_concurrency='serial', + ) + bus_b = 
EventBus( + name='QueueJumpDemoB', + event_concurrency='bus-serial', + event_handler_concurrency='serial', + ) + + try: + step = 0 + + def log(message: str) -> None: + nonlocal step + step += 1 + print(f'{step:02d}. {message}') + + # Forward sibling/child events from bus_a -> bus_b. + def forward_child(event: ChildEvent) -> None: + log(f'[forward] {event.event_type}({event.scenario}) bus_a -> bus_b') + bus_b.emit(event) + + def forward_sibling(event: SiblingEvent) -> None: + log(f'[forward] {event.event_type}({event.scenario}) bus_a -> bus_b') + bus_b.emit(event) + + bus_a.on(ChildEvent, forward_child) + bus_a.on(SiblingEvent, forward_sibling) + + # Local handlers on bus_a. + async def on_child_a(event: ChildEvent) -> None: + log(f'[bus_a] child start ({event.scenario})') + await delay_ms(8) + log(f'[bus_a] child end ({event.scenario})') + + async def on_sibling_a(event: SiblingEvent) -> None: + log(f'[bus_a] sibling start ({event.scenario})') + await delay_ms(14) + log(f'[bus_a] sibling end ({event.scenario})') + + bus_a.on(ChildEvent, on_child_a) + bus_a.on(SiblingEvent, on_sibling_a) + + # Forwarded handlers on bus_b. + async def on_child_b(event: ChildEvent) -> None: + log(f'[bus_b] child start ({event.scenario})') + await delay_ms(4) + log(f'[bus_b] child end ({event.scenario})') + + async def on_sibling_b(event: SiblingEvent) -> None: + log(f'[bus_b] sibling start ({event.scenario})') + await delay_ms(6) + log(f'[bus_b] sibling end ({event.scenario})') + + bus_b.on(ChildEvent, on_child_b) + bus_b.on(SiblingEvent, on_sibling_b) + + # Parent handler queues sibling first, then child, then compares await behavior. 
+ async def on_parent(event: ParentEvent) -> None: + log(f'[parent:{event.mode}] start') + + event.event_bus.emit(SiblingEvent(scenario=event.mode)) + log(f'[parent:{event.mode}] sibling queued') + + child = event.event_bus.emit(ChildEvent(scenario=event.mode)) + log(f'[parent:{event.mode}] child queued') + + if event.mode == 'immediate': + # Immediate: queue-jump by awaiting child directly inside handler context. + log(f'[parent:{event.mode}] await child') + await child + log(f'[parent:{event.mode}] child await resolved') + else: + # Queued: wait on completion signal without queue-jump processing. + log(f'[parent:{event.mode}] await child.event_completed()') + await child.event_completed() + log(f'[parent:{event.mode}] child.event_completed() resolved') + + log(f'[parent:{event.mode}] end') + + bus_a.on(ParentEvent, on_parent) + + async def run_scenario(mode: Literal['immediate', 'queued']) -> None: + log(f'----- scenario={mode} -----') + + parent = bus_a.emit( + ParentEvent( + mode=mode, + event_concurrency=EventConcurrencyMode.PARALLEL, + ) + ) + + await parent + await asyncio.gather(bus_a.wait_until_idle(), bus_b.wait_until_idle()) + log(f'----- done scenario={mode} -----') + + await run_scenario('immediate') + await run_scenario('queued') + + print('\nExpected behavior:') + print('- immediate: child runs before sibling (queue-jump) and parent resumes right after child.') + print('- queued: sibling runs first, child waits in normal queue order, parent resumes later.') + print('\n=== bus_a.log_tree() ===') + print(bus_a.log_tree()) + print('\n=== bus_b.log_tree() ===') + print(bus_b.log_tree()) + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/log_tree_demo.py b/examples/log_tree_demo.py new file mode 100755 index 0000000..8a86d58 --- /dev/null +++ b/examples/log_tree_demo.py @@ -0,0 +1,90 @@ +#!/usr/bin/env -S uv run python +"""Run: uv run 
python examples/log_tree_demo.py""" + +import asyncio +from typing import Any + +from bubus import BaseEvent, EventBus + + +class RootEvent(BaseEvent[str]): + url: str + + +class ChildEvent(BaseEvent[str]): + tab_id: str + + +class GrandchildEvent(BaseEvent[str]): + status: str + + +async def delay_ms(ms: int) -> None: + await asyncio.sleep(ms / 1000.0) + + +async def main() -> None: + bus_a = EventBus('BusA') + bus_b = EventBus('BusB') + + try: + + async def forward_to_bus_b(event: BaseEvent[Any]) -> str: + await delay_ms(20) + bus_b.emit(event) + return 'forwarded_to_bus_b' + + bus_a.on('*', forward_to_bus_b) + + async def root_fast_handler(event: RootEvent) -> str: + await delay_ms(10) + child = event.event_bus.emit(ChildEvent(tab_id='tab-123', event_timeout=0.1)) + await child + return 'root_fast_handler_ok' + + async def root_slow_handler(event: RootEvent) -> str: + event.event_bus.emit(ChildEvent(tab_id='tab-timeout', event_timeout=0.1)) + await delay_ms(400) + return 'root_slow_handler_timeout' + + bus_a.on(RootEvent, root_fast_handler) + bus_a.on(RootEvent, root_slow_handler) + + async def child_slow_handler(_event: ChildEvent) -> str: + await delay_ms(150) + return 'child_slow_handler_done' + + async def child_fast_handler(event: ChildEvent) -> str: + await delay_ms(10) + grandchild = event.event_bus.emit(GrandchildEvent(status='ok', event_timeout=0.05)) + await grandchild + return 'child_handler_ok' + + async def grandchild_fast_handler(_event: GrandchildEvent) -> str: + await delay_ms(5) + return 'grandchild_fast_handler_ok' + + async def grandchild_slow_handler(_event: GrandchildEvent) -> str: + await delay_ms(60) + return 'grandchild_slow_handler_timeout' + + bus_b.on(ChildEvent, child_slow_handler) + bus_b.on(ChildEvent, child_fast_handler) + bus_b.on(GrandchildEvent, grandchild_fast_handler) + bus_b.on(GrandchildEvent, grandchild_slow_handler) + + root_event = bus_a.emit(RootEvent(url='https://example.com', event_timeout=0.25)) + await root_event + + 
print('\n=== BusA log_tree ===') + print(bus_a.log_tree()) + + print('\n=== BusB log_tree ===') + print(bus_b.log_tree()) + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/parent_child_tracking.py b/examples/parent_child_tracking.py new file mode 100755 index 0000000..e327978 --- /dev/null +++ b/examples/parent_child_tracking.py @@ -0,0 +1,133 @@ +#!/usr/bin/env -S uv run python +"""Run: uv run python examples/parent_child_tracking.py""" + +import asyncio + +from bubus import BaseEvent, EventBus + + +class ParentEvent(BaseEvent[str]): + workflow: str + + +class ChildEvent(BaseEvent[str]): + stage: str + + +class GrandchildEvent(BaseEvent[str]): + note: str + + +def short_id(value: str | None) -> str: + if value is None: + return 'none' + return value[-8:] + + +async def main() -> None: + bus = EventBus(name='ParentChildTrackingBus') + + try: + + async def on_child(event: ChildEvent) -> str: + print(f'child handler start: {event.event_type}#{short_id(event.event_id)}') + + grandchild = event.event_bus.emit(GrandchildEvent(note=f'spawned by {event.stage}')) + print( + ' child dispatched grandchild: ' + f'{grandchild.event_type}#{short_id(grandchild.event_id)} ' + f'parent_id={short_id(grandchild.event_parent_id)}' + ) + + await grandchild + print(f' child resumed after awaiting grandchild: {short_id(grandchild.event_id)}') + return f'child_completed:{event.stage}' + + async def on_grandchild(event: GrandchildEvent) -> str: + print(f'grandchild handler: {event.event_type}#{short_id(event.event_id)} note="{event.note}"') + return f'grandchild_completed:{event.note}' + + async def on_parent(event: ParentEvent) -> str: + print(f'parent handler start: {event.event_type}#{short_id(event.event_id)} workflow="{event.workflow}"') + + awaited_child = event.event_bus.emit(ChildEvent(stage='awaited-child')) + print( + ' parent emitted child: ' + 
f'{awaited_child.event_type}#{short_id(awaited_child.event_id)} ' + f'parent_id={short_id(awaited_child.event_parent_id)}' + ) + await awaited_child + print(f' parent resumed after awaited child: {short_id(awaited_child.event_id)}') + + background_child = event.event_bus.emit(ChildEvent(stage='background-child')) + print( + ' parent dispatched second child: ' + f'{background_child.event_type}#{short_id(background_child.event_id)} ' + f'parent_id={short_id(background_child.event_parent_id)}' + ) + + direct_grandchild = event.event_bus.emit(GrandchildEvent(note='directly from parent')) + print( + ' parent dispatched grandchild type directly: ' + f'{direct_grandchild.event_type}#{short_id(direct_grandchild.event_id)} ' + f'parent_id={short_id(direct_grandchild.event_parent_id)}' + ) + await direct_grandchild + + return 'parent_completed' + + bus.on(ChildEvent, on_child) + bus.on(GrandchildEvent, on_grandchild) + bus.on(ParentEvent, on_parent) + + parent = bus.emit(ParentEvent(workflow='demo-parent-child-tracking')) + await parent + await bus.wait_until_idle() + + print('\n=== Event History Relationships ===') + history = sorted(bus.event_history.values(), key=lambda event: event.event_created_at) + + for item in history: + parent_event = bus.event_history.get(item.event_parent_id) if item.event_parent_id else None + print( + ' | '.join( + [ + f'{item.event_type}#{short_id(item.event_id)}', + ( + f'parent={parent_event.event_type}#{short_id(parent_event.event_id)}' + if parent_event is not None + else 'parent=none' + ), + f'isChildOfRoot={bus.event_is_child_of(item, parent)}', + f'rootIsParentOf={bus.event_is_parent_of(parent, item)}', + ] + ) + ) + + first_child = next((event for event in history if event.event_type == 'ChildEvent'), None) + nested_grandchild = next( + ( + event + for event in history + if event.event_type == 'GrandchildEvent' + and first_child is not None + and event.event_parent_id == first_child.event_id + ), + None, + ) + if first_child is not 
None and nested_grandchild is not None: + print( + 'grandchild->child relationship check: ' + f'{nested_grandchild.event_type}#{short_id(nested_grandchild.event_id)} ' + f'is child of {first_child.event_type}#{short_id(first_child.event_id)} = ' + f'{bus.event_is_child_of(nested_grandchild, first_child)}' + ) + + print('\n=== bus.log_tree() ===') + print(bus.log_tree()) + finally: + await bus.stop(clear=True, timeout=0) + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/examples/simple.py b/examples/simple.py new file mode 100755 index 0000000..393e6ca --- /dev/null +++ b/examples/simple.py @@ -0,0 +1,102 @@ +#!/usr/bin/env -S uv run python +"""Run: uv run python examples/simple.py""" + +import asyncio +from typing import Any, Literal + +from pydantic import BaseModel + +from bubus import BaseEvent, EventBus + + +class RegisterUserResult(BaseModel): + user_id: str + welcome_email_sent: bool + + +class RegisterUserEvent(BaseEvent[RegisterUserResult]): + email: str + plan: Literal['free', 'pro'] + event_result_type: Any = RegisterUserResult + + +class AuditEvent(BaseEvent[None]): + message: str + + +def short_id(event_id: str) -> str: + return event_id[-8:] + + +async def main() -> None: + bus = EventBus(name='SimpleExampleBus') + + try: + # 1) Observe every event via wildcard registration. + def on_wildcard(event: BaseEvent[Any]) -> None: + print(f'[wildcard] {event.event_type}#{short_id(event.event_id)}') + + bus.on('*', on_wildcard) + + # 2) Register a typed class handler. + async def on_register_user(event: RegisterUserEvent) -> RegisterUserResult: + print(f'[class handler] Creating account for {event.email} ({event.plan})') + return RegisterUserResult( + user_id=f'user_{event.email.split("@", maxsplit=1)[0]}', + welcome_email_sent=True, + ) + + bus.on(RegisterUserEvent, on_register_user) + + # 3) Register by string event type. 
+ def on_audit(event: BaseEvent[Any]) -> None: + print(f'[string handler] Audit log: {getattr(event, "message", "")}') + + bus.on('AuditEvent', on_audit) + + # 4) Intentionally return an invalid shape for runtime result validation. + def on_register_user_invalid(_event: BaseEvent[Any]) -> object: + return {'user_id': 123, 'welcome_email_sent': 'yes'} + + bus.on('RegisterUserEvent', on_register_user_invalid) + + # Dispatch a simple event handled by string registration. + await bus.emit(AuditEvent(message='Starting simple bubus example')) + + # Dispatch typed event; one handler is valid, one is invalid. + register_event = bus.emit( + RegisterUserEvent( + email='ada@example.com', + plan='pro', + ) + ) + await register_event + + print('\nRegisterUserEvent handler outcomes:') + for result in register_event.event_results.values(): + if result.status == 'completed': + print(f'- {result.handler_name}: completed -> {result.result!r}') + continue + if result.status == 'error': + message = str(result.error) if result.error is not None else 'unknown error' + print(f'- {result.handler_name}: error -> {message}') + continue + print(f'- {result.handler_name}: {result.status}') + + first_valid = await register_event.event_result(raise_if_any=False, raise_if_none=False) + all_errors = [result.error for result in register_event.event_results.values() if result.error is not None] + + print(f'\nFirst valid parsed result: {first_valid!r}') + print(f'Total event errors: {len(all_errors)}') + for index, error in enumerate(all_errors, start=1): + print(f' {index}. 
{error}') + + await bus.wait_until_idle() + print('\n=== bus.log_tree() ===') + print(bus.log_tree()) + finally: + await bus.stop(clear=True, timeout=0) + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/pyproject.toml b/pyproject.toml index 132c3bc..64a42af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,9 +2,10 @@ name = "bubus" description = "Advanced Pydantic-powered event bus with async support" authors = [{ name = "Nick Sweeting" }] -version = "1.5.6" +version = "2.2.1" readme = "README.md" -requires-python = ">=3.11,<4.0" +requires-python = ">=3.11" +urls = {Repository = "https://github.com/pirate/bbus"} classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", @@ -18,13 +19,57 @@ dependencies = [ "typing-extensions>=4.12.2", "uuid7>=0.1.0", ] +[project.optional-dependencies] +postgres = [ + "asyncpg>=0.31.0", +] +nats = [ + "nats-py>=2.13.1", +] +redis = [ + "redis>=7.1.1", +] +bridges = [ + "asyncpg>=0.31.0", + "nats-py>=2.13.1", + "redis>=7.1.1", +] -[project.urls] -Repository = "https://github.com/browser-use/bubus" +[dependency-groups] +dev = [ + "ruff>=0.15.1", + "build>=1.2.2", + "pytest>=8.3.5", + "pytest-asyncio>=1.1.0", + "pytest-httpserver>=1.0.8", + "ipdb>=0.13.13", + "codespell>=2.4.1", + "pyright>=1.1.404", + "ty>=0.0.1a19", + "pytest-xdist>=3.7.0", + "psutil>=7.0.0", + "pytest-cov>=6.2.1", + "pytest-timeout>=2.4.0", + "prek>=0.3.3", +] [build-system] -requires = ["hatchling"] -build-backend = "hatchling.build" +requires = ["uv_build>=0.10.2,<0.11.0"] +build-backend = "uv_build" + +[tool.uv.build-backend] +module-name = "bubus" +module-root = "." 
+source-exclude = [ + "/examples", + "/tests", + "/bubus-ts", + "/test.sh", + "/.github", + "/.pytest_cache", + "/.cursor", + "/.claude", +] [tool.codespell] ignore-words-list = "bu,wit,dont,cant,wont,re-use,re-used,re-using,re-usable,thats,doesnt" @@ -40,6 +85,9 @@ select = ["ASYNC", "E", "F", "FAST", "I", "PLE"] ignore = ["ASYNC109", "E101", "E402", "E501", "F841", "E731", "W291"] # TODO: determine if adding timeouts to all the unbounded async functions is needed / worth-it so we can un-ignore ASYNC109 unfixable = ["E101", "E402", "E501", "F841", "E731"] +[tool.ruff.lint.per-file-ignores] +"tests/**/*.py" = ["ASYNC220", "ASYNC221", "ASYNC240"] + [tool.ruff.format] indent-style = "space" quote-style = "single" @@ -51,18 +99,17 @@ skip-magic-trailing-comma = false [tool.pyright] typeCheckingMode = "strict" reportMissingImports = "error" -reportMissingTypeStubs = false +reportMissingTypeStubs = "error" +reportPrivateUsage = "error" venvPath = "." venv = ".venv" - -[tool.hatch.build] -include = [ - "bubus/**/*.py", - "!tests/**/*.py", +include = ["bubus", "examples", "tests"] +executionEnvironments = [ + { root = "tests", extraPaths = ["."] }, ] -[tool.hatch.metadata] -allow-direct-references = true +[tool.ty.src] +include = ["bubus", "examples", "tests"] [tool.pytest.ini_options] asyncio_mode = "auto" @@ -84,6 +131,7 @@ log_level = "DEBUG" [tool.coverage.run] source = ["bubus"] omit = [ + "ui/*", "*/tests/*", "*/__pycache__/*", "*.pyc", @@ -108,21 +156,3 @@ precision = 2 [tool.coverage.html] directory = "htmlcov" - - -[tool.uv] -dev-dependencies = [ - "ruff>=0.11.2", - "build>=1.2.2", - "pytest>=8.3.5", - "pytest-asyncio>=1.1.0", - "pytest-httpserver>=1.0.8", - "ipdb>=0.13.13", - "pre-commit>=4.2.0", - "codespell>=2.4.1", - "pyright>=1.1.404", - "ty>=0.0.1a19", - "pytest-xdist>=3.7.0", - "psutil>=7.0.0", - "pytest-cov>=6.2.1", -] diff --git a/test.sh b/test.sh new file mode 100755 index 0000000..c31c561 --- /dev/null +++ b/test.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env 
bash +set -euo pipefail + +prek run --all-files + +# Run Python and TypeScript test phases sequentially to avoid cross-runtime +# resource contention that can cause performance-threshold flakes. +uv run pytest + +( + cd bubus-ts + pnpm run test +) + +shopt -s nullglob +python_example_pids=() +for example_file in examples/*.py; do + timeout 120 uv run python "$example_file" & + python_example_pids+=("$!") +done +for pid in "${python_example_pids[@]}"; do + wait "$pid" +done + +( + cd bubus-ts + shopt -s nullglob + ts_example_pids=() + for example_file in examples/*.ts; do + timeout 120 node --import tsx "$example_file" & + ts_example_pids+=("$!") + done + for pid in "${ts_example_pids[@]}"; do + wait "$pid" + done +) + +# Perf suites are expensive and can push total runtime well past the main CI budget. +# Run them explicitly with RUN_PERF=1. +if [[ "${RUN_PERF:-0}" == "1" ]]; then + uv run tests/performance_runtime.py + ( + cd bubus-ts + pnpm run perf + ) +else + echo "Skipping perf suites (set RUN_PERF=1 to include them)." 
+fi diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..5329cb1 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +"""Test support package for local utility entrypoints.""" diff --git a/tests/bridge_listener_worker.py b/tests/bridge_listener_worker.py new file mode 100644 index 0000000..0a7ab0b --- /dev/null +++ b/tests/bridge_listener_worker.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +import asyncio +import json +import sys +from typing import Any + +from anyio import Path as AnyPath + +from bubus import HTTPEventBridge, SocketEventBridge +from bubus.bridge_jsonl import JSONLEventBridge +from bubus.bridge_nats import NATSEventBridge +from bubus.bridge_postgres import PostgresEventBridge +from bubus.bridge_redis import RedisEventBridge +from bubus.bridge_sqlite import SQLiteEventBridge + + +def _make_listener_bridge(config: dict[str, Any]) -> Any: + kind = str(config['kind']) + if kind == 'http': + return HTTPEventBridge(listen_on=str(config['endpoint'])) + if kind == 'socket': + return SocketEventBridge(path=str(config['path'])) + if kind == 'jsonl': + return JSONLEventBridge(str(config['path']), poll_interval=0.05) + if kind == 'sqlite': + return SQLiteEventBridge(str(config['path']), str(config['table']), poll_interval=0.05) + if kind == 'redis': + return RedisEventBridge(str(config['url'])) + if kind == 'nats': + return NATSEventBridge(str(config['server']), str(config['subject'])) + if kind == 'postgres': + return PostgresEventBridge(str(config['url'])) + raise ValueError(f'Unsupported bridge kind: {kind}') + + +async def _main(config_path: str) -> None: + config = json.loads(await AnyPath(config_path).read_text(encoding='utf-8')) + ready_path = AnyPath(str(config['ready_path'])) + output_path = AnyPath(str(config['output_path'])) + done = asyncio.Event() + + bridge = _make_listener_bridge(config) + + async def _on_event(event: Any) -> None: + await 
output_path.write_text(json.dumps(event.model_dump(mode='json')), encoding='utf-8') + done.set() + + bridge.on('IPCPingEvent', _on_event) + await bridge.start() + await ready_path.write_text('ready', encoding='utf-8') + try: + await asyncio.wait_for(done.wait(), timeout=30.0) + finally: + await bridge.close() + + +if __name__ == '__main__': + asyncio.run(_main(sys.argv[1])) diff --git a/tests/conftest.py b/tests/conftest.py index f8c1442..8ac7ecd 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,4 @@ +import importlib import os import pytest @@ -6,4 +7,4 @@ @pytest.fixture(autouse=True) def set_log_level(): os.environ['BUBUS_LOGGING_LEVEL'] = 'WARNING' - import bubus # noqa # type: ignore + importlib.import_module('bubus') diff --git a/tests/performance_runtime.py b/tests/performance_runtime.py new file mode 100644 index 0000000..4353a7c --- /dev/null +++ b/tests/performance_runtime.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +import argparse +import asyncio +import json +import logging +import sys +from typing import Any + +try: + from .performance_scenarios import PERF_SCENARIO_IDS, PerfInput, run_all_perf_scenarios, run_perf_scenario_by_id +except ImportError: # pragma: no cover - direct script execution path + from tests.performance_scenarios import PERF_SCENARIO_IDS, PerfInput, run_all_perf_scenarios, run_perf_scenario_by_id + +TABLE_MATRIX = [ + ('50k-events', '1 bus x 50k events x 1 handler'), + ('500-buses-x-100-events', '500 buses x 100 events x 1 handler'), + ('1-event-x-50k-parallel-handlers', '1 bus x 1 event x 50k parallel handlers'), + ('50k-one-off-handlers', '1 bus x 50k events x 50k one-off handlers'), + ('worst-case-forwarding-timeouts', 'Worst case (N buses x N events x N handlers)'), +] + + +def _format_cell(result: dict[str, Any]) -> str: + if result.get('ok') is False: + error = str(result.get('error') or 'failed') + compact = error.replace('\n', ' ').strip() + if len(compact) > 42: + compact = compact[:39] + 
'...' + return f'`failed: {compact}`' + + ms_per_event = float(result['ms_per_event']) + unit = str(result.get('ms_per_event_unit', 'event')) + latency = f'{ms_per_event:.3f}ms/{unit}' + + peak_rss_kb_per_event = result.get('peak_rss_kb_per_event') + if isinstance(peak_rss_kb_per_event, (int, float)): + peak_unit = str(result.get('peak_rss_unit', 'event')) + return f'`{latency}`, `{float(peak_rss_kb_per_event):.3f}kb/{peak_unit}`' + return f'`{latency}`' + + +def _print_markdown_matrix(runtime_name: str, results: list[dict[str, Any]]) -> None: + by_scenario = {str(result['scenario_id']): result for result in results} + + header_cols = ['Runtime'] + [label for _, label in TABLE_MATRIX] + print('| ' + ' | '.join(header_cols) + ' |') + print('|' + '|'.join([' ------------------ ' for _ in header_cols]) + '|') + + row_cells = [runtime_name] + for scenario_id, _ in TABLE_MATRIX: + result = by_scenario.get(scenario_id) + if result is None: + row_cells.append('`n/a`') + continue + row_cells.append(_format_cell(result)) + + print('| ' + ' | '.join(row_cells) + ' |') + + +def _build_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(description='Run Python runtime performance scenarios for bubus') + parser.add_argument('--scenario', type=str, default=None, help=f'One scenario id: {", ".join(PERF_SCENARIO_IDS)}') + parser.add_argument( + '--no-json', + action='store_false', + dest='json', + help='Disable full JSON output (enabled by default).', + ) + parser.set_defaults(json=True) + parser.add_argument( + '--in-process', + action='store_true', + help='Run all scenarios in one process (default runs each scenario in an isolated subprocess).', + ) + parser.add_argument('--child-json', action='store_true', help=argparse.SUPPRESS) + return parser + + +async def _run_scenario_in_subprocess(scenario_id: str) -> dict[str, Any]: + proc = await asyncio.create_subprocess_exec( + sys.executable, + '-m', + 'tests.performance_runtime', + '--scenario', + scenario_id, + 
'--child-json', + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + ) + stdout, stderr = await proc.communicate() + if proc.returncode != 0: + raise RuntimeError( + f'Perf child process failed for scenario={scenario_id!r} exit={proc.returncode} stderr={stderr.decode().strip()}' + ) + payload = stdout.decode().strip() + if not payload: + raise RuntimeError(f'Perf child process produced no output for scenario={scenario_id!r}') + return json.loads(payload) + + +async def _main_async() -> int: + args = _build_parser().parse_args() + logging.getLogger('bubus').setLevel(logging.CRITICAL) + + perf_input = PerfInput(runtime_name='python', log=(lambda _: None) if args.child_json else print) + + if not args.child_json: + print('[python] runtime perf harness starting') + + results: list[dict[str, Any]] + + if args.scenario: + if args.scenario not in PERF_SCENARIO_IDS: + raise ValueError(f'Unknown --scenario value {args.scenario!r}. Expected one of: {", ".join(PERF_SCENARIO_IDS)}') + result = await run_perf_scenario_by_id(perf_input, args.scenario) + result['scenario_id'] = args.scenario + results = [result] + elif args.in_process: + raw_results: list[dict[str, Any]] = await run_all_perf_scenarios(perf_input) + results = [] + for scenario_id, result in zip(PERF_SCENARIO_IDS, raw_results, strict=True): + result_copy = dict(result) + result_copy['scenario_id'] = scenario_id + results.append(result_copy) + else: + results = [] + for scenario_id in PERF_SCENARIO_IDS: + results.append(await _run_scenario_in_subprocess(scenario_id)) + + if args.child_json: + print(json.dumps(results[0], default=str)) + return 0 + + print('[python] runtime perf harness complete') + print('') + print('Markdown matrix row (copy into README):') + _print_markdown_matrix('Python', results) + + if args.json: + print('') + print(json.dumps(results, indent=2, default=str)) + + return 0 + + +def main() -> int: + return asyncio.run(_main_async()) + + +if __name__ == '__main__': + raise 
SystemExit(main()) diff --git a/tests/performance_scenarios.py b/tests/performance_scenarios.py new file mode 100644 index 0000000..bc2c266 --- /dev/null +++ b/tests/performance_scenarios.py @@ -0,0 +1,823 @@ +from __future__ import annotations + +import asyncio +import gc +import math +import os +import time +from collections.abc import Callable +from dataclasses import dataclass, field +from typing import Any + +import psutil + +from bubus import BaseEvent, EventBus + +TRIM_TARGET = 1 +HISTORY_LIMIT_STREAM = 512 +HISTORY_LIMIT_ON_OFF = 128 +HISTORY_LIMIT_EPHEMERAL_BUS = 128 +HISTORY_LIMIT_FIXED_HANDLERS = 128 +HISTORY_LIMIT_WORST_CASE = 128 +WORST_CASE_IMMEDIATE_TIMEOUT_MS = 0.0001 +WORST_CASE_IMMEDIATE_TIMEOUT_SECONDS = WORST_CASE_IMMEDIATE_TIMEOUT_MS / 1000.0 + + +@dataclass(slots=True) +class PerfLimits: + single_run_ms: float = 120_000.0 + worst_case_ms: float = 180_000.0 + + +@dataclass(slots=True) +class PerfInput: + runtime_name: str = 'python' + log: Callable[[str], None] = print + now: Callable[[], float] = lambda: time.perf_counter() * 1000.0 + limits: PerfLimits = field(default_factory=PerfLimits) + + async def sleep(self, ms: float) -> None: + await asyncio.sleep(ms / 1000.0) + + def force_gc(self) -> None: + gc.collect() + + def get_memory_usage(self) -> dict[str, int]: + process = psutil.Process(os.getpid()) + return {'rss': int(process.memory_info().rss)} + + def get_cpu_time_ms(self) -> float: + process = psutil.Process(os.getpid()) + cpu = process.cpu_times() + return float((cpu.user + cpu.system) * 1000.0) + + +@dataclass(slots=True) +class MemoryTracker: + hooks: PerfInput + baseline_rss: int = 0 + peak_rss: int = 0 + + def __post_init__(self) -> None: + snapshot = self.hooks.get_memory_usage() + self.baseline_rss = snapshot['rss'] + self.peak_rss = snapshot['rss'] + + def sample(self) -> None: + snapshot = self.hooks.get_memory_usage() + if snapshot['rss'] > self.peak_rss: + self.peak_rss = snapshot['rss'] + + def peak_rss_kb_per_event(self, 
events: int) -> float | None: + if events <= 0: + return None + delta = float(max(0, self.peak_rss - self.baseline_rss)) + return (delta / 1024.0) / float(events) + + +class PerfSimpleEvent(BaseEvent[int]): + batch_id: int = 0 + value: int = 0 + + +class PerfTrimEvent(BaseEvent[None]): + pass + + +class PerfTrimEphemeralEvent(BaseEvent[None]): + pass + + +class PerfFixedHandlersEvent(BaseEvent[int]): + base_value: int = 0 + + +class PerfTrimFixedHandlersEvent(BaseEvent[None]): + pass + + +class PerfRequestEvent(BaseEvent[int]): + value: int = 0 + + +class PerfTrimOnOffEvent(BaseEvent[None]): + pass + + +class WCParent(BaseEvent[int]): + iteration: int = 0 + value: int = 0 + + +class WCChild(BaseEvent[int]): + iteration: int = 0 + value: int = 0 + + +class WCGrandchild(BaseEvent[int]): + iteration: int = 0 + value: int = 0 + + +class WCTrimEvent(BaseEvent[None]): + pass + + +def _format_ms_per_event(value: float, unit: str = 'event') -> str: + return f'{value:.3f}ms/{unit}' + + +def _format_kb_per_unit(value: float, unit: str = 'event') -> str: + return f'{value:.3f}kb/{unit}' + + +def _format_ms(value: float) -> str: + return f'{value:.3f}ms' + + +async def _wait_for_runtime_settle(hooks: PerfInput) -> None: + await hooks.sleep(50) + + +async def _trim_bus_history_to_one_event(bus: EventBus, trim_event_type: type[BaseEvent[Any]]) -> None: + prev_size = bus.event_history.max_history_size + prev_drop = bus.event_history.max_history_drop + bus.event_history.max_history_size = TRIM_TARGET + bus.event_history.max_history_drop = True + ev = bus.emit(trim_event_type()) + await ev + await bus.wait_until_idle() + assert len(bus.event_history) <= TRIM_TARGET, f'trim-to-1 failed for {bus}: {len(bus.event_history)}/{TRIM_TARGET}' + bus.event_history.max_history_size = prev_size + bus.event_history.max_history_drop = prev_drop + + +async def _dispatch_naive( + bus: EventBus, + events: list[BaseEvent[Any]], + on_dispatched: Callable[[BaseEvent[Any]], None] | None = None, +) -> 
tuple[list[BaseEvent[Any]], str | None]: + queued: list[BaseEvent[Any]] = [] + error: str | None = None + + for event in events: + try: + queued_event = bus.emit(event) + queued.append(queued_event) + if on_dispatched is not None: + on_dispatched(queued_event) + except Exception as exc: + error = f'{type(exc).__name__}: {exc}' + break + + if queued: + await asyncio.gather(*queued, return_exceptions=True) + await bus.wait_until_idle() + + return queued, error + + +def _scenario_result( + *, + scenario: str, + total_events: int, + total_ms: float, + ms_per_event: float, + ms_per_event_unit: str, + peak_rss_kb_per_event: float | None, + peak_rss_unit: str = 'event', + throughput: int, + ok: bool, + error: str | None, + extra: dict[str, Any] | None = None, +) -> dict[str, Any]: + result: dict[str, Any] = { + 'scenario': scenario, + 'ok': ok, + 'error': error, + 'total_events': total_events, + 'total_ms': total_ms, + 'ms_per_event': ms_per_event, + 'ms_per_event_unit': ms_per_event_unit, + 'ms_per_event_label': _format_ms_per_event(ms_per_event, ms_per_event_unit), + 'peak_rss_kb_per_event': peak_rss_kb_per_event, + 'peak_rss_unit': peak_rss_unit, + 'peak_rss_kb_per_event_label': ( + None if peak_rss_kb_per_event is None else _format_kb_per_unit(peak_rss_kb_per_event, peak_rss_unit) + ), + 'throughput': throughput, + } + if extra: + result.update(extra) + return result + + +def _record(hooks: PerfInput, metrics: dict[str, Any]) -> None: + parts = [ + f'events={metrics.get("total_events", "n/a")}', + f'total={_format_ms(float(metrics.get("total_ms", 0.0)))}', + f'latency={_format_ms_per_event(float(metrics.get("ms_per_event", 0.0)), str(metrics.get("ms_per_event_unit", "event")))}', + ] + peak_rss = metrics.get('peak_rss_kb_per_event') + if isinstance(peak_rss, (int, float)): + parts.append(f'peak_rss={_format_kb_per_unit(float(peak_rss), str(metrics.get("peak_rss_unit", "event")))}') + parts.append(f'throughput={int(metrics.get("throughput", 0))}/s') + 
parts.append(f'ok={"yes" if metrics.get("ok", False) else "no"}') + if metrics.get('error'): + parts.append(f'error={metrics["error"]}') + if isinstance(metrics.get('cpu_ms'), (int, float)): + parts.append(f'cpu={_format_ms(float(metrics["cpu_ms"]))}') + if isinstance(metrics.get('cpu_ms_per_event'), (int, float)): + parts.append( + f'cpu_per_unit={_format_ms_per_event(float(metrics["cpu_ms_per_event"]), str(metrics.get("ms_per_event_unit", "event")))}' + ) + hooks.log(f'[{hooks.runtime_name}] {metrics["scenario"]}: ' + ' '.join(parts)) + + +async def run_perf_50k_events(input: PerfInput) -> dict[str, Any]: + hooks = input + scenario = '50k events' + total_events = int(50_000) + bus = EventBus( + name='Perf50kBus', + max_history_size=HISTORY_LIMIT_STREAM, + max_history_drop=True, + middlewares=[], + ) + + processed_count: int = 0 + checksum: int = 0 + expected_checksum: int = 0 + sampled_early_event_ids: list[str] = [] + + def simple_handler(event: PerfSimpleEvent) -> None: + nonlocal processed_count, checksum + processed_count += 1 + checksum += event.value + event.batch_id + + bus.on(PerfSimpleEvent, simple_handler) + + memory = MemoryTracker(hooks) + t0 = hooks.now() + cpu_t0 = hooks.get_cpu_time_ms() + + batch_size = 512 + dispatched_events: int = 0 + dispatch_error: str | None = None + while dispatched_events < total_events: + queued_batch: list[BaseEvent[Any]] = [] + this_batch = min(batch_size, total_events - dispatched_events) + for _ in range(this_batch): + i = dispatched_events + batch_id = i // batch_size + value = (i % 97) + 1 + expected_checksum += value + batch_id + try: + queued_event = bus.emit(PerfSimpleEvent(batch_id=batch_id, value=value)) + queued_batch.append(queued_event) + if len(sampled_early_event_ids) < 64: + sampled_early_event_ids.append(queued_event.event_id) + dispatched_events += 1 + except Exception as exc: + dispatch_error = f'{type(exc).__name__}: {exc}' + break + if queued_batch: + await asyncio.gather(*queued_batch, 
return_exceptions=True) + await bus.wait_until_idle() + if dispatched_events % 2048 == 0: + memory.sample() + if dispatch_error is not None: + break + + memory.sample() + + await _trim_bus_history_to_one_event(bus, PerfTrimEvent) + t1 = hooks.now() + cpu_t1 = hooks.get_cpu_time_ms() + await _wait_for_runtime_settle(hooks) + memory.sample() + + total_ms = t1 - t0 + ms_denominator = max(dispatched_events, 1) + ms_per_event = total_ms / float(ms_denominator) + throughput = int(round(dispatched_events / max(total_ms / 1000.0, 1e-9))) + peak_rss_kb_per_event = memory.peak_rss_kb_per_event(ms_denominator) + cpu_ms = max(0.0, cpu_t1 - cpu_t0) + cpu_ms_per_event = cpu_ms / float(ms_denominator) + + expected_for_dispatched = 0 + for i in range(dispatched_events): + batch_id = i // 512 + value = (i % 97) + 1 + expected_for_dispatched += value + batch_id + + sampled_evicted_count = sum(1 for event_id in sampled_early_event_ids if event_id not in bus.event_history) + ok = ( + dispatch_error is None + and dispatched_events == total_events + and processed_count == dispatched_events + and checksum == expected_for_dispatched + ) + + result = _scenario_result( + scenario=scenario, + total_events=dispatched_events, + total_ms=total_ms, + ms_per_event=ms_per_event, + ms_per_event_unit='event', + peak_rss_kb_per_event=peak_rss_kb_per_event, + throughput=throughput, + ok=ok, + error=dispatch_error, + extra={ + 'attempted_events': total_events, + 'processed_count': processed_count, + 'checksum': checksum, + 'expected_checksum': expected_for_dispatched, + 'sampled_evicted_count': sampled_evicted_count, + 'cpu_ms': cpu_ms, + 'cpu_ms_per_event': cpu_ms_per_event, + }, + ) + + await bus.stop(timeout=0, clear=True) + _record(hooks, result) + return result + + +async def run_perf_ephemeral_buses(input: PerfInput) -> dict[str, Any]: + hooks = input + scenario = '500 buses x 100 events' + total_buses = int(500) + events_per_bus = int(100) + attempted_events = int(total_buses * events_per_bus) + 
+ processed_count: int = 0 + checksum: int = 0 + expected_checksum: int = 0 + dispatched_events: int = 0 + first_error: str | None = None + + memory = MemoryTracker(hooks) + t0 = hooks.now() + cpu_t0 = hooks.get_cpu_time_ms() + + for bus_index in range(total_buses): + bus = EventBus( + name=f'PerfEphemeralBus_{bus_index}', + max_history_size=HISTORY_LIMIT_EPHEMERAL_BUS, + max_history_drop=True, + middlewares=[], + ) + + def bus_handler(event: PerfSimpleEvent) -> None: + nonlocal processed_count, checksum + processed_count += 1 + checksum += (event.batch_id * 7) + event.value + + bus.on(PerfSimpleEvent, bus_handler) + + events: list[BaseEvent[Any]] = [] + for i in range(events_per_bus): + value = ((bus_index * events_per_bus + i) % 89) + 1 + events.append(PerfSimpleEvent(batch_id=bus_index, value=value)) + + queued, err = await _dispatch_naive(bus, events) + dispatched_events += len(queued) + for i in range(len(queued)): + value = ((bus_index * events_per_bus + i) % 89) + 1 + expected_checksum += (bus_index * 7) + value + + if err and first_error is None: + first_error = err + + memory.sample() + await _trim_bus_history_to_one_event(bus, PerfTrimEphemeralEvent) + await bus.stop(timeout=0, clear=True) + + if bus_index % 10 == 0: + memory.sample() + + total_ms = hooks.now() - t0 + cpu_t1 = hooks.get_cpu_time_ms() + await _wait_for_runtime_settle(hooks) + memory.sample() + + ms_denominator = max(dispatched_events, 1) + ms_per_event = total_ms / float(ms_denominator) + peak_rss_kb_per_event = memory.peak_rss_kb_per_event(ms_denominator) + throughput = int(round(dispatched_events / max(total_ms / 1000.0, 1e-9))) + cpu_ms = max(0.0, cpu_t1 - cpu_t0) + cpu_ms_per_event = cpu_ms / float(ms_denominator) + + ok = ( + first_error is None + and dispatched_events == attempted_events + and processed_count == dispatched_events + and checksum == expected_checksum + ) + + result = _scenario_result( + scenario=scenario, + total_events=dispatched_events, + total_ms=total_ms, + 
ms_per_event=ms_per_event, + ms_per_event_unit='event', + peak_rss_kb_per_event=peak_rss_kb_per_event, + throughput=throughput, + ok=ok, + error=first_error, + extra={ + 'attempted_events': attempted_events, + 'processed_count': processed_count, + 'checksum': checksum, + 'expected_checksum': expected_checksum, + 'cpu_ms': cpu_ms, + 'cpu_ms_per_event': cpu_ms_per_event, + }, + ) + + _record(hooks, result) + return result + + +async def run_perf_single_event_many_fixed_handlers(input: PerfInput) -> dict[str, Any]: + hooks = input + scenario = '1 event x 50k parallel handlers' + total_events = int(1) + total_handlers = int(50_000) + bus = EventBus( + name='PerfFixedHandlersBus', + max_history_size=HISTORY_LIMIT_FIXED_HANDLERS, + max_history_drop=True, + event_handler_concurrency='parallel', + middlewares=[], + ) + + processed_count: int = 0 + checksum: int = 0 + base_value = 11 + expected_checksum: int = 0 + + for i in range(total_handlers): + weight = (i % 29) + 1 + expected_checksum += base_value + weight + + def make_handler(local_weight: int, index: int) -> Callable[[PerfFixedHandlersEvent], None]: + def fixed_handler(event: PerfFixedHandlersEvent) -> None: + nonlocal processed_count, checksum + processed_count += 1 + checksum += event.base_value + local_weight + + fixed_handler.__name__ = f'fixed_handler_{index}' + return fixed_handler + + bus.on(PerfFixedHandlersEvent, make_handler(weight, i)) + + memory = MemoryTracker(hooks) + t0 = hooks.now() + cpu_t0 = hooks.get_cpu_time_ms() + + error: str | None = None + try: + event = bus.emit(PerfFixedHandlersEvent(base_value=base_value)) + await event + await bus.wait_until_idle() + except Exception as exc: + error = f'{type(exc).__name__}: {exc}' + + total_ms = hooks.now() - t0 + cpu_t1 = hooks.get_cpu_time_ms() + await _wait_for_runtime_settle(hooks) + memory.sample() + + ms_per_event = total_ms / float(max(total_handlers, 1)) + peak_rss_kb_per_event = memory.peak_rss_kb_per_event(total_handlers) + throughput = 
int(round(total_events / max(total_ms / 1000.0, 1e-9))) + cpu_ms = max(0.0, cpu_t1 - cpu_t0) + cpu_ms_per_event = cpu_ms / float(max(total_handlers, 1)) + + ok = error is None and processed_count == total_handlers and checksum == expected_checksum + + result = _scenario_result( + scenario=scenario, + total_events=total_events, + total_ms=total_ms, + ms_per_event=ms_per_event, + ms_per_event_unit='handler', + peak_rss_kb_per_event=peak_rss_kb_per_event, + peak_rss_unit='handler', + throughput=throughput, + ok=ok, + error=error, + extra={ + 'processed_count': processed_count, + 'checksum': checksum, + 'expected_checksum': expected_checksum, + 'total_handlers': total_handlers, + 'cpu_ms': cpu_ms, + 'cpu_ms_per_event': cpu_ms_per_event, + }, + ) + + await _trim_bus_history_to_one_event(bus, PerfTrimFixedHandlersEvent) + await bus.stop(timeout=0, clear=True) + _record(hooks, result) + return result + + +async def run_perf_on_off_churn(input: PerfInput) -> dict[str, Any]: + hooks = input + scenario = '50k one-off handlers over 50k events' + total_events = int(50_000) + bus = EventBus( + name='PerfOnOffBus', + max_history_size=HISTORY_LIMIT_ON_OFF, + max_history_drop=True, + middlewares=[], + ) + + processed_count: int = 0 + checksum: int = 0 + expected_checksum: int = 0 + error: str | None = None + event_key = PerfRequestEvent.__name__ + + memory = MemoryTracker(hooks) + t0 = hooks.now() + cpu_t0 = hooks.get_cpu_time_ms() + + for i in range(total_events): + weight = (i % 13) + 1 + value = (i % 101) + 1 + expected_checksum += value + weight + + def one_off_handler(event: PerfRequestEvent) -> None: + nonlocal processed_count, checksum + processed_count += 1 + checksum += event.value + weight + + handler_entry = bus.on(PerfRequestEvent, one_off_handler) + + try: + ev = bus.emit(PerfRequestEvent(value=value)) + await ev + except Exception as exc: + error = f'{type(exc).__name__}: {exc}' + break + + bus.off(event_key, handler_entry.id) + + if i % 1000 == 0: + memory.sample() 
+ + await bus.wait_until_idle() + total_ms = hooks.now() - t0 + cpu_t1 = hooks.get_cpu_time_ms() + await _wait_for_runtime_settle(hooks) + memory.sample() + + ms_denominator = max(processed_count, 1) + ms_per_event = total_ms / float(ms_denominator) + peak_rss_kb_per_event = memory.peak_rss_kb_per_event(ms_denominator) + throughput = int(round(processed_count / max(total_ms / 1000.0, 1e-9))) + cpu_ms = max(0.0, cpu_t1 - cpu_t0) + cpu_ms_per_event = cpu_ms / float(ms_denominator) + + ok = ( + error is None + and processed_count == total_events + and checksum == expected_checksum + and len(bus.handlers_by_key.get(event_key, [])) == 0 + ) + + result = _scenario_result( + scenario=scenario, + total_events=processed_count, + total_ms=total_ms, + ms_per_event=ms_per_event, + ms_per_event_unit='event', + peak_rss_kb_per_event=peak_rss_kb_per_event, + throughput=throughput, + ok=ok, + error=error, + extra={ + 'attempted_events': total_events, + 'processed_count': processed_count, + 'checksum': checksum, + 'expected_checksum': expected_checksum, + 'cpu_ms': cpu_ms, + 'cpu_ms_per_event': cpu_ms_per_event, + }, + ) + + await _trim_bus_history_to_one_event(bus, PerfTrimOnOffEvent) + await bus.stop(timeout=0, clear=True) + _record(hooks, result) + return result + + +async def run_perf_worst_case(input: PerfInput) -> dict[str, Any]: + hooks = input + scenario = 'worst-case forwarding + timeouts' + total_iterations = int(500) + history_limit = HISTORY_LIMIT_WORST_CASE + bus_a = EventBus(name='PerfWorstCaseA', max_history_size=history_limit, max_history_drop=True, middlewares=[]) + bus_b = EventBus(name='PerfWorstCaseB', max_history_size=history_limit, max_history_drop=True, middlewares=[]) + bus_c = EventBus(name='PerfWorstCaseC', max_history_size=history_limit, max_history_drop=True, middlewares=[]) + + parent_handled_a: int = 0 + parent_handled_b: int = 0 + child_handled: int = 0 + grandchild_handled: int = 0 + timeout_count: int = 0 + cancel_count: int = 0 + checksum: int = 0 
+ error: str | None = None + + def parent_b_handler(event: WCParent) -> None: + nonlocal parent_handled_b, checksum + parent_handled_b += 1 + checksum += event.value + 3 + + async def child_handler(event: WCChild) -> None: + nonlocal child_handled, checksum + child_handled += 1 + checksum += (event.value * 2) + event.iteration + gc_event = event.event_bus.emit(WCGrandchild(iteration=event.iteration, value=event.value + 1)) + if event.event_timeout is not None: + await hooks.sleep(0) + await gc_event + + def grandchild_handler(event: WCGrandchild) -> None: + nonlocal grandchild_handled, checksum + grandchild_handled += 1 + checksum += (event.value * 3) + event.iteration + + bus_b.on(WCParent, parent_b_handler) + bus_c.on(WCChild, child_handler) + bus_c.on(WCGrandchild, grandchild_handler) + + memory = MemoryTracker(hooks) + t0 = hooks.now() + cpu_t0 = hooks.get_cpu_time_ms() + + try: + for iteration in range(total_iterations): + should_timeout = iteration % 5 == 0 + value = (iteration % 37) + 1 + + async def ephemeral_handler(event: WCParent) -> None: + nonlocal parent_handled_a, checksum + parent_handled_a += 1 + checksum += event.value + 11 + child = event.event_bus.emit( + WCChild( + iteration=event.iteration, + value=event.value, + # event_timeout is in seconds; mirror TS near-zero timeout value. 
+ event_timeout=WORST_CASE_IMMEDIATE_TIMEOUT_SECONDS if should_timeout else None, + ) + ) + bus_c.emit(child) + try: + await child + except Exception: + pass + + ephemeral_entry = bus_a.on(WCParent, ephemeral_handler) + parent = WCParent(iteration=iteration, value=value) + ev_a = bus_a.emit(parent) + bus_b.emit(parent) + await ev_a + bus_a.off(WCParent, ephemeral_entry.id) + + if iteration % 10 == 0: + await bus_a.find(WCParent, future=0.001) + if iteration % 5 == 0: + memory.sample() + + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + await bus_c.wait_until_idle() + except Exception as exc: + error = f'{type(exc).__name__}: {exc}' + + for event in bus_c.event_history.values(): + for event_result in event.event_results.values(): + if isinstance(event_result.error, TimeoutError): + timeout_count += 1 + if isinstance(event_result.error, asyncio.CancelledError): + cancel_count += 1 + + total_ms = hooks.now() - t0 + cpu_t1 = hooks.get_cpu_time_ms() + estimated_events = total_iterations * 3 + ms_per_event = total_ms / float(max(estimated_events, 1)) + peak_rss_kb_per_event = memory.peak_rss_kb_per_event(max(estimated_events, 1)) + cpu_ms = max(0.0, cpu_t1 - cpu_t0) + cpu_ms_per_event = cpu_ms / float(max(estimated_events, 1)) + + ok = ( + error is None + and parent_handled_a == total_iterations + and parent_handled_b == total_iterations + and len(bus_a.handlers_by_key.get(WCParent.__name__, [])) == 0 + ) + + result = _scenario_result( + scenario=scenario, + total_events=estimated_events, + total_ms=total_ms, + ms_per_event=ms_per_event, + ms_per_event_unit='event', + peak_rss_kb_per_event=peak_rss_kb_per_event, + throughput=int(round(estimated_events / max(total_ms / 1000.0, 1e-9))), + ok=ok, + error=error, + extra={ + 'parent_handled_a': parent_handled_a, + 'parent_handled_b': parent_handled_b, + 'child_handled': child_handled, + 'grandchild_handled': grandchild_handled, + 'timeout_count': timeout_count, + 'cancel_count': cancel_count, + 'checksum': 
checksum, + 'cpu_ms': cpu_ms, + 'cpu_ms_per_event': cpu_ms_per_event, + }, + ) + + await _trim_bus_history_to_one_event(bus_a, WCTrimEvent) + await _trim_bus_history_to_one_event(bus_b, WCTrimEvent) + await _trim_bus_history_to_one_event(bus_c, WCTrimEvent) + await _wait_for_runtime_settle(hooks) + + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + await bus_c.stop(timeout=0, clear=True) + + _record(hooks, result) + return result + + +PERF_SCENARIO_RUNNERS: dict[str, Callable[[PerfInput], Any]] = { + '50k-events': run_perf_50k_events, + '500-buses-x-100-events': run_perf_ephemeral_buses, + '1-event-x-50k-parallel-handlers': run_perf_single_event_many_fixed_handlers, + '50k-one-off-handlers': run_perf_on_off_churn, + 'worst-case-forwarding-timeouts': run_perf_worst_case, +} + +PERF_SCENARIO_IDS = tuple(PERF_SCENARIO_RUNNERS.keys()) + + +async def run_perf_scenario_by_id(input: PerfInput, scenario_id: str) -> dict[str, Any]: + scenario = PERF_SCENARIO_RUNNERS.get(scenario_id) + if scenario is None: + raise ValueError(f'unknown perf scenario {scenario_id!r}, expected one of: {", ".join(PERF_SCENARIO_IDS)}') + + try: + result = await scenario(input) + except Exception as exc: + result = { + 'scenario': scenario_id, + 'ok': False, + 'error': f'{type(exc).__name__}: {exc}', + 'total_events': 0, + 'total_ms': 0.0, + 'ms_per_event': 0.0, + 'ms_per_event_unit': 'event', + 'throughput': 0, + 'peak_rss_kb_per_event': None, + 'peak_rss_kb_per_event_label': None, + } + + heap_delta_after_gc_mb = await _measure_heap_delta_after_gc(input) + if heap_delta_after_gc_mb is not None: + result['heap_delta_after_gc_mb'] = round(heap_delta_after_gc_mb, 3) + input.log(f'[{input.runtime_name}] {result["scenario"]}: heap_delta_after_gc={result["heap_delta_after_gc_mb"]:.3f}mb') + + return result + + +async def run_all_perf_scenarios(input: PerfInput) -> list[dict[str, Any]]: + results: list[dict[str, Any]] = [] + for scenario_id in PERF_SCENARIO_IDS: + 
results.append(await run_perf_scenario_by_id(input, scenario_id)) + return results + + +async def _measure_heap_delta_after_gc(input: PerfInput) -> float | None: + process = psutil.Process(os.getpid()) + before = float(process.memory_info().rss) + + for _ in range(4): + input.force_gc() + await input.sleep(15) + + after = float(process.memory_info().rss) + delta_mb = max(0.0, (after - before) / (1024.0 * 1024.0)) + if math.isnan(delta_mb): + return None + return delta_mb diff --git a/tests/subtests/test_eventbus_cross_runtime_features.py b/tests/subtests/test_eventbus_cross_runtime_features.py new file mode 100644 index 0000000..b68c5ce --- /dev/null +++ b/tests/subtests/test_eventbus_cross_runtime_features.py @@ -0,0 +1,378 @@ +# pyright: basic +"""Cross-feature parity guarantees shared with TypeScript runtime tests.""" + +from __future__ import annotations + +import asyncio +from contextvars import ContextVar + +import pytest + +from bubus import BaseEvent, EventBus +from bubus.event_handler import EventHandlerAbortedError, EventHandlerCancelledError, EventHandlerTimeoutError + +request_id_var: ContextVar[str] = ContextVar('request_id', default='') + + +class QueueJumpRootEvent(BaseEvent[str]): + pass + + +class QueueJumpChildEvent(BaseEvent[str]): + pass + + +class QueueJumpSiblingEvent(BaseEvent[str]): + pass + + +class ConcurrencyIntersectionEvent(BaseEvent[str]): + token: int + + +class TimeoutEnforcementEvent(BaseEvent[str]): + event_timeout: float | None = 0.02 + + +class TimeoutFollowupEvent(BaseEvent[str]): + pass + + +class ZeroHistoryEvent(BaseEvent[str]): + value: str + + +class ContextParentEvent(BaseEvent[str]): + pass + + +class ContextChildEvent(BaseEvent[str]): + pass + + +class PendingVisibilityEvent(BaseEvent[str]): + tag: str + + +class BackpressureEvent(BaseEvent[str]): + value: str + + +async def test_queue_jump_preserves_parent_child_lineage_and_find_visibility() -> None: + bus = EventBus( + name='ParityQueueJumpBus', + 
event_concurrency='bus-serial', + event_handler_concurrency='serial', + ) + execution_order: list[str] = [] + child_event_id: str | None = None + + async def on_root(event: QueueJumpRootEvent) -> str: + execution_order.append('root:start') + child = event.event_bus.emit(QueueJumpChildEvent()) + await child + execution_order.append('root:end') + return 'root-ok' + + async def on_child(event: QueueJumpChildEvent) -> str: + nonlocal child_event_id + child_event_id = event.event_id + execution_order.append('child') + await asyncio.sleep(0.005) + return 'child-ok' + + async def on_sibling(_event: QueueJumpSiblingEvent) -> str: + execution_order.append('sibling') + return 'sibling-ok' + + bus.on(QueueJumpRootEvent, on_root) + bus.on(QueueJumpChildEvent, on_child) + bus.on(QueueJumpSiblingEvent, on_sibling) + + try: + root = bus.emit(QueueJumpRootEvent()) + sibling = bus.emit(QueueJumpSiblingEvent()) + await root + await sibling + await bus.wait_until_idle() + + assert execution_order == ['root:start', 'child', 'root:end', 'sibling'] + + found_child = await bus.find(QueueJumpChildEvent, child_of=root, past=True, future=False) + assert found_child is not None + assert child_event_id is not None + assert found_child.event_id == child_event_id + assert found_child.event_parent_id == root.event_id + root_result = next(result for result in root.event_results.values() if result.handler_name.endswith('on_root')) + assert any(child.event_id == found_child.event_id for child in root_result.event_children) + finally: + await bus.stop() + + +async def test_concurrency_intersection_parallel_events_with_serial_handlers() -> None: + bus = EventBus( + name='ParityConcurrencyIntersectionBus', + event_concurrency='parallel', + event_handler_concurrency='serial', + max_history_size=None, + ) + + current_by_event: dict[str, int] = {} + max_by_event: dict[str, int] = {} + global_current = 0 + global_max = 0 + counter_lock = asyncio.Lock() + + async def tracked_handler(event: 
ConcurrencyIntersectionEvent) -> str: + nonlocal global_current, global_max + async with counter_lock: + current = current_by_event.get(event.event_id, 0) + 1 + current_by_event[event.event_id] = current + max_by_event[event.event_id] = max(max_by_event.get(event.event_id, 0), current) + + global_current += 1 + global_max = max(global_max, global_current) + + await asyncio.sleep(0.01) + + async with counter_lock: + current_by_event[event.event_id] = max(0, current_by_event.get(event.event_id, 1) - 1) + global_current -= 1 + + return f'ok-{event.token}' + + async def tracked_handler_a(event: ConcurrencyIntersectionEvent) -> str: + return await tracked_handler(event) + + async def tracked_handler_b(event: ConcurrencyIntersectionEvent) -> str: + return await tracked_handler(event) + + bus.on(ConcurrencyIntersectionEvent, tracked_handler_a) + bus.on(ConcurrencyIntersectionEvent, tracked_handler_b) + + try: + events = [bus.emit(ConcurrencyIntersectionEvent(token=index)) for index in range(8)] + await asyncio.gather(*events) + await bus.wait_until_idle() + + for event in events: + assert max_by_event.get(event.event_id) == 1 + assert all(result.status == 'completed' for result in event.event_results.values()) + + assert global_max >= 2 + finally: + await bus.stop() + + +async def test_timeout_enforcement_does_not_break_followup_processing_or_queue_state() -> None: + bus = EventBus(name='ParityTimeoutEnforcementBus', event_handler_concurrency='parallel') + + async def slow_handler_a(_event: TimeoutEnforcementEvent) -> str: + await asyncio.sleep(0.2) + return 'slow-a' + + async def slow_handler_b(_event: TimeoutEnforcementEvent) -> str: + await asyncio.sleep(0.2) + return 'slow-b' + + async def followup_handler(_event: TimeoutFollowupEvent) -> str: + return 'followup-ok' + + bus.on(TimeoutEnforcementEvent, slow_handler_a) + bus.on(TimeoutEnforcementEvent, slow_handler_b) + bus.on(TimeoutFollowupEvent, followup_handler) + + try: + timed_out = await 
bus.emit(TimeoutEnforcementEvent()) + assert timed_out.event_status == 'completed' + assert timed_out.event_results + assert all(result.status == 'error' for result in timed_out.event_results.values()) + assert all( + isinstance(result.error, (EventHandlerAbortedError, EventHandlerTimeoutError, EventHandlerCancelledError)) + for result in timed_out.event_results.values() + ) + + followup = await bus.emit(TimeoutFollowupEvent()) + followup_result = await followup.event_result(raise_if_any=False, raise_if_none=False) + assert followup_result == 'followup-ok' + assert all(result.status == 'completed' for result in followup.event_results.values()) + + await bus.wait_until_idle() + assert bus.pending_event_queue is not None + assert bus.pending_event_queue.qsize() == 0 + assert not bus.in_flight_event_ids + finally: + await bus.stop() + + +async def test_zero_history_backpressure_with_find_future_still_resolves_new_events() -> None: + bus = EventBus(name='ParityZeroHistoryBus', max_history_size=0, max_history_drop=False) + + async def handler(event: ZeroHistoryEvent) -> str: + return f'ok:{event.value}' + + bus.on(ZeroHistoryEvent, handler) + + try: + first = await bus.emit(ZeroHistoryEvent(value='first')) + assert first.event_id not in bus.event_history + + past = await bus.find(ZeroHistoryEvent, past=True, future=False) + assert past is None + + captured_future_id: str | None = None + + async def dispatch_later() -> None: + nonlocal captured_future_id + await asyncio.sleep(0.02) + future_event = bus.emit(ZeroHistoryEvent(value='future')) + captured_future_id = future_event.event_id + + future_task = asyncio.create_task(dispatch_later()) + future_match = await bus.find(ZeroHistoryEvent, where=lambda event: event.value == 'future', past=False, future=1.0) + await future_task + + assert future_match is not None + assert future_match.value == 'future' + assert captured_future_id is not None + assert future_match.event_id == captured_future_id + + await 
bus.wait_until_idle() + assert len(bus.event_history) == 0 + finally: + await bus.stop() + + +async def test_context_propagates_through_forwarding_and_child_dispatch_with_lineage_intact() -> None: + bus_a = EventBus(name='ParityContextForwardA') + bus_b = EventBus(name='ParityContextForwardB') + + captured_parent_request_id: str | None = None + captured_child_request_id: str | None = None + parent_event_id: str | None = None + child_parent_id: str | None = None + + async def on_parent(event: ContextParentEvent) -> str: + nonlocal captured_parent_request_id, parent_event_id + captured_parent_request_id = request_id_var.get() + parent_event_id = event.event_id + + child = event.event_bus.emit(ContextChildEvent()) + await child + return 'parent-ok' + + async def on_child(event: ContextChildEvent) -> str: + nonlocal captured_child_request_id, child_parent_id + captured_child_request_id = request_id_var.get() + child_parent_id = event.event_parent_id + return 'child-ok' + + bus_a.on('*', bus_b.emit) + bus_b.on(ContextParentEvent, on_parent) + bus_b.on(ContextChildEvent, on_child) + + request_id = 'fc81f432-98cd-7a06-824c-dafed74761bb' + token = request_id_var.set(request_id) + try: + parent = await bus_a.emit(ContextParentEvent()) + await bus_b.wait_until_idle() + + assert captured_parent_request_id == request_id + assert captured_child_request_id == request_id + assert parent_event_id is not None + assert child_parent_id == parent_event_id + assert parent.event_path[0].startswith('ParityContextForwardA#') + assert any(path.startswith('ParityContextForwardB#') for path in parent.event_path) + + found_child = await bus_b.find(ContextChildEvent, child_of=parent, past=True, future=False) + assert found_child is not None + assert found_child.event_parent_id == parent.event_id + finally: + request_id_var.reset(token) + await bus_a.stop() + await bus_b.stop() + + +async def test_pending_queue_find_visibility_transitions_to_completed_after_release() -> None: + bus = EventBus( 
+ name='ParityPendingFindBus', + event_concurrency='bus-serial', + event_handler_concurrency='serial', + max_history_size=None, + ) + started = asyncio.Event() + release = asyncio.Event() + + async def handler(event: PendingVisibilityEvent) -> str: + if event.tag == 'blocking': + started.set() + await release.wait() + return f'ok:{event.tag}' + + bus.on(PendingVisibilityEvent, handler) + + try: + blocking = bus.emit(PendingVisibilityEvent(tag='blocking')) + await started.wait() + + queued = bus.emit(PendingVisibilityEvent(tag='queued')) + await asyncio.sleep(0.01) + + pending = await bus.find( + PendingVisibilityEvent, + where=lambda event: event.tag == 'queued', + past=True, + future=False, + event_status='pending', + ) + assert pending is not None + assert pending.event_id == queued.event_id + + release.set() + await blocking + await queued + await bus.wait_until_idle() + + completed = await bus.find( + PendingVisibilityEvent, + where=lambda event: event.tag == 'queued', + past=True, + future=False, + event_status='completed', + ) + assert completed is not None + assert completed.event_id == queued.event_id + assert bus.pending_event_queue is not None + assert bus.pending_event_queue.qsize() == 0 + assert not bus.in_flight_event_ids + finally: + await bus.stop() + + +async def test_history_backpressure_rejects_overflow_and_preserves_findable_history() -> None: + bus = EventBus(name='ParityBackpressureBus', max_history_size=1, max_history_drop=False) + + async def handler(event: BackpressureEvent) -> str: + return f'ok:{event.value}' + + bus.on(BackpressureEvent, handler) + + try: + first = await bus.emit(BackpressureEvent(value='first')) + assert len(bus.event_history) == 1 + assert first.event_id in bus.event_history + + found_first = await bus.find(BackpressureEvent, where=lambda event: event.value == 'first', past=True, future=False) + assert found_first is not None + assert found_first.event_id == first.event_id + + with pytest.raises(RuntimeError): + _ = 
bus.emit(BackpressureEvent(value='second')) + + assert len(bus.event_history) == 1 + assert first.event_id in bus.event_history + assert bus.pending_event_queue is not None + assert bus.pending_event_queue.qsize() == 0 + assert not bus.in_flight_event_ids + finally: + await bus.stop() diff --git a/tests/test_attribute_error_fix.py b/tests/test_attribute_error_fix.py deleted file mode 100644 index b74c237..0000000 --- a/tests/test_attribute_error_fix.py +++ /dev/null @@ -1,126 +0,0 @@ -"""Test that the AttributeError bug related to 'event_processed_at' is fixed""" - -import asyncio -from datetime import UTC, datetime -from typing import Any - -from bubus import BaseEvent, EventBus - - -class SampleEvent(BaseEvent[str]): - event_result_type: Any = str - - data: str = 'test' - - -def test_event_started_at_with_deserialized_event(): - """Test that event_started_at works even with events created through deserialization""" - # Create an event and convert to dict (simulating serialization) - event = SampleEvent(data='original') - event_dict = event.model_dump() - - # Create a new event from the dict (simulating deserialization) - deserialized_event = SampleEvent.model_validate(event_dict) - - # This should not raise AttributeError - assert deserialized_event.event_started_at is None - assert deserialized_event.event_completed_at is None - - -def test_event_started_at_with_json_deserialization(): - """Test that event_started_at works with JSON deserialization""" - # Create an event and convert to JSON - event = SampleEvent(data='json_test') - json_str = event.model_dump_json() - - # Create a new event from JSON - deserialized_event = SampleEvent.model_validate_json(json_str) - - # This should not raise AttributeError - assert deserialized_event.event_started_at is None - assert deserialized_event.event_completed_at is None - - -async def test_event_started_at_after_processing(): - """Test that event_started_at works correctly after event processing""" - bus = 
EventBus(name='TestBus') - - # Handler that does nothing - async def test_handler(event: SampleEvent) -> str: - await asyncio.sleep(0.01) - return 'done' - - bus.on('SampleEvent', test_handler) - - # Dispatch event - event = await bus.dispatch(SampleEvent(data='processing_test')) - - # Check timestamps - should not raise AttributeError - assert event.event_started_at is not None - assert event.event_completed_at is not None - assert isinstance(event.event_started_at, datetime) - assert isinstance(event.event_completed_at, datetime) - - await bus.stop() - - -async def test_event_without_handlers(): - """Test that events without handlers still work with timestamp properties""" - event = SampleEvent(data='no_handlers') - - # Should not raise AttributeError when accessing these properties - assert event.event_started_at is None # No handlers started - assert event.event_completed_at is None # Not complete yet - - # Initialize the completion signal (normally done when dispatched) - _ = event.event_completed_signal - - # Mark as processed manually (simulating what happens in event_mark_complete_if_all_handlers_completed) - event.event_mark_complete_if_all_handlers_completed() - - # After marking complete, it should be set - # When no handlers but event is processed, event_started_at returns event_processed_at - assert event.event_started_at is not None # Uses event_processed_at - assert event.event_completed_at is not None # Now it's complete - - -def test_event_with_manually_set_processed_at(): - """Test events where event_processed_at is manually set (like in test_log_history_tree.py)""" - event = SampleEvent(data='manual') - - # Initialize the completion signal - _ = event.event_completed_signal - - # Manually set the processed timestamp (as done in tests) - if hasattr(event, 'event_processed_at'): - event.event_processed_at = datetime.now(UTC) - - # Should not raise AttributeError - assert event.event_started_at is not None # Should use event_processed_at - # Note: 
Since we set event_processed_at and there are no handlers, event_completed_at will also return event_processed_at - assert event.event_completed_at is not None - - # Add a handler result to make it incomplete - event.event_result_update(handler=lambda e: None, status='started') - assert event.event_completed_at is None # Now it's not complete - - # Complete the handler - list(event.event_results.values())[0].update(status='completed', result='done') - event.event_mark_complete_if_all_handlers_completed() - assert event.event_completed_at is not None - - -def test_event_copy_preserves_private_attrs(): - """Test that copying events preserves private attributes""" - event = SampleEvent(data='copy_test') - - # Access properties to ensure private attrs are initialized - _ = event.event_started_at - _ = event.event_completed_at - - # Create a copy using model_copy - copied_event = event.model_copy() - - # Should not raise AttributeError - assert copied_event.event_started_at is None - assert copied_event.event_completed_at is None diff --git a/tests/test_auto_event_result_type.py b/tests/test_auto_event_result_type.py deleted file mode 100644 index 2fed419..0000000 --- a/tests/test_auto_event_result_type.py +++ /dev/null @@ -1,345 +0,0 @@ -"""Test automatic event_result_type extraction from Generic type parameters.""" - -from typing import Any - -import pytest -from pydantic import BaseModel, TypeAdapter, ValidationError - -from bubus.models import BaseEvent, _extract_basemodel_generic_arg # type: ignore - - -class UserData(BaseModel): - name: str - age: int - - -class TaskResult(BaseModel): - task_id: str - status: str - - -class ModuleLevelResult(BaseModel): - """Module-level result type for testing auto-detection.""" - - result_id: str - data: dict[str, Any] - success: bool - - -class NestedModuleResult(BaseModel): - """Another module-level type for testing complex generics.""" - - items: list[str] - metadata: dict[str, int] - - -class EmailMessage(BaseModel): - 
"""Module-level type for testing _extract_basemodel_generic_arg.""" - - subject: str - body: str - recipients: list[str] - - -def test_builtin_types_auto_extraction(): - """Test that built-in types are automatically extracted from Generic parameters.""" - - class StringEvent(BaseEvent[str]): - message: str = 'Hello' - - class IntEvent(BaseEvent[int]): - number: int = 42 - - class FloatEvent(BaseEvent[float]): - value: float = 3.14 - - string_event = StringEvent() - int_event = IntEvent() - float_event = FloatEvent() - - assert string_event.event_result_type is str - assert int_event.event_result_type is int - assert float_event.event_result_type is float - - -def test_custom_pydantic_models_auto_extraction(): - """Test that custom Pydantic models are automatically extracted.""" - - class UserEvent(BaseEvent[UserData]): - user_id: str = 'user123' - event_result_type: Any = UserData # Set manually for local test scope - - class TaskEvent(BaseEvent[TaskResult]): - batch_id: str = 'batch456' - event_result_type: Any = TaskResult # Set manually for local test scope - - user_event = UserEvent() - task_event = TaskEvent() - - assert user_event.event_result_type is UserData - assert task_event.event_result_type is TaskResult - - -def test_complex_generic_types_auto_extraction(): - """Test that complex generic types are automatically extracted.""" - - class ListEvent(BaseEvent[list[str]]): - pass - - class DictEvent(BaseEvent[dict[str, int]]): - pass - - class SetEvent(BaseEvent[set[int]]): - pass - - list_event = ListEvent() - dict_event = DictEvent() - set_event = SetEvent() - - assert list_event.event_result_type == list[str] - assert dict_event.event_result_type == dict[str, int] - assert set_event.event_result_type == set[int] - - -def test_complex_generic_with_custom_types(): - """Test complex generics containing custom types.""" - - class TaskListEvent(BaseEvent[list[TaskResult]]): - batch_id: str = 'batch456' - event_result_type: Any = list[TaskResult] # Set 
manually for local test scope - - task_list_event = TaskListEvent() - - assert task_list_event.event_result_type == list[TaskResult] - - -def test_explicit_override_still_works(): - """Test that explicit event_result_type overrides still work (backwards compatibility).""" - - class OverrideEvent(BaseEvent[str]): - event_result_type: Any = int # Override to int instead of str - - override_event = OverrideEvent() - - # Should use the explicit override, not the auto-extracted str - assert override_event.event_result_type is int - - -def test_no_generic_parameter(): - """Test that events without generic parameters don't get auto-set types.""" - - class PlainEvent(BaseEvent): - message: str = 'plain' - - plain_event = PlainEvent() - - # Should remain None since no generic parameter was provided - assert plain_event.event_result_type is None - - -def test_none_generic_parameter(): - """Test that BaseEvent[None] results in None type.""" - - class NoneEvent(BaseEvent[None]): - message: str = 'none' - - none_event = NoneEvent() - - # Should be set to None - assert none_event.event_result_type is None - - -def test_nested_inheritance(): - """Test that generic type extraction works with nested inheritance.""" - - class BaseUserEvent(BaseEvent[UserData]): - event_result_type: Any = UserData # Set manually for local test scope - - class SpecificUserEvent(BaseUserEvent): - specific_field: str = 'specific' - - specific_event = SpecificUserEvent() - - # Should inherit the generic type from parent - assert specific_event.event_result_type is UserData - - -def test_module_level_types_auto_extraction(): - """Test that module-level types are automatically detected without manual override.""" - - class ModuleEvent(BaseEvent[ModuleLevelResult]): - operation: str = 'test_op' - # No manual event_result_type needed - should be auto-detected - - class NestedModuleEvent(BaseEvent[NestedModuleResult]): - batch_id: str = 'batch123' - # No manual event_result_type needed - should be 
auto-detected - - module_event = ModuleEvent() - nested_event = NestedModuleEvent() - - # Should auto-detect the module-level types - assert module_event.event_result_type is ModuleLevelResult - assert nested_event.event_result_type is NestedModuleResult - - -def test_complex_module_level_generics(): - """Test complex generics with module-level types are auto-detected.""" - - class ListModuleEvent(BaseEvent[list[ModuleLevelResult]]): - batch_size: int = 10 - # No manual override - should auto-detect list[ModuleLevelResult] - - class DictModuleEvent(BaseEvent[dict[str, NestedModuleResult]]): - mapping_type: str = 'result_map' - # No manual override - should auto-detect dict[str, NestedModuleResult] - - list_event = ListModuleEvent() - dict_event = DictModuleEvent() - - # Should auto-detect complex generics with module-level types - assert list_event.event_result_type == list[ModuleLevelResult] - assert dict_event.event_result_type == dict[str, NestedModuleResult] - - -async def test_module_level_runtime_enforcement(): - """Test that module-level auto-detected types are enforced at runtime.""" - from bubus import EventBus - - class RuntimeEvent(BaseEvent[ModuleLevelResult]): - operation: str = 'runtime_test' - # Auto-detected type should be enforced - - # Verify auto-detection worked - test_event = RuntimeEvent() - assert test_event.event_result_type is ModuleLevelResult, f'Auto-detection failed: got {test_event.event_result_type}' - - bus = EventBus(name='runtime_test_bus') - - def correct_handler(event: RuntimeEvent): - # Return dict that matches ModuleLevelResult schema - return {'result_id': 'test123', 'data': {'key': 'value'}, 'success': True} - - def incorrect_handler(event: RuntimeEvent): - # Return something that doesn't match ModuleLevelResult - return {'wrong': 'format'} - - # Test correct handler - bus.on('RuntimeEvent', correct_handler) - - event1 = RuntimeEvent() - await bus.dispatch(event1) - result1 = await event1.event_result() - - # Should be cast to 
ModuleLevelResult - assert isinstance(result1, ModuleLevelResult) - assert result1.result_id == 'test123' - assert result1.data == {'key': 'value'} - assert result1.success is True - - # Test incorrect handler - bus.handlers.clear() # Clear previous handler - bus.on('RuntimeEvent', incorrect_handler) - - event2 = RuntimeEvent() - await bus.dispatch(event2) - - # Should get an error due to validation failure - handler_id = list(event2.event_results.keys())[0] - event_result = event2.event_results[handler_id] - - assert event_result.status == 'error' - assert isinstance(event_result.error, Exception) - - await bus.stop(clear=True) - - -def test_extract_basemodel_generic_arg_basic(): - """Test _extract_basemodel_generic_arg with basic types.""" - - # Test BaseEvent[int] - class IntResultEvent(BaseEvent[int]): - pass - - result = _extract_basemodel_generic_arg(IntResultEvent) - assert result is int - - -def test_extract_basemodel_generic_arg_dict(): - """Test _extract_basemodel_generic_arg with dict types.""" - - # Test BaseEvent[dict[str, int]] - class DictIntEvent(BaseEvent[dict[str, int]]): - pass - - result = _extract_basemodel_generic_arg(DictIntEvent) - assert result == dict[str, int] - - -def test_extract_basemodel_generic_arg_dict_with_module_type(): - """Test _extract_basemodel_generic_arg with dict containing module-level type.""" - - # Test BaseEvent[dict[str, EmailMessage]] - class DictEmailEvent(BaseEvent[dict[str, EmailMessage]]): - pass - - result = _extract_basemodel_generic_arg(DictEmailEvent) - assert result == dict[str, EmailMessage] - - -def test_extract_basemodel_generic_arg_dict_with_local_type(): - """Test _extract_basemodel_generic_arg with dict containing locally defined type.""" - - # Define local type - class EmailAttachment(BaseModel): - filename: str - content: bytes - mime_type: str - - # Test BaseEvent[dict[str, EmailAttachment]] - class DictAttachmentEvent(BaseEvent[dict[str, EmailAttachment]]): - pass - - result = 
_extract_basemodel_generic_arg(DictAttachmentEvent) - assert result == dict[str, EmailAttachment] - - -def test_extract_basemodel_generic_arg_no_generic(): - """Test _extract_basemodel_generic_arg with BaseEvent (no generic parameter).""" - - # Test BaseEvent without generic parameter - class PlainEvent(BaseEvent): - pass - - result = _extract_basemodel_generic_arg(PlainEvent) - assert result is None - - -def test_type_adapter_validation(): - """Test that TypeAdapter can validate extracted types properly.""" - - # Test dict[str, int] validation - class DictIntEvent(BaseEvent[dict[str, int]]): - pass - - extracted_type = _extract_basemodel_generic_arg(DictIntEvent) - adapter = TypeAdapter(extracted_type) - - # Valid data should work - valid_data = {'abc': 123, 'def': 456} - result = adapter.validate_python(valid_data) - assert result == valid_data - - # Invalid data should raise ValidationError - invalid_data = {'abc': 'badvalue'} - with pytest.raises(ValidationError) as exc_info: - adapter.validate_python(invalid_data) - - # Check that the error is about the wrong type - errors = exc_info.value.errors() - assert len(errors) > 0 - assert any('int' in str(error) for error in errors) - - -if __name__ == '__main__': - pytest.main([__file__, '-v', '-s']) diff --git a/tests/test_base_event.py b/tests/test_base_event.py new file mode 100644 index 0000000..bc051bb --- /dev/null +++ b/tests/test_base_event.py @@ -0,0 +1,552 @@ +import asyncio +import gc + +import pytest +from pydantic import ValidationError + +from bubus import BaseEvent, EventBus + + +@pytest.fixture(autouse=True) +async def cleanup_eventbus_instances(): + """Ensure EventBus instances are cleaned up between tests""" + yield + # Force garbage collection to clean up any lingering EventBus instances + gc.collect() + # Give event loops time to clean up + await asyncio.sleep(0.01) + + +class MainEvent(BaseEvent[None]): + message: str = 'test' + + +class ChildEvent(BaseEvent[None]): + data: str = 'child' + + 
+class GrandchildEvent(BaseEvent[None]): + info: str = 'grandchild' + + +async def test_event_bus_aliases_bus_property(): + bus = EventBus(name='AliasBus') + seen_bus = None + seen_event_bus = None + + async def handler(event: MainEvent): + nonlocal seen_bus, seen_event_bus + seen_bus = event.bus + seen_event_bus = event.event_bus + + bus.on(MainEvent, handler) + await bus.emit(MainEvent()) + assert seen_bus is bus + assert seen_event_bus is bus + assert seen_bus is seen_event_bus + await bus.stop() + + +async def test_await_event_queue_jumps_inside_handler(): + class ParentEvent(BaseEvent[None]): + pass + + class ChildEvent(BaseEvent[None]): + pass + + class SiblingEvent(BaseEvent[None]): + pass + + bus = EventBus( + name='QueueJumpAwaitEventBus', + event_concurrency='bus-serial', + event_handler_concurrency='serial', + ) + order: list[str] = [] + + async def on_parent(event: ParentEvent) -> None: + order.append('parent_start') + event.bus.emit(SiblingEvent()) + child = event.bus.emit(ChildEvent()) + await child + order.append('parent_end') + + async def on_child(_: ChildEvent) -> None: + order.append('child') + + async def on_sibling(_: SiblingEvent) -> None: + order.append('sibling') + + bus.on(ParentEvent, on_parent) + bus.on(ChildEvent, on_child) + bus.on(SiblingEvent, on_sibling) + + await bus.emit(ParentEvent()) + await bus.wait_until_idle() + assert order == ['parent_start', 'child', 'parent_end', 'sibling'] + await bus.stop() + + +async def test_event_completed_waits_in_queue_order_inside_handler(): + class ParentEvent(BaseEvent[None]): + pass + + class ChildEvent(BaseEvent[None]): + pass + + class SiblingEvent(BaseEvent[None]): + pass + + bus = EventBus( + name='QueueOrderEventCompletedBus', + event_concurrency='parallel', + event_handler_concurrency='parallel', + ) + order: list[str] = [] + + async def on_parent(event: ParentEvent) -> None: + order.append('parent_start') + event.bus.emit(SiblingEvent()) + child = event.bus.emit(ChildEvent()) + await 
child.event_completed() + order.append('parent_end') + + async def on_child(_: ChildEvent) -> None: + order.append('child_start') + await asyncio.sleep(0.001) + order.append('child_end') + + async def on_sibling(_: SiblingEvent) -> None: + order.append('sibling_start') + await asyncio.sleep(0.001) + order.append('sibling_end') + + bus.on(ParentEvent, on_parent) + bus.on(ChildEvent, on_child) + bus.on(SiblingEvent, on_sibling) + + await bus.emit(ParentEvent()) + await bus.wait_until_idle() + assert order.index('sibling_start') < order.index('child_start') + assert order.index('child_end') < order.index('parent_end') + await bus.stop() + + +async def test_reserved_runtime_fields_are_rejected(): + with pytest.raises(ValidationError, match='Field "bus" is reserved'): + MainEvent.model_validate({'bus': 'payload_bus_field'}) + with pytest.raises(ValidationError, match='Field "first" is reserved'): + MainEvent.model_validate({'first': 'payload_first_field'}) + with pytest.raises(ValidationError, match='Field "toString" is reserved'): + MainEvent.model_validate({'toString': 'payload_to_string_field'}) + with pytest.raises(ValidationError, match='Field "toJSON" is reserved'): + MainEvent.model_validate({'toJSON': 'payload_to_json_field'}) + with pytest.raises(ValidationError, match='Field "fromJSON" is reserved'): + MainEvent.model_validate({'fromJSON': 'payload_from_json_field'}) + + +async def test_unknown_event_prefixed_field_rejected_in_payload(): + with pytest.raises(ValidationError, match='starts with "event_" but is not a recognized BaseEvent field'): + MainEvent.model_validate({'event_some_field_we_dont_recognize': 123}) + + +async def test_model_prefixed_field_rejected_in_payload(): + with pytest.raises(ValidationError, match='starts with "model_" and is reserved for Pydantic model internals'): + MainEvent.model_validate({'model_something_random': 123}) + + +async def test_builtin_event_prefixed_override_is_allowed(): + class 
AllowedTimeoutOverrideEvent(BaseEvent[None]): + event_timeout: float | None = 234234 + event_slow_timeout: float | None = 12 + + event = AllowedTimeoutOverrideEvent() + assert event.event_timeout == 234234 + assert event.event_slow_timeout == 12 + + +async def test_event_at_fields_are_recognized(): + event = MainEvent.model_validate( + { + 'event_created_at': '2025-01-02T03:04:05.678901234Z', + 'event_started_at': '2025-01-02T03:04:06.100000000Z', + 'event_completed_at': '2025-01-02T03:04:07.200000000Z', + 'event_slow_timeout': 1.5, + 'event_emitted_by_handler_id': '018f8e40-1234-7000-8000-000000000301', + 'event_pending_bus_count': 2, + } + ) + assert event.event_created_at == '2025-01-02T03:04:05.678901234Z' + assert event.event_started_at == '2025-01-02T03:04:06.100000000Z' + assert event.event_completed_at == '2025-01-02T03:04:07.200000000Z' + assert event.event_slow_timeout == 1.5 + assert event.event_emitted_by_handler_id == '018f8e40-1234-7000-8000-000000000301' + assert event.event_pending_bus_count == 2 + + +async def test_python_serialized_at_fields_are_strings(): + bus = EventBus(name='TsIntBus') + + class TsIntEvent(BaseEvent[str]): + value: str = 'ok' + + bus.on(TsIntEvent, lambda _event: 'done') + event = await bus.emit(TsIntEvent(value='hello')) + + payload = event.model_dump(mode='json') + assert isinstance(payload['event_created_at'], str) + assert isinstance(payload['event_started_at'], str) + assert isinstance(payload['event_completed_at'], str) + assert payload['event_created_at'].endswith('Z') + assert payload['event_started_at'].endswith('Z') + assert payload['event_completed_at'].endswith('Z') + + first_result = next(iter(event.event_results.values())) + result_payload = first_result.model_dump(mode='json') + assert isinstance(result_payload['handler_registered_at'], str) + assert result_payload['handler_registered_at'].endswith('Z') + + await bus.stop() + + +async def test_builtin_model_prefixed_override_is_allowed(): + class 
AllowedModelConfigOverrideEvent(BaseEvent[None]): + model_config = BaseEvent.model_config | {'title': 'AllowedModelConfigOverrideEvent'} + + event = AllowedModelConfigOverrideEvent() + assert event.event_type == 'AllowedModelConfigOverrideEvent' + + +async def test_event_bus_property_single_bus(): + """Test bus property with a single EventBus instance""" + bus = EventBus(name='TestBus') + + # Track if handler was called + handler_called = False + dispatched_child = None + + async def handler(event: MainEvent): + nonlocal handler_called, dispatched_child + handler_called = True + + # Should be able to access event_bus inside handler + assert event.bus == bus + assert event.bus.name == 'TestBus' + + # Should be able to dispatch child events using the property + dispatched_child = await event.bus.emit(ChildEvent()) + + bus.on(MainEvent, handler) + + # Dispatch event and wait for completion + await bus.emit(MainEvent()) + + assert handler_called + assert dispatched_child is not None + assert isinstance(dispatched_child, ChildEvent) + + await bus.stop() + + +async def test_event_bus_property_multiple_buses(): + """Test bus property with multiple EventBus instances""" + bus1 = EventBus(name='Bus1') + bus2 = EventBus(name='Bus2') + + handler1_called = False + handler2_called = False + + async def handler1(event: MainEvent): + nonlocal handler1_called + handler1_called = True + # Inside bus1 handler, event_bus should return bus1 + assert event.bus == bus1 + assert event.bus.name == 'Bus1' + + async def handler2(event: MainEvent): + nonlocal handler2_called + handler2_called = True + # Inside bus2 handler, event_bus should return bus2 + assert event.bus == bus2 + assert event.bus.name == 'Bus2' + + bus1.on(MainEvent, handler1) + bus2.on(MainEvent, handler2) + + # Dispatch to bus1 + await bus1.emit(MainEvent(message='bus1')) + assert handler1_called + + # Dispatch to bus2 + await bus2.emit(MainEvent(message='bus2')) + assert handler2_called + + await bus1.stop() + await 
bus2.stop() + + +async def test_event_bus_property_with_forwarding(): + """Test bus property with event forwarding between buses""" + bus1 = EventBus(name='Bus1') + bus2 = EventBus(name='Bus2') + + # Forward all events from bus1 to bus2 + bus1.on('*', bus2.emit) + + handler_bus = None + handler_complete = asyncio.Event() + + async def handler(event: MainEvent): + nonlocal handler_bus + # When forwarded, the event_bus should be the bus currently processing + handler_bus = event.bus + handler_complete.set() + + bus2.on(MainEvent, handler) + + # Dispatch to bus1, which forwards to bus2 + event = bus1.emit(MainEvent()) + + # Wait for handler to complete + await handler_complete.wait() + + # The handler in bus2 should see bus2 as the event_bus + assert handler_bus is not None + assert handler_bus.name == 'Bus2' + # Verify it's the same bus instance (they should be the same object) + assert handler_bus is bus2 + + # Also wait for the event to fully complete + await event + + await bus1.stop() + await bus2.stop() + + +async def test_event_bus_property_outside_handler(): + """Test that bus property raises error when accessed outside handler""" + bus = EventBus(name='TestBus') + + event = MainEvent() + + # Should raise error when accessed outside handler context + with pytest.raises(AttributeError, match='bus property can only be accessed from within an event handler'): + _ = event.bus + + # Even after dispatching, accessing outside handler should fail + dispatched_event = await bus.emit(event) + + with pytest.raises(AttributeError, match='bus property can only be accessed from within an event handler'): + _ = dispatched_event.bus + + await bus.stop() + + +async def test_event_bus_property_nested_handlers(): + """Test bus property in nested handler scenarios""" + bus = EventBus(name='MainBus') + + inner_bus_name = None + + async def outer_handler(event: MainEvent): + # Dispatch a child event from within handler + child = ChildEvent() + + async def inner_handler(child_event: 
ChildEvent): + nonlocal inner_bus_name + # Both parent and child should see the same bus + assert child_event.bus == event.bus + inner_bus_name = child_event.bus.name + + bus.on(ChildEvent, inner_handler) + await event.bus.emit(child) + + bus.on(MainEvent, outer_handler) + + await bus.emit(MainEvent()) + + assert inner_bus_name == 'MainBus' + + await bus.stop() + + +async def test_event_bus_property_no_active_bus(): + """Test bus property when EventBus has been garbage collected""" + # This is a tricky edge case - create and destroy a bus + + event = None + + async def create_and_dispatch(): + nonlocal event + bus = EventBus(name='TempBus') + + async def handler(e: MainEvent): + # Save the event for later + nonlocal event + event = e + + bus.on(MainEvent, handler) + await bus.emit(MainEvent()) + await bus.stop() + # Bus goes out of scope here and may be garbage collected + + await create_and_dispatch() + + # Force garbage collection + import gc + + gc.collect() + + # Event exists but bus might be gone + assert event is not None + + # Create a new handler context to test + new_bus = EventBus(name='NewBus') + + error_raised = False + + async def new_handler(e: MainEvent): + nonlocal error_raised + assert event is not None + try: + # The old event doesn't belong to this bus + _ = event.bus + except RuntimeError: + error_raised = True + + new_bus.on(MainEvent, new_handler) + await new_bus.emit(MainEvent()) + + # Should have raised an error since the original bus is gone + assert error_raised + + await new_bus.stop() + + +async def test_event_bus_property_child_dispatch(): + """Test bus property when dispatching child events from handlers""" + bus = EventBus(name='MainBus') + + # Track execution order and bus references + execution_order: list[str] = [] + child_event_ref = None + grandchild_event_ref = None + + async def parent_handler(event: MainEvent): + execution_order.append('parent_start') + + # Verify we can access event_bus + assert event.bus == bus + assert 
event.bus.name == 'MainBus' + + # Dispatch a child event using event.bus + nonlocal child_event_ref + child_event_ref = event.bus.emit(ChildEvent(data='from_parent')) + + # The child event should start processing immediately within our handler + # (due to the deadlock prevention in BaseEvent.__await__) + await child_event_ref + + execution_order.append('parent_end') + + async def child_handler(event: ChildEvent): + execution_order.append('child_start') + + # Child should see the same bus + assert event.bus == bus + assert event.bus.name == 'MainBus' + assert event.data == 'from_parent' + + # Dispatch a grandchild event + nonlocal grandchild_event_ref + grandchild_event_ref = event.bus.emit(GrandchildEvent(info='from_child')) + + # Wait for grandchild to complete + await grandchild_event_ref + + execution_order.append('child_end') + + async def grandchild_handler(event: GrandchildEvent): + execution_order.append('grandchild_start') + + # Grandchild should also see the same bus + assert event.bus == bus + assert event.bus.name == 'MainBus' + assert event.info == 'from_child' + + execution_order.append('grandchild_end') + + # Register handlers + bus.on(MainEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, grandchild_handler) + + # Dispatch the parent event + parent_event = await bus.emit(MainEvent(message='start')) + + # Verify execution order - child events should complete before parent + assert execution_order == ['parent_start', 'child_start', 'grandchild_start', 'grandchild_end', 'child_end', 'parent_end'] + + # Verify all events completed + assert parent_event.event_status == 'completed' + assert child_event_ref is not None + assert child_event_ref.event_status == 'completed' + assert grandchild_event_ref is not None + assert grandchild_event_ref.event_status == 'completed' + + # Verify parent-child relationships + assert child_event_ref.event_parent_id == parent_event.event_id + assert grandchild_event_ref.event_parent_id == 
child_event_ref.event_id + + await bus.stop() + + +async def test_event_bus_property_multi_bus_child_dispatch(): + """Test bus property when child events are dispatched across multiple buses""" + bus1 = EventBus(name='Bus1') + bus2 = EventBus(name='Bus2') + + # Forward all events from bus1 to bus2 + bus1.on('*', bus2.emit) + + child_dispatch_bus = None + child_handler_bus = None + handlers_complete = asyncio.Event() + + async def parent_handler(event: MainEvent): + # This handler runs in bus2 (due to forwarding) + assert event.bus == bus2 + + # Dispatch child using event.bus (should dispatch to bus2) + nonlocal child_dispatch_bus + child_dispatch_bus = event.bus + await event.bus.emit(ChildEvent(data='from_bus2_handler')) + + async def child_handler(event: ChildEvent): + # Child handler should see bus2 as well + nonlocal child_handler_bus + child_handler_bus = event.bus + assert event.data == 'from_bus2_handler' + handlers_complete.set() + + # Only register handlers on bus2 + bus2.on(MainEvent, parent_handler) + bus2.on(ChildEvent, child_handler) + + # Dispatch to bus1, which forwards to bus2 + parent_event = bus1.emit(MainEvent(message='start')) + + # Wait for handlers to complete + await asyncio.wait_for(handlers_complete.wait(), timeout=5.0) + + # Also await the parent event + await parent_event + + # Verify child was dispatched to bus2 + assert child_dispatch_bus is not None + assert child_handler_bus is not None + assert id(child_dispatch_bus) == id(bus2) + assert id(child_handler_bus) == id(bus2) + + await bus1.stop() + await bus2.stop() diff --git a/tests/test_base_event_runtime_state.py b/tests/test_base_event_runtime_state.py new file mode 100644 index 0000000..cd36b33 --- /dev/null +++ b/tests/test_base_event_runtime_state.py @@ -0,0 +1,200 @@ +"""Test that the AttributeError bug related to 'event_completed_at' is fixed""" + +import asyncio +from contextlib import suppress + +from bubus import BaseEvent, EventBus +from bubus.helpers import 
monotonic_datetime + + +class SampleEvent(BaseEvent[str]): + data: str = 'test' + + +def _noop_handler(_event: SampleEvent) -> None: + return + + +def test_event_started_at_with_deserialized_event(): + """Test that event_started_at works even with events created through deserialization""" + # Create an event and convert to dict (simulating serialization) + event = SampleEvent(data='original') + event_dict = event.model_dump() + + # Create a new event from the dict (simulating deserialization) + deserialized_event = SampleEvent.model_validate(event_dict) + + # This should not raise AttributeError + assert deserialized_event.event_started_at is None + assert deserialized_event.event_completed_at is None + + +def test_event_started_at_with_json_deserialization(): + """Test that event_started_at works with JSON deserialization""" + # Create an event and convert to JSON + event = SampleEvent(data='json_test') + json_str = event.model_dump_json() + + # Create a new event from JSON + deserialized_event = SampleEvent.model_validate_json(json_str) + + # This should not raise AttributeError + assert deserialized_event.event_started_at is None + assert deserialized_event.event_completed_at is None + + +async def test_event_started_at_after_processing(): + """Test that event_started_at works correctly after event processing""" + bus = EventBus(name='TestBus') + + # Handler that does nothing + async def test_handler(event: SampleEvent) -> str: + await asyncio.sleep(0.01) + return 'done' + + bus.on('SampleEvent', test_handler) + + # Dispatch event + event = await bus.emit(SampleEvent(data='processing_test')) + + # Check timestamps - should not raise AttributeError + assert event.event_started_at is not None + assert event.event_completed_at is not None + assert isinstance(event.event_started_at, str) + assert isinstance(event.event_completed_at, str) + + await bus.stop() + + +async def test_event_without_handlers(): + """Test that events without handlers still work with 
timestamp properties""" + event = SampleEvent(data='no_handlers') + bus = EventBus(name='TestBusNoHandlers') + + # Should not raise AttributeError when accessing these properties + assert event.event_started_at is None # No handlers started + assert event.event_completed_at is None # Not complete yet + + processed_event = await bus.emit(event) + await bus.stop() + + # After marking complete, it should be set + # When no handlers but event is completed, event_started_at returns event_completed_at + assert processed_event.event_started_at is not None # Uses event_completed_at + assert processed_event.event_completed_at is not None # Now it's complete + assert processed_event.event_status == 'completed' + assert processed_event.event_started_at == processed_event.event_completed_at + + +async def test_event_with_manually_set_completed_at(): + """Test events where event_completed_at is manually set (like in test_eventbus_log_tree.py)""" + event = SampleEvent(data='manual') + bus = EventBus(name='TestBusManualCompletedAt') + + # Initialize the completion signal + _ = event.event_completed_signal + + # Manually set the completed timestamp (as done in tests) + if hasattr(event, 'event_completed_at'): + event.event_completed_at = monotonic_datetime() + + # Stateful runtime fields are no longer derived from event_results/event_completed_at on read. + # Manually assigning event_completed_at alone does not mutate status/started_at. + assert event.event_started_at is None + assert event.event_status == 'pending' + assert event.event_completed_at is not None + + # Reconcile state through public lifecycle processing. + processed_event = await bus.emit(event) + assert processed_event.event_status == 'completed' + assert processed_event.event_started_at is not None + assert processed_event.event_completed_at is not None + + # Also exercise the "existing completed handler results" completion path. 
+ seeded_event = SampleEvent(data='manual-seeded-result') + seeded_result = seeded_event.event_result_update(handler=_noop_handler, status='started') + assert seeded_event.event_status == 'started' + assert seeded_event.event_completed_at is None + seeded_result.update(status='completed', result='done') + assert seeded_event.event_completed_at is None + + reconciled_seeded_event = await bus.emit(seeded_event) + await bus.stop() + assert reconciled_seeded_event.event_status == 'completed' + assert reconciled_seeded_event.event_started_at is not None + assert reconciled_seeded_event.event_completed_at is not None + + +def test_event_copy_preserves_private_attrs(): + """Test that copying events preserves private attributes""" + event = SampleEvent(data='copy_test') + + # Access properties to ensure private attrs are initialized + _ = event.event_started_at + _ = event.event_completed_at + + # Create a copy using model_copy + copied_event = event.model_copy() + + # Should not raise AttributeError + assert copied_event.event_started_at is None + assert copied_event.event_completed_at is None + + +def test_event_started_at_is_serialized_and_stateful(): + """event_started_at should be included in JSON dumps and remain stable once set.""" + event = SampleEvent(data='serialize-started-at') + + pending_payload = event.model_dump(mode='json') + assert 'event_started_at' in pending_payload + assert pending_payload['event_started_at'] is None + + event.event_result_update(handler=_noop_handler, status='started') + first_started_at = event.model_dump(mode='json')['event_started_at'] + assert isinstance(first_started_at, str) + + forced_started_at = '2020-01-01T00:00:00.000000000Z' + result = next(iter(event.event_results.values())) + result.started_at = forced_started_at + + second_started_at = event.model_dump(mode='json')['event_started_at'] + assert isinstance(second_started_at, str) + assert second_started_at == first_started_at + assert second_started_at != 
forced_started_at + + +async def test_event_status_is_serialized_and_stateful(): + """event_status should be included in JSON dumps and track lifecycle transitions via runtime updates.""" + event = SampleEvent(data='serialize-status') + + pending_payload = event.model_dump(mode='json') + assert pending_payload['event_status'] == 'pending' + + bus = EventBus(name='TestBusSerializeStatus') + handler_entered = asyncio.Event() + release_handler = asyncio.Event() + + async def slow_handler(_event: SampleEvent) -> str: + handler_entered.set() + await release_handler.wait() + return 'ok' + + bus.on('SampleEvent', slow_handler) + + processing_task = asyncio.create_task(bus.emit(event).event_completed()) + try: + await asyncio.wait_for(handler_entered.wait(), timeout=1.0) + started_payload = event.model_dump(mode='json') + assert started_payload['event_status'] == 'started' + + release_handler.set() + completed_event = await asyncio.wait_for(processing_task, timeout=1.0) + completed_payload = completed_event.model_dump(mode='json') + assert completed_payload['event_status'] == 'completed' + finally: + release_handler.set() + if not processing_task.done(): + processing_task.cancel() + with suppress(asyncio.CancelledError): + await processing_task + await bus.stop() diff --git a/tests/test_bridges.py b/tests/test_bridges.py new file mode 100644 index 0000000..d4313b4 --- /dev/null +++ b/tests/test_bridges.py @@ -0,0 +1,453 @@ +"""Process-isolated roundtrip tests for bridge transports.""" + +from __future__ import annotations + +import asyncio +import json +import socket +import sqlite3 +import subprocess +import sys +import tempfile +import time +from collections.abc import AsyncIterator +from contextlib import asynccontextmanager +from datetime import datetime +from pathlib import Path +from shutil import rmtree +from typing import Any + +import pytest +from uuid_extensions import uuid7str + +from bubus import BaseEvent, HTTPEventBridge, SocketEventBridge +from 
bubus.bridge_jsonl import JSONLEventBridge +from bubus.bridge_nats import NATSEventBridge +from bubus.bridge_postgres import PostgresEventBridge +from bubus.bridge_redis import RedisEventBridge +from bubus.bridge_sqlite import SQLiteEventBridge + + +class IPCPingEvent(BaseEvent): + label: str + + +_TEST_RUN_ID = f'{int(time.time() * 1000)}-{uuid7str()[-8:]}' + + +def _make_temp_dir(prefix: str) -> Path: + return Path(tempfile.mkdtemp(prefix=f'{prefix}-{_TEST_RUN_ID}-')) + + +def _free_tcp_port() -> int: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: + sock.bind(('127.0.0.1', 0)) + return int(sock.getsockname()[1]) + + +def _canonical(payload: dict[str, Any]) -> dict[str, Any]: + normalized: dict[str, Any] = {} + for key, value in payload.items(): + if key.endswith('_at') and isinstance(value, str): + try: + normalized[key] = datetime.fromisoformat(value).timestamp() + continue + except ValueError: + pass + normalized[key] = value + return normalized + + +def _normalize_roundtrip_payload(payload: dict[str, Any]) -> dict[str, Any]: + normalized = _canonical(payload) + normalized.pop('event_id', None) + normalized.pop('event_path', None) + # Dispatch now materializes event_concurrency defaults on the receiving bus. + if normalized.get('event_concurrency') is None: + normalized['event_concurrency'] = 'bus-serial' + # Dispatch also materializes handler-level defaults on the receiving bus. + if normalized.get('event_handler_concurrency') is None: + normalized['event_handler_concurrency'] = 'serial' + if normalized.get('event_handler_completion') is None: + normalized['event_handler_completion'] = 'all' + # event_status/event_started_at are now serialized, but the receiving bus + # can advance them while handling the event. Normalize in-flight statuses. 
+ if normalized.get('event_status') in ('pending', 'started'): + normalized['event_status'] = 'pending' + normalized['event_started_at'] = None + normalized['event_completed_at'] = None + return normalized + + +@asynccontextmanager +async def _running_process(command: list[str], *, cwd: Path | None = None) -> AsyncIterator[subprocess.Popen[str]]: + process = subprocess.Popen( + command, + cwd=str(cwd) if cwd else None, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + try: + yield process + finally: + if process.poll() is None: + process.terminate() + try: + process.wait(timeout=5) + except subprocess.TimeoutExpired: + process.kill() + process.wait(timeout=5) + + +async def _wait_for_port(port: int, timeout: float = 30.0) -> None: + deadline = time.monotonic() + timeout + while time.monotonic() < deadline: + try: + _, writer = await asyncio.open_connection('127.0.0.1', port) + writer.close() + await writer.wait_closed() + return + except OSError: + await asyncio.sleep(0.05) + raise TimeoutError(f'port did not open in time: {port}') + + +async def _wait_for_path(path: Path, *, process: subprocess.Popen[str], timeout: float = 30.0) -> None: + deadline = time.monotonic() + timeout + while time.monotonic() < deadline: + if path.exists(): + return + if process.poll() is not None: + stdout, stderr = process.communicate() + raise AssertionError(f'worker exited early ({process.returncode})\nstdout:\n{stdout}\nstderr:\n{stderr}') + await asyncio.sleep(0.05) + raise TimeoutError(f'path did not appear in time: {path}') + + +def _make_sender_bridge(kind: str, config: dict[str, Any], *, low_latency: bool = False) -> Any: + if kind == 'http': + return HTTPEventBridge(send_to=str(config['endpoint'])) + if kind == 'socket': + return SocketEventBridge(path=str(config['path'])) + if kind == 'jsonl': + return JSONLEventBridge(str(config['path']), poll_interval=0.001 if low_latency else 0.05) + if kind == 'sqlite': + return SQLiteEventBridge( + 
str(config['path']), + str(config['table']), + poll_interval=0.001 if low_latency else 0.05, + ) + if kind == 'redis': + return RedisEventBridge(str(config['url'])) + if kind == 'nats': + return NATSEventBridge(str(config['server']), str(config['subject'])) + if kind == 'postgres': + return PostgresEventBridge(str(config['url'])) + raise ValueError(f'Unsupported bridge kind: {kind}') + + +def _make_listener_bridge(kind: str, config: dict[str, Any], *, low_latency: bool = False) -> Any: + if kind == 'http': + return HTTPEventBridge(listen_on=str(config['endpoint'])) + if kind == 'socket': + return SocketEventBridge(path=str(config['path'])) + if kind == 'jsonl': + return JSONLEventBridge(str(config['path']), poll_interval=0.001 if low_latency else 0.05) + if kind == 'sqlite': + return SQLiteEventBridge( + str(config['path']), + str(config['table']), + poll_interval=0.001 if low_latency else 0.05, + ) + if kind == 'redis': + return RedisEventBridge(str(config['url'])) + if kind == 'nats': + return NATSEventBridge(str(config['server']), str(config['subject'])) + if kind == 'postgres': + return PostgresEventBridge(str(config['url'])) + raise ValueError(f'Unsupported bridge kind: {kind}') + + +async def _measure_warm_latency_ms(kind: str, config: dict[str, Any]) -> float: + attempts = 3 + last_error: BaseException | None = None + + for _attempt in range(attempts): + sender = _make_sender_bridge(kind, config, low_latency=True) + receiver = _make_listener_bridge(kind, config, low_latency=True) + + run_suffix = uuid7str()[-8:] + warmup_prefix = f'warmup_{run_suffix}_' + measured_prefix = f'measured_{run_suffix}_' + warmup_count_target = 5 + measured_count_target = 1000 + + warmup_seen_count = 0 + measured_seen_count = 0 + warmup_seen = asyncio.Event() + measured_seen = asyncio.Event() + + async def _on_event(event: BaseEvent[Any]) -> None: + nonlocal warmup_seen_count, measured_seen_count + label = getattr(event, 'label', '') + if not isinstance(label, str): + return + if 
label.startswith(warmup_prefix): + warmup_seen_count += 1 + if warmup_seen_count >= warmup_count_target: + warmup_seen.set() + return + if label.startswith(measured_prefix): + measured_seen_count += 1 + if measured_seen_count >= measured_count_target: + measured_seen.set() + + try: + await sender.start() + await receiver.start() + receiver.on('IPCPingEvent', _on_event) + await asyncio.sleep(0.1) + + for index in range(warmup_count_target): + await sender.emit( + IPCPingEvent( + label=f'{warmup_prefix}{index}', + ) + ) + await asyncio.wait_for(warmup_seen.wait(), timeout=60.0) + + start_ns = time.perf_counter_ns() + for index in range(measured_count_target): + await sender.emit( + IPCPingEvent( + label=f'{measured_prefix}{index}', + ) + ) + await asyncio.wait_for(measured_seen.wait(), timeout=600.0) + elapsed_ms = (time.perf_counter_ns() - start_ns) / 1_000_000.0 + return elapsed_ms / measured_count_target + except TimeoutError as exc: + last_error = exc + finally: + await sender.close() + await receiver.close() + + await asyncio.sleep(0.2) + + raise RuntimeError(f'bridge latency measurement timed out after {attempts} attempts: {kind}') from last_error + + +async def _assert_roundtrip(kind: str, config: dict[str, Any]) -> None: + temp_path = _make_temp_dir(f'bubus-bridge-{kind}') + try: + worker_config_path = temp_path / 'worker_config.json' + worker_ready_path = temp_path / 'worker_ready' + received_event_path = temp_path / 'received_event.json' + worker_config = { + **config, + 'kind': kind, + 'ready_path': str(worker_ready_path), + 'output_path': str(received_event_path), + } + worker_config_path.write_text(json.dumps(worker_config), encoding='utf-8') + + sender = _make_sender_bridge(kind, config) + + worker = subprocess.Popen( + [sys.executable, str(Path(__file__).with_name('bridge_listener_worker.py')), str(worker_config_path)], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + try: + await _wait_for_path(worker_ready_path, process=worker) 
+ if kind == 'postgres': + await sender.start() + outbound = IPCPingEvent( + label=f'{kind}_ok', + event_result_type={ + '$schema': 'https://json-schema.org/draft/2020-12/schema', + 'type': 'object', + 'properties': { + 'ok': {'type': 'boolean'}, + 'score': {'type': 'number'}, + 'tags': {'type': 'array', 'items': {'type': 'string'}}, + }, + 'required': ['ok', 'score', 'tags'], + 'additionalProperties': False, + }, + ) + await sender.emit(outbound) + await _wait_for_path(received_event_path, process=worker) + received_payload = json.loads(received_event_path.read_text(encoding='utf-8')) + assert 'event_status' in received_payload + assert 'event_started_at' in received_payload + assert _normalize_roundtrip_payload(received_payload) == _normalize_roundtrip_payload( + outbound.model_dump(mode='json') + ) + finally: + await sender.close() + if worker.poll() is None: + worker.terminate() + try: + worker.wait(timeout=5) + except subprocess.TimeoutExpired: + worker.kill() + worker.wait(timeout=5) + finally: + rmtree(temp_path, ignore_errors=True) + + +@pytest.mark.asyncio +async def test_http_event_bridge_roundtrip_between_processes() -> None: + endpoint = f'http://127.0.0.1:{_free_tcp_port()}/events' + await _assert_roundtrip('http', {'endpoint': endpoint}) + latency_ms = await _measure_warm_latency_ms('http', {'endpoint': endpoint}) + print(f'LATENCY python http {latency_ms:.3f}ms') + + +@pytest.mark.asyncio +async def test_socket_event_bridge_roundtrip_between_processes() -> None: + socket_path = Path('/tmp') / f'bb-{_TEST_RUN_ID}-{uuid7str()[-8:]}.sock' + await _assert_roundtrip('socket', {'path': str(socket_path)}) + latency_ms = await _measure_warm_latency_ms('socket', {'path': str(socket_path)}) + print(f'LATENCY python socket {latency_ms:.3f}ms') + + +def test_socket_event_bridge_rejects_long_socket_paths() -> None: + long_path = '/tmp/' + ('a' * 100) + '.sock' + with pytest.raises(ValueError, match='too long'): + SocketEventBridge(path=long_path) + + 
+@pytest.mark.asyncio +async def test_jsonl_event_bridge_roundtrip_between_processes() -> None: + temp_dir = _make_temp_dir('bubus-jsonl') + try: + jsonl_path = temp_dir / 'events.jsonl' + await _assert_roundtrip('jsonl', {'path': str(jsonl_path)}) + latency_ms = await _measure_warm_latency_ms('jsonl', {'path': str(jsonl_path)}) + print(f'LATENCY python jsonl {latency_ms:.3f}ms') + finally: + rmtree(temp_dir, ignore_errors=True) + + +@pytest.mark.asyncio +async def test_sqlite_event_bridge_roundtrip_between_processes() -> None: + temp_dir = _make_temp_dir('bubus-sqlite') + try: + sqlite_path = temp_dir / 'events.sqlite3' + await _assert_roundtrip('sqlite', {'path': str(sqlite_path), 'table': 'bubus_events'}) + + with sqlite3.connect(sqlite_path) as conn: + columns = {str(row[1]) for row in conn.execute('PRAGMA table_info("bubus_events")').fetchall()} + assert 'event_payload' in columns + assert 'label' not in columns + assert all(column == 'event_payload' or column.startswith('event_') for column in columns) + + row = conn.execute( + 'SELECT event_payload FROM "bubus_events" ORDER BY COALESCE("event_created_at", \'\') DESC LIMIT 1' + ).fetchone() + assert row is not None + payload = json.loads(str(row[0])) + assert payload.get('label') == 'sqlite_ok' + + measure_sqlite_path = temp_dir / 'events.measure.sqlite3' + latency_ms = await _measure_warm_latency_ms('sqlite', {'path': str(measure_sqlite_path), 'table': 'bubus_events'}) + print(f'LATENCY python sqlite {latency_ms:.3f}ms') + finally: + rmtree(temp_dir, ignore_errors=True) + + +@pytest.mark.asyncio +async def test_redis_event_bridge_roundtrip_between_processes() -> None: + temp_dir = _make_temp_dir('bubus-redis') + try: + port = _free_tcp_port() + command = [ + 'redis-server', + '--save', + '', + '--appendonly', + 'no', + '--bind', + '127.0.0.1', + '--port', + str(port), + '--dir', + str(temp_dir), + ] + async with _running_process(command) as redis_process: + await _wait_for_port(port) + await 
_assert_roundtrip('redis', {'url': f'redis://127.0.0.1:{port}/1/bubus_events'}) + latency_ms = await _measure_warm_latency_ms('redis', {'url': f'redis://127.0.0.1:{port}/1/bubus_events'}) + print(f'LATENCY python redis {latency_ms:.3f}ms') + assert redis_process.poll() is None + finally: + rmtree(temp_dir, ignore_errors=True) + + +@pytest.mark.asyncio +async def test_nats_event_bridge_roundtrip_between_processes() -> None: + port = _free_tcp_port() + command = ['nats-server', '-a', '127.0.0.1', '-p', str(port)] + async with _running_process(command) as nats_process: + await _wait_for_port(port) + await _assert_roundtrip('nats', {'server': f'nats://127.0.0.1:{port}', 'subject': 'bubus_events'}) + latency_ms = await _measure_warm_latency_ms('nats', {'server': f'nats://127.0.0.1:{port}', 'subject': 'bubus_events'}) + print(f'LATENCY python nats {latency_ms:.3f}ms') + assert nats_process.poll() is None + + +@pytest.mark.asyncio +async def test_postgres_event_bridge_roundtrip_between_processes() -> None: + temp_dir = _make_temp_dir('bubus-postgres') + try: + data_dir = temp_dir / 'pgdata' + initdb = subprocess.run( + ['initdb', '-D', str(data_dir), '-A', 'trust', '-U', 'postgres'], + capture_output=True, + text=True, + check=False, + ) + assert initdb.returncode == 0, f'initdb failed\nstdout:\n{initdb.stdout}\nstderr:\n{initdb.stderr}' + + port = _free_tcp_port() + command = ['postgres', '-D', str(data_dir), '-h', '127.0.0.1', '-p', str(port), '-k', '/tmp'] + async with _running_process(command) as postgres_process: + await _wait_for_port(port) + await _assert_roundtrip('postgres', {'url': f'postgresql://postgres@127.0.0.1:{port}/postgres/bubus_events'}) + + asyncpg = __import__('asyncpg') + conn = await asyncpg.connect(f'postgresql://postgres@127.0.0.1:{port}/postgres') + try: + rows = await conn.fetch( + """ + SELECT column_name + FROM information_schema.columns + WHERE table_schema = 'public' AND table_name = $1 + """, + 'bubus_events', + ) + columns = 
{str(row['column_name']) for row in rows} + assert 'event_payload' in columns + assert 'label' not in columns + assert all(column == 'event_payload' or column.startswith('event_') for column in columns) + + row = await conn.fetchrow( + 'SELECT event_payload FROM "bubus_events" ORDER BY COALESCE("event_created_at", \'\') DESC LIMIT 1' + ) + assert row is not None + payload = json.loads(str(row['event_payload'])) + assert payload.get('label') == 'postgres_ok' + finally: + await conn.close() + + latency_ms = await _measure_warm_latency_ms( + 'postgres', {'url': f'postgresql://postgres@127.0.0.1:{port}/postgres/bubus_events'} + ) + print(f'LATENCY python postgres {latency_ms:.3f}ms') + assert postgres_process.poll() is None + finally: + rmtree(temp_dir, ignore_errors=True) diff --git a/tests/test_comprehensive_patterns.py b/tests/test_comprehensive_patterns.py index b98b211..35e9b02 100644 --- a/tests/test_comprehensive_patterns.py +++ b/tests/test_comprehensive_patterns.py @@ -1,5 +1,7 @@ """Test comprehensive event patterns including forwarding, async/sync dispatch, and parent-child tracking.""" +# pyright: reportUnusedVariable=false + import asyncio from typing import Any @@ -42,7 +44,7 @@ def child_bus2_event_handler(event: BaseEvent[str]) -> str: return 'forwarded bus result' bus2.on('*', child_bus2_event_handler) # register a handler on bus2 - bus1.on('*', bus2.dispatch) # forward all events from bus1 -> bus2 + bus1.on('*', bus2.emit) # forward all events from bus1 -> bus2 async def parent_bus1_handler(event: ParentEvent) -> str: # Only process the parent ParentEvent @@ -54,34 +56,36 @@ async def parent_bus1_handler(event: ParentEvent) -> str: # Pattern 1: Async dispatch - handlers run after parent completes print('\n1. 
Testing async dispatch...') - child_event_async = bus1.dispatch(QueuedChildEvent()) + child_event_async = bus1.emit(QueuedChildEvent()) print(f' child_event_async.event_status = {child_event_async.event_status}') assert child_event_async.event_status != 'completed' # Pattern 2: Sync dispatch with await - handlers run immediately print('\n2. Testing sync dispatch (await)...') - child_event_sync = await bus1.dispatch(ImmediateChildEvent()) + child_event_sync = await bus1.emit(ImmediateChildEvent()) print(f' child_event_sync.event_status = {child_event_sync.event_status}') assert child_event_sync.event_status == 'completed' # Check that forwarded handler result is available print('\n3. Checking forwarded handler results...') print(f' child_event_sync.event_results: {child_event_sync.event_results}') - print(f' child_event_sync.event_result_type: {child_event_sync.event_result_type}') event_results = await child_event_sync.event_results_list(raise_if_none=False) print(f' Results: {event_results}') - # The forwarding handler (bus.dispatch) returns the event object itself + # The forwarding handler (bus.emit) returns the event object itself # We need to check if the child event was processed on bus2 # Check that the event was forwarded by looking at: # 1. The event path includes bus2 - assert 'bus2' in child_event_sync.event_path + assert bus2.label in child_event_sync.event_path # 2. Debug what handlers processed this event print(' Handlers that processed this event:') for result in child_event_sync.event_results.values(): print(f' - {result.handler_name} (bus: {result.eventbus_name})') - # The event was processed by bus1 using bus2.dispatch handler + # The event was forwarded from bus1 and processed by bus2. 
+ assert any( + result.eventbus_name == 'bus1' and 'emit' in result.handler_name for result in child_event_sync.event_results.values() + ) assert any( - 'bus2' in result.handler_name and 'dispatch' in result.handler_name + result.eventbus_name == 'bus2' and 'child_bus2_event_handler' in result.handler_name for result in child_event_sync.event_results.values() ) print(' Event was successfully forwarded to bus2') @@ -104,12 +108,15 @@ async def parent_bus1_handler(event: ParentEvent) -> str: # Dispatch parent event and wait for completion print('\nDispatching parent event...') - parent_event = await bus1.dispatch(ParentEvent()) + parent_event = await bus1.emit(ParentEvent()) # Wait for all buses to finish processing await bus1.wait_until_idle() await bus2.wait_until_idle() + # This is a happy-path test: no handler should have errored. + assert all(result.error is None for result in parent_event.event_results.values()), parent_event.event_results + # Verify all child events have correct parent print('\n5. 
Verifying all events have correct parent...') all_events = list(bus1.event_history.values()) @@ -120,8 +127,8 @@ async def parent_bus1_handler(event: ParentEvent) -> str: ) # Child events should have parent's ID - child_events = [e for e in all_events if isinstance(e, (ImmediateChildEvent, QueuedChildEvent))] - assert all(event.event_parent_id == parent_event.event_id for event in child_events) + event_children = [e for e in all_events if isinstance(e, (ImmediateChildEvent, QueuedChildEvent))] + assert all(event.event_parent_id == parent_event.event_id for event in event_children) # Sort results by sequence number to see actual execution order sorted_results = sorted(results, key=lambda x: x[0]) @@ -173,6 +180,47 @@ async def parent_bus1_handler(event: ParentEvent) -> str: await bus2.stop(clear=True) +async def test_await_forwarded_event_waits_for_target_bus_handlers(): + """ + Awaiting a dispatched event on source bus must wait for forwarded target-bus + handlers too, not only the source forwarding handler. 
+ """ + bus_src = EventBus(name='ForwardWaitSrc') + bus_dst = EventBus(name='ForwardWaitDst') + + class ForwardedEvent(BaseEvent[str]): + pass + + target_started = asyncio.Event() + target_finished = asyncio.Event() + + async def target_handler(event: ForwardedEvent) -> str: + target_started.set() + await asyncio.sleep(0.05) + target_finished.set() + return 'target_done' + + bus_src.on('*', bus_dst.emit) + bus_dst.on(ForwardedEvent, target_handler) + + try: + t0 = asyncio.get_running_loop().time() + event = await bus_src.emit(ForwardedEvent()) + elapsed = asyncio.get_running_loop().time() - t0 + + assert target_started.is_set() + assert target_finished.is_set() + assert elapsed >= 0.04 + assert any( + result.eventbus_name == 'ForwardWaitDst' and result.handler_name.endswith('target_handler') + for result in event.event_results.values() + ), event.event_results + assert all(result.status in ('completed', 'error') for result in event.event_results.values()) + finally: + await bus_src.stop(clear=True) + await bus_dst.stop(clear=True) + + async def test_race_condition_stress(): """Stress test to ensure no race conditions.""" print('\n=== Test Race Condition Stress ===') @@ -189,17 +237,17 @@ async def child_handler(event: BaseEvent[str]) -> str: await asyncio.sleep(0.001) return f'child_done_{bus_name}' - async def parent_handler(event: BaseEvent[str]) -> str: + async def parent_handler(event: BaseEvent[Any]) -> str: # Dispatch multiple children in different ways children: list[BaseEvent[Any]] = [] # Async dispatches for _ in range(3): - children.append(bus1.dispatch(QueuedChildEvent())) + children.append(bus1.emit(QueuedChildEvent())) # Sync dispatches for _ in range(3): - child = await bus1.dispatch(ImmediateChildEvent()) + child = await bus1.emit(ImmediateChildEvent()) assert child.event_status == 'completed' children.append(child) @@ -211,7 +259,7 @@ def bad_handler(bad: BaseEvent[Any]) -> None: pass # Setup forwarding - bus1.on('*', bus2.dispatch) + bus1.on('*', 
bus2.emit) bus1.on(QueuedChildEvent, child_handler) bus1.on(ImmediateChildEvent, child_handler) bus2.on(QueuedChildEvent, child_handler) @@ -223,13 +271,15 @@ def bad_handler(bad: BaseEvent[Any]) -> None: for run in range(5): results.clear() - await bus1.dispatch(BaseEvent()) + await bus1.emit(BaseEvent()) await bus1.wait_until_idle() await bus2.wait_until_idle() # Should have 6 child events processed on each bus - assert results.count('child_bus1') == 6, f'Run {run}: Expected 6 child_bus1, got {results.count("child_bus1")}' - assert results.count('child_bus2') == 6, f'Run {run}: Expected 6 child_bus2, got {results.count("child_bus2")}' + bus1_results = [entry for entry in results if entry.startswith(f'child_{bus1.label}')] + bus2_results = [entry for entry in results if entry.startswith(f'child_{bus2.label}')] + assert len(bus1_results) == 6, f'Run {run}: Expected 6 child_{bus1.label}, got {len(bus1_results)}' + assert len(bus2_results) == 6, f'Run {run}: Expected 6 child_{bus2.label}, got {len(bus2_results)}' print('βœ… No race conditions detected!') @@ -244,10 +294,665 @@ def bad_handler(bad: BaseEvent[Any]) -> None: await bus2.stop(clear=True) +async def test_awaited_child_jumps_queue_no_overshoot(): + """ + Test the edge case in BaseEvent.__await__() (models.py): + - When a handler dispatches and awaits a child event, that child should + execute immediately (jumping the FIFO queue) + - Other queued events (Event2, Event3) should NOT be processed (no overshoot) + - FIFO order should be maintained for remaining events after completion + """ + print('\n=== Test Awaited Child Jumps Queue (No Overshoot) ===') + + bus = EventBus(name='TestBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + # 
Dispatch and await child - this should jump the queue + child = bus.emit(ChildEvent()) + execution_order.append('Child_dispatched') + await child + execution_order.append('Child_await_returned') + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Event3_start') + execution_order.append('Event3_end') + return 'event3_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + execution_order.append('Child_end') + return 'child_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildEvent, child_handler) + + try: + # Dispatch all three events (they go into the queue) + event1 = bus.emit(Event1()) + event2 = bus.emit(Event2()) + event3 = bus.emit(Event3()) + + # Verify events are queued + await asyncio.sleep(0) # Let dispatch settle + print(f'After dispatch: E1={event1.event_status}, E2={event2.event_status}, E3={event3.event_status}') + + # Await Event1 - this triggers processing and the child should jump queue + await event1 + + print(f'After await event1: {execution_order}') + print(f'Statuses: E1={event1.event_status}, E2={event2.event_status}, E3={event3.event_status}') + + # KEY ASSERTION 1: Child executed during Event1's handler (jumped queue) + assert 'Child_start' in execution_order, 'Child should have executed' + assert 'Child_end' in execution_order, 'Child should have completed' + child_start_idx = execution_order.index('Child_start') + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_start_idx < event1_end_idx, 'Child should execute before Event1 ends' + assert child_end_idx < event1_end_idx, 'Child should complete before Event1 ends' + + # 
KEY ASSERTION 2: Event2 and Event3 did NOT execute yet (no overshoot) + assert 'Event2_start' not in execution_order, f'Event2 should NOT have started (no overshoot). Order: {execution_order}' + assert 'Event3_start' not in execution_order, f'Event3 should NOT have started (no overshoot). Order: {execution_order}' + + # KEY ASSERTION 3: Event2 and Event3 have not completed yet. + # They may be marked "started" once dequeued, even if their handlers haven't run. + assert event2.event_status in ('pending', 'started'), f'Event2 should be pending/started, got {event2.event_status}' + assert event3.event_status in ('pending', 'started'), f'Event3 should be pending/started, got {event3.event_status}' + + # Now let the remaining events process + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # KEY ASSERTION 4: FIFO order maintained - Event2 before Event3 + event2_start_idx = execution_order.index('Event2_start') + event3_start_idx = execution_order.index('Event3_start') + assert event2_start_idx < event3_start_idx, 'FIFO: Event2 should start before Event3' + + # Verify all completed + assert event2.event_status == 'completed' + assert event3.event_status == 'completed' + + # KEY ASSERTION 5: event_history reflects dispatch order, but started_at/completed_at + # timestamps reflect actual execution order (post-reordering) + history_list = list(bus.event_history.values()) + history_types = [e.__class__.__name__ for e in history_list] + print(f'Event history (dispatch order): {history_types}') + + # Find the child event and E2/E3 + child_event = next(e for e in history_list if isinstance(e, ChildEvent)) + event2_from_history = next(e for e in history_list if isinstance(e, Event2)) + event3_from_history = next(e for e in history_list if isinstance(e, Event3)) + + # Verify execution order via timestamps: Child should have started before E2 and E3 + assert child_event.event_started_at is not None, 'Child should have started_at timestamp' + 
assert event2_from_history.event_started_at is not None, 'Event2 should have started_at timestamp' + assert event3_from_history.event_started_at is not None, 'Event3 should have started_at timestamp' + + assert child_event.event_started_at < event2_from_history.event_started_at, ( + f'Child should have started before Event2. Child: {child_event.event_started_at}, E2: {event2_from_history.event_started_at}' + ) + assert child_event.event_started_at < event3_from_history.event_started_at, ( + f'Child should have started before Event3. Child: {child_event.event_started_at}, E3: {event3_from_history.event_started_at}' + ) + + print(f'Child started_at: {child_event.event_started_at}') + print(f'Event2 started_at: {event2_from_history.event_started_at}') + print(f'Event3 started_at: {event3_from_history.event_started_at}') + + print('βœ… Awaited child jumps queue, no overshoot, FIFO maintained!') + + finally: + await bus.stop(clear=True) + + +async def test_dispatch_multiple_await_one_skips_others(): + """ + Test that when a handler dispatches multiple events and awaits only one, + the awaited event jumps the queue while the non-awaited ones stay in place. 
+ + Scenario: + - Queue: [E1, E2, E3] + - E1 handler dispatches ChildA, ChildB, ChildC (queue becomes [E2, E3, ChildA, ChildB, ChildC]) + - E1 handler awaits only ChildB + - ChildB should jump to front and execute immediately + - ChildA and ChildC should NOT execute (they stay behind E2, E3 in queue) + - E2 and E3 should NOT execute during E1's handler + """ + print('\n=== Test Dispatch Multiple, Await One ===') + + bus = EventBus(name='MultiDispatchBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class ChildA(BaseEvent[str]): + pass + + class ChildB(BaseEvent[str]): + pass + + class ChildC(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + + # Dispatch three children but only await the middle one + child_a = bus.emit(ChildA()) + execution_order.append('ChildA_dispatched') + + child_b = bus.emit(ChildB()) + execution_order.append('ChildB_dispatched') + + child_c = bus.emit(ChildC()) + execution_order.append('ChildC_dispatched') + + # Only await ChildB - it should jump the queue + await child_b + execution_order.append('ChildB_await_returned') + + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Event3_start') + execution_order.append('Event3_end') + return 'event3_done' + + async def child_a_handler(event: ChildA) -> str: + execution_order.append('ChildA_start') + execution_order.append('ChildA_end') + return 'child_a_done' + + async def child_b_handler(event: ChildB) -> str: + execution_order.append('ChildB_start') + execution_order.append('ChildB_end') + return 'child_b_done' + + async def child_c_handler(event: ChildC) 
-> str: + execution_order.append('ChildC_start') + execution_order.append('ChildC_end') + return 'child_c_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(Event3, event3_handler) + bus.on(ChildA, child_a_handler) + bus.on(ChildB, child_b_handler) + bus.on(ChildC, child_c_handler) + + try: + # Dispatch E1, E2, E3 + event1 = bus.emit(Event1()) + event2 = bus.emit(Event2()) + event3 = bus.emit(Event3()) + + # Await E1 + await event1 + + print(f'After await event1: {execution_order}') + + # ChildB should have executed (it was awaited) + assert 'ChildB_start' in execution_order, 'ChildB should have executed' + assert 'ChildB_end' in execution_order, 'ChildB should have completed' + + # ChildB should have executed before Event1 ended (queue jump worked) + child_b_end_idx = execution_order.index('ChildB_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_b_end_idx < event1_end_idx, 'ChildB should complete before Event1 ends' + + # ChildA and ChildC should NOT have executed BEFORE Event1 ended (no overshoot) + # They may have executed after Event1 completed (via background task), which is fine + if 'ChildA_start' in execution_order: + child_a_start_idx = execution_order.index('ChildA_start') + assert child_a_start_idx > event1_end_idx, f'ChildA should NOT start before Event1 ends. Order: {execution_order}' + if 'ChildC_start' in execution_order: + child_c_start_idx = execution_order.index('ChildC_start') + assert child_c_start_idx > event1_end_idx, f'ChildC should NOT start before Event1 ends. Order: {execution_order}' + + # E2 and E3 should NOT have executed BEFORE Event1 ended (no overshoot) + if 'Event2_start' in execution_order: + event2_start_idx = execution_order.index('Event2_start') + assert event2_start_idx > event1_end_idx, f'Event2 should NOT start before Event1 ends. 
Order: {execution_order}' + if 'Event3_start' in execution_order: + event3_start_idx = execution_order.index('Event3_start') + assert event3_start_idx > event1_end_idx, f'Event3 should NOT start before Event1 ends. Order: {execution_order}' + + # Now process remaining events + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # Verify FIFO order for remaining: E2, E3, ChildA, ChildC + # (ChildA and ChildC were dispatched after E2/E3 were already queued) + event2_start_idx = execution_order.index('Event2_start') + event3_start_idx = execution_order.index('Event3_start') + child_a_start_idx = execution_order.index('ChildA_start') + child_c_start_idx = execution_order.index('ChildC_start') + + assert event2_start_idx < event3_start_idx, 'FIFO: E2 before E3' + assert event3_start_idx < child_a_start_idx, 'FIFO: E3 before ChildA' + assert child_a_start_idx < child_c_start_idx, 'FIFO: ChildA before ChildC' + + print('βœ… Dispatch multiple, await one works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_multi_bus_forwarding_with_queued_events(): + """ + Test queue jumping with multiple buses that have forwarding set up, + where both buses already have events queued. 
+ + Scenario: + - Bus1 has [E1, E2] queued + - Bus2 has [E3, E4] queued + - E1's handler dispatches Child to Bus1 and awaits it + - Child should jump Bus1's queue (ahead of E2) + - Bus2 should continue processing independently (bus-serial is per-bus) + """ + print('\n=== Test Multi-Bus Forwarding With Queued Events ===') + + bus1 = EventBus(name='Bus1', max_history_size=100) + bus2 = EventBus(name='Bus2', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Event3(BaseEvent[str]): + pass + + class Event4(BaseEvent[str]): + pass + + class ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Bus1_Event1_start') + # Dispatch child to bus1 and await + child = bus1.emit(ChildEvent()) + execution_order.append('Child_dispatched_to_Bus1') + await child + execution_order.append('Child_await_returned') + execution_order.append('Bus1_Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Bus1_Event2_start') + execution_order.append('Bus1_Event2_end') + return 'event2_done' + + async def event3_handler(event: Event3) -> str: + execution_order.append('Bus2_Event3_start') + execution_order.append('Bus2_Event3_end') + return 'event3_done' + + async def event4_handler(event: Event4) -> str: + execution_order.append('Bus2_Event4_start') + execution_order.append('Bus2_Event4_end') + return 'event4_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + execution_order.append('Child_end') + return 'child_done' + + # Register handlers on respective buses + bus1.on(Event1, event1_handler) + bus1.on(Event2, event2_handler) + bus1.on(ChildEvent, child_handler) + + bus2.on(Event3, event3_handler) + bus2.on(Event4, event4_handler) + + try: + # Queue events on both buses + event1 = bus1.emit(Event1()) + event2 = bus1.emit(Event2()) + 
event3 = bus2.emit(Event3()) + event4 = bus2.emit(Event4()) + + await asyncio.sleep(0) # Let dispatch settle + + print(f'Bus1 queue size: {bus1.pending_event_queue.qsize() if bus1.pending_event_queue else 0}') + print(f'Bus2 queue size: {bus2.pending_event_queue.qsize() if bus2.pending_event_queue else 0}') + + # Await E1 - child should jump Bus1's queue + await event1 + + print(f'After await event1: {execution_order}') + + # Child should have executed + assert 'Child_start' in execution_order, 'Child should have executed' + assert 'Child_end' in execution_order, 'Child should have completed' + + # Child should have executed before Event1 ended + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Bus1_Event1_end') + assert child_end_idx < event1_end_idx, 'Child should complete before Event1 ends' + + # E2 on Bus1 should NOT have executed yet + assert 'Bus1_Event2_start' not in execution_order, f'E2 on Bus1 should NOT have started. Order: {execution_order}' + + # Bus2 runs independently under bus-serial event concurrency. + # Its queued events may already be running while Bus1 awaits the child. + assert 'Bus2_Event3_start' in execution_order, f'E3 on Bus2 should have started. Order: {execution_order}' + + # Now process remaining events on both buses + await bus1.wait_until_idle() + await bus2.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # Verify all events eventually executed + assert 'Bus1_Event2_start' in execution_order + assert 'Bus2_Event3_start' in execution_order + assert 'Bus2_Event4_start' in execution_order + + print('βœ… Multi-bus forwarding with queued events works correctly!') + + finally: + await bus1.stop(clear=True) + await bus2.stop(clear=True) + + +async def test_await_already_completed_event(): + """ + Test that awaiting an event that's already completed is a no-op. + The event isn't in the queue anymore, so there's nothing to reorder. 
+ """ + print('\n=== Test Await Already Completed Event ===') + + bus = EventBus(name='AlreadyCompletedBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + + try: + # Dispatch and await E1 first + event1 = await bus.emit(Event1()) + assert event1.event_status == 'completed' + + # Now dispatch E2 + event2 = bus.emit(Event2()) + + # Await E1 again - should be a no-op since it's already completed + await event1 # Should return immediately + + print(f'After second await event1: {execution_order}') + + # E2 should NOT have executed yet (we didn't trigger processing) + # The second await on completed E1 should just return without processing queue + assert event2.event_status == 'pending', f'E2 should still be pending, got {event2.event_status}' + + # Complete E2 + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + print('βœ… Await already completed event works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_multiple_awaits_same_event(): + """ + Test that multiple concurrent awaits on the same event work correctly. + Only the first await should trigger queue reordering; subsequent awaits + should just wait on the completion signal. 
+ """ + print('\n=== Test Multiple Awaits Same Event ===') + + bus = EventBus(name='MultiAwaitBus', max_history_size=100) + execution_order: list[str] = [] + await_results: list[str] = [] + child_ref: BaseEvent[str] | None = None + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class ChildEvent(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + nonlocal child_ref + execution_order.append('Event1_start') + + # Dispatch child + child = bus.emit(ChildEvent()) + child_ref = child + + # Create multiple concurrent awaits on the same child + async def await_child(name: str): + await child + await_results.append(f'{name}_completed') + + # Start two concurrent awaits + task1 = asyncio.create_task(await_child('await1')) + task2 = asyncio.create_task(await_child('await2')) + + # Wait for both + await asyncio.gather(task1, task2) + execution_order.append('Both_awaits_completed') + + execution_order.append('Event1_end') + return 'event1_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + async def child_handler(event: ChildEvent) -> str: + execution_order.append('Child_start') + await asyncio.sleep(0.01) # Small delay to ensure both awaits are waiting + execution_order.append('Child_end') + return 'child_done' + + bus.on(Event1, event1_handler) + bus.on(Event2, event2_handler) + bus.on(ChildEvent, child_handler) + + try: + event1 = bus.emit(Event1()) + event2 = bus.emit(Event2()) + + await event1 + + print(f'After await event1: {execution_order}') + print(f'Await results: {await_results}') + + # Both awaits should have completed + assert len(await_results) == 2, f'Both awaits should complete, got {await_results}' + assert 'await1_completed' in await_results + assert 'await2_completed' in await_results + + # Child should have executed exactly once and before Event1 ended + assert 
execution_order.count('Child_start') == 1 + assert execution_order.count('Child_end') == 1 + assert 'Child_start' in execution_order + assert 'Child_end' in execution_order + child_end_idx = execution_order.index('Child_end') + event1_end_idx = execution_order.index('Event1_end') + assert child_end_idx < event1_end_idx + + # Child event should have exactly one handler result (no double-run). + assert child_ref is not None + assert len(child_ref.event_results) == 1 + + # E2 should NOT have executed yet + assert 'Event2_start' not in execution_order, f'E2 should NOT have started. Order: {execution_order}' + + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + print('βœ… Multiple awaits same event works correctly!') + + finally: + await bus.stop(clear=True) + + +async def test_deeply_nested_awaited_children(): + """ + Test deeply nested awaited children: Event1 awaits Child1, which awaits Child2. + All should complete before Event2 starts (no overshoot at any level). 
+ """ + print('\n=== Test Deeply Nested Awaited Children ===') + + bus = EventBus(name='DeepNestedBus', max_history_size=100) + execution_order: list[str] = [] + + class Event1(BaseEvent[str]): + pass + + class Event2(BaseEvent[str]): + pass + + class Child1(BaseEvent[str]): + pass + + class Child2(BaseEvent[str]): + pass + + async def event1_handler(event: Event1) -> str: + execution_order.append('Event1_start') + child1 = bus.emit(Child1()) + await child1 + execution_order.append('Event1_end') + return 'event1_done' + + async def child1_handler(event: Child1) -> str: + execution_order.append('Child1_start') + child2 = bus.emit(Child2()) + await child2 + execution_order.append('Child1_end') + return 'child1_done' + + async def child2_handler(event: Child2) -> str: + execution_order.append('Child2_start') + execution_order.append('Child2_end') + return 'child2_done' + + async def event2_handler(event: Event2) -> str: + execution_order.append('Event2_start') + execution_order.append('Event2_end') + return 'event2_done' + + bus.on(Event1, event1_handler) + bus.on(Child1, child1_handler) + bus.on(Child2, child2_handler) + bus.on(Event2, event2_handler) + + try: + event1 = bus.emit(Event1()) + event2 = bus.emit(Event2()) + + await event1 + + print(f'After await event1: {execution_order}') + + # All nested children should have completed + assert 'Child1_start' in execution_order + assert 'Child1_end' in execution_order + assert 'Child2_start' in execution_order + assert 'Child2_end' in execution_order + + # Verify nesting order: Child2 completes before Child1 + child2_end_idx = execution_order.index('Child2_end') + child1_end_idx = execution_order.index('Child1_end') + event1_end_idx = execution_order.index('Event1_end') + assert child2_end_idx < child1_end_idx < event1_end_idx + + # E2 should NOT have started + assert 'Event2_start' not in execution_order, f'E2 should NOT have started. 
Order: {execution_order}' + + await bus.wait_until_idle() + + print(f'Final execution order: {execution_order}') + + # E2 should start after E1 ends + event2_start_idx = execution_order.index('Event2_start') + assert event2_start_idx > event1_end_idx + + print('βœ… Deeply nested awaited children works correctly!') + + finally: + await bus.stop(clear=True) + + async def main(): """Run all tests.""" await test_comprehensive_patterns() await test_race_condition_stress() + await test_awaited_child_jumps_queue_no_overshoot() + await test_dispatch_multiple_await_one_skips_others() + await test_multi_bus_forwarding_with_queued_events() + await test_await_already_completed_event() + await test_multiple_awaits_same_event() + await test_deeply_nested_awaited_children() if __name__ == '__main__': diff --git a/tests/test_cross_runtime_roundtrip.py b/tests/test_cross_runtime_roundtrip.py new file mode 100644 index 0000000..a4ae7ea --- /dev/null +++ b/tests/test_cross_runtime_roundtrip.py @@ -0,0 +1,591 @@ +import asyncio +import json +import os +import shutil +import subprocess +from dataclasses import dataclass +from pathlib import Path +from types import NoneType +from typing import Any + +import pytest +from pydantic import BaseModel, TypeAdapter, ValidationError +from typing_extensions import TypedDict + +from bubus import BaseEvent, EventBus +from bubus.helpers import CleanShutdownQueue + +SUBPROCESS_TIMEOUT_SECONDS = 30 +EVENT_WAIT_TIMEOUT_SECONDS = 15 + + +class ScreenshotRegion(BaseModel): + id: str + label: str + score: float + visible: bool + + +class ScreenshotResult(BaseModel): + image_url: str + width: int + height: int + tags: list[str] + is_animated: bool + confidence_scores: list[float] + metadata: dict[str, float] + regions: list[ScreenshotRegion] + + +class PyTsTypedDictResult(TypedDict): + name: str + active: bool + count: int + + +@dataclass(slots=True) +class PyTsDataclassResult: + name: str + score: float + tags: list[str] + + +@dataclass(slots=True) 
+class RoundtripCase: + event: BaseEvent[Any] + valid_results: list[Any] + invalid_results: list[Any] + + +class PyTsIntResultEvent(BaseEvent[int]): + value: int + label: str + + +class PyTsFloatResultEvent(BaseEvent[float]): + marker: str + + +class PyTsStringResultEvent(BaseEvent[str]): + marker: str + + +class PyTsBoolResultEvent(BaseEvent[bool]): + marker: str + + +class PyTsNullResultEvent(BaseEvent[NoneType]): + marker: str + + +class PyTsStringListResultEvent(BaseEvent[list[str]]): + marker: str + + +class PyTsDictResultEvent(BaseEvent[dict[str, int]]): + marker: str + + +class PyTsNestedMapResultEvent(BaseEvent[dict[str, list[int]]]): + marker: str + + +class PyTsTypedDictResultEvent(BaseEvent[PyTsTypedDictResult]): + marker: str + + +class PyTsDataclassResultEvent(BaseEvent[PyTsDataclassResult]): + marker: str + + +class PyTsScreenshotEvent(BaseEvent[ScreenshotResult]): + target_id: str + quality: str + + +def _value_repr(value: Any) -> str: + try: + return json.dumps(value, sort_keys=True) + except TypeError: + return repr(value) + + +def _accepts_result_type(result_type: Any, value: Any) -> bool: + try: + TypeAdapter(result_type).validate_python(value) + except ValidationError: + return False + return True + + +def _assert_result_type_semantics_equal( + original_result_type: Any, + candidate_schema_json: dict[str, Any], + valid_results: list[Any], + invalid_results: list[Any], + context: str, +) -> None: + hydrated = BaseEvent[Any].model_validate({'event_type': 'SchemaSemanticsEvent', 'event_result_type': candidate_schema_json}) + candidate_result_type = hydrated.event_result_type + assert candidate_result_type is not None, f'{context}: missing candidate result type after hydration' + + for value in valid_results: + original_ok = _accepts_result_type(original_result_type, value) + candidate_ok = _accepts_result_type(candidate_result_type, value) + assert original_ok, f'{context}: original schema should accept {_value_repr(value)}' + assert candidate_ok, 
f'{context}: candidate schema should accept {_value_repr(value)}' + + for value in invalid_results: + original_ok = _accepts_result_type(original_result_type, value) + candidate_ok = _accepts_result_type(candidate_result_type, value) + assert not original_ok, f'{context}: original schema should reject {_value_repr(value)}' + assert not candidate_ok, f'{context}: candidate schema should reject {_value_repr(value)}' + + for value in [*valid_results, *invalid_results]: + original_ok = _accepts_result_type(original_result_type, value) + candidate_ok = _accepts_result_type(candidate_result_type, value) + assert candidate_ok == original_ok, ( + f'{context}: schema decision mismatch for {_value_repr(value)} (expected {original_ok}, got {candidate_ok})' + ) + + +def _build_python_roundtrip_cases() -> list[RoundtripCase]: + parent = PyTsIntResultEvent( + value=7, + label='parent', + event_path=['PyBus#aaaa'], + event_timeout=12.5, + ) + + screenshot_event = PyTsScreenshotEvent( + target_id='0c1ccf21-65c0-7390-8b64-9182e985740e', + quality='high', + event_parent_id=parent.event_id, + event_path=['PyBus#aaaa', 'TsBridge#bbbb'], + event_timeout=33.0, + ) + + float_event = PyTsFloatResultEvent( + marker='float', + event_parent_id=parent.event_id, + event_path=['PyBus#aaaa'], + ) + string_event = PyTsStringResultEvent( + marker='string', + event_parent_id=parent.event_id, + event_path=['PyBus#aaaa'], + ) + bool_event = PyTsBoolResultEvent( + marker='bool', + event_path=['PyBus#aaaa'], + ) + null_event = PyTsNullResultEvent( + marker='null', + event_path=['PyBus#aaaa'], + ) + list_event = PyTsStringListResultEvent( + marker='list[str]', + event_parent_id=parent.event_id, + event_path=['PyBus#aaaa'], + ) + dict_event = PyTsDictResultEvent( + marker='dict[str,int]', + event_path=['PyBus#aaaa'], + ) + nested_map_event = PyTsNestedMapResultEvent( + marker='dict[str,list[int]]', + event_path=['PyBus#aaaa'], + ) + typed_dict_event = PyTsTypedDictResultEvent( + marker='typeddict', + 
event_path=['PyBus#aaaa'], + ) + dataclass_event = PyTsDataclassResultEvent( + marker='dataclass', + event_path=['PyBus#aaaa'], + ) + + return [ + RoundtripCase( + event=parent, + valid_results=[0, -5, 42], + invalid_results=[{}, [], 'not-int'], + ), + RoundtripCase( + event=float_event, + valid_results=[0.5, 12.25, 3], + invalid_results=[{}, [], 'not-number'], + ), + RoundtripCase( + event=string_event, + valid_results=['ok', ''], + invalid_results=[{}, [], 123], + ), + RoundtripCase( + event=bool_event, + valid_results=[True, False], + invalid_results=[{}, [], 'not-bool'], + ), + RoundtripCase( + event=null_event, + valid_results=[None], + invalid_results=[{}, [], 0, False, 'not-null'], + ), + RoundtripCase( + event=list_event, + valid_results=[['a', 'b'], []], + invalid_results=[{}, 'not-list', 123], + ), + RoundtripCase( + event=dict_event, + valid_results=[{'ok': 1, 'failed': 2}, {}], + invalid_results=[['not', 'dict'], 'bad', 123], + ), + RoundtripCase( + event=nested_map_event, + valid_results=[{'a': [1, 2], 'b': []}, {}], + invalid_results=[{'a': 'not-list'}, ['bad'], 123], + ), + RoundtripCase( + event=typed_dict_event, + valid_results=[{'name': 'alpha', 'active': True, 'count': 2}], + invalid_results=[{'name': 'alpha'}, {'name': 123, 'active': True, 'count': 2}], + ), + RoundtripCase( + event=dataclass_event, + valid_results=[{'name': 'model', 'score': 0.85, 'tags': ['a', 'b']}], + invalid_results=[{'name': 'model', 'score': 'not-number', 'tags': ['a']}, {'name': 'model', 'score': 1.0}], + ), + RoundtripCase( + event=screenshot_event, + valid_results=[ + { + 'image_url': 'https://img.local/1.png', + 'width': 1920, + 'height': 1080, + 'tags': ['hero', 'dashboard'], + 'is_animated': False, + 'confidence_scores': [0.95, 0.89], + 'metadata': {'score': 0.99, 'variance': 0.01}, + 'regions': [ + {'id': '98f51f1d-b10a-7cd9-8ee6-cb706153f717', 'label': 'face', 'score': 0.9, 'visible': True}, + {'id': '5f234e9d-29e9-7921-8cf2-2a65f6ba3bdd', 'label': 'button', 
'score': 0.7, 'visible': False}, + ], + } + ], + invalid_results=[ + { + 'image_url': 123, + 'width': 1920, + 'height': 1080, + 'tags': ['hero'], + 'is_animated': False, + 'confidence_scores': [0.95], + 'metadata': {'score': 0.99}, + 'regions': [{'id': '98f51f1d-b10a-7cd9-8ee6-cb706153f717', 'label': 'face', 'score': 0.9, 'visible': True}], + }, + { + 'image_url': 'https://img.local/1.png', + 'width': 1920, + 'height': 1080, + 'tags': ['hero'], + 'is_animated': False, + 'confidence_scores': [0.95], + 'metadata': {'score': 0.99}, + 'regions': [{'id': 123, 'label': 'face', 'score': 0.9, 'visible': True}], + }, + ], + ), + ] + + +def _ts_roundtrip_events(payload: list[dict[str, Any]], tmp_path: Path) -> list[dict[str, Any]]: + node_bin = shutil.which('node') + assert node_bin is not None, 'node is required for python<->ts roundtrip tests' + + repo_root = Path(__file__).resolve().parents[1] + ts_root = repo_root / 'bubus-ts' + assert (ts_root / 'dist' / 'esm' / 'index.js').exists(), ( + 'bubus-ts dist/esm build not found. Run `pnpm --dir bubus-ts run build` before cross-runtime tests.' 
+ ) + + in_path = tmp_path / 'python_events.json' + out_path = tmp_path / 'ts_events.json' + in_path.write_text(json.dumps(payload, indent=2), encoding='utf-8') + + ts_script = """ +import { readFileSync, writeFileSync } from 'node:fs' +import { BaseEvent } from './dist/esm/index.js' + +const inputPath = process.env.BUBUS_PY_TS_INPUT_PATH +const outputPath = process.env.BUBUS_PY_TS_OUTPUT_PATH +if (!inputPath || !outputPath) { + throw new Error('missing BUBUS_PY_TS_INPUT_PATH or BUBUS_PY_TS_OUTPUT_PATH') +} + +const raw = JSON.parse(readFileSync(inputPath, 'utf8')) +if (!Array.isArray(raw)) { + throw new Error('expected array payload') +} + +const roundtripped = raw.map((item) => BaseEvent.fromJSON(item).toJSON()) +writeFileSync(outputPath, JSON.stringify(roundtripped, null, 2), 'utf8') +""" + + env = os.environ.copy() + env['BUBUS_PY_TS_INPUT_PATH'] = str(in_path) + env['BUBUS_PY_TS_OUTPUT_PATH'] = str(out_path) + try: + proc = subprocess.run( + [node_bin, '--input-type=module', '-e', ts_script], + cwd=ts_root, + env=env, + capture_output=True, + text=True, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + except subprocess.TimeoutExpired as exc: + pytest.fail(f'node/esm event roundtrip timed out after {SUBPROCESS_TIMEOUT_SECONDS}s: {exc}') + + assert proc.returncode == 0, f'node/esm roundtrip failed:\nstdout:\n{proc.stdout}\nstderr:\n{proc.stderr}' + return json.loads(out_path.read_text(encoding='utf-8')) + + +def _ts_roundtrip_bus(payload: dict[str, Any], tmp_path: Path) -> dict[str, Any]: + node_bin = shutil.which('node') + assert node_bin is not None, 'node is required for python<->ts roundtrip tests' + + repo_root = Path(__file__).resolve().parents[1] + ts_root = repo_root / 'bubus-ts' + assert (ts_root / 'dist' / 'esm' / 'index.js').exists(), ( + 'bubus-ts dist/esm build not found. Run `pnpm --dir bubus-ts run build` before cross-runtime tests.' 
+ ) + + in_path = tmp_path / 'python_bus.json' + out_path = tmp_path / 'ts_bus.json' + in_path.write_text(json.dumps(payload, indent=2), encoding='utf-8') + + ts_script = """ +import { readFileSync, writeFileSync } from 'node:fs' +import { EventBus } from './dist/esm/index.js' + +const inputPath = process.env.BUBUS_PY_TS_BUS_INPUT_PATH +const outputPath = process.env.BUBUS_PY_TS_BUS_OUTPUT_PATH +if (!inputPath || !outputPath) { + throw new Error('missing BUBUS_PY_TS_BUS_INPUT_PATH or BUBUS_PY_TS_BUS_OUTPUT_PATH') +} + +const raw = JSON.parse(readFileSync(inputPath, 'utf8')) +if (!raw || typeof raw !== 'object' || Array.isArray(raw)) { + throw new Error('expected object payload') +} + +const roundtripped = EventBus.fromJSON(raw).toJSON() +writeFileSync(outputPath, JSON.stringify(roundtripped, null, 2), 'utf8') +""" + + env = os.environ.copy() + env['BUBUS_PY_TS_BUS_INPUT_PATH'] = str(in_path) + env['BUBUS_PY_TS_BUS_OUTPUT_PATH'] = str(out_path) + try: + proc = subprocess.run( + [node_bin, '--input-type=module', '-e', ts_script], + cwd=ts_root, + env=env, + capture_output=True, + text=True, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + except subprocess.TimeoutExpired as exc: + pytest.fail(f'node/esm bus roundtrip timed out after {SUBPROCESS_TIMEOUT_SECONDS}s: {exc}') + + assert proc.returncode == 0, f'node/esm bus roundtrip failed:\nstdout:\n{proc.stdout}\nstderr:\n{proc.stderr}' + return json.loads(out_path.read_text(encoding='utf-8')) + + +def test_python_to_ts_roundtrip_preserves_event_fields_and_result_type_semantics(tmp_path: Path) -> None: + cases = _build_python_roundtrip_cases() + events = [entry.event for entry in cases] + cases_by_type = {entry.event.event_type: entry for entry in cases} + python_dumped = [event.model_dump(mode='json') for event in events] + + # Ensure Python emits JSONSchema for return value types before sending to TS. 
+ for event_dump in python_dumped: + assert 'event_result_type' in event_dump + assert isinstance(event_dump['event_result_type'], dict) + + ts_roundtripped = _ts_roundtrip_events(python_dumped, tmp_path) + assert len(ts_roundtripped) == len(python_dumped) + + for i, original in enumerate(python_dumped): + ts_event = ts_roundtripped[i] + assert isinstance(ts_event, dict) + + event_type = str(original.get('event_type')) + semantics_case = cases_by_type.get(event_type) + assert semantics_case is not None, f'missing semantics case for event_type={event_type}' + + # Every field Python emitted should survive through TS serialization. + for key, value in original.items(): + assert key in ts_event, f'missing key after ts roundtrip: {key}' + if key == 'event_result_type': + assert isinstance(ts_event[key], dict), 'event_result_type should serialize as JSON schema dict' + _assert_result_type_semantics_equal( + semantics_case.event.event_result_type, + ts_event[key], + semantics_case.valid_results, + semantics_case.invalid_results, + f'ts roundtrip {event_type}', + ) + else: + assert ts_event[key] == value, f'field changed after ts roundtrip: {key}' + + # Verify we can load back into Python BaseEvent and keep the same payload/semantics. 
+ restored = BaseEvent[Any].model_validate(ts_event) + restored_dump = restored.model_dump(mode='json') + for key, value in original.items(): + assert key in restored_dump, f'missing key after python reload: {key}' + if key == 'event_result_type': + assert isinstance(restored_dump[key], dict), 'event_result_type should remain JSON schema after reload' + _assert_result_type_semantics_equal( + semantics_case.event.event_result_type, + restored_dump[key], + semantics_case.valid_results, + semantics_case.invalid_results, + f'python reload {event_type}', + ) + else: + assert restored_dump[key] == value, f'field changed after python reload: {key}' + + +async def test_python_to_ts_roundtrip_schema_enforcement_after_reload(tmp_path: Path) -> None: + events = [entry.event for entry in _build_python_roundtrip_cases()] + python_dumped = [event.model_dump(mode='json') for event in events] + ts_roundtripped = _ts_roundtrip_events(python_dumped, tmp_path) + + screenshot_payload = next(event for event in ts_roundtripped if event.get('event_type') == 'PyTsScreenshotEvent') + + wrong_bus = EventBus(name='py_ts_py_wrong_shape') + + async def wrong_shape_handler(event: BaseEvent[Any]) -> dict[str, Any]: + return { + 'image_url': 123, # wrong: should be string + 'width': '1920', # wrong: should be int + 'height': 1080, + 'tags': ['a', 'b'], + 'is_animated': 'false', # wrong: should be bool + 'confidence_scores': [0.9, 0.8], + 'metadata': {'score': 0.99}, + 'regions': [{'id': '98f51f1d-b10a-7cd9-8ee6-cb706153f717', 'label': 'face', 'score': 0.9, 'visible': True}], + } + + wrong_bus.on('PyTsScreenshotEvent', wrong_shape_handler) + wrong_event = BaseEvent[Any].model_validate(screenshot_payload) + assert isinstance(wrong_event.event_result_type, type) + assert issubclass(wrong_event.event_result_type, BaseModel) + await asyncio.wait_for(wrong_bus.emit(wrong_event), timeout=EVENT_WAIT_TIMEOUT_SECONDS) + wrong_result = next(iter(wrong_event.event_results.values())) + assert 
wrong_result.status == 'error' + assert wrong_result.error is not None + await wrong_bus.stop() + + right_bus = EventBus(name='py_ts_py_right_shape') + + async def right_shape_handler(event: BaseEvent[Any]) -> dict[str, Any]: + return { + 'image_url': 'https://img.local/1.png', + 'width': 1920, + 'height': 1080, + 'tags': ['hero', 'dashboard'], + 'is_animated': False, + 'confidence_scores': [0.95, 0.89], + 'metadata': {'score': 0.99, 'variance': 0.01}, + 'regions': [ + {'id': '98f51f1d-b10a-7cd9-8ee6-cb706153f717', 'label': 'face', 'score': 0.9, 'visible': True}, + {'id': '5f234e9d-29e9-7921-8cf2-2a65f6ba3bdd', 'label': 'button', 'score': 0.7, 'visible': False}, + ], + } + + right_bus.on('PyTsScreenshotEvent', right_shape_handler) + right_event = BaseEvent[Any].model_validate(screenshot_payload) + assert isinstance(right_event.event_result_type, type) + assert issubclass(right_event.event_result_type, BaseModel) + await asyncio.wait_for(right_bus.emit(right_event), timeout=EVENT_WAIT_TIMEOUT_SECONDS) + right_result = next(iter(right_event.event_results.values())) + assert right_result.status == 'completed' + assert right_result.error is None + assert right_result.result is not None + await right_bus.stop() + + +class PyTsBusResumeEvent(BaseEvent[str]): + label: str + + +@pytest.mark.asyncio +async def test_python_to_ts_to_python_bus_roundtrip_rehydrates_and_resumes(tmp_path: Path) -> None: + source_bus = EventBus( + name='PyTsBusSource', + id='018f8e40-1234-7000-8000-00000000bb22', + event_handler_detect_file_paths=False, + event_handler_concurrency='serial', + event_handler_completion='all', + ) + + async def handler_one(event: PyTsBusResumeEvent) -> str: + return f'h1:{event.label}' + + async def handler_two(event: PyTsBusResumeEvent) -> str: + return f'h2:{event.label}' + + handler_one_entry = source_bus.on(PyTsBusResumeEvent, handler_one) + handler_two_entry = source_bus.on(PyTsBusResumeEvent, handler_two) + assert handler_one_entry.id is not None + assert 
handler_two_entry.id is not None + handler_one_id = handler_one_entry.id + handler_two_id = handler_two_entry.id + + event_one = PyTsBusResumeEvent(label='e1') + event_two = PyTsBusResumeEvent(label='e2') + seeded = event_one.event_result_update(handler=handler_one_entry, eventbus=source_bus, status='pending') + event_one.event_result_update(handler=handler_two_entry, eventbus=source_bus, status='pending') + seeded.update(status='completed', result='seeded') + + source_bus.event_history[event_one.event_id] = event_one + source_bus.event_history[event_two.event_id] = event_two + source_bus.pending_event_queue = CleanShutdownQueue[BaseEvent[Any]](maxsize=0) + source_bus.pending_event_queue.put_nowait(event_one) + source_bus.pending_event_queue.put_nowait(event_two) + + source_dump = source_bus.model_dump() + ts_roundtripped = _ts_roundtrip_bus(source_dump, tmp_path) + restored = EventBus.validate(ts_roundtripped) + restored_dump = restored.model_dump() + + assert restored_dump['handlers'] == source_dump['handlers'] + assert restored_dump['handlers_by_key'] == source_dump['handlers_by_key'] + assert restored_dump['pending_event_queue'] == source_dump['pending_event_queue'] + assert set(restored_dump['event_history']) == set(source_dump['event_history']) + + restored_event_one = restored.event_history[event_one.event_id] + preseeded = restored_event_one.event_results[handler_one_id] + assert preseeded.status == 'completed' + assert preseeded.result == 'seeded' + assert preseeded.handler is restored.handlers[handler_one_id] + + trigger = restored.emit(PyTsBusResumeEvent(label='e3')) + await asyncio.wait_for(trigger, timeout=EVENT_WAIT_TIMEOUT_SECONDS) + + done_one = restored.event_history[event_one.event_id] + done_two = restored.event_history[event_two.event_id] + done_three = restored.event_history[trigger.event_id] + assert done_three.event_status == 'completed' + if restored.pending_event_queue is not None: + assert restored.pending_event_queue.qsize() == 0 + assert 
all(result.status == 'completed' for result in done_one.event_results.values()) + assert all(result.status == 'completed' for result in done_two.event_results.values()) + assert done_one.event_results[handler_one_id].result == 'seeded' + assert done_one.event_results[handler_two_id].result is None + + await source_bus.stop(clear=True) + await restored.stop(clear=True) diff --git a/tests/test_event_bus_property.py b/tests/test_event_bus_property.py deleted file mode 100644 index 4bd5655..0000000 --- a/tests/test_event_bus_property.py +++ /dev/null @@ -1,370 +0,0 @@ -import asyncio -import gc -from typing import Any - -import pytest - -from bubus import BaseEvent, EventBus - - -@pytest.fixture(autouse=True) -async def cleanup_eventbus_instances(): - """Ensure EventBus instances are cleaned up between tests""" - yield - # Force garbage collection to clean up any lingering EventBus instances - gc.collect() - # Give event loops time to clean up - await asyncio.sleep(0.01) - - -class MainEvent(BaseEvent[None]): - event_result_type: Any = None - - message: str = 'test' - - -class ChildEvent(BaseEvent[None]): - event_result_type: Any = None - - data: str = 'child' - - -class GrandchildEvent(BaseEvent[None]): - event_result_type: Any = None - - info: str = 'grandchild' - - -async def test_event_bus_property_single_bus(): - """Test event_bus property with a single EventBus instance""" - bus = EventBus(name='TestBus') - - # Track if handler was called - handler_called = False - dispatched_child = None - - async def handler(event: MainEvent): - nonlocal handler_called, dispatched_child - handler_called = True - - # Should be able to access event_bus inside handler - assert event.event_bus == bus - assert event.event_bus.name == 'TestBus' - - # Should be able to dispatch child events using the property - dispatched_child = await event.event_bus.dispatch(ChildEvent()) - - bus.on(MainEvent, handler) - - # Dispatch event and wait for completion - await bus.dispatch(MainEvent()) - 
- assert handler_called - assert dispatched_child is not None - assert isinstance(dispatched_child, ChildEvent) - - await bus.stop() - - -async def test_event_bus_property_multiple_buses(): - """Test event_bus property with multiple EventBus instances""" - bus1 = EventBus(name='Bus1') - bus2 = EventBus(name='Bus2') - - handler1_called = False - handler2_called = False - - async def handler1(event: MainEvent): - nonlocal handler1_called - handler1_called = True - # Inside bus1 handler, event_bus should return bus1 - assert event.event_bus == bus1 - assert event.event_bus.name == 'Bus1' - - async def handler2(event: MainEvent): - nonlocal handler2_called - handler2_called = True - # Inside bus2 handler, event_bus should return bus2 - assert event.event_bus == bus2 - assert event.event_bus.name == 'Bus2' - - bus1.on(MainEvent, handler1) - bus2.on(MainEvent, handler2) - - # Dispatch to bus1 - await bus1.dispatch(MainEvent(message='bus1')) - assert handler1_called - - # Dispatch to bus2 - await bus2.dispatch(MainEvent(message='bus2')) - assert handler2_called - - await bus1.stop() - await bus2.stop() - - -async def test_event_bus_property_with_forwarding(): - """Test event_bus property with event forwarding between buses""" - bus1 = EventBus(name='Bus1') - bus2 = EventBus(name='Bus2') - - # Forward all events from bus1 to bus2 - bus1.on('*', bus2.dispatch) - - handler_bus = None - handler_complete = asyncio.Event() - - async def handler(event: MainEvent): - nonlocal handler_bus - # When forwarded, the event_bus should be the bus currently processing - handler_bus = event.event_bus - handler_complete.set() - - bus2.on(MainEvent, handler) - - # Dispatch to bus1, which forwards to bus2 - event = bus1.dispatch(MainEvent()) - - # Wait for handler to complete - await handler_complete.wait() - - # The handler in bus2 should see bus2 as the event_bus - assert handler_bus is not None - assert handler_bus.name == 'Bus2' - # Verify it's the same bus instance (they should be the 
same object) - assert handler_bus is bus2 - - # Also wait for the event to fully complete - await event - - await bus1.stop() - await bus2.stop() - - -async def test_event_bus_property_outside_handler(): - """Test that event_bus property raises error when accessed outside handler""" - bus = EventBus(name='TestBus') - - event = MainEvent() - - # Should raise error when accessed outside handler context - with pytest.raises(AttributeError, match='event_bus property can only be accessed from within an event handler'): - _ = event.event_bus - - # Even after dispatching, accessing outside handler should fail - dispatched_event = await bus.dispatch(event) - - with pytest.raises(AttributeError, match='event_bus property can only be accessed from within an event handler'): - _ = dispatched_event.event_bus - - await bus.stop() - - -async def test_event_bus_property_nested_handlers(): - """Test event_bus property in nested handler scenarios""" - bus = EventBus(name='MainBus') - - inner_bus_name = None - - async def outer_handler(event: MainEvent): - # Dispatch a child event from within handler - child = ChildEvent() - - async def inner_handler(child_event: ChildEvent): - nonlocal inner_bus_name - # Both parent and child should see the same bus - assert child_event.event_bus == event.event_bus - inner_bus_name = child_event.event_bus.name - - bus.on(ChildEvent, inner_handler) - await event.event_bus.dispatch(child) - - bus.on(MainEvent, outer_handler) - - await bus.dispatch(MainEvent()) - - assert inner_bus_name == 'MainBus' - - await bus.stop() - - -async def test_event_bus_property_no_active_bus(): - """Test event_bus property when EventBus has been garbage collected""" - # This is a tricky edge case - create and destroy a bus - - event = None - - async def create_and_dispatch(): - nonlocal event - bus = EventBus(name='TempBus') - - async def handler(e: MainEvent): - # Save the event for later - nonlocal event - event = e - - bus.on(MainEvent, handler) - await 
bus.dispatch(MainEvent()) - await bus.stop() - # Bus goes out of scope here and may be garbage collected - - await create_and_dispatch() - - # Force garbage collection - import gc - - gc.collect() - - # Event exists but bus might be gone - assert event is not None - - # Create a new handler context to test - new_bus = EventBus(name='NewBus') - - error_raised = False - - async def new_handler(e: MainEvent): - nonlocal error_raised - assert event is not None - try: - # The old event doesn't belong to this bus - _ = event.event_bus - except RuntimeError: - error_raised = True - - new_bus.on(MainEvent, new_handler) - await new_bus.dispatch(MainEvent()) - - # Should have raised an error since the original bus is gone - assert error_raised - - await new_bus.stop() - - -async def test_event_bus_property_child_dispatch(): - """Test event_bus property when dispatching child events from handlers""" - bus = EventBus(name='MainBus') - - # Track execution order and bus references - execution_order: list[str] = [] - child_event_ref = None - grandchild_event_ref = None - - async def parent_handler(event: MainEvent): - execution_order.append('parent_start') - - # Verify we can access event_bus - assert event.event_bus == bus - assert event.event_bus.name == 'MainBus' - - # Dispatch a child event using event.event_bus - nonlocal child_event_ref - child_event_ref = event.event_bus.dispatch(ChildEvent(data='from_parent')) - - # The child event should start processing immediately within our handler - # (due to the deadlock prevention in BaseEvent.__await__) - await child_event_ref - - execution_order.append('parent_end') - - async def child_handler(event: ChildEvent): - execution_order.append('child_start') - - # Child should see the same bus - assert event.event_bus == bus - assert event.event_bus.name == 'MainBus' - assert event.data == 'from_parent' - - # Dispatch a grandchild event - nonlocal grandchild_event_ref - grandchild_event_ref = 
event.event_bus.dispatch(GrandchildEvent(info='from_child')) - - # Wait for grandchild to complete - await grandchild_event_ref - - execution_order.append('child_end') - - async def grandchild_handler(event: GrandchildEvent): - execution_order.append('grandchild_start') - - # Grandchild should also see the same bus - assert event.event_bus == bus - assert event.event_bus.name == 'MainBus' - assert event.info == 'from_child' - - execution_order.append('grandchild_end') - - # Register handlers - bus.on(MainEvent, parent_handler) - bus.on(ChildEvent, child_handler) - bus.on(GrandchildEvent, grandchild_handler) - - # Dispatch the parent event - parent_event = await bus.dispatch(MainEvent(message='start')) - - # Verify execution order - child events should complete before parent - assert execution_order == ['parent_start', 'child_start', 'grandchild_start', 'grandchild_end', 'child_end', 'parent_end'] - - # Verify all events completed - assert parent_event.event_status == 'completed' - assert child_event_ref is not None - assert child_event_ref.event_status == 'completed' - assert grandchild_event_ref is not None - assert grandchild_event_ref.event_status == 'completed' - - # Verify parent-child relationships - assert child_event_ref.event_parent_id == parent_event.event_id - assert grandchild_event_ref.event_parent_id == child_event_ref.event_id - - await bus.stop() - - -async def test_event_bus_property_multi_bus_child_dispatch(): - """Test event_bus property when child events are dispatched across multiple buses""" - bus1 = EventBus(name='Bus1') - bus2 = EventBus(name='Bus2') - - # Forward all events from bus1 to bus2 - bus1.on('*', bus2.dispatch) - - child_dispatch_bus = None - child_handler_bus = None - handlers_complete = asyncio.Event() - - async def parent_handler(event: MainEvent): - # This handler runs in bus2 (due to forwarding) - assert event.event_bus == bus2 - - # Dispatch child using event.event_bus (should dispatch to bus2) - nonlocal child_dispatch_bus 
- child_dispatch_bus = event.event_bus - await event.event_bus.dispatch(ChildEvent(data='from_bus2_handler')) - - async def child_handler(event: ChildEvent): - # Child handler should see bus2 as well - nonlocal child_handler_bus - child_handler_bus = event.event_bus - assert event.data == 'from_bus2_handler' - handlers_complete.set() - - # Only register handlers on bus2 - bus2.on(MainEvent, parent_handler) - bus2.on(ChildEvent, child_handler) - - # Dispatch to bus1, which forwards to bus2 - parent_event = bus1.dispatch(MainEvent(message='start')) - - # Wait for handlers to complete - await asyncio.wait_for(handlers_complete.wait(), timeout=5.0) - - # Also await the parent event - await parent_event - - # Verify child was dispatched to bus2 - assert child_dispatch_bus is not None - assert child_handler_bus is not None - assert id(child_dispatch_bus) == id(bus2) - assert id(child_handler_bus) == id(bus2) - - await bus1.stop() - await bus2.stop() diff --git a/tests/test_event_handler.py b/tests/test_event_handler.py new file mode 100644 index 0000000..ad3065d --- /dev/null +++ b/tests/test_event_handler.py @@ -0,0 +1,470 @@ +import asyncio +from typing import Any + +from bubus import BaseEvent, EventBus, EventHandlerCompletionMode, EventHandlerConcurrencyMode, EventResult + + +class CompletionEvent(BaseEvent[str]): + pass + + +class IntCompletionEvent(BaseEvent[int]): + pass + + +class ChildCompletionEvent(BaseEvent[str]): + pass + + +class BoolCompletionEvent(BaseEvent[bool]): + pass + + +class StrCompletionEvent(BaseEvent[str]): + pass + + +async def test_event_handler_completion_bus_default_first_serial() -> None: + bus = EventBus( + name='CompletionDefaultFirstBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=EventHandlerCompletionMode.FIRST, + ) + second_handler_called = False + + async def first_handler(_event: CompletionEvent) -> str: + return 'first' + + async def second_handler(_event: CompletionEvent) -> str: + 
nonlocal second_handler_called + second_handler_called = True + return 'second' + + bus.on(CompletionEvent, first_handler) + bus.on(CompletionEvent, second_handler) + + try: + event = bus.emit(CompletionEvent()) + assert event.event_handler_completion is None + + await event + assert second_handler_called is False + + result = await event.event_result(raise_if_any=False, raise_if_none=False) + assert result == 'first' + + first_result = next(result for result in event.event_results.values() if result.handler_name.endswith('first_handler')) + second_result = next(result for result in event.event_results.values() if result.handler_name.endswith('second_handler')) + assert first_result.status == 'completed' + assert second_result.status == 'error' + assert isinstance(second_result.error, asyncio.CancelledError) + finally: + await bus.stop() + + +async def test_event_handler_completion_explicit_override_beats_bus_default() -> None: + bus = EventBus( + name='CompletionOverrideBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=EventHandlerCompletionMode.FIRST, + ) + second_handler_called = False + + async def first_handler(_event: CompletionEvent) -> str: + return 'first' + + async def second_handler(_event: CompletionEvent) -> str: + nonlocal second_handler_called + second_handler_called = True + return 'second' + + bus.on(CompletionEvent, first_handler) + bus.on(CompletionEvent, second_handler) + + try: + event = bus.emit(CompletionEvent(event_handler_completion=EventHandlerCompletionMode.ALL)) + assert event.event_handler_completion == EventHandlerCompletionMode.ALL + await event + assert second_handler_called is True + finally: + await bus.stop() + + +async def test_event_parallel_first_races_and_cancels_non_winners() -> None: + bus = EventBus( + name='CompletionParallelFirstBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=EventHandlerCompletionMode.ALL, + ) + slow_started = 
False + + async def slow_handler_started(_event: CompletionEvent) -> str: + nonlocal slow_started + slow_started = True + await asyncio.sleep(0.5) + return 'slow-started' + + async def fast_winner(_event: CompletionEvent) -> str: + await asyncio.sleep(0.01) + return 'winner' + + async def slow_handler_pending_or_started(_event: CompletionEvent) -> str: + await asyncio.sleep(0.5) + return 'slow-other' + + bus.on(CompletionEvent, slow_handler_started) + bus.on(CompletionEvent, fast_winner) + bus.on(CompletionEvent, slow_handler_pending_or_started) + + try: + event = bus.emit( + CompletionEvent( + event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL, + event_handler_completion=EventHandlerCompletionMode.FIRST, + ) + ) + assert event.event_handler_concurrency == EventHandlerConcurrencyMode.PARALLEL + assert event.event_handler_completion == EventHandlerCompletionMode.FIRST + + started = asyncio.get_running_loop().time() + await event + elapsed = asyncio.get_running_loop().time() - started + assert elapsed < 0.2 + assert slow_started is True + + winner_result = next(result for result in event.event_results.values() if result.handler_name.endswith('fast_winner')) + assert winner_result.status == 'completed' + assert winner_result.error is None + assert winner_result.result == 'winner' + + loser_results = [result for result in event.event_results.values() if not result.handler_name.endswith('fast_winner')] + assert len(loser_results) == 2 + assert all(result.status == 'error' for result in loser_results) + assert all(isinstance(result.error, asyncio.CancelledError) for result in loser_results) + + resolved = await event.event_result(raise_if_any=False, raise_if_none=True) + assert resolved == 'winner' + finally: + await bus.stop() + + +async def test_event_first_shortcut_sets_mode_and_cancels_parallel_losers() -> None: + bus = EventBus( + name='CompletionFirstShortcutBus', + event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL, + 
event_handler_completion=EventHandlerCompletionMode.ALL, + ) + slow_handler_completed = False + + async def fast_handler(_event: CompletionEvent) -> str: + await asyncio.sleep(0.01) + return 'fast' + + async def slow_handler(_event: CompletionEvent) -> str: + nonlocal slow_handler_completed + await asyncio.sleep(0.5) + slow_handler_completed = True + return 'slow' + + bus.on(CompletionEvent, fast_handler) + bus.on(CompletionEvent, slow_handler) + + try: + event = bus.emit(CompletionEvent()) + assert event.event_handler_completion is None + + first_value = await event.first() + + assert first_value == 'fast' + assert event.event_handler_completion == EventHandlerCompletionMode.FIRST + assert slow_handler_completed is False + + error_results = [result for result in event.event_results.values() if result.status == 'error'] + assert error_results + assert any(isinstance(result.error, asyncio.CancelledError) for result in error_results) + finally: + await bus.stop() + + +async def test_event_first_preserves_falsy_results() -> None: + bus = EventBus( + name='CompletionFalsyBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=EventHandlerCompletionMode.ALL, + ) + second_handler_called = False + + async def zero_handler(_event: IntCompletionEvent) -> int: + return 0 + + async def second_handler(_event: IntCompletionEvent) -> int: + nonlocal second_handler_called + second_handler_called = True + return 99 + + bus.on(IntCompletionEvent, zero_handler) + bus.on(IntCompletionEvent, second_handler) + + try: + event = bus.emit(IntCompletionEvent()) + result = await event.first() + assert result == 0 + assert second_handler_called is False + finally: + await bus.stop() + + +async def test_event_first_preserves_false_and_empty_string_results() -> None: + bool_bus = EventBus( + name='CompletionFalsyFalseBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=EventHandlerCompletionMode.ALL, + ) + 
bool_second_handler_called = False + + async def bool_first_handler(_event: BoolCompletionEvent) -> bool: + return False + + async def bool_second_handler(_event: BoolCompletionEvent) -> bool: + nonlocal bool_second_handler_called + bool_second_handler_called = True + return True + + bool_bus.on(BoolCompletionEvent, bool_first_handler) + bool_bus.on(BoolCompletionEvent, bool_second_handler) + + try: + bool_event = bool_bus.emit(BoolCompletionEvent()) + bool_result = await bool_event.first() + assert bool_result is False + assert bool_second_handler_called is False + finally: + await bool_bus.stop() + + str_bus = EventBus( + name='CompletionFalsyEmptyStringBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=EventHandlerCompletionMode.ALL, + ) + str_second_handler_called = False + + async def str_first_handler(_event: StrCompletionEvent) -> str: + return '' + + async def str_second_handler(_event: StrCompletionEvent) -> str: + nonlocal str_second_handler_called + str_second_handler_called = True + return 'second' + + str_bus.on(StrCompletionEvent, str_first_handler) + str_bus.on(StrCompletionEvent, str_second_handler) + + try: + str_event = str_bus.emit(StrCompletionEvent()) + str_result = await str_event.first() + assert str_result == '' + assert str_second_handler_called is False + finally: + await str_bus.stop() + + +async def test_event_first_skips_none_result_and_uses_next_winner() -> None: + bus = EventBus( + name='CompletionNoneSkipBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=EventHandlerCompletionMode.ALL, + ) + third_handler_called = False + + async def none_handler(_event: CompletionEvent) -> None: + return None + + async def winner_handler(_event: CompletionEvent) -> str: + return 'winner' + + async def third_handler(_event: CompletionEvent) -> str: + nonlocal third_handler_called + third_handler_called = True + return 'third' + + bus.on(CompletionEvent, 
none_handler) + bus.on(CompletionEvent, winner_handler) + bus.on(CompletionEvent, third_handler) + + try: + event = bus.emit(CompletionEvent()) + result = await event.first() + assert result == 'winner' + assert third_handler_called is False + + none_result = next(result for result in event.event_results.values() if result.handler_name.endswith('none_handler')) + winner_result = next(result for result in event.event_results.values() if result.handler_name.endswith('winner_handler')) + assert none_result.status == 'completed' + assert none_result.result is None + assert winner_result.status == 'completed' + assert winner_result.result == 'winner' + finally: + await bus.stop() + + +async def test_event_first_skips_baseevent_result_and_uses_next_winner() -> None: + bus = EventBus( + name='CompletionBaseEventSkipBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + event_handler_completion=EventHandlerCompletionMode.ALL, + ) + third_handler_called = False + + async def baseevent_handler(_event: CompletionEvent) -> ChildCompletionEvent: + return ChildCompletionEvent() + + async def winner_handler(_event: CompletionEvent) -> str: + return 'winner' + + async def third_handler(_event: CompletionEvent) -> str: + nonlocal third_handler_called + third_handler_called = True + return 'third' + + bus.on(CompletionEvent, baseevent_handler) + bus.on(CompletionEvent, winner_handler) + bus.on(CompletionEvent, third_handler) + + try: + event = bus.emit(CompletionEvent()) + result = await event.first() + assert result == 'winner' + assert third_handler_called is False + + def include_completed_values(event_result: EventResult[Any]) -> bool: + return event_result.status == 'completed' and event_result.error is None and event_result.result is not None + + first_completed_value = await event.event_result( + include=include_completed_values, + raise_if_any=False, + raise_if_none=True, + ) + # Typed accessors normalize BaseEvent results to None. 
+ assert first_completed_value is None + + await event.event_results_list( + include=include_completed_values, + raise_if_any=False, + raise_if_none=True, + ) + values_by_handler_id = { + handler_id: result.result for handler_id, result in event.event_results.items() if include_completed_values(result) + } + assert any(value == 'winner' for value in values_by_handler_id.values()) + assert any(isinstance(value, ChildCompletionEvent) for value in values_by_handler_id.values()) + + values_by_handler_name = { + result.handler_name: result.result for result in event.event_results.values() if include_completed_values(result) + } + assert any(value == 'winner' for value in values_by_handler_name.values()) + assert any(isinstance(value, ChildCompletionEvent) for value in values_by_handler_name.values()) + + values_list = await event.event_results_list( + include=include_completed_values, + raise_if_any=False, + raise_if_none=True, + ) + assert 'winner' in values_list + assert None in values_list + + # Raw event_results keep the underlying BaseEvent result. 
+ baseevent_result = next( + result for result in event.event_results.values() if result.handler_name.endswith('baseevent_handler') + ) + assert isinstance(baseevent_result.result, ChildCompletionEvent) + finally: + await bus.stop() + + +async def test_event_first_returns_none_when_all_handlers_fail() -> None: + bus = EventBus(name='CompletionAllFailBus', event_handler_concurrency='parallel') + + async def fail_fast(_event: CompletionEvent) -> str: + raise RuntimeError('boom1') + + async def fail_slow(_event: CompletionEvent) -> str: + await asyncio.sleep(0.01) + raise RuntimeError('boom2') + + bus.on(CompletionEvent, fail_fast) + bus.on(CompletionEvent, fail_slow) + + try: + event = bus.emit(CompletionEvent()) + result = await event.first() + assert result is None + finally: + await bus.stop() + + +# Consolidated from tests/test_event_handler_concurrency.py + + +from bubus import BaseEvent + + +class ConcurrencyEvent(BaseEvent[str]): + pass + + +async def test_event_handler_concurrency_bus_default_remains_unset_on_dispatch() -> None: + bus = EventBus(name='ConcurrencyDefaultBus', event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL) + + async def one_handler(_event: ConcurrencyEvent) -> str: + return 'ok' + + bus.on(ConcurrencyEvent, one_handler) + + try: + event = bus.emit(ConcurrencyEvent()) + assert event.event_handler_concurrency is None + await event + finally: + await bus.stop() + + +async def test_event_handler_concurrency_per_event_override_controls_execution_mode() -> None: + bus = EventBus(name='ConcurrencyPerEventBus', event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL) + inflight_by_event_id: dict[str, int] = {} + max_inflight_by_event_id: dict[str, int] = {} + counter_lock = asyncio.Lock() + + async def track_concurrency(event: ConcurrencyEvent) -> None: + event_id = event.event_id + async with counter_lock: + current_inflight = inflight_by_event_id.get(event_id, 0) + 1 + inflight_by_event_id[event_id] = current_inflight + 
max_inflight_by_event_id[event_id] = max(max_inflight_by_event_id.get(event_id, 0), current_inflight) + await asyncio.sleep(0.02) + async with counter_lock: + inflight_by_event_id[event_id] = max(inflight_by_event_id.get(event_id, 1) - 1, 0) + + async def handler_a(event: ConcurrencyEvent) -> str: + await track_concurrency(event) + return 'a' + + async def handler_b(event: ConcurrencyEvent) -> str: + await track_concurrency(event) + return 'b' + + bus.on(ConcurrencyEvent, handler_a) + bus.on(ConcurrencyEvent, handler_b) + + try: + serial_event = bus.emit(ConcurrencyEvent(event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL)) + parallel_event = bus.emit(ConcurrencyEvent(event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL)) + assert serial_event.event_handler_concurrency == EventHandlerConcurrencyMode.SERIAL + assert parallel_event.event_handler_concurrency == EventHandlerConcurrencyMode.PARALLEL + + await serial_event + await parallel_event + + assert max_inflight_by_event_id.get(serial_event.event_id) == 1 + assert max_inflight_by_event_id.get(parallel_event.event_id, 0) >= 2 + finally: + await bus.stop() diff --git a/tests/test_event_result.py b/tests/test_event_result.py new file mode 100644 index 0000000..9b56323 --- /dev/null +++ b/tests/test_event_result.py @@ -0,0 +1,1025 @@ +"""Test typed event results with automatic casting.""" + +# pyright: reportAssertTypeFailure=false +# pyright: reportUnnecessaryIsInstance=false + +import asyncio +import logging +from typing import Any, Literal, assert_type + +from pydantic import BaseModel + +from bubus import BaseEvent, EventBus + + +class ScreenshotEventResult(BaseModel): + screenshot_base64: bytes | None = None + error: str | None = None + + +class ScreenshotEvent(BaseEvent[ScreenshotEventResult]): + screenshot_width: int = 1080 + screenshot_height: int = 900 + + +class StringEvent(BaseEvent[str]): + pass + + +class IntEvent(BaseEvent[int]): + pass + + +async def 
test_pydantic_model_result_casting(): + """Test that handler results are automatically cast to Pydantic models.""" + bus = EventBus(name='pydantic_test_bus') + + def screenshot_handler(event: ScreenshotEvent): + # Return a dict that should be cast to ScreenshotEventResult + return {'screenshot_base64': b'fake_screenshot_data', 'error': None} + + bus.on('ScreenshotEvent', screenshot_handler) + + event = ScreenshotEvent(screenshot_width=1920, screenshot_height=1080) + await bus.emit(event) + + # Get the result + result = await event.event_result() + + # Verify it was cast to the correct type + assert isinstance(result, ScreenshotEventResult) + assert result.screenshot_base64 == b'fake_screenshot_data' + assert result.error is None + + await bus.stop(clear=True) + + +async def test_builtin_type_casting(): + """Test that handler results are automatically cast to built-in types.""" + bus = EventBus(name='builtin_test_bus') + + def string_handler(event: StringEvent): + return '42' # Return a proper string + + def int_handler(event: IntEvent): + return 123 # Return a proper int + + bus.on('StringEvent', string_handler) + bus.on('IntEvent', int_handler) + + # Test string validation + string_event = StringEvent() + await bus.emit(string_event) + string_result = await string_event.event_result() + assert isinstance(string_result, str) + assert string_result == '42' + + # Test int validation + int_event = IntEvent() + await bus.emit(int_event) + int_result = await int_event.event_result() + assert isinstance(int_result, int) + assert int_result == 123 + await bus.stop(clear=True) + + +async def test_casting_failure_handling(): + """Test that casting failures are handled gracefully.""" + bus = EventBus(name='failure_test_bus') + + def bad_handler(event: IntEvent): + return 'not_a_number' # Should fail validation as int + + bus.on('IntEvent', bad_handler) + + event = IntEvent() + await bus.emit(event) + + # The event should complete but the result should be an error + await 
event.event_results_list(raise_if_any=False, raise_if_none=False) + handler_id = list(event.event_results.keys())[0] + event_result = event.event_results[handler_id] + + assert event_result.status == 'error' + assert isinstance(event_result.error, ValueError) + assert 'expected event_result_type' in str(event_result.error) + + await bus.stop(clear=True) + + +async def test_no_casting_when_no_result_type(): + """Test that events without result_type work normally.""" + bus = EventBus(name='normal_test_bus') + + class NormalEvent(BaseEvent[None]): + pass # No event_result_type specified + + def normal_handler(event: NormalEvent): + return {'raw': 'data'} + + bus.on('NormalEvent', normal_handler) + + event = NormalEvent() + await bus.emit(event) + + result = await event.event_result() + + # Should remain as original dict, no casting + assert isinstance(result, dict) + assert result == {'raw': 'data'} + + await bus.stop(clear=True) + + +async def test_result_type_stored_in_event_result(): + """Test that result_type is stored in EventResult for inspection.""" + bus = EventBus(name='storage_test_bus') + + def handler(event: StringEvent): + return '123' # Already a string, will validate successfully + + bus.on('StringEvent', handler) + + event = StringEvent() + await bus.emit(event) + + # Check that result_type is accessible + handler_id = list(event.event_results.keys())[0] + event_result = event.event_results[handler_id] + + assert event_result.result_type is str + assert isinstance(event_result.result, str) + assert event_result.result == '123' + + await bus.stop(clear=True) + + +async def test_typed_accessors_normalize_forwarded_event_results_to_none(): + """Typed accessors should not surface BaseEvent forwarding returns as typed payloads.""" + bus = EventBus(name='forwarded_result_normalization_bus') + + class ForwardingTypedEvent(BaseEvent[int]): + pass + + def forward_handler(event: ForwardingTypedEvent): + return BaseEvent(event_type='ForwardedEventFromHandler') + 
+ bus.on(ForwardingTypedEvent, forward_handler) + + event = await bus.emit(ForwardingTypedEvent()) + + def include_all(_: Any) -> bool: + return True + + result = await event.event_result(include=include_all, raise_if_any=False, raise_if_none=False) + results_list = await event.event_results_list(include=include_all, raise_if_any=False, raise_if_none=False) + assert result is None + assert results_list == [None] + + await bus.stop(clear=True) + + +async def test_run_handler_marks_started_after_handler_lock_entry(): + """Result status should remain pending while waiting on the handler lock.""" + bus = EventBus(name='handler_start_order_bus', event_handler_concurrency='serial') + + class LockOrderEvent(BaseEvent[str]): + pass + + first_handler_started = asyncio.Event() + release_first_handler = asyncio.Event() + + async def first_handler(_event: LockOrderEvent) -> str: + first_handler_started.set() + await release_first_handler.wait() + return 'first' + + async def second_handler(_event: LockOrderEvent) -> str: + return 'second' + + first_entry = bus.on(LockOrderEvent, first_handler) + second_entry = bus.on(LockOrderEvent, second_handler) + event = LockOrderEvent() + pending_event = bus.emit(event) + await first_handler_started.wait() + + assert first_entry.id in event.event_results + assert second_entry.id in event.event_results + assert event.event_results[first_entry.id].status == 'started' + assert event.event_results[second_entry.id].status == 'pending' + + release_first_handler.set() + await pending_event.event_completed() + assert event.event_results[first_entry.id].status == 'completed' + assert event.event_results[second_entry.id].status == 'completed' + assert event.event_results[first_entry.id].result == 'first' + assert event.event_results[second_entry.id].result == 'second' + + await bus.stop(clear=True) + + +async def test_run_handler_starts_slow_monitor_after_lock_wait(caplog: Any): + """Slow handler warning should be based on handler runtime, not lock 
wait time.""" + bus = EventBus( + name='handler_slow_monitor_start_order_bus', + event_handler_concurrency='serial', + event_handler_slow_timeout=1.0, + ) + + class SlowMonitorOrderEvent(BaseEvent[str]): + pass + + first_handler_started = asyncio.Event() + release_first_handler = asyncio.Event() + + async def first_handler(_event: SlowMonitorOrderEvent) -> str: + first_handler_started.set() + await release_first_handler.wait() + return 'first' + + async def second_handler(_event: SlowMonitorOrderEvent) -> str: + await asyncio.sleep(0.03) + return 'ok' + + bus.on(SlowMonitorOrderEvent, first_handler) + slow_entry = bus.on(SlowMonitorOrderEvent, second_handler) + slow_entry.handler_slow_timeout = 0.01 + event = SlowMonitorOrderEvent() + + caplog.set_level(logging.WARNING, logger='bubus') + + pending_event = bus.emit(event) + await first_handler_started.wait() + await asyncio.sleep(0.03) + + slow_handler_messages_before_release = [ + record.message + for record in caplog.records + if 'Slow event handler' in record.message and slow_entry.label in record.message + ] + assert slow_handler_messages_before_release == [] + + release_first_handler.set() + + try: + await pending_event.event_completed() + finally: + await bus.stop(clear=True) + + slow_handler_messages_after_release = [ + record.message + for record in caplog.records + if 'Slow event handler' in record.message and slow_entry.label in record.message + ] + assert slow_handler_messages_after_release + + +async def test_find_type_inference(): + """Test that EventBus.find() returns the correct typed event.""" + bus = EventBus(name='expect_type_test_bus') + + class CustomResult(BaseModel): + data: str + + class SpecificEvent(BaseEvent[CustomResult]): + request_id: str = 'd1ca37d6-fdda-7e2b-8658-c8bb34034376' + + # Validate inline isinstance usage works with await find() + async def dispatch_inline_isinstance(): + await asyncio.sleep(0.01) + bus.emit(SpecificEvent(request_id='57d2fad5-8864-7f52-89ea-e4200dbf3599')) + 
+ inline_isinstance_task = asyncio.create_task(dispatch_inline_isinstance()) + assert isinstance(await bus.find(SpecificEvent, past=False, future=1.0), SpecificEvent) + await inline_isinstance_task + + # Validate inline assert_type usage works with await find() + async def dispatch_inline_assert_type(): + await asyncio.sleep(0.01) + bus.emit(SpecificEvent(request_id='87d233ab-822c-71e7-8564-39cd69531436')) + + inline_type_task = asyncio.create_task(dispatch_inline_assert_type()) + assert_type(await bus.find(SpecificEvent, past=False, future=1.0), SpecificEvent | None) + await inline_type_task + + # Validate assert_type with isinstance expression + async def dispatch_inline_isinstance_type(): + await asyncio.sleep(0.01) + bus.emit(SpecificEvent(request_id='9853009a-1c66-70fa-89da-e9407d0c66dc')) + + inline_isinstance_type_task = asyncio.create_task(dispatch_inline_isinstance_type()) + assert_type(isinstance(await bus.find(SpecificEvent, past=False, future=1.0), SpecificEvent), bool) + await inline_isinstance_type_task + + # Start a task that will dispatch the event + async def dispatch_later(): + await asyncio.sleep(0.01) + bus.emit(SpecificEvent(request_id='34f39b71-07a5-719b-8734-a1b0ee5d5c27')) + + dispatch_task = asyncio.create_task(dispatch_later()) + + # Use find with the event class - should return SpecificEvent type + expected_event = await bus.find(SpecificEvent, past=False, future=1.0) + assert expected_event is not None + assert isinstance(expected_event, SpecificEvent) + + # Type checking - this should work without cast + assert_type(expected_event, SpecificEvent) # Verify type is SpecificEvent, not BaseEvent[Any] + + # Runtime check + assert type(expected_event) is SpecificEvent + assert expected_event.request_id == '34f39b71-07a5-719b-8734-a1b0ee5d5c27' + + # Test with filters - type should still be preserved + async def dispatch_multiple(): + await asyncio.sleep(0.01) + bus.emit(SpecificEvent(request_id='32b90140-a7ee-7ae7-830c-71a099e93cb3')) + 
bus.emit(SpecificEvent(request_id='519664bf-c9fa-7654-896b-fb0cc5b6adab')) + + dispatch_task2 = asyncio.create_task(dispatch_multiple()) + + # find with where filter + def is_correct(event: SpecificEvent) -> bool: + return event.request_id == '519664bf-c9fa-7654-896b-fb0cc5b6adab' + + filtered_event = await bus.find( + SpecificEvent, + where=is_correct, + past=False, + future=1.0, + ) + assert filtered_event is not None + + assert_type(filtered_event, SpecificEvent) # Should still be SpecificEvent + assert isinstance(filtered_event, SpecificEvent) + assert type(filtered_event) is SpecificEvent + assert filtered_event.request_id == '519664bf-c9fa-7654-896b-fb0cc5b6adab' + + # Test with string event type - returns BaseEvent[Any] + async def dispatch_string_event(): + await asyncio.sleep(0.01) + bus.emit(BaseEvent(event_type='StringEvent')) + + dispatch_task3 = asyncio.create_task(dispatch_string_event()) + string_event = await bus.find('StringEvent', past=False, future=1.0) + assert string_event is not None + + assert_type(string_event, BaseEvent[Any]) # Should be BaseEvent[Any] + assert string_event.event_type == 'StringEvent' + + await dispatch_task + await dispatch_task2 + await dispatch_task3 + + await bus.stop(clear=True) + + +async def test_find_past_type_inference(): + """Test that EventBus.find() with past-window returns the correct typed event.""" + bus = EventBus(name='query_type_test_bus') + + class QueryEvent(BaseEvent[str]): + pass + + # Dispatch an event so it appears in history + event = bus.emit(QueryEvent()) + await bus.wait_until_idle() + + assert isinstance(await bus.find(QueryEvent, past=10, future=False), QueryEvent) + assert_type(await bus.find(QueryEvent, past=10, future=False), QueryEvent | None) + assert_type(isinstance(await bus.find(QueryEvent, past=10, future=False), QueryEvent), bool) + queried = await bus.find(QueryEvent, past=10, future=False) + + assert queried is not None + assert isinstance(queried, QueryEvent) + assert_type(queried, 
QueryEvent) + assert queried.event_id == event.event_id + + await bus.stop(clear=True) + + +async def test_dispatch_type_inference(): + """Test that EventBus.emit() returns the same type as its input.""" + bus = EventBus(name='type_inference_test_bus') + + class CustomResult(BaseModel): + value: str + + class CustomEvent(BaseEvent[CustomResult]): + pass + + # Create an event instance + original_event = CustomEvent() + + # Dispatch should return the same type WITHOUT needing cast() + dispatched_event = bus.emit(original_event) + assert isinstance(dispatched_event, CustomEvent) + + # Type checking - this should work without cast + assert_type(dispatched_event, CustomEvent) # Should be CustomEvent, not BaseEvent[Any] + + # Runtime check + assert type(dispatched_event) is CustomEvent + assert dispatched_event is original_event # Should be the same object + + # The returned event should be fully typed + async def handler(event: CustomEvent) -> CustomResult: + return CustomResult(value='test') + + bus.on('CustomEvent', handler) + + # Validate inline isinstance usage works with emit() + another_event = CustomEvent() + assert isinstance(bus.emit(another_event), CustomEvent) + + # Validate assert_type captures emit() return type when called inline + type_event = CustomEvent() + dispatched_type_event = bus.emit(type_event) + assert_type(dispatched_type_event, CustomEvent) + + # Validate assert_type with isinstance expression using emit() + isinstance_type_event = CustomEvent() + assert_type(isinstance(bus.emit(isinstance_type_event), CustomEvent), Literal[True]) + + # We should be able to use it without casting + result = await dispatched_event.event_result() + + # Type checking for the result + assert_type(result, CustomResult | None) # Should be CustomResult | None + + # Test that we can access type-specific attributes without cast + # This would fail type checking if dispatched_event was BaseEvent[Any] + assert dispatched_event.event_type == 'CustomEvent' + + # 
Demonstrate the improvement - no cast needed! + # Before: event = cast(CustomEvent, bus.emit(CustomEvent())) + # After: event = bus.emit(CustomEvent()) # Type is preserved! + + await another_event.event_result() + await type_event.event_result() + await isinstance_type_event.event_result() + + await bus.stop(clear=True) + + +# Consolidated from tests/test_auto_event_result_schema.py + +# Test automatic event_result_type extraction from Generic type parameters. + +from dataclasses import dataclass + +import pytest +from pydantic import BaseModel, TypeAdapter, ValidationError +from pydantic_core import to_jsonable_python +from typing_extensions import TypedDict + +from bubus.base_event import BaseEvent +from bubus.helpers import extract_basemodel_generic_arg + + +def _to_plain(value: Any) -> Any: + return to_jsonable_python(value) + + +def _event_result_schema_json(event: BaseEvent[Any]) -> dict[str, Any]: + raw_schema = event.model_dump(mode='json')['event_result_type'] + return TypeAdapter(dict[str, Any]).validate_python(raw_schema) + + +class UserData(BaseModel): + name: str + age: int + + +class TaskResult(BaseModel): + task_id: str + status: str + + +class ModuleLevelResult(BaseModel): + """Module-level result type for testing auto-detection.""" + + result_id: str + data: dict[str, Any] + success: bool + + +class NestedModuleResult(BaseModel): + """Another module-level type for testing complex generics.""" + + items: list[str] + metadata: dict[str, int] + + +class EmailMessage(BaseModel): + """Module-level type for testing extract_basemodel_generic_arg.""" + + subject: str + body: str + recipients: list[str] + + +class ProfileResult(TypedDict): + user_id: str + active: bool + score: int + + +class OptionalProfileResult(TypedDict, total=False): + nickname: str + age: int + + +@dataclass +class DataClassResult: + task_id: str + priority: int + + +def test_builtin_types_auto_extraction(): + """Built-in Generic[T] values populate result schema.""" + + class 
StringEvent(BaseEvent[str]): + message: str = 'Hello' + + class IntEvent(BaseEvent[int]): + number: int = 42 + + class FloatEvent(BaseEvent[float]): + value: float = 3.14 + + string_event = StringEvent() + int_event = IntEvent() + float_event = FloatEvent() + + assert string_event.event_result_type is str + assert int_event.event_result_type is int + assert float_event.event_result_type is float + + +def test_custom_pydantic_models_auto_extraction(): + """Custom Pydantic result schemas are extracted from Generic[T].""" + + class UserEvent(BaseEvent[UserData]): + user_id: str = 'fbf27f90-5cc9-798d-8f41-e09f2689f208' + + class TaskEvent(BaseEvent[TaskResult]): + batch_id: str = 'b497c95e-a753-77e6-8739-e8a2d3d8ae42' + + user_event = UserEvent() + task_event = TaskEvent() + + assert user_event.event_result_type is UserData + assert task_event.event_result_type is TaskResult + + +def test_complex_generic_types_auto_extraction(): + """Complex Generic[T] values are extracted.""" + + class ListEvent(BaseEvent[list[str]]): + pass + + class DictEvent(BaseEvent[dict[str, int]]): + pass + + class SetEvent(BaseEvent[set[int]]): + pass + + list_event = ListEvent() + dict_event = DictEvent() + set_event = SetEvent() + + assert list_event.event_result_type == list[str] + assert dict_event.event_result_type == dict[str, int] + assert set_event.event_result_type == set[int] + + +def test_complex_generic_with_custom_types(): + """Test complex generics containing custom types.""" + + class TaskListEvent(BaseEvent[list[TaskResult]]): + batch_id: str = 'batch456' + + task_list_event = TaskListEvent() + + assert task_list_event.event_result_type == list[TaskResult] + + +@pytest.mark.parametrize( + ('json_schema', 'expected_schema'), + [ + ({'type': 'string'}, str), + ({'type': 'number'}, float), + ({'type': 'integer'}, int), + ({'type': 'boolean'}, bool), + ({'type': 'null'}, type(None)), + ], +) +def test_json_schema_primitive_deserialization(json_schema: dict[str, str], 
expected_schema: Any): + """Primitive JSON Schema payloads reconstruct to Python runtime types.""" + event = BaseEvent[Any].model_validate({'event_type': 'SchemaEvent', 'event_result_type': json_schema}) + + assert event.event_result_type is expected_schema + serialized_schema = _event_result_schema_json(event) + assert serialized_schema.get('type') == json_schema['type'] + + +def test_json_schema_list_of_models_deserialization(): + """Array schemas with $defs/$ref rehydrate into list[BaseModel]-compatible validators.""" + json_schema = TypeAdapter(list[UserData]).json_schema() + event = BaseEvent[Any].model_validate({'event_type': 'SchemaEvent', 'event_result_type': json_schema}) + + adapter = TypeAdapter(event.event_result_type) + validated = TypeAdapter(list[Any]).validate_python(adapter.validate_python([{'name': 'alice', 'age': 33}])) + assert len(validated) == 1 + assert isinstance(validated[0], BaseModel) + assert validated[0].model_dump() == {'name': 'alice', 'age': 33} + + serialized_schema = _event_result_schema_json(event) + assert serialized_schema.get('type') == 'array' + assert '$defs' in serialized_schema + + +def test_json_schema_nested_object_collection_deserialization(): + """Nested dict[str, list[BaseModel]] schemas rehydrate into fully typed validators.""" + json_schema = TypeAdapter(dict[str, list[TaskResult]]).json_schema() + event = BaseEvent[Any].model_validate({'event_type': 'SchemaEvent', 'event_result_type': json_schema}) + + adapter = TypeAdapter(event.event_result_type) + validated = adapter.validate_python({'batch_a': [{'task_id': '6b2e9266-87c4-7d4a-81e5-a6026165e14b', 'status': 'ok'}]}) + assert isinstance(validated, dict) + assert isinstance(validated['batch_a'], list) + assert isinstance(validated['batch_a'][0], BaseModel) + assert validated['batch_a'][0].model_dump() == {'task_id': '6b2e9266-87c4-7d4a-81e5-a6026165e14b', 'status': 'ok'} + + serialized_schema = _event_result_schema_json(event) + assert serialized_schema.get('type') 
== 'object' + assert '$defs' in serialized_schema + + +@pytest.mark.parametrize( + ('shape', 'payload'), + [ + (list[str], ['a', 'b']), + (tuple[str, int], ['a', 7]), + (dict[str, list[int]], {'scores': [1, 2, 3]}), + (list[tuple[str, int]], [['x', 1], ['y', 2]]), + (list[UserData], [{'name': 'alice', 'age': 33}]), + (dict[str, list[TaskResult]], {'batch_a': [{'task_id': '6b2e9266-87c4-7d4a-81e5-a6026165e14b', 'status': 'ok'}]}), + ], +) +def test_json_schema_top_level_shape_deserialization_matrix(shape: Any, payload: Any): + """Top-level collection shapes rehydrate into equivalent runtime validators.""" + json_schema = TypeAdapter(shape).json_schema() + event = BaseEvent[Any].model_validate({'event_type': 'SchemaEvent', 'event_result_type': json_schema}) + + hydrated_adapter = TypeAdapter(event.event_result_type) + expected_adapter = TypeAdapter(shape) + + hydrated_value = hydrated_adapter.validate_python(payload) + expected_value = expected_adapter.validate_python(payload) + assert _to_plain(hydrated_value) == _to_plain(expected_value) + + serialized_schema = _event_result_schema_json(event) + assert '$schema' in serialized_schema + + +def test_json_schema_typed_dict_rehydrates_to_pydantic_model(): + """TypedDict schemas rehydrate into dynamic pydantic models.""" + json_schema = TypeAdapter(ProfileResult).json_schema() + event = BaseEvent[Any].model_validate({'event_type': 'SchemaEvent', 'event_result_type': json_schema}) + + assert isinstance(event.event_result_type, type) + assert issubclass(event.event_result_type, BaseModel) + + adapter = TypeAdapter(event.event_result_type) + validated = adapter.validate_python({'user_id': 'e692b6cb-ae63-773b-8557-3218f7ce5ced', 'active': True, 'score': 9}) + assert isinstance(validated, BaseModel) + assert validated.model_dump() == {'user_id': 'e692b6cb-ae63-773b-8557-3218f7ce5ced', 'active': True, 'score': 9} + + +def test_json_schema_optional_typed_dict_is_lax_on_missing_fields(): + """Non-required TypedDict fields should 
not fail hydration-time validation.""" + json_schema = TypeAdapter(OptionalProfileResult).json_schema() + event = BaseEvent[Any].model_validate({'event_type': 'SchemaEvent', 'event_result_type': json_schema}) + + adapter = TypeAdapter(event.event_result_type) + empty_validated = adapter.validate_python({}) + assert isinstance(empty_validated, BaseModel) + + partial_validated = adapter.validate_python({'nickname': 'squash'}) + assert isinstance(partial_validated, BaseModel) + assert partial_validated.model_dump(exclude_none=True) == {'nickname': 'squash'} + + +def test_json_schema_dataclass_rehydrates_to_pydantic_model(): + """Dataclass schemas rehydrate into dynamic pydantic models.""" + json_schema = TypeAdapter(DataClassResult).json_schema() + event = BaseEvent[Any].model_validate({'event_type': 'SchemaEvent', 'event_result_type': json_schema}) + + adapter = TypeAdapter(event.event_result_type) + validated = adapter.validate_python({'task_id': '16272e4a-6936-7e87-872b-0eadeb911f9d', 'priority': 2}) + assert isinstance(validated, BaseModel) + assert validated.model_dump() == {'task_id': '16272e4a-6936-7e87-872b-0eadeb911f9d', 'priority': 2} + + +def test_json_schema_list_of_dataclass_rehydrates_to_list_of_models(): + """Nested dataclass objects inside collections should rehydrate cleanly.""" + json_schema = TypeAdapter(list[DataClassResult]).json_schema() + event = BaseEvent[Any].model_validate({'event_type': 'SchemaEvent', 'event_result_type': json_schema}) + + adapter = TypeAdapter(event.event_result_type) + validated = adapter.validate_python([{'task_id': '78cfaa39-d697-7ef5-8e62-19b94b2cb48e', 'priority': 5}]) + assert isinstance(validated, list) + assert isinstance(validated[0], BaseModel) + assert validated[0].model_dump() == {'task_id': '78cfaa39-d697-7ef5-8e62-19b94b2cb48e', 'priority': 5} + + +async def test_json_schema_nested_object_and_array_runtime_enforcement(): + """Nested object/array schemas reconstructed from JSON enforce handler return values.""" 
+ from bubus import EventBus + + nested_schema = { + 'type': 'object', + 'properties': { + 'items': {'type': 'array', 'items': {'type': 'integer'}}, + 'meta': {'type': 'object', 'additionalProperties': {'type': 'boolean'}}, + }, + 'required': ['items', 'meta'], + } + + bus = EventBus(name='nested_schema_runtime_bus') + + async def valid_handler(event: BaseEvent[Any]) -> dict[str, Any]: + return {'items': [1, 2, 3], 'meta': {'ok': True, 'cached': False}} + + bus.on('NestedSchemaEvent', valid_handler) + + valid_event = BaseEvent[Any].model_validate({'event_type': 'NestedSchemaEvent', 'event_result_type': nested_schema}) + await bus.emit(valid_event) + valid_result = next(iter(valid_event.event_results.values())) + assert valid_result.status == 'completed' + assert valid_result.error is None + assert isinstance(valid_result.result, BaseModel) + assert valid_result.result.model_dump() == {'items': [1, 2, 3], 'meta': {'ok': True, 'cached': False}} + + bus.handlers.clear() + + async def invalid_handler(event: BaseEvent[Any]) -> dict[str, Any]: + return {'items': ['not-an-int'], 'meta': {'ok': 'yes'}} + + bus.on('NestedSchemaEvent', invalid_handler) + invalid_event = BaseEvent[Any].model_validate({'event_type': 'NestedSchemaEvent', 'event_result_type': nested_schema}) + await bus.emit(invalid_event) + invalid_result = next(iter(invalid_event.event_results.values())) + assert invalid_result.status == 'error' + assert invalid_result.error is not None + + await bus.stop(clear=True) + + +def test_no_generic_parameter(): + """Test that events without generic parameters don't get auto-set types.""" + + class PlainEvent(BaseEvent): + message: str = 'plain' + + plain_event = PlainEvent() + + # Should remain None since no schema was provided + assert plain_event.event_result_type is None + + +def test_none_generic_parameter(): + """Test that BaseEvent[None] results in None type.""" + + class NoneEvent(BaseEvent[None]): + message: str = 'none' + + none_event = NoneEvent() + + # 
Should remain unset + assert none_event.event_result_type is None + + +def test_nested_inheritance(): + """Test that generic type extraction works with nested inheritance.""" + + class BaseUserEvent(BaseEvent[UserData]): + pass + + class SpecificUserEvent(BaseUserEvent): + specific_field: str = 'specific' + + specific_event = SpecificUserEvent() + + # Should inherit schema/type metadata from parent generic. + assert specific_event.event_result_type is UserData + + +def test_module_level_types_auto_extraction(): + """Test that module-level schemas are automatically detected.""" + + class ModuleEvent(BaseEvent[ModuleLevelResult]): + operation: str = 'test_op' + + class NestedModuleEvent(BaseEvent[NestedModuleResult]): + batch_id: str = 'batch123' + + module_event = ModuleEvent() + nested_event = NestedModuleEvent() + + # Should auto-detect module-level schemas. + assert module_event.event_result_type is ModuleLevelResult + assert nested_event.event_result_type is NestedModuleResult + + +def test_complex_module_level_generics(): + """Test complex generics with module-level types are auto-detected.""" + + class ListModuleEvent(BaseEvent[list[ModuleLevelResult]]): + batch_size: int = 10 + + class DictModuleEvent(BaseEvent[dict[str, NestedModuleResult]]): + mapping_type: str = 'result_map' + + list_event = ListModuleEvent() + dict_event = DictModuleEvent() + + # Should auto-detect complex schemas. 
+ assert list_event.event_result_type == list[ModuleLevelResult] + assert dict_event.event_result_type == dict[str, NestedModuleResult] + + +async def test_module_level_runtime_enforcement(): + """Test that module-level auto-detected types are enforced at runtime.""" + from bubus import EventBus + + class RuntimeEvent(BaseEvent[ModuleLevelResult]): + operation: str = 'runtime_test' + + # Verify auto-detection worked + test_event = RuntimeEvent() + assert test_event.event_result_type is ModuleLevelResult, f'Auto-detection failed: got {test_event.event_result_type}' + + bus = EventBus(name='runtime_test_bus') + + def correct_handler(event: RuntimeEvent): + # Return dict that matches ModuleLevelResult schema + return {'result_id': 'e1bb315c-472f-7bd1-8e72-c8502e1a9a36', 'data': {'key': 'value'}, 'success': True} + + def incorrect_handler(event: RuntimeEvent): + # Return something that doesn't match ModuleLevelResult + return {'wrong': 'format'} + + # Test correct handler + bus.on('RuntimeEvent', correct_handler) + + event1 = RuntimeEvent() + await bus.emit(event1) + result1 = await event1.event_result() + + # Should be cast to ModuleLevelResult + assert isinstance(result1, ModuleLevelResult) + assert result1.result_id == 'e1bb315c-472f-7bd1-8e72-c8502e1a9a36' + assert result1.data == {'key': 'value'} + assert result1.success is True + + # Test incorrect handler + bus.handlers.clear() # Clear previous handler + bus.on('RuntimeEvent', incorrect_handler) + + event2 = RuntimeEvent() + await bus.emit(event2) + + # Should get an error due to validation failure + handler_id = list(event2.event_results.keys())[0] + event_result = event2.event_results[handler_id] + + assert event_result.status == 'error' + assert isinstance(event_result.error, Exception) + + await bus.stop(clear=True) + + +def test_extract_basemodel_generic_arg_basic(): + """Test extract_basemodel_generic_arg with basic types.""" + + # Test BaseEvent[int] + class IntResultEvent(BaseEvent[int]): + pass + + 
result = extract_basemodel_generic_arg(IntResultEvent) + assert result is int + + +def test_extract_basemodel_generic_arg_dict(): + """Test extract_basemodel_generic_arg with dict types.""" + + # Test BaseEvent[dict[str, int]] + class DictIntEvent(BaseEvent[dict[str, int]]): + pass + + result = extract_basemodel_generic_arg(DictIntEvent) + assert result == dict[str, int] + + +def test_extract_basemodel_generic_arg_dict_with_module_type(): + """Test extract_basemodel_generic_arg with dict containing module-level type.""" + + # Test BaseEvent[dict[str, EmailMessage]] + class DictEmailEvent(BaseEvent[dict[str, EmailMessage]]): + pass + + result = extract_basemodel_generic_arg(DictEmailEvent) + assert result == dict[str, EmailMessage] + + +def test_extract_basemodel_generic_arg_dict_with_local_type(): + """Test extract_basemodel_generic_arg with dict containing locally defined type.""" + + # Define local type + class EmailAttachment(BaseModel): + filename: str + content: bytes + mime_type: str + + # Test BaseEvent[dict[str, EmailAttachment]] + class DictAttachmentEvent(BaseEvent[dict[str, EmailAttachment]]): + pass + + result = extract_basemodel_generic_arg(DictAttachmentEvent) + assert result == dict[str, EmailAttachment] + + +def test_extract_basemodel_generic_arg_no_generic(): + """Test extract_basemodel_generic_arg with BaseEvent (no generic parameter).""" + + # Test BaseEvent without generic parameter + class PlainEvent(BaseEvent): + pass + + result = extract_basemodel_generic_arg(PlainEvent) + assert result is None + + +def test_type_adapter_validation(): + """Test that TypeAdapter can validate extracted types properly.""" + + # Test dict[str, int] validation + class DictIntEvent(BaseEvent[dict[str, int]]): + pass + + extracted_type = extract_basemodel_generic_arg(DictIntEvent) + adapter = TypeAdapter(extracted_type) + + # Valid data should work + valid_data = {'abc': 123, 'def': 456} + result = adapter.validate_python(valid_data) + assert result == valid_data + 
+ # Invalid data should raise ValidationError + invalid_data = {'abc': 'badvalue'} + with pytest.raises(ValidationError) as exc_info: + adapter.validate_python(invalid_data) + + # Check that the error is about the wrong type + errors = exc_info.value.errors() + assert len(errors) > 0 + assert any('int' in str(error) for error in errors) + + +if __name__ == '__main__': + pytest.main([__file__, '-v', '-s']) + + +# Consolidated from tests/test_simple_typed_results.py (rewritten for strict assertions) + + +async def test_simple_typed_result_model_roundtrip_and_status() -> None: + bus = EventBus(name='typed_result_simple_bus') + + class SimpleResult(BaseModel): + value: str + count: int + + class SimpleTypedEvent(BaseEvent[SimpleResult]): + event_result_type: Any = SimpleResult + + def handler(_event: SimpleTypedEvent) -> SimpleResult: + return SimpleResult(value='hello', count=42) + + handler_entry = bus.on(SimpleTypedEvent, handler) + + try: + completed_event = await bus.emit(SimpleTypedEvent()) + assert completed_event.event_status == 'completed' + assert handler_entry.id in completed_event.event_results + + event_result = completed_event.event_results[handler_entry.id] + assert event_result.status == 'completed' + assert event_result.error is None + assert isinstance(event_result.result, SimpleResult) + assert event_result.result == SimpleResult(value='hello', count=42) + finally: + await bus.stop(clear=True) diff --git a/tests/test_event_result_handler_metadata.py b/tests/test_event_result_handler_metadata.py new file mode 100644 index 0000000..e00877f --- /dev/null +++ b/tests/test_event_result_handler_metadata.py @@ -0,0 +1,168 @@ +from typing import cast +from uuid import NAMESPACE_DNS, uuid4, uuid5 + +import pytest + +from bubus.base_event import BaseEvent, EventResult +from bubus.event_bus import EventBus +from bubus.event_handler import EventHandler, EventHandlerCallable + + +class StandaloneEvent(BaseEvent[str]): + data: str + + +@pytest.mark.asyncio +async 
def test_event_result_run_handler_with_base_event() -> None: + """EventResult should run correctly when called directly with a real BaseEvent.""" + event = StandaloneEvent(data='ok') + + async def handler(_event: StandaloneEvent) -> str: + return 'ok' + + handler_entry = EventHandler.from_callable( + handler=cast(EventHandlerCallable, handler), + event_pattern='StandaloneEvent', + eventbus_name='Standalone', + eventbus_id='dafc8026-409b-7794-8067-62e302999216', + ) + + event_result: EventResult[str] = EventResult( + event_id=event.event_id, + handler=handler_entry, + timeout=event.event_timeout, + result_type=str, + ) + + test_bus = EventBus(name='StandaloneTest1') + result_value = await event_result.run_handler( + event, + eventbus=test_bus, + timeout=event.event_timeout, + ) + + assert result_value == 'ok' + assert event_result.status == 'completed' + assert event_result.result == 'ok' + await test_bus.stop() + + +@pytest.mark.asyncio +async def test_event_and_result_without_eventbus() -> None: + """Verify BaseEvent + EventResult work without instantiating an EventBus.""" + + event = StandaloneEvent(data='message') + + def handler(evt: StandaloneEvent) -> str: + return evt.data.upper() + + handler_entry = EventHandler.from_callable( + handler=cast(EventHandlerCallable, handler), + event_pattern='StandaloneEvent', + eventbus_name='EventBus', + eventbus_id='00000000-0000-0000-0000-000000000000', + ) + assert handler_entry.id is not None + handler_id = handler_entry.id + event_result = event.event_result_update(handler=handler_entry, status='pending') + + test_bus = EventBus(name='StandaloneTest2') + value = await event_result.run_handler( + event, + eventbus=test_bus, + timeout=event.event_timeout, + ) + + assert value == 'MESSAGE' + assert event_result.status == 'completed' + assert event.event_results[handler_id] is event_result + + await test_bus.emit(event).event_completed() + assert event.event_completed_at is not None + await test_bus.stop() + + +def 
test_event_handler_model_is_serializable() -> None: + """EventHandler is a Pydantic model and can round-trip serialized metadata.""" + + def handler(event: StandaloneEvent) -> str: + return event.data + + entry = EventHandler.from_callable( + handler=cast(EventHandlerCallable, handler), + event_pattern='StandaloneEvent', + eventbus_name='StandaloneBus', + eventbus_id='018f8e40-1234-7000-8000-000000001234', + ) + + dumped = entry.model_dump(mode='json') + assert dumped['event_pattern'] == 'StandaloneEvent' + assert dumped['eventbus_name'] == 'StandaloneBus' + assert dumped.get('handler') is None + + loaded = EventHandler.model_validate(dumped) + assert loaded.id == entry.id + assert loaded.event_pattern == entry.event_pattern + assert loaded.handler is None + + +def test_event_handler_id_matches_typescript_uuidv5_algorithm() -> None: + entry = EventHandler( + handler_name='pkg.module.handler', + handler_file_path='~/project/app.py:123', + handler_registered_at='2025-01-02T03:04:05.678901000Z', + event_pattern='StandaloneEvent', + eventbus_name='StandaloneBus', + eventbus_id='018f8e40-1234-7000-8000-000000001234', + ) + + namespace = uuid5(NAMESPACE_DNS, 'bubus-handler') + expected_seed = '018f8e40-1234-7000-8000-000000001234|pkg.module.handler|~/project/app.py:123|2025-01-02T03:04:05.678901000Z|StandaloneEvent' + expected_id = str(uuid5(namespace, expected_seed)) + + assert entry.compute_handler_id() == expected_id + assert entry.id == expected_id + + +def test_event_handler_model_detects_handler_file_path() -> None: + def handler(event: StandaloneEvent) -> str: + return event.data + + entry = EventHandler.from_callable( + handler=cast(EventHandlerCallable, handler), + event_pattern='StandaloneEvent', + eventbus_name='StandaloneBus', + eventbus_id='018f8e40-1234-7000-8000-000000001234', + ) + + assert entry.handler_file_path is not None + expected_suffix = f'test_event_result_handler_metadata.py:{handler.__code__.co_firstlineno}' + assert 
entry.handler_file_path.endswith(expected_suffix) + + +def test_event_result_serializes_handler_metadata_and_derived_fields() -> None: + """EventResult stores handler metadata and derives convenience fields from it.""" + + def handler(event: StandaloneEvent) -> str: + return event.data + + entry = EventHandler.from_callable( + handler=cast(EventHandlerCallable, handler), + event_pattern='StandaloneEvent', + eventbus_name='StandaloneBus', + eventbus_id='018f8e40-1234-7000-8000-000000001234', + ) + + result = EventResult( + event_id=str(uuid4()), + handler=entry, + ) + payload = result.model_dump(mode='json') + + assert 'handler' not in payload + assert 'result_type' not in payload + assert payload['handler_id'] == entry.id + assert payload['handler_name'] == entry.handler_name + assert payload['handler_event_pattern'] == entry.event_pattern + assert payload['eventbus_id'] == entry.eventbus_id + assert payload['eventbus_name'] == entry.eventbus_name diff --git a/tests/test_eventbus.py b/tests/test_eventbus.py index b4cb977..8dc8438 100644 --- a/tests/test_eventbus.py +++ b/tests/test_eventbus.py @@ -15,16 +15,15 @@ """ import asyncio -import json -import os import time -from datetime import datetime, timezone +from datetime import datetime, timedelta, timezone from typing import Any import pytest from pydantic import Field from bubus import BaseEvent, EventBus +from bubus.helpers import monotonic_datetime class CreateAgentTaskEvent(BaseEvent): @@ -48,19 +47,11 @@ class UserActionEvent(BaseEvent): class SystemEventModel(BaseEvent): """Test event model for system events""" - event_name: str + name: str severity: str = 'info' details: dict[str, Any] = Field(default_factory=dict) -class MockAgent: - """Mock agent for testing""" - - def __init__(self, name: str = 'TestAgent'): - self.name = name - self.events_received = [] - - @pytest.fixture async def eventbus(): """Create an event bus for testing""" @@ -72,30 +63,33 @@ async def eventbus(): @pytest.fixture async def 
parallel_eventbus(): """Create an event bus with parallel handler execution""" - bus = EventBus(parallel_handlers=True) + bus = EventBus(event_handler_concurrency='parallel') yield bus await bus.stop() -@pytest.fixture -def mock_agent(): - """Create a mock agent""" - return MockAgent() - - class TestEventBusBasics: """Test basic EventBus functionality""" - async def test_eventbus_initialization(self, mock_agent: MockAgent): + async def test_eventbus_initialization(self): """Test that EventBus initializes correctly""" bus = EventBus() assert bus._is_running is False assert bus._runloop_task is None assert len(bus.event_history) == 0 - assert len(bus.handlers['*']) == 0 # No default logger anymore + assert len(bus.handlers_by_key.get('*', [])) == 0 # No default logger anymore + assert bus.event_history.max_history_drop is False + + def test_eventbus_accepts_custom_id(self): + """EventBus constructor accepts id=... to set bus UUID.""" + custom_id = '018f8e40-1234-7000-8000-000000001234' + bus = EventBus(id=custom_id) - async def test_auto_start_and_stop(self, mock_agent): + assert bus.id == custom_id + assert bus.label.endswith('#1234') + + async def test_auto_start_and_stop(self): """Test auto-start functionality and stopping the event bus""" bus = EventBus() @@ -104,7 +98,7 @@ async def test_auto_start_and_stop(self, mock_agent): assert bus._runloop_task is None # Auto-start by emitting an event - bus.dispatch(UserActionEvent(action='test', user_id='user123')) + bus.emit(UserActionEvent(action='test', user_id='50d357df-e68c-7111-8a6c-7018569514b0')) await bus.wait_until_idle() # Should be running after auto-start @@ -115,6 +109,26 @@ async def test_auto_start_and_stop(self, mock_agent): await bus.stop() assert bus._is_running is False + async def test_wait_until_idle_recovers_when_idle_flag_was_cleared(self): + """wait_until_idle should not hang if _on_idle was cleared after work finished.""" + bus = EventBus() + + async def handler(_event: UserActionEvent) -> None: 
+ return None + + bus.on(UserActionEvent, handler) + + try: + await bus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + await bus.wait_until_idle() + + assert bus._on_idle is not None + bus._on_idle.clear() + + await asyncio.wait_for(bus.wait_until_idle(), timeout=1.0) + finally: + await bus.stop() + # Stop again should be idempotent await bus.stop() assert bus._is_running is False @@ -126,14 +140,14 @@ class TestEventEnqueueing: async def test_emit_and_result(self, eventbus): """Test event emission in async and sync contexts, and result() pattern""" # Test async emission - event = UserActionEvent(action='login', user_id='user123', event_timeout=1) - queued = eventbus.dispatch(event) + event = UserActionEvent(action='login', user_id='50d357df-e68c-7111-8a6c-7018569514b0', event_timeout=1) + queued = eventbus.emit(event) # Check immediate result assert isinstance(queued, UserActionEvent) assert queued.event_type == 'UserActionEvent' assert queued.action == 'login' - assert queued.user_id == 'user123' + assert queued.user_id == '50d357df-e68c-7111-8a6c-7018569514b0' assert queued.event_id is not None assert queued.event_created_at is not None assert queued.event_started_at is None # Not started yet @@ -151,17 +165,90 @@ async def test_emit_and_result(self, eventbus): # Check event history assert len(eventbus.event_history) == 1 - def test_emit_sync(self, mock_agent): + def test_emit_sync(self): """Test sync event emission""" bus = EventBus() - event = SystemEventModel(event_name='startup', severity='info') + event = SystemEventModel(name='startup', severity='info') with pytest.raises(RuntimeError) as e: - bus.dispatch(event) + bus.emit(event) assert 'no event loop is running' in str(e.value) assert len(bus.event_history) == 0 + async def test_emit_alias_dispatches_event(self, eventbus): + """Test EventBus.emit() alias dispatches and processes events.""" + handled_event_ids: list[str] = [] + + async def user_handler(event: 
UserActionEvent) -> str: + handled_event_ids.append(event.event_id) + return 'handled' + + eventbus.on(UserActionEvent, user_handler) + + event = UserActionEvent(action='alias', user_id='50d357df-e68c-7111-8a6c-7018569514b0') + queued = eventbus.emit(event) + + assert queued is event + completed = await queued + assert completed.event_status == 'completed' + assert handled_event_ids == [event.event_id] + assert eventbus.label in completed.event_path + + async def test_unbounded_history_disables_history_rejection(self): + """When max_history_size=None, dispatch should not reject on history size.""" + bus = EventBus(name='NoLimitBus', max_history_size=None) + + processed = 0 + + async def slow_handler(event: BaseEvent) -> None: + nonlocal processed + await asyncio.sleep(0.01) + processed += 1 + + bus.on('SlowEvent', slow_handler) + + events: list[BaseEvent] = [] + + try: + for _ in range(150): + events.append(bus.emit(BaseEvent(event_type='SlowEvent'))) + + await asyncio.gather(*events) + await bus.wait_until_idle() + assert processed == 150 + finally: + await bus.stop(clear=True) + + async def test_zero_history_size_keeps_inflight_and_drops_on_completion(self): + """max_history_size=0 keeps in-flight events but removes them as soon as they complete.""" + bus = EventBus(name='ZeroHistoryBus', max_history_size=0, max_history_drop=False) + + first_handler_started = asyncio.Event() + release_handlers = asyncio.Event() + + async def slow_handler(_event: BaseEvent[Any]) -> None: + first_handler_started.set() + await release_handlers.wait() + + bus.on('SlowEvent', slow_handler) + + try: + first = bus.emit(BaseEvent(event_type='SlowEvent')) + await asyncio.wait_for(first_handler_started.wait(), timeout=1.0) + second = bus.emit(BaseEvent(event_type='SlowEvent')) + + assert first.event_id in bus.event_history + assert second.event_id in bus.event_history + + release_handlers.set() + await asyncio.gather(first, second) + await bus.wait_until_idle() + + assert 
len(bus.event_history) == 0 + finally: + await bus.stop(clear=True) + class TestHandlerRegistration: """Test handler registration and execution""" @@ -177,7 +264,7 @@ async def user_handler(event: UserActionEvent) -> str: # Handler for event type by model class async def system_handler(event: SystemEventModel) -> str: - results['model'].append(event.event_name) + results['model'].append(event.name) return 'system_handled' # Universal handler @@ -191,8 +278,8 @@ async def universal_handler(event: BaseEvent) -> str: eventbus.on('*', universal_handler) # Emit events - eventbus.dispatch(UserActionEvent(action='login', user_id='u1')) - eventbus.dispatch(SystemEventModel(event_name='startup')) + eventbus.emit(UserActionEvent(action='login', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + eventbus.emit(SystemEventModel(name='startup')) await eventbus.wait_until_idle() # Verify all handlers were called correctly @@ -200,6 +287,37 @@ async def universal_handler(event: BaseEvent) -> str: assert results['model'] == ['startup'] assert set(results['universal']) == {'UserActionEvent', 'SystemEventModel'} + async def test_class_matcher_matches_generic_base_event_by_event_type(self, eventbus): + """Class listeners should still match generic BaseEvent payloads by event_type string.""" + + class DifferentNameFromClass(BaseEvent): + pass + + seen: list[str] = [] + + async def class_handler(event: BaseEvent) -> None: + seen.append(f'class:{event.event_type}') + + async def string_handler(event: BaseEvent) -> None: + seen.append(f'string:{event.event_type}') + + async def wildcard_handler(event: BaseEvent) -> None: + seen.append(f'wildcard:{event.event_type}') + + eventbus.on(DifferentNameFromClass, class_handler) + eventbus.on('DifferentNameFromClass', string_handler) + eventbus.on('*', wildcard_handler) + + eventbus.emit(BaseEvent(event_type='DifferentNameFromClass')) + await eventbus.wait_until_idle() + + assert seen == [ + 'class:DifferentNameFromClass', + 
'string:DifferentNameFromClass', + 'wildcard:DifferentNameFromClass', + ] + assert len(eventbus.handlers_by_key.get('DifferentNameFromClass', [])) == 2 + async def test_multiple_handlers_parallel(self, parallel_eventbus): """Test that multiple handlers run in parallel""" eventbus = parallel_eventbus @@ -224,7 +342,7 @@ async def slow_handler_2(event: BaseEvent) -> str: # Emit event and wait start = time.time() - event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + event = await eventbus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) duration = time.time() - start # Check handlers ran in parallel (should take ~0.1s, not 0.2s) @@ -238,7 +356,7 @@ async def slow_handler_2(event: BaseEvent) -> str: assert handler1_result is not None and handler1_result.result == 'handler1' assert handler2_result is not None and handler2_result.result == 'handler2' - def test_handler_can_be_sync_or_async(self, mock_agent): + def test_handler_can_be_sync_or_async(self): """Test that both sync and async handlers are accepted""" bus = EventBus() @@ -253,7 +371,7 @@ async def async_handler(event: BaseEvent) -> str: bus.on('TestEvent', async_handler) # Check both were registered - assert len(bus.handlers['TestEvent']) == 2 + assert len(bus.handlers_by_key.get('TestEvent', [])) == 2 async def test_class_and_instance_method_handlers(self, eventbus): """Test using class and instance methods as handlers""" @@ -291,18 +409,22 @@ def static_method_handler(event: UserActionEvent) -> str: processor1 = EventProcessor('Processor1', 10) processor2 = EventProcessor('Processor2', 20) - # Register instance methods + # Register instance methods (suppress warning about same-named handlers from different instances) + import warnings + eventbus.on(UserActionEvent, processor1.sync_method_handler) eventbus.on(UserActionEvent, processor1.async_method_handler) - eventbus.on(UserActionEvent, processor2.sync_method_handler) + with 
warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + eventbus.on(UserActionEvent, processor2.sync_method_handler) # Register class and static methods eventbus.on('UserActionEvent', EventProcessor.class_method_handler) eventbus.on('UserActionEvent', EventProcessor.static_method_handler) # Dispatch event - event = UserActionEvent(action='test_methods', user_id='u123') - completed_event = await eventbus.dispatch(event) + event = UserActionEvent(action='test_methods', user_id='dab45f48-9e3a-7042-80f8-ac8f07b6cfe3') + completed_event = await eventbus.emit(event) # Verify all handlers were called assert len(results) == 5 @@ -337,6 +459,56 @@ def static_method_handler(event: UserActionEvent) -> str: assert 'Handled by static method' in results_list +class TestEventForwarding: + """Tests for event forwarding between buses.""" + + @pytest.mark.asyncio + async def test_forwarding_loop_prevention(self): + bus_a = EventBus(name='ForwardBusA') + bus_b = EventBus(name='ForwardBusB') + bus_c = EventBus(name='ForwardBusC') + + class LoopEvent(BaseEvent[str]): + pass + + seen: dict[str, int] = {'A': 0, 'B': 0, 'C': 0} + + async def handler_a(event: LoopEvent) -> str: + seen['A'] += 1 + return 'handled-a' + + async def handler_b(event: LoopEvent) -> str: + seen['B'] += 1 + return 'handled-b' + + async def handler_c(event: LoopEvent) -> str: + seen['C'] += 1 + return 'handled-c' + + bus_a.on(LoopEvent, handler_a) + bus_b.on(LoopEvent, handler_b) + bus_c.on(LoopEvent, handler_c) + + # Create a forwarding cycle A -> B -> C -> A, which should be broken automatically. 
+ bus_a.on('*', bus_b.emit) + bus_b.on('*', bus_c.emit) + bus_c.on('*', bus_a.emit) + + try: + event = await bus_a.emit(LoopEvent()) + + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + await bus_c.wait_until_idle() + + assert seen == {'A': 1, 'B': 1, 'C': 1} + assert event.event_path == [bus_a.label, bus_b.label, bus_c.label] + finally: + await bus_a.stop(clear=True) + await bus_b.stop(clear=True) + await bus_c.stop(clear=True) + + class TestFIFOOrdering: """Test FIFO event processing""" @@ -360,7 +532,9 @@ async def handler(event: UserActionEvent) -> int: # Emit 20 events rapidly for i in range(20): - eventbus.dispatch(UserActionEvent(action=f'test_{i}', user_id='u1', metadata={'order': i})) + eventbus.emit( + UserActionEvent(action=f'test_{i}', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced', metadata={'order': i}) + ) await eventbus.wait_until_idle() @@ -390,7 +564,7 @@ async def working_handler(event: BaseEvent) -> str: eventbus.on('UserActionEvent', working_handler) # Emit and wait for result - event = await eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + event = await eventbus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) # Verify error capture and isolation failing_result = next((r for r in event.event_results.values() if r.handler_name.endswith('failing_handler')), None) @@ -402,6 +576,40 @@ async def working_handler(event: BaseEvent) -> str: assert working_result.result == 'worked' assert results == ['success'] + async def test_event_result_raises_exception_group_when_multiple_handlers_fail(self, eventbus): + """event_result() should raise ExceptionGroup when multiple handler failures exist.""" + + async def failing_handler_one(event: BaseEvent) -> str: + raise ValueError('first failure') + + async def failing_handler_two(event: BaseEvent) -> str: + raise RuntimeError('second failure') + + eventbus.on('UserActionEvent', failing_handler_one) + eventbus.on('UserActionEvent', 
failing_handler_two) + + event = await eventbus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + + with pytest.raises(ExceptionGroup) as exc_info: + await event.event_result() + + grouped_errors = exc_info.value.exceptions + assert len(grouped_errors) == 2 + assert {type(err) for err in grouped_errors} == {ValueError, RuntimeError} + + async def test_event_result_single_handler_error_raises_original_exception(self, eventbus): + """event_result() should preserve original exception type when only one handler fails.""" + + async def failing_handler(event: BaseEvent) -> str: + raise ValueError('single failure') + + eventbus.on('UserActionEvent', failing_handler) + + event = await eventbus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + + with pytest.raises(ValueError, match='single failure'): + await event.event_result() + class TestBatchOperations: """Test batch event operations""" @@ -409,13 +617,13 @@ class TestBatchOperations: async def test_batch_emit_with_gather(self, eventbus): """Test batch event emission with asyncio.gather""" events = [ - UserActionEvent(action='login', user_id='u1'), - SystemEventModel(event_name='startup'), - UserActionEvent(action='logout', user_id='u1'), + UserActionEvent(action='login', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced'), + SystemEventModel(name='startup'), + UserActionEvent(action='logout', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced'), ] # Enqueue batch - emitted_events = [eventbus.dispatch(event) for event in events] + emitted_events = [eventbus.emit(event) for event in events] results = await asyncio.gather(*emitted_events) # Check all processed @@ -432,8 +640,8 @@ async def test_write_ahead_log_captures_all_events(self, eventbus): # Emit several events events = [] for i in range(5): - event = UserActionEvent(action=f'action_{i}', user_id='u1') - events.append(eventbus.dispatch(event)) + event = UserActionEvent(action=f'action_{i}', 
user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced') + events.append(eventbus.emit(event)) await eventbus.wait_until_idle() @@ -468,7 +676,7 @@ async def slow_handler(event: BaseEvent) -> str: eventbus.on('UserActionEvent', slow_handler) # Enqueue without waiting - event = eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + event = eventbus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) completion_order.append('enqueue_done') # Wait for completion @@ -483,7 +691,7 @@ async def slow_handler(event: BaseEvent) -> str: class TestEdgeCases: """Test edge cases and special scenarios""" - async def test_stop_with_pending_events(self, mock_agent): + async def test_stop_with_pending_events(self): """Test stopping event bus with events still in queue""" bus = EventBus() @@ -496,7 +704,7 @@ async def slow_handler(event: BaseEvent) -> str: # Enqueue events but don't wait for i in range(5): - bus.dispatch(UserActionEvent(action=f'action_{i}', user_id='u1')) + bus.emit(UserActionEvent(action=f'action_{i}', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) # Stop immediately await bus.stop() @@ -514,19 +722,18 @@ async def test_event_with_complex_data(self, eventbus): } } - event = SystemEventModel(event_name='complex', details=complex_data) + event = SystemEventModel(name='complex', details=complex_data) - result = await eventbus.dispatch(event) + result = await eventbus.emit(event) # Check data preserved assert result.details['nested']['list'][2]['inner'] == 'value' async def test_concurrent_emit_calls(self, eventbus): """Test multiple concurrent emit calls""" - # Create many events concurrently, but respect the max_pending_events limit - # We'll create them in batches to avoid hitting the limit + # Create many events concurrently in batches to keep this test deterministic. 
total_events = 100 - batch_size = 50 # Stay well under the default limit of 100 + batch_size = 50 all_tasks = [] for batch_start in range(0, total_events, batch_size): @@ -534,9 +741,9 @@ async def test_concurrent_emit_calls(self, eventbus): batch_tasks = [] for i in range(batch_start, batch_end): - event = UserActionEvent(action=f'concurrent_{i}', user_id='u1') + event = UserActionEvent(action=f'concurrent_{i}', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced') # Emit returns the event syncresultsonously, but we need to wait for completion - emitted_event = eventbus.dispatch(event) + emitted_event = eventbus.emit(event) batch_tasks.append(emitted_event) # Wait for this batch to complete before starting the next @@ -571,8 +778,8 @@ async def handler(event: UserActionEvent): # Emit events num_events = 20 for i in range(num_events): - event = UserActionEvent(action=f'mixed_{i}', user_id='u1', metadata={'order': i}) - eventbus.dispatch(event) + event = UserActionEvent(action=f'mixed_{i}', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced', metadata={'order': i}) + eventbus.emit(event) # Wait for all events to process await eventbus.wait_until_idle() @@ -595,45 +802,69 @@ async def test_event_subclass_type(self, eventbus): # Create a specific event type event = CreateAgentTaskEvent( - user_id='test_user', agent_session_id='12345678-1234-5678-1234-567812345678', llm_model='test-model', task='test task' + user_id='371bbd3c-5231-7ff0-8aef-e63732a8d40f', + agent_session_id='12345678-1234-5678-1234-567812345678', + llm_model='test-model', + task='test task', ) # Enqueue it - result = eventbus.dispatch(event) + result = eventbus.emit(event) # Check type is preserved - should be class name assert result.event_type == 'CreateAgentTaskEvent' assert isinstance(result, BaseEvent) - async def test_event_schema_auto_generation(self, eventbus): - """Test that event_schema is automatically set with the correct format""" - - version = os.getenv('LIBRARY_VERSION', '1.0.0') - - # Test various 
event types + async def test_event_type_and_version_identity_fields(self, eventbus): + """event_type + event_version identify payload shape""" base_event = BaseEvent(event_type='TestEvent') - assert base_event.event_schema == f'bubus.models.BaseEvent@{version}' + assert base_event.event_type == 'TestEvent' + assert base_event.event_version == '0.0.1' task_event = CreateAgentTaskEvent( - user_id='test_user', agent_session_id='12345678-1234-5678-1234-567812345678', llm_model='test-model', task='test task' + user_id='371bbd3c-5231-7ff0-8aef-e63732a8d40f', + agent_session_id='12345678-1234-5678-1234-567812345678', + llm_model='test-model', + task='test task', ) - assert task_event.event_schema == f'test_eventbus.CreateAgentTaskEvent@{version}' + assert task_event.event_type == 'CreateAgentTaskEvent' + assert task_event.event_version == '0.0.1' + + # Check identity fields are preserved after emit + result = eventbus.emit(task_event) + assert result.event_type == task_event.event_type + assert result.event_version == task_event.event_version + + async def test_event_version_defaults_and_overrides(self, eventbus): + """event_version supports class defaults, runtime override, and JSON roundtrip.""" + + base_event = BaseEvent(event_type='TestVersionEvent') + assert base_event.event_version == '0.0.1' + + class VersionedEvent(BaseEvent): + event_version = '1.2.3' + data: str + + class_default = VersionedEvent(data='x') + assert class_default.event_version == '1.2.3' - user_event = UserActionEvent(action='login', user_id='user123') - assert user_event.event_schema == f'test_eventbus.UserActionEvent@{version}' + runtime_override = VersionedEvent(data='x', event_version='9.9.9') + assert runtime_override.event_version == '9.9.9' - # Check schema is preserved after emit - result = eventbus.dispatch(task_event) - assert result.event_schema == task_event.event_schema + dispatched = eventbus.emit(VersionedEvent(data='queued')) + assert dispatched.event_version == '1.2.3' + + 
restored = BaseEvent.model_validate(dispatched.model_dump(mode='json')) + assert restored.event_version == '1.2.3' async def test_automatic_event_type_derivation(self, eventbus): """Test that event_type is automatically derived from class name when not specified""" # Test automatic derivation - event = UserActionEvent(action='test', user_id='u1') + event = UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced') assert event.event_type == 'UserActionEvent' - event2 = SystemEventModel(event_name='startup') + event2 = SystemEventModel(name='startup') assert event2.event_type == 'SystemEventModel' # Create inline event class without explicit event_type @@ -652,8 +883,8 @@ async def handler(event): eventbus.on('UserActionEvent', handler) eventbus.on('InlineTestEvent', handler) - await eventbus.dispatch(event) - await eventbus.dispatch(inline_event) + await eventbus.emit(event) + await eventbus.emit(inline_event) await eventbus.wait_until_idle() assert len(received) == 2 @@ -680,115 +911,13 @@ async def handler(event): eventbus.on('CustomEventType', handler) eventbus.on('OverrideEvent', handler) # This won't match - await eventbus.dispatch(event) + await eventbus.emit(event) await eventbus.wait_until_idle() assert len(received) == 1 assert received[0].event_type == 'CustomEventType' -class TestWALPersistence: - """Test automatic WAL persistence functionality""" - - async def test_wal_persistence_handler(self, tmp_path): - """Test that events are automatically persisted to WAL file""" - # Create event bus with WAL path - wal_path = tmp_path / 'test_events.jsonl' - bus = EventBus(name='TestBus', wal_path=wal_path) - - try: - # Emit some events - events = [] - for i in range(3): - event = UserActionEvent(action=f'action_{i}', user_id=f'user_{i}') - emitted_event = bus.dispatch(event) - completed_event = await emitted_event - events.append(completed_event) - - # Wait for processing - await bus.wait_until_idle() - - # Check WAL file exists - assert 
wal_path.exists() - - # Read and verify JSONL content - lines = wal_path.read_text().strip().split('\n') - assert len(lines) == 3 - - # Parse each line as JSON - for i, line in enumerate(lines): - data = json.loads(line) - assert data['action'] == f'action_{i}' - assert data['user_id'] == f'user_{i}' - assert data['event_type'] == 'UserActionEvent' - assert isinstance(data['event_created_at'], str) - datetime.fromisoformat(data['event_created_at']) - - finally: - await bus.stop() - - async def test_wal_persistence_creates_parent_dir(self, tmp_path): - """Test that WAL persistence creates parent directories""" - # Use a nested path that doesn't exist - wal_path = tmp_path / 'nested' / 'dirs' / 'events.jsonl' - assert not wal_path.parent.exists() - - # Create event bus - bus = EventBus(name='TestBus', wal_path=wal_path) - - try: - # Emit an event - event = bus.dispatch(UserActionEvent(action='test', user_id='u1')) - await event - - # Wait for WAL persistence to complete - await bus.wait_until_idle() - - # Parent directory should be created after event is processed - assert wal_path.parent.exists() - - # Check file was created - assert wal_path.exists() - finally: - await bus.stop() - - async def test_wal_persistence_skips_incomplete_events(self, tmp_path): - """Test that WAL persistence only writes completed events""" - wal_path = tmp_path / 'incomplete_events.jsonl' - bus = EventBus(name='TestBus', wal_path=wal_path) - - try: - # Add a slow handler that will delay completion - async def slow_handler(event: BaseEvent) -> str: - await asyncio.sleep(0.1) - return 'slow' - - bus.on('UserActionEvent', slow_handler) - - # Emit event without waiting - event = bus.dispatch(UserActionEvent(action='test', user_id='u1')) - - # Check file doesn't exist yet (event not completed) - assert not wal_path.exists() - - # Wait for completion - event = await event - await bus.wait_until_idle() - - # Now file should exist with completed event - assert wal_path.exists() - lines = 
wal_path.read_text().strip().split('\n') - assert len(lines) == 1 - data = json.loads(lines[0]) - assert data['event_type'] == 'UserActionEvent' - # The WAL should have been written after the event completed - assert data['action'] == 'test' - assert data['user_id'] == 'u1' - - finally: - await bus.stop() - - class TestEventBusHierarchy: """Test hierarchical EventBus subscription patterns""" @@ -823,14 +952,14 @@ async def subchild_handler(event: BaseEvent) -> str: # Subscribe buses to each other: parent <- child <- subchild # Child forwards events to parent - child_bus.on('*', parent_bus.dispatch) + child_bus.on('*', parent_bus.emit) # Subchild forwards events to child - subchild_bus.on('*', child_bus.dispatch) + subchild_bus.on('*', child_bus.emit) try: # Emit event from the bottom of hierarchy - event = UserActionEvent(action='bubble_test', user_id='test_user') - emitted = subchild_bus.dispatch(event) + event = UserActionEvent(action='bubble_test', user_id='371bbd3c-5231-7ff0-8aef-e63732a8d40f') + emitted = subchild_bus.emit(event) # Wait for event to bubble up await subchild_bus.wait_until_idle() @@ -844,11 +973,11 @@ async def subchild_handler(event: BaseEvent) -> str: # Verify event_path shows the complete journey final_event = events_at_parent[0] - assert final_event.event_path == ['SubchildBus', 'ChildBus', 'ParentBus'] + assert final_event.event_path == [subchild_bus.label, child_bus.label, parent_bus.label] # Verify it's the same event content assert final_event.action == 'bubble_test' - assert final_event.user_id == 'test_user' + assert final_event.user_id == '371bbd3c-5231-7ff0-8aef-e63732a8d40f' assert final_event.event_id == emitted.event_id # Test event emitted at middle level @@ -856,8 +985,8 @@ async def subchild_handler(event: BaseEvent) -> str: events_at_child.clear() events_at_subchild.clear() - middle_event = SystemEventModel(event_name='middle_test') - child_bus.dispatch(middle_event) + middle_event = SystemEventModel(name='middle_test') + 
child_bus.emit(middle_event) await child_bus.wait_until_idle() await parent_bus.wait_until_idle() @@ -866,7 +995,7 @@ async def subchild_handler(event: BaseEvent) -> str: assert len(events_at_subchild) == 0 assert len(events_at_child) == 1 assert len(events_at_parent) == 1 - assert events_at_parent[0].event_path == ['ChildBus', 'ParentBus'] + assert events_at_parent[0].event_path == [child_bus.label, parent_bus.label] finally: await parent_bus.stop() @@ -903,20 +1032,39 @@ async def peer3_handler(event: BaseEvent) -> str: peer3.on('*', peer3_handler) # Create circular subscription: peer1 -> peer2 -> peer3 -> peer1 - peer1.on('*', peer2.dispatch) - peer2.on('*', peer3.dispatch) - peer3.on('*', peer1.dispatch) # This completes the circle + peer1.on('*', peer2.emit) + peer2.on('*', peer3.emit) + peer3.on('*', peer1.emit) # This completes the circle + + def dump_bus_state() -> str: + buses = [peer1, peer2, peer3] + lines: list[str] = [] + for bus in buses: + queue_size = bus.pending_event_queue.qsize() if bus.pending_event_queue else 0 + lines.append( + f'{bus.label} queue={queue_size} active={len(bus.in_flight_event_ids)} processing={len(bus.processing_event_ids)} history={len(bus.event_history)}' + ) + lines.append('--- peer1.log_tree() ---') + lines.append(peer1.log_tree()) + lines.append('--- peer2.log_tree() ---') + lines.append(peer2.log_tree()) + lines.append('--- peer3.log_tree() ---') + lines.append(peer3.log_tree()) + return '\n'.join(lines) try: # Emit event from peer1 - event = UserActionEvent(action='circular_test', user_id='test_user') - emitted = peer1.dispatch(event) + event = UserActionEvent(action='circular_test', user_id='371bbd3c-5231-7ff0-8aef-e63732a8d40f') + emitted = peer1.emit(event) # Wait for all processing to complete await asyncio.sleep(0.2) # Give time for any potential loops - await peer1.wait_until_idle() - await peer2.wait_until_idle() - await peer3.wait_until_idle() + try: + await asyncio.wait_for(peer1.wait_until_idle(), timeout=5) + 
await asyncio.wait_for(peer2.wait_until_idle(), timeout=5) + await asyncio.wait_for(peer3.wait_until_idle(), timeout=5) + except TimeoutError: + pytest.fail(f'Circular test stalled during first propagation.\n{dump_bus_state()}') # Each peer should receive the event exactly once assert len(events_at_peer1) == 1 @@ -924,9 +1072,9 @@ async def peer3_handler(event: BaseEvent) -> str: assert len(events_at_peer3) == 1 # Check event paths show the propagation but no loops - assert events_at_peer1[0].event_path == ['Peer1', 'Peer2', 'Peer3'] - assert events_at_peer2[0].event_path == ['Peer1', 'Peer2', 'Peer3'] - assert events_at_peer3[0].event_path == ['Peer1', 'Peer2', 'Peer3'] + assert events_at_peer1[0].event_path == [peer1.label, peer2.label, peer3.label] + assert events_at_peer2[0].event_path == [peer1.label, peer2.label, peer3.label] + assert events_at_peer3[0].event_path == [peer1.label, peer2.label, peer3.label] # The event should NOT come back to peer1 from peer3 # because peer3's emit handler will detect peer1 is already in the path @@ -939,22 +1087,25 @@ async def peer3_handler(event: BaseEvent) -> str: events_at_peer2.clear() events_at_peer3.clear() - event2 = SystemEventModel(event_name='circular_test_2') - peer2.dispatch(event2) + event2 = SystemEventModel(name='circular_test_2') + peer2.emit(event2) await asyncio.sleep(0.2) - await peer1.wait_until_idle() - await peer2.wait_until_idle() - await peer3.wait_until_idle() + try: + await asyncio.wait_for(peer1.wait_until_idle(), timeout=5) + await asyncio.wait_for(peer2.wait_until_idle(), timeout=5) + await asyncio.wait_for(peer3.wait_until_idle(), timeout=5) + except TimeoutError: + pytest.fail(f'Circular test stalled during second propagation.\n{dump_bus_state()}') # Should visit peer2 -> peer3 -> peer1, then stop assert len(events_at_peer1) == 1 assert len(events_at_peer2) == 1 assert len(events_at_peer3) == 1 - assert events_at_peer2[0].event_path == ['Peer2', 'Peer3', 'Peer1'] - assert 
events_at_peer3[0].event_path == ['Peer2', 'Peer3', 'Peer1'] - assert events_at_peer1[0].event_path == ['Peer2', 'Peer3', 'Peer1'] + assert events_at_peer2[0].event_path == [peer2.label, peer3.label, peer1.label] + assert events_at_peer3[0].event_path == [peer2.label, peer3.label, peer1.label] + assert events_at_peer1[0].event_path == [peer2.label, peer3.label, peer1.label] finally: await peer1.stop() @@ -962,128 +1113,120 @@ async def peer3_handler(event: BaseEvent) -> str: await peer3.stop() -class TestExpectMethod: - """Test the expect() method functionality""" +class TestFindMethod: + """Test find() behavior for future waits and filtering.""" - async def test_expect_basic(self, eventbus): - """Test basic expect functionality""" + async def test_find_future_basic(self, eventbus): + """Test basic future find functionality.""" # Start waiting for an event that hasn't been dispatched yet - expect_task = asyncio.create_task(eventbus.expect('UserActionEvent', timeout=1.0)) + find_task = asyncio.create_task(eventbus.find('UserActionEvent', past=False, future=1.0)) - # Give expect time to register handler + # Give find time to register waiter await asyncio.sleep(0.01) # Dispatch the event - dispatched = eventbus.dispatch(UserActionEvent(action='login', user_id='user123')) + dispatched = eventbus.emit(UserActionEvent(action='login', user_id='50d357df-e68c-7111-8a6c-7018569514b0')) - # Wait for expect to resolve - received = await expect_task + # Wait for find to resolve + received = await find_task # Verify we got the right event assert received.event_type == 'UserActionEvent' assert received.action == 'login' - assert received.user_id == 'user123' + assert received.user_id == '50d357df-e68c-7111-8a6c-7018569514b0' assert received.event_id == dispatched.event_id - async def test_expect_with_predicate(self, eventbus): - """Test expect with predicate filtering""" + async def test_find_future_with_predicate(self, eventbus): + """Test future find with where predicate 
filtering.""" # Dispatch some events that don't match - eventbus.dispatch(UserActionEvent(action='logout', user_id='user456')) - eventbus.dispatch(UserActionEvent(action='login', user_id='user789')) + eventbus.emit(UserActionEvent(action='logout', user_id='eab58ec9-90ea-7758-893f-afed99518f43')) + eventbus.emit(UserActionEvent(action='login', user_id='dce05df3-8e9b-7159-84f9-5ab894dddbd7')) - # Start expecting with predicate - expect_task = asyncio.create_task( - eventbus.expect('UserActionEvent', predicate=lambda e: e.user_id == 'user123', timeout=1.0) + find_task = asyncio.create_task( + eventbus.find( + 'UserActionEvent', where=lambda e: e.user_id == '50d357df-e68c-7111-8a6c-7018569514b0', past=False, future=1.0 + ) ) - # Give expect time to register + # Give find time to register await asyncio.sleep(0.01) # Dispatch more events - eventbus.dispatch(UserActionEvent(action='update', user_id='user456')) - target_event = eventbus.dispatch(UserActionEvent(action='login', user_id='user123')) - eventbus.dispatch(UserActionEvent(action='delete', user_id='user789')) + eventbus.emit(UserActionEvent(action='update', user_id='eab58ec9-90ea-7758-893f-afed99518f43')) + target_event = eventbus.emit(UserActionEvent(action='login', user_id='50d357df-e68c-7111-8a6c-7018569514b0')) + eventbus.emit(UserActionEvent(action='delete', user_id='dce05df3-8e9b-7159-84f9-5ab894dddbd7')) # Wait for the matching event - received = await expect_task + received = await find_task # Should get the event matching the predicate - assert received.user_id == 'user123' + assert received.user_id == '50d357df-e68c-7111-8a6c-7018569514b0' assert received.event_id == target_event.event_id - async def test_expect_timeout(self, eventbus): - """Test expect timeout behavior""" - # Expect an event that will never come - with pytest.raises(TimeoutError): - await eventbus.expect('NonExistentEvent', timeout=0.1) + async def test_find_future_timeout(self, eventbus): + """Test future find timeout behavior.""" + 
result = await eventbus.find('NonExistentEvent', past=False, future=0.1) + assert result is None - async def test_expect_with_model_class(self, eventbus): - """Test expect with model class instead of string""" - # Start expecting by model class - expect_task = asyncio.create_task(eventbus.expect(SystemEventModel, timeout=1.0)) + async def test_find_future_with_model_class(self, eventbus): + """Test future find with model class instead of string.""" + find_task = asyncio.create_task(eventbus.find(SystemEventModel, past=False, future=1.0)) await asyncio.sleep(0.01) # Dispatch different event types - eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) - target = eventbus.dispatch(SystemEventModel(event_name='startup', severity='info')) + eventbus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + target = eventbus.emit(SystemEventModel(name='startup', severity='info')) # Should receive the SystemEventModel - received = await expect_task + received = await find_task assert isinstance(received, SystemEventModel) - assert received.event_name == 'startup' + assert received.name == 'startup' assert received.event_id == target.event_id - async def test_multiple_concurrent_expects(self, eventbus): - """Test multiple concurrent expect calls""" - # Set up multiple expects for different events - expect1 = asyncio.create_task(eventbus.expect('UserActionEvent', predicate=lambda e: e.action == 'normal', timeout=2.0)) - expect2 = asyncio.create_task(eventbus.expect('SystemEventModel', timeout=2.0)) - expect3 = asyncio.create_task(eventbus.expect('UserActionEvent', predicate=lambda e: e.action == 'special', timeout=2.0)) + async def test_multiple_concurrent_future_finds(self, eventbus): + """Test multiple concurrent future find calls.""" + find1 = asyncio.create_task( + eventbus.find('UserActionEvent', where=lambda e: e.action == 'normal', past=False, future=2.0) + ) + find2 = asyncio.create_task(eventbus.find('SystemEventModel', 
past=False, future=2.0)) + find3 = asyncio.create_task( + eventbus.find('UserActionEvent', where=lambda e: e.action == 'special', past=False, future=2.0) + ) await asyncio.sleep(0.1) # Give more time for handlers to register # Dispatch events - e1 = eventbus.dispatch(UserActionEvent(action='normal', user_id='u1')) - e2 = eventbus.dispatch(SystemEventModel(event_name='test')) - e3 = eventbus.dispatch(UserActionEvent(action='special', user_id='u2')) + e1 = eventbus.emit(UserActionEvent(action='normal', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + e2 = eventbus.emit(SystemEventModel(name='test')) + e3 = eventbus.emit(UserActionEvent(action='special', user_id='2a312e4d-3035-7883-86b9-578ce47046b2')) # Wait for all events to be processed await eventbus.wait_until_idle() - # Wait for all expects - r1, r2, r3 = await asyncio.gather(expect1, expect2, expect3) + # Wait for all find tasks + r1, r2, r3 = await asyncio.gather(find1, find2, find3) # Verify results assert r1.event_id == e1.event_id # Normal UserActionEvent assert r2.event_id == e2.event_id # SystemEventModel assert r3.event_id == e3.event_id # Special UserActionEvent - async def test_expect_handler_cleanup(self, eventbus): - """Test that temporary handlers are properly cleaned up""" - # Check initial handler count - initial_handlers = len(eventbus.handlers.get('TestEvent', [])) - - # Create an expect that times out - try: - await eventbus.expect('TestEvent', timeout=0.1) - except TimeoutError: - pass - - # Handler should be cleaned up - assert len(eventbus.handlers.get('TestEvent', [])) == initial_handlers + async def test_find_waiter_cleanup(self, eventbus): + """Test that temporary find waiters are properly cleaned up.""" + initial_waiters = len(eventbus.find_waiters) + result = await eventbus.find('TestEvent', past=False, future=0.1) + assert result is None + assert len(eventbus.find_waiters) == initial_waiters - # Create an expect that succeeds - expect_task = 
asyncio.create_task(eventbus.expect('TestEvent2', timeout=1.0)) + find_task = asyncio.create_task(eventbus.find('TestEvent2', past=False, future=1.0)) await asyncio.sleep(0.01) - eventbus.dispatch(BaseEvent(event_type='TestEvent2')) - await expect_task - - # Handler should be cleaned up - assert len(eventbus.handlers.get('TestEvent2', [])) == 0 + eventbus.emit(BaseEvent(event_type='TestEvent2')) + await find_task + assert len(eventbus.find_waiters) == initial_waiters - async def test_expect_receives_completed_event(self, eventbus): - """Test that expect receives events after they're fully processed""" + async def test_find_future_receives_dispatched_event_before_completion(self, eventbus): + """Test that future find resolves before slow handlers complete.""" processing_complete = False async def slow_handler(event: BaseEvent) -> str: @@ -1095,25 +1238,143 @@ async def slow_handler(event: BaseEvent) -> str: # Register a slow handler eventbus.on('SlowEvent', slow_handler) - # Start expecting - expect_task = asyncio.create_task(eventbus.expect('SlowEvent', timeout=1.0)) + # Start future find + find_task = asyncio.create_task(eventbus.find('SlowEvent', past=False, future=1.0)) await asyncio.sleep(0.01) # Dispatch event - eventbus.dispatch(BaseEvent(event_type='SlowEvent')) + eventbus.emit(BaseEvent(event_type='SlowEvent')) - # Wait for expect - received = await expect_task + # Wait for find + received = await find_task - # At this point, the slow handler should have run - # but we receive the event as soon as it matches assert received.event_type == 'SlowEvent' - # The event might not be fully completed yet since expect - # triggers as soon as the event is processed by its handler + assert processing_complete is False + + # Find resolves on dispatch; handler result entries may or may not exist yet. 
+ slow_result = next( + (res for res in received.event_results.values() if res.handler_name.endswith('slow_handler')), + None, + ) + if slow_result is not None: + assert slow_result.status != 'completed' + + await eventbus.wait_until_idle() + assert processing_complete is True + + +class TestFindPastMethod: + """Tests for history-only find behavior.""" + + async def test_find_past_returns_most_recent(self, eventbus): + # Dispatch two events and ensure the newest is returned + eventbus.emit(UserActionEvent(action='first', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + latest = eventbus.emit(UserActionEvent(action='second', user_id='2a312e4d-3035-7883-86b9-578ce47046b2')) + await eventbus.wait_until_idle() + + match = await eventbus.find('UserActionEvent', past=10, future=False) + assert match is not None + assert match.event_id == latest.event_id + + async def test_find_past_respects_time_window(self, eventbus): + event = eventbus.emit(UserActionEvent(action='old', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + await eventbus.wait_until_idle() + old_created_at = datetime.fromisoformat(event.event_created_at) - timedelta(seconds=30) + event.event_created_at = monotonic_datetime(old_created_at.isoformat().replace('+00:00', 'Z')) + + match = await eventbus.find('UserActionEvent', past=10, future=False) + assert match is None + + async def test_find_past_can_match_incomplete_events(self, eventbus): + processing = asyncio.Event() - async def test_expect_with_complex_predicate(self, eventbus): - """Test expect with complex predicate logic""" + async def slow_handler(evt: UserActionEvent) -> None: + await asyncio.sleep(0.05) + processing.set() + + eventbus.on('UserActionEvent', slow_handler) + + pending_event = eventbus.emit(UserActionEvent(action='slow', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + + # While handler is running, past find can still match in-flight events + in_flight = await eventbus.find('UserActionEvent', past=10, future=False) + assert 
in_flight is not None + assert in_flight.event_id == pending_event.event_id + + await pending_event + await processing.wait() + + match = await eventbus.find('UserActionEvent', past=10, future=False) + assert match is not None + assert match.event_id == pending_event.event_id + + +class TestDebouncePatterns: + """End-to-end scenarios for debounce-style flows.""" + + class DebounceEvent(BaseEvent): + user_id: int + + async def test_debounce_prefers_recent_history(self, eventbus): + # First event completes + initial = await eventbus.emit(self.DebounceEvent(user_id=123)) + await eventbus.wait_until_idle() + + # Compose the debounce pattern: find(past) -> find(future) -> dispatch + resolved = ( + await eventbus.find(self.DebounceEvent, past=10, future=False) + or await eventbus.find(self.DebounceEvent, past=False, future=0.05) + or await eventbus.emit(self.DebounceEvent(user_id=123)) + ) + + assert resolved is not None + assert resolved.event_id == initial.event_id + + total_events = sum(1 for event in eventbus.event_history.values() if isinstance(event, self.DebounceEvent)) + assert total_events == 1 + + async def test_debounce_dispatches_when_recent_missing(self, eventbus): + resolved = ( + await eventbus.find(self.DebounceEvent, past=1, future=False) + or await eventbus.find(self.DebounceEvent, past=False, future=0.05) + or await eventbus.emit(self.DebounceEvent(user_id=999)) + ) + + assert resolved is not None + assert isinstance(resolved, self.DebounceEvent) + assert resolved.user_id == 999 + + await eventbus.wait_until_idle() + + total_events = sum(1 for event in eventbus.event_history.values() if isinstance(event, self.DebounceEvent)) + assert total_events == 1 + + async def test_debounce_uses_future_match_before_dispatch_fallback(self, eventbus): + async def dispatch_after_delay() -> BaseEvent: + await asyncio.sleep(0.02) + return eventbus.emit(self.DebounceEvent(user_id=555)) + + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + resolved = ( + 
await eventbus.find(self.DebounceEvent, past=1, future=False) + or await eventbus.find(self.DebounceEvent, past=False, future=0.1) + or await eventbus.emit(self.DebounceEvent(user_id=999)) + ) + + dispatched = await dispatch_task + assert resolved is not None + assert isinstance(resolved, self.DebounceEvent) + assert resolved.event_id == dispatched.event_id + assert resolved.user_id == 555 + + await eventbus.wait_until_idle() + total_events = sum(1 for event in eventbus.event_history.values() if isinstance(event, self.DebounceEvent)) + assert total_events == 1 + + async def test_find_with_complex_predicate(self, eventbus): + """Test future find with complex predicate logic.""" events_seen = [] def complex_predicate(event: UserActionEvent) -> bool: @@ -1124,37 +1385,21 @@ def complex_predicate(event: UserActionEvent) -> bool: return result return False - expect_task = asyncio.create_task(eventbus.expect('UserActionEvent', predicate=complex_predicate, timeout=1.0)) + find_task = asyncio.create_task(eventbus.find('UserActionEvent', where=complex_predicate, past=False, future=1.0)) await asyncio.sleep(0.01) # Dispatch events - eventbus.dispatch(UserActionEvent(action='first', user_id='u1')) - eventbus.dispatch(UserActionEvent(action='second', user_id='u2')) - eventbus.dispatch(UserActionEvent(action='target', user_id='u3')) # Won't match yet - eventbus.dispatch(UserActionEvent(action='target', user_id='u4')) # This should match + eventbus.emit(UserActionEvent(action='first', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + eventbus.emit(UserActionEvent(action='second', user_id='2a312e4d-3035-7883-86b9-578ce47046b2')) + eventbus.emit(UserActionEvent(action='target', user_id='6eb8a717-e19d-728b-8905-97f7e20c002e')) # Won't match yet + eventbus.emit(UserActionEvent(action='target', user_id='840ea1d0-3500-7be5-8f73-5fd29bb46e89')) # This should match - received = await expect_task + received = await find_task - assert received.user_id == 'u4' + assert received.user_id == 
'840ea1d0-3500-7be5-8f73-5fd29bb46e89' assert len(events_seen) == 4 - async def test_expect_in_sync_context(self, mock_agent): - """Test that expect can be used from sync code that later awaits""" - bus = EventBus() - - # This simulates calling expect from sync code - expect_coroutine = bus.expect('SyncEvent', timeout=1.0) - - # Dispatch event - bus.dispatch(BaseEvent(event_type='SyncEvent')) - - # Later await the coroutine - result = await expect_coroutine - assert result.event_type == 'SyncEvent' - - await bus.stop() - class TestEventResults: """Test the event results functionality on BaseEvent""" @@ -1168,20 +1413,17 @@ async def test_handler(event): eventbus.on('UserActionEvent', test_handler) - result = eventbus.dispatch(UserActionEvent(action='test', user_id='u1')) + result = eventbus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) assert isinstance(result, BaseEvent) # Wait for completion await result - # Get results by handler ID - all_results = await result.event_results_flat_dict() - assert isinstance(all_results, dict) - # Should contain only test_handler result - assert len(all_results) == 1 - assert all_results['result'] == 'test_result' + all_results = await result.event_results_list() + assert isinstance(all_results, list) + assert all_results == [{'result': 'test_result'}] # Test with no specific handlers - result_no_handlers = eventbus.dispatch(BaseEvent(event_type='NoHandlersEvent')) + result_no_handlers = eventbus.emit(BaseEvent(event_type='NoHandlersEvent')) await result_no_handlers # Should have no handlers assert len(result_no_handlers.event_results) == 0 @@ -1207,7 +1449,7 @@ async def handler3(event): eventbus.on('TestEvent', handler3) # Test indexing - event = await eventbus.dispatch(BaseEvent(event_type='TestEvent')) + event = await eventbus.emit(BaseEvent(event_type='TestEvent')) # Get results by handler name handler1_result = next((r for r in event.event_results.values() if 
r.handler_name.endswith('handler1')), None) @@ -1231,7 +1473,7 @@ async def late_handler(event): eventbus.on('TestEvent', early_handler) eventbus.on('TestEvent', late_handler) - result = await eventbus.dispatch(BaseEvent(event_type='TestEvent')) + result = await eventbus.emit(BaseEvent(event_type='TestEvent')) # Check both handlers ran assert len(result.event_results) == 2 @@ -1241,8 +1483,8 @@ async def late_handler(event): assert late_result is not None and late_result.result == 'late' # With empty handlers - eventbus.handlers['EmptyEvent'] = [] - results_empty = eventbus.dispatch(BaseEvent(event_type='EmptyEvent')) + eventbus.handlers_by_key['EmptyEvent'] = [] + results_empty = eventbus.emit(BaseEvent(event_type='EmptyEvent')) await results_empty # Should have no handlers assert len(results_empty.event_results) == 0 @@ -1267,7 +1509,7 @@ async def unique_handler(event): eventbus.on('TestEvent', process_data2) eventbus.on('TestEvent', unique_handler) - event = await eventbus.dispatch(BaseEvent(event_type='TestEvent')) + event = await eventbus.emit(BaseEvent(event_type='TestEvent')) # Check results - with duplicate names, both handlers run process_results = [r for r in event.event_results.values() if r.handler_name.endswith('process_data')] @@ -1294,10 +1536,10 @@ async def handler2(event): eventbus.on('TestEvent', handler1) eventbus.on('TestEvent', handler2) - event = await eventbus.dispatch(BaseEvent(event_type='TestEvent')) + event = await eventbus.emit(BaseEvent(event_type='TestEvent')) - # Get results by handler ID using the method that exists - results = await event.event_results_by_handler_id() + await event.event_results_list() + results = {handler_id: result.result for handler_id, result in event.event_results.items()} # All handlers present with unique IDs even with same name # Should have 2 results: handler1, handler2 @@ -1305,8 +1547,8 @@ async def handler2(event): assert 'v1' in results.values() assert 'v2' in results.values() - async def 
test_flat_dict(self, eventbus): - """Test event_results_flat_dict() merging""" + async def test_manual_dict_merge(self, eventbus): + """Users can merge dict handler results manually from event_results_list().""" async def config_base(event): return {'debug': False, 'port': 8080, 'name': 'base'} @@ -1317,8 +1559,11 @@ async def config_override(event): eventbus.on('GetConfig', config_base) eventbus.on('GetConfig', config_override) - event = await eventbus.dispatch(BaseEvent(event_type='GetConfig')) - merged = await event.event_results_flat_dict(raise_if_conflicts=False) + event = await eventbus.emit(BaseEvent(event_type='GetConfig')) + merged = {} + for result in await event.event_results_list(include=lambda r: isinstance(r.result, dict), raise_if_any=False): + assert isinstance(result, dict) + merged.update(result) # Later handlers override earlier ones assert merged == { @@ -1333,14 +1578,43 @@ async def bad_handler(event): return 'not a dict' eventbus.on('BadConfig', bad_handler) - event_bad = await eventbus.dispatch(BaseEvent(event_type='BadConfig')) - - # Non-dict results should be skipped, not raise error - merged_bad = await event_bad.event_results_flat_dict() + event_bad = await eventbus.emit(BaseEvent(event_type='BadConfig')) + + merged_bad = {} + for result in await event_bad.event_results_list( + include=lambda r: isinstance(r.result, dict), + raise_if_any=False, + raise_if_none=False, + ): + assert isinstance(result, dict) + merged_bad.update(result) assert merged_bad == {} # Empty dict since no dict results - async def test_flat_list(self, eventbus): - """Test event_results_flat_list() concatenation""" + async def test_manual_dict_merge_conflicts_last_write_wins(self, eventbus): + """Manual dict merge from results is explicit and uses user-defined conflict behavior.""" + + async def handler_one(event): + return {'shared': 1, 'unique1': 'a'} + + async def handler_two(event): + return {'shared': 2, 'unique2': 'b'} + + eventbus.on('ConflictEvent', 
handler_one) + eventbus.on('ConflictEvent', handler_two) + + event = await eventbus.emit(BaseEvent(event_type='ConflictEvent')) + + merged = {} + for result in await event.event_results_list(include=lambda r: isinstance(r.result, dict), raise_if_any=False): + assert isinstance(result, dict) + merged.update(result) + + assert merged['shared'] == 2 + assert merged['unique1'] == 'a' + assert merged['unique2'] == 'b' + + async def test_manual_list_flatten(self, eventbus): + """Users can flatten list handler results manually from event_results_list().""" async def errors1(event): return ['error1', 'error2'] @@ -1355,8 +1629,13 @@ async def errors3(event): eventbus.on('GetErrors', errors2) eventbus.on('GetErrors', errors3) - event = await eventbus.dispatch(BaseEvent(event_type='GetErrors')) - all_errors = await event.event_results_flat_list() + event = await eventbus.emit(BaseEvent(event_type='GetErrors')) + all_errors = [ + item + for result in await event.event_results_list(include=lambda r: isinstance(r.result, list), raise_if_any=False) + if isinstance(result, list) + for item in result + ] # Check that all errors are collected (order may vary due to handler execution) assert all_errors == ['error1', 'error2', 'error3', 'error4', 'error5'] @@ -1366,9 +1645,18 @@ async def single_value(event): return 'single' eventbus.on('GetSingle', single_value) - event_single = await eventbus.dispatch(BaseEvent(event_type='GetSingle')) - - result = await event_single.event_results_flat_list(raise_if_none=False) + event_single = await eventbus.emit(BaseEvent(event_type='GetSingle')) + + result = [ + item + for nested in await event_single.event_results_list( + include=lambda r: isinstance(r.result, list), + raise_if_any=False, + raise_if_none=False, + ) + if isinstance(nested, list) + for item in nested + ] assert 'single' not in result # Single values should be skipped, as they are not lists assert len(result) == 0 @@ -1384,7 +1672,7 @@ async def handler_b(event): 
eventbus.on('TestEvent', handler_a) eventbus.on('TestEvent', handler_b) - event = await eventbus.dispatch(BaseEvent(event_type='TestEvent')) + event = await eventbus.emit(BaseEvent(event_type='TestEvent')) # Access results by handler name handler_a_result = next((r for r in event.event_results.values() if r.handler_name.endswith('handler_a')), None) @@ -1400,7 +1688,7 @@ async def my_handler(event): return 'my_result' eventbus.on('TestEvent', my_handler) - event = await eventbus.dispatch(BaseEvent(event_type='TestEvent')) + event = await eventbus.emit(BaseEvent(event_type='TestEvent')) # Access result by handler name my_handler_result = next((r for r in event.event_results.values() if r.handler_name.endswith('my_handler')), None) @@ -1440,12 +1728,12 @@ async def bus3_handler(event): bus3.on('TestEvent', bus3_handler) # Set up forwarding chain - bus1.on('*', bus2.dispatch) - bus2.on('*', bus3.dispatch) + bus1.on('*', bus2.emit) + bus2.on('*', bus3.emit) try: # Dispatch from bus1 - event = bus1.dispatch(BaseEvent(event_type='TestEvent')) + event = bus1.emit(BaseEvent(event_type='TestEvent')) # Wait for all buses to complete processing await bus1.wait_until_idle() @@ -1491,10 +1779,10 @@ async def plugin_handler2(event): bus2.on('DataEvent', plugin_handler2) # Forward from bus1 to bus2 - bus1.on('*', bus2.dispatch) + bus1.on('*', bus2.emit) try: - event = bus1.dispatch(BaseEvent(event_type='DataEvent')) + event = bus1.emit(BaseEvent(event_type='DataEvent')) # Wait for processing await bus1.wait_until_idle() @@ -1511,7 +1799,7 @@ async def plugin_handler2(event): assert plugin2_result is not None and plugin2_result.result == 'plugin_result2' # Check event path shows forwarding - assert event.event_path == ['MainBus', 'PluginBus'] + assert event.event_path == [bus1.label, bus2.label] finally: await bus1.stop() @@ -1522,7 +1810,7 @@ class TestComplexIntegration: """Complex integration test with all features""" async def test_complex_multi_bus_scenario(self, caplog): - 
"""Test complex scenario with multiple buses, duplicate names, and all query methods""" + """Test complex scenario with multiple buses, duplicate names, and lookup flows""" # Create a hierarchy of buses app_bus = EventBus(name='AppBus') auth_bus = EventBus(name='AuthBus') @@ -1567,12 +1855,12 @@ async def data_process(event): data_bus.on('ValidationRequest', data_process) # Set up forwarding - app_bus.on('*', auth_bus.dispatch) - auth_bus.on('*', data_bus.dispatch) + app_bus.on('*', auth_bus.emit) + auth_bus.on('*', data_bus.emit) try: # Dispatch event - event = app_bus.dispatch(BaseEvent(event_type='ValidationRequest')) + event = app_bus.emit(BaseEvent(event_type='ValidationRequest')) # Wait for all processing await app_bus.wait_until_idle() @@ -1590,17 +1878,23 @@ async def data_process(event): assert len(process_results) >= 2 # Auth and Data buses # Check event path shows forwarding through all buses - assert 'AppBus' in event.event_path - assert 'AuthBus' in event.event_path - assert 'DataBus' in event.event_path - - # Test flat dict merging - dict_result = await event.event_results_flat_dict() + assert app_bus.label in event.event_path + assert auth_bus.label in event.event_path + assert data_bus.label in event.event_path + + dict_result: dict[str, Any] = {} + for result in await event.event_results_list(include=lambda r: isinstance(r.result, dict), raise_if_any=False): + assert isinstance(result, dict) + dict_result.update(result) # Should have merged all dict returns assert 'app_valid' in dict_result and 'auth_valid' in dict_result and 'data_valid' in dict_result - # Test flat list - list_result = await event.event_results_flat_list() + list_result = [ + item + for result in await event.event_results_list(include=lambda r: isinstance(r.result, list), raise_if_any=False) + if isinstance(result, list) + for item in result + ] # Should include all list items assert any('log' in str(item) for item in list_result) @@ -1610,7 +1904,7 @@ async def 
data_process(event): await data_bus.stop(timeout=0, clear=True) async def test_event_result_type_enforcement_with_dict(self): - """Test that handlers returning wrong types get errors when event expects dict result""" + """Test that handlers returning wrong types get errors when event expects dict result.""" bus = EventBus(name='TestBus') # Create an event that expects dict results @@ -1642,7 +1936,7 @@ async def list_handler(event): try: # Dispatch event - event = bus.dispatch(DictResultEvent()) + event = bus.emit(DictResultEvent()) await bus.wait_until_idle() event = await event @@ -1664,8 +1958,14 @@ async def list_handler(event): assert 'did not match expected event_result_type' in error_msg assert 'dict' in error_msg - # event_results_flat_dict should still work when raise_if_any=False, only including valid dict results - dict_result = await event.event_results_flat_dict(raise_if_any=False) + dict_result: dict[str, Any] = {} + for result in await event.event_results_list( + include=lambda r: isinstance(r.result, dict), + raise_if_any=False, + raise_if_none=False, + ): + assert isinstance(result, dict) + dict_result.update(result) assert 'key1' in dict_result and 'key2' in dict_result assert len(dict_result) == 2 # Only the two dict results @@ -1673,7 +1973,7 @@ async def list_handler(event): await bus.stop(timeout=0, clear=True) async def test_event_result_type_enforcement_with_list(self): - """Test that handlers returning wrong types get errors when event expects list result""" + """Test that handlers returning wrong types get errors when event expects list result.""" bus = EventBus(name='TestBus') # Create an event that expects list results @@ -1705,7 +2005,7 @@ async def int_handler(event): try: # Dispatch event - event = bus.dispatch(ListResultEvent()) + event = bus.emit(ListResultEvent()) await bus.wait_until_idle() event = await event @@ -1727,8 +2027,16 @@ async def int_handler(event): assert 'did not match expected event_result_type' in error_msg assert 
'list' in error_msg - # event_results_flat_list should still work when raise_if_any=False, only including valid list results - list_result = await event.event_results_flat_list(raise_if_any=False) + list_result = [ + item + for result in await event.event_results_list( + include=lambda r: isinstance(r.result, list), + raise_if_any=False, + raise_if_none=False, + ) + if isinstance(result, list) + for item in result + ] assert list_result == [1, 2, 3, 'a', 'b', 'c'] # Flattened from both list handlers finally: diff --git a/tests/test_eventbus_debounce.py b/tests/test_eventbus_debounce.py new file mode 100644 index 0000000..edc412b --- /dev/null +++ b/tests/test_eventbus_debounce.py @@ -0,0 +1,290 @@ +import asyncio +from datetime import UTC, datetime + +from bubus import BaseEvent, EventBus + + +class ParentEvent(BaseEvent[str]): + pass + + +class ScreenshotEvent(BaseEvent[str]): + target_id: str = '' + full_page: bool = False + + +class SyncEvent(BaseEvent[str]): + pass + + +TARGET_ID_1 = '9b447756-908c-7b75-8a51-4a2c2b4d9b14' +TARGET_ID_2 = '194870e1-fa02-70a4-8101-d10d57c3449c' + + +class TestDebouncingPattern: + """Tests for the debouncing pattern: find() or emit().""" + + async def test_simple_debounce_with_child_of_reuses_recent_event(self): + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.emit(ScreenshotEvent(target_id='0c1ccf21-65c0-7390-8b64-9182e985740e')) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ScreenshotEvent, lambda e: 'screenshot_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + reused = await ( + await bus.find( + ScreenshotEvent, + child_of=parent, + past=10, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id='d41abf01-392b-7f28-8992-3105a258867d')) + ) + + assert reused.event_id == child_ref[0].event_id + assert reused.event_parent_id == parent.event_id + 
finally: + await bus.stop(clear=True) + + async def test_returns_existing_fresh_event(self): + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + original = await bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + + def is_fresh(event: ScreenshotEvent) -> bool: + if event.event_completed_at is None: + return False + completed_at = datetime.fromisoformat(event.event_completed_at) + return (datetime.now(UTC) - completed_at).seconds < 5 + + def matches_fresh_tab(event: ScreenshotEvent) -> bool: + return event.target_id == TARGET_ID_1 and is_fresh(event) + + result = await ( + await bus.find( + ScreenshotEvent, + where=matches_fresh_tab, + past=True, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + ) + + assert result.event_id == original.event_id + + finally: + await bus.stop(clear=True) + + async def test_advanced_debounce_prefers_history_then_waits_future_then_dispatches(self): + bus = EventBus() + + try: + pending_event = asyncio.create_task(bus.find(SyncEvent, past=False, future=0.5)) + + async def dispatch_later() -> None: + await asyncio.sleep(0.05) + await bus.emit(SyncEvent()) + + dispatch_task = asyncio.create_task(dispatch_later()) + + resolved_event = await ( + (await bus.find(SyncEvent, past=True, future=False)) or (await pending_event) or bus.emit(SyncEvent()) + ) + + await dispatch_task + assert resolved_event is not None + assert resolved_event.event_type == 'SyncEvent' + finally: + await bus.stop(clear=True) + + async def test_dispatches_new_when_no_match(self): + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + result = await ( + await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_1, + past=True, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + ) + + assert result is not None + assert result.target_id == TARGET_ID_1 + assert result.event_status == 'completed' + + finally: + await bus.stop(clear=True) + + async def 
test_dispatches_new_when_stale(self): + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + await bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + + result = await ( + await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_1 and False, + past=True, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + ) + + assert result is not None + screenshots = [e for e in bus.event_history.values() if isinstance(e, ScreenshotEvent)] + assert len(screenshots) == 2 + + finally: + await bus.stop(clear=True) + + async def test_find_past_only_returns_immediately_without_waiting(self): + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + start = datetime.now(UTC) + result = await bus.find(ParentEvent, past=True, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result is None + assert elapsed < 0.05 + + finally: + await bus.stop(clear=True) + + async def test_find_past_float_returns_immediately_without_waiting(self): + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + start = datetime.now(UTC) + result = await bus.find(ParentEvent, past=5, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result is None + assert elapsed < 0.05 + + finally: + await bus.stop(clear=True) + + async def test_or_chain_without_waiting_finds_existing(self): + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + original = await bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + + start = datetime.now(UTC) + result = await ( + await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_1, + past=True, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + ) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result.event_id == original.event_id + assert elapsed < 0.1 + + finally: + await bus.stop(clear=True) + + async def 
test_or_chain_without_waiting_dispatches_when_no_match(self): + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + start = datetime.now(UTC) + result = await ( + await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_1, + past=True, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + ) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result is not None + assert result.target_id == TARGET_ID_1 + assert elapsed < 0.1 + + finally: + await bus.stop(clear=True) + + async def test_or_chain_multiple_sequential_lookups(self): + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + start = datetime.now(UTC) + + result1 = await ( + await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_1, + past=True, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + ) + + result2 = await ( + await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_1, + past=True, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + ) + + result3 = await ( + await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_2, + past=True, + future=False, + ) + or bus.emit(ScreenshotEvent(target_id=TARGET_ID_2)) + ) + + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert result1.event_id == result2.event_id + assert result3.event_id != result1.event_id + assert result3.target_id == TARGET_ID_2 + assert elapsed < 0.2 + + finally: + await bus.stop(clear=True) diff --git a/tests/test_eventbus_dispatch_contextvars.py b/tests/test_eventbus_dispatch_contextvars.py new file mode 100644 index 0000000..eee3563 --- /dev/null +++ b/tests/test_eventbus_dispatch_contextvars.py @@ -0,0 +1,431 @@ +""" +Tests for ContextVar propagation through event dispatch and handler execution. + +This addresses GitHub issue #20: ContextVar values set before dispatch should +be accessible inside event handlers. 
+ +The key insight is that context must be captured at DISPATCH time (when the +user calls bus.emit()), not at PROCESSING time (when the event is pulled +from the queue and handlers are executed). +""" + +# pyright: reportUnusedVariable=false +# pyright: reportUnusedFunction=false + +import asyncio +from contextvars import ContextVar +from typing import Any + +import pytest + +from bubus import BaseEvent, EventBus + +# Test context variables (simulating user-defined context like request_id) +request_id_var: ContextVar[str] = ContextVar('request_id', default='') +user_id_var: ContextVar[str] = ContextVar('user_id', default='') +trace_id_var: ContextVar[str] = ContextVar('trace_id', default='') + + +class SimpleEvent(BaseEvent[str]): + """Simple event for context propagation tests.""" + + pass + + +class ChildEvent(BaseEvent[str]): + """Child event for nested context tests.""" + + pass + + +class TestContextPropagation: + """Test that ContextVar values propagate from dispatch site to handlers.""" + + async def test_contextvar_propagates_to_handler(self): + """ + Basic test: ContextVar set before dispatch should be accessible in handler. + + This is the core issue from GitHub #20. 
+ """ + bus = EventBus(name='ContextTestBus') + captured_values: dict[str, str] = {} + + async def handler(event: SimpleEvent) -> str: + # These should have the values set BEFORE dispatch, not defaults + captured_values['request_id'] = request_id_var.get() + captured_values['user_id'] = user_id_var.get() + return 'handled' + + bus.on(SimpleEvent, handler) + + try: + # Set context values (simulating FastAPI request context) + request_id_var.set('req-12345') + user_id_var.set('user-abc') + + # Dispatch and await + event = await bus.emit(SimpleEvent()) + + # Handler should have seen the context values + assert captured_values['request_id'] == 'req-12345', f"Expected 'req-12345', got '{captured_values['request_id']}'" + assert captured_values['user_id'] == 'user-abc', f"Expected 'user-abc', got '{captured_values['user_id']}'" + + finally: + await bus.stop(clear=True) + + async def test_contextvar_propagates_through_nested_handlers(self): + """ + Nested dispatch: Context should propagate through parent -> child handlers. + + When a handler dispatches and awaits a child event, the child handler + should also have access to the original context. 
+ """ + bus = EventBus(name='NestedContextBus') + captured_parent: dict[str, str] = {} + captured_child: dict[str, str] = {} + + async def parent_handler(event: SimpleEvent) -> str: + captured_parent['request_id'] = request_id_var.get() + captured_parent['trace_id'] = trace_id_var.get() + + # Dispatch child event + child = await bus.emit(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + captured_child['request_id'] = request_id_var.get() + captured_child['trace_id'] = trace_id_var.get() + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set context + request_id_var.set('req-nested-123') + trace_id_var.set('trace-xyz') + + await bus.emit(SimpleEvent()) + + # Both handlers should see the context + assert captured_parent['request_id'] == 'req-nested-123' + assert captured_parent['trace_id'] == 'trace-xyz' + assert captured_child['request_id'] == 'req-nested-123' + assert captured_child['trace_id'] == 'trace-xyz' + + finally: + await bus.stop(clear=True) + + async def test_context_isolation_between_dispatches(self): + """ + Different dispatches should have isolated contexts. + + If dispatch A sets request_id='req-A' and dispatch B sets request_id='req-B', + handler A should see 'req-A' and handler B should see 'req-B'. 
+ """ + bus = EventBus(name='IsolationTestBus') + captured_values: list[str] = [] + + async def handler(event: SimpleEvent) -> str: + # Small delay to ensure both handlers run + await asyncio.sleep(0.01) + captured_values.append(request_id_var.get()) + return 'handled' + + bus.on(SimpleEvent, handler) + + try: + # Dispatch two events with different contexts + async def dispatch_with_context(req_id: str): + request_id_var.set(req_id) + await bus.emit(SimpleEvent()) + + # Run both dispatches + request_id_var.set('req-A') + event_a = bus.emit(SimpleEvent()) + + request_id_var.set('req-B') + event_b = bus.emit(SimpleEvent()) + + await event_a + await event_b + + # Each handler should have seen its own context + # Note: order might vary, so just check both values are present + assert 'req-A' in captured_values, f"Expected 'req-A' in {captured_values}" + assert 'req-B' in captured_values, f"Expected 'req-B' in {captured_values}" + + finally: + await bus.stop(clear=True) + + async def test_context_propagates_to_parallel_handler_concurrency(self): + """ + When event_handler_concurrency='parallel', all handlers should see the dispatch context. 
+ """ + bus = EventBus(name='ParallelContextBus', event_handler_concurrency='parallel') + captured_values: list[str] = [] + lock = asyncio.Lock() + + async def handler1(event: SimpleEvent) -> str: + async with lock: + captured_values.append(f'h1:{request_id_var.get()}') + return 'h1_done' + + async def handler2(event: SimpleEvent) -> str: + async with lock: + captured_values.append(f'h2:{request_id_var.get()}') + return 'h2_done' + + bus.on(SimpleEvent, handler1) + bus.on(SimpleEvent, handler2) + + try: + request_id_var.set('req-parallel') + await bus.emit(SimpleEvent()) + + assert 'h1:req-parallel' in captured_values, f"Handler1 didn't see context: {captured_values}" + assert 'h2:req-parallel' in captured_values, f"Handler2 didn't see context: {captured_values}" + + finally: + await bus.stop(clear=True) + + async def test_context_propagates_through_event_forwarding(self): + """ + When events are forwarded between buses, context should propagate. + """ + bus1 = EventBus(name='Bus1') + bus2 = EventBus(name='Bus2') + captured_bus1: dict[str, str] = {} + captured_bus2: dict[str, str] = {} + + async def bus1_handler(event: SimpleEvent) -> str: + captured_bus1['request_id'] = request_id_var.get() + return 'bus1_done' + + async def bus2_handler(event: SimpleEvent) -> str: + captured_bus2['request_id'] = request_id_var.get() + return 'bus2_done' + + bus1.on(SimpleEvent, bus1_handler) + bus1.on('*', bus2.emit) # Forward all events to bus2 + bus2.on(SimpleEvent, bus2_handler) + + try: + request_id_var.set('req-forwarded') + await bus1.emit(SimpleEvent()) + await bus2.wait_until_idle() + + assert captured_bus1['request_id'] == 'req-forwarded', f"Bus1 handler didn't see context: {captured_bus1}" + assert captured_bus2['request_id'] == 'req-forwarded', f"Bus2 handler didn't see context: {captured_bus2}" + + finally: + await bus1.stop(clear=True) + await bus2.stop(clear=True) + + async def test_handler_can_modify_context_without_affecting_parent(self): + """ + Handler 
modifications to ContextVar should not affect the parent context. + + This ensures context is properly copied, not shared. + """ + bus = EventBus(name='ModifyContextBus') + parent_value_after_child: str = '' + + async def parent_handler(event: SimpleEvent) -> str: + nonlocal parent_value_after_child + # Set a value in parent + request_id_var.set('parent-value') + + # Dispatch child which will modify the context + await bus.emit(ChildEvent()) + + # Parent's context should be unchanged + parent_value_after_child = request_id_var.get() + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + # Modify context in child + request_id_var.set('child-modified') + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + await bus.emit(SimpleEvent()) + + # Parent should still see its own value, not child's modification + assert parent_value_after_child == 'parent-value', ( + f"Parent context was modified by child: got '{parent_value_after_child}'" + ) + + finally: + await bus.stop(clear=True) + + async def test_event_parent_id_tracking_still_works(self): + """ + Critical: Internal context vars (event_parent_id tracking) must still work + when we propagate dispatch-time context. + + This ensures our context merging doesn't break the bubus internals. 
+ """ + bus = EventBus(name='ParentIdTrackingBus') + parent_event_id: str | None = None + child_event_parent_id: str | None = None + + async def parent_handler(event: SimpleEvent) -> str: + nonlocal parent_event_id + parent_event_id = event.event_id + + # Child event should automatically get parent_id set + child = await bus.emit(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + nonlocal child_event_parent_id + child_event_parent_id = event.event_parent_id + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set user context (to ensure we're testing the merge scenario) + request_id_var.set('req-parent-tracking') + + await bus.emit(SimpleEvent()) + + # Verify parent ID tracking works + assert parent_event_id is not None, 'Parent event ID was not captured' + assert child_event_parent_id is not None, 'Child event parent ID was not set' + assert child_event_parent_id == parent_event_id, ( + f"Child's parent_id ({child_event_parent_id}) doesn't match parent's id ({parent_event_id})" + ) + + finally: + await bus.stop(clear=True) + + async def test_dispatch_context_and_parent_id_both_work(self): + """ + Both user-defined ContextVars AND internal event tracking must work together. + + This is the key test for context stacking/merging. 
+ """ + bus = EventBus(name='CombinedContextBus') + results: dict[str, Any] = {} + + async def parent_handler(event: SimpleEvent) -> str: + results['parent_request_id'] = request_id_var.get() + results['parent_event_id'] = event.event_id + + # Dispatch child - should get both user context AND parent tracking + child = await bus.emit(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + results['child_request_id'] = request_id_var.get() + results['child_event_parent_id'] = event.event_parent_id + return 'child_done' + + bus.on(SimpleEvent, parent_handler) + bus.on(ChildEvent, child_handler) + + try: + # Set user context + request_id_var.set('req-combined-test') + + await bus.emit(SimpleEvent()) + + # User context should propagate + assert results['parent_request_id'] == 'req-combined-test', ( + f"Parent didn't see user context: {results['parent_request_id']}" + ) + assert results['child_request_id'] == 'req-combined-test', ( + f"Child didn't see user context: {results['child_request_id']}" + ) + + # Internal parent tracking should also work + assert results['child_event_parent_id'] == results['parent_event_id'], ( + f'Parent ID tracking broken: child.parent_id={results["child_event_parent_id"]}, parent.id={results["parent_event_id"]}' + ) + + finally: + await bus.stop(clear=True) + + async def test_deeply_nested_context_and_parent_tracking(self): + """ + Test that both user context and parent tracking work through multiple levels. 
+ """ + bus = EventBus(name='DeepNestingBus') + results: list[dict[str, Any]] = [] + + class Level2Event(BaseEvent[str]): + pass + + class Level3Event(BaseEvent[str]): + pass + + async def level1_handler(event: SimpleEvent) -> str: + results.append( + { + 'level': 1, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + } + ) + await bus.emit(Level2Event()) + return 'level1_done' + + async def level2_handler(event: Level2Event) -> str: + results.append( + { + 'level': 2, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + } + ) + await bus.emit(Level3Event()) + return 'level2_done' + + async def level3_handler(event: Level3Event) -> str: + results.append( + { + 'level': 3, + 'request_id': request_id_var.get(), + 'event_id': event.event_id, + 'parent_id': event.event_parent_id, + } + ) + return 'level3_done' + + bus.on(SimpleEvent, level1_handler) + bus.on(Level2Event, level2_handler) + bus.on(Level3Event, level3_handler) + + try: + request_id_var.set('req-deep-nesting') + + await bus.emit(SimpleEvent()) + + # All levels should see the user context + assert len(results) == 3, f'Expected 3 levels, got {len(results)}' + for r in results: + assert r['request_id'] == 'req-deep-nesting', f"Level {r['level']} didn't see user context: {r['request_id']}" + + # Parent chain should be correct + assert results[0]['parent_id'] is None, 'Level 1 should have no parent' + assert results[1]['parent_id'] == results[0]['event_id'], ( + f'Level 2 parent mismatch: {results[1]["parent_id"]} != {results[0]["event_id"]}' + ) + assert results[2]['parent_id'] == results[1]['event_id'], ( + f'Level 3 parent mismatch: {results[2]["parent_id"]} != {results[1]["event_id"]}' + ) + + finally: + await bus.stop(clear=True) + + +if __name__ == '__main__': + pytest.main([__file__, '-v', '-s']) diff --git a/tests/test_eventbus_dispatch_defaults.py b/tests/test_eventbus_dispatch_defaults.py new 
file mode 100644 index 0000000..2f8df52 --- /dev/null +++ b/tests/test_eventbus_dispatch_defaults.py @@ -0,0 +1,106 @@ +from bubus import ( + BaseEvent, + EventBus, + EventConcurrencyMode, + EventHandlerCompletionMode, + EventHandlerConcurrencyMode, +) + + +class PropagationEvent(BaseEvent[str]): + pass + + +class ConcurrencyOverrideEvent(BaseEvent[str]): + event_concurrency: EventConcurrencyMode | None = EventConcurrencyMode.GLOBAL_SERIAL + + +class HandlerOverrideEvent(BaseEvent[str]): + event_handler_concurrency: EventHandlerConcurrencyMode | None = EventHandlerConcurrencyMode.SERIAL + event_handler_completion: EventHandlerCompletionMode | None = EventHandlerCompletionMode.ALL + + +async def test_event_concurrency_remains_unset_on_dispatch_and_resolves_during_processing() -> None: + bus = EventBus(name='EventConcurrencyDefaultBus', event_concurrency='parallel') + + async def handler(_event: BaseEvent[str]) -> str: + return 'ok' + + bus.on(PropagationEvent, handler) + try: + implicit = bus.emit(PropagationEvent()) + explicit_none = bus.emit(PropagationEvent(event_concurrency=None)) + + assert implicit.event_concurrency is None + assert explicit_none.event_concurrency is None + + await implicit + await explicit_none + finally: + await bus.stop() + + +async def test_event_concurrency_class_override_beats_bus_default() -> None: + bus = EventBus(name='EventConcurrencyOverrideBus', event_concurrency='parallel') + + async def handler(_event: BaseEvent[str]) -> str: + return 'ok' + + bus.on(ConcurrencyOverrideEvent, handler) + try: + event = bus.emit(ConcurrencyOverrideEvent()) + assert event.event_concurrency == 'global-serial' + await event + finally: + await bus.stop() + + +async def test_handler_defaults_remain_unset_on_dispatch_and_resolve_during_processing() -> None: + bus = EventBus( + name='HandlerDefaultsBus', + event_handler_concurrency='parallel', + event_handler_completion='first', + ) + + async def handler(_event: BaseEvent[str]) -> str: + return 'ok' + + 
bus.on(PropagationEvent, handler) + try: + implicit = bus.emit(PropagationEvent()) + explicit_none = bus.emit( + PropagationEvent( + event_handler_concurrency=None, + event_handler_completion=None, + ) + ) + + assert implicit.event_handler_concurrency is None + assert implicit.event_handler_completion is None + assert explicit_none.event_handler_concurrency is None + assert explicit_none.event_handler_completion is None + + await implicit + await explicit_none + finally: + await bus.stop() + + +async def test_handler_class_override_beats_bus_default() -> None: + bus = EventBus( + name='HandlerDefaultsOverrideBus', + event_handler_concurrency='parallel', + event_handler_completion='first', + ) + + async def handler(_event: BaseEvent[str]) -> str: + return 'ok' + + bus.on(HandlerOverrideEvent, handler) + try: + event = bus.emit(HandlerOverrideEvent()) + assert event.event_handler_concurrency == 'serial' + assert event.event_handler_completion == 'all' + await event + finally: + await bus.stop() diff --git a/tests/test_parent_event_tracking.py b/tests/test_eventbus_dispatch_parent_tracking.py similarity index 82% rename from tests/test_parent_event_tracking.py rename to tests/test_eventbus_dispatch_parent_tracking.py index c11090c..f07b3f1 100644 --- a/tests/test_parent_event_tracking.py +++ b/tests/test_eventbus_dispatch_parent_tracking.py @@ -41,20 +41,20 @@ class TestParentEventTracking: async def test_basic_parent_tracking(self, eventbus: EventBus): """Test that child events automatically get event_parent_id""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def parent_handler(event: ParentEvent) -> str: # Handler that dispatches a child event child = ChildEvent(data=f'child_of_{event.message}') - eventbus.dispatch(child) - child_events.append(child) + eventbus.emit(child) + event_children.append(child) return 'parent_handled' - eventbus.on('ParentEvent', parent_handler) # type: ignore[reportUnknownArgumentType] + 
eventbus.on('ParentEvent', parent_handler) # Dispatch parent event parent = ParentEvent(message='test_parent') - parent_result = eventbus.dispatch(parent) + parent_result = eventbus.emit(parent) # Wait for processing await eventbus.wait_until_idle() @@ -67,8 +67,8 @@ async def parent_handler(event: ParentEvent) -> str: assert parent_handler_result is not None and parent_handler_result.result == 'parent_handled' # Verify child has event_parent_id set - assert len(child_events) == 1 - child = child_events[0] + assert len(event_children) == 1 + child = event_children[0] assert child.event_parent_id == parent.event_id async def test_multi_level_parent_tracking(self, eventbus: EventBus): @@ -78,13 +78,13 @@ async def test_multi_level_parent_tracking(self, eventbus: EventBus): async def parent_handler(event: BaseEvent[str]) -> str: events_by_level['parent'] = event child = ChildEvent(data='child_data') - eventbus.dispatch(child) + eventbus.emit(child) return 'parent' async def child_handler(event: BaseEvent[str]) -> str: events_by_level['child'] = event grandchild = GrandchildEvent(value=42) - eventbus.dispatch(grandchild) + eventbus.emit(grandchild) return 'child' async def grandchild_handler(event: BaseEvent[str]) -> str: @@ -98,7 +98,7 @@ async def grandchild_handler(event: BaseEvent[str]) -> str: # Start the chain parent = ParentEvent(message='root') - eventbus.dispatch(parent) + eventbus.emit(parent) # Wait for all processing await eventbus.wait_until_idle() @@ -115,44 +115,44 @@ async def grandchild_handler(event: BaseEvent[str]) -> str: async def test_multiple_children_same_parent(self, eventbus: EventBus): """Test multiple child events from same parent""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def parent_handler(event: BaseEvent[str]) -> str: # Dispatch multiple children for i in range(3): child = ChildEvent(data=f'child_{i}') - eventbus.dispatch(child) - child_events.append(child) + eventbus.emit(child) + 
event_children.append(child) return 'spawned_children' eventbus.on('ParentEvent', parent_handler) # Dispatch parent parent = ParentEvent(message='multi_child_parent') - eventbus.dispatch(parent) + eventbus.emit(parent) await eventbus.wait_until_idle() # All children should have same parent - assert len(child_events) == 3 - for child in child_events: + assert len(event_children) == 3 + for child in event_children: assert child.event_parent_id == parent.event_id - async def test_parallel_handlers_parent_tracking(self, eventbus: EventBus): - """Test parent tracking with parallel handlers""" + async def test_parallel_handler_concurrency_parent_tracking(self, eventbus: EventBus): + """Test parent tracking with parallel handler concurrency mode.""" events_from_handlers: dict[str, list[BaseEvent[Any]]] = {'h1': [], 'h2': []} async def handler1(event: BaseEvent[str]) -> str: await asyncio.sleep(0.01) # Simulate work child = ChildEvent(data='from_h1') - eventbus.dispatch(child) + eventbus.emit(child) events_from_handlers['h1'].append(child) return 'h1' async def handler2(event: BaseEvent[str]) -> str: await asyncio.sleep(0.02) # Different timing child = ChildEvent(data='from_h2') - eventbus.dispatch(child) + eventbus.emit(child) events_from_handlers['h2'].append(child) return 'h2' @@ -162,7 +162,7 @@ async def handler2(event: BaseEvent[str]) -> str: # Dispatch parent parent = ParentEvent(message='parallel_test') - eventbus.dispatch(parent) + eventbus.emit(parent) await eventbus.wait_until_idle() @@ -179,22 +179,22 @@ async def test_explicit_parent_not_overridden(self, eventbus: EventBus): async def parent_handler(event: BaseEvent[Any]) -> str: nonlocal captured_child # Create child with explicit event_parent_id - explicit_parent_id = '01234567-89ab-cdef-0123-456789abcdef' + explicit_parent_id = '018f8e40-1234-7000-8000-000000001234' child = ChildEvent(data='explicit', event_parent_id=explicit_parent_id) - eventbus.dispatch(child) + eventbus.emit(child) captured_child = 
child return 'dispatched' eventbus.on('ParentEvent', parent_handler) parent = ParentEvent(message='test') - eventbus.dispatch(parent) + eventbus.emit(parent) await eventbus.wait_until_idle() # Explicit event_parent_id should be preserved assert captured_child is not None - assert captured_child.event_parent_id == '01234567-89ab-cdef-0123-456789abcdef' + assert captured_child.event_parent_id == '018f8e40-1234-7000-8000-000000001234' assert captured_child.event_parent_id != parent.event_id async def test_cross_eventbus_parent_tracking(self): @@ -207,7 +207,7 @@ async def test_cross_eventbus_parent_tracking(self): async def bus1_handler(event: BaseEvent[Any]) -> str: # Dispatch child to bus2 child = ChildEvent(data='cross_bus_child') - bus2.dispatch(child) + bus2.emit(child) captured_events.append(('bus1', event, child)) return 'bus1_handled' @@ -221,7 +221,7 @@ async def bus2_handler(event: BaseEvent[str]) -> str: try: # Dispatch parent to bus1 parent = ParentEvent(message='cross_bus_test') - bus1.dispatch(parent) + bus1.emit(parent) await bus1.wait_until_idle() await bus2.wait_until_idle() @@ -240,35 +240,35 @@ async def bus2_handler(event: BaseEvent[str]) -> str: async def test_sync_handler_parent_tracking(self, eventbus: EventBus): """Test parent tracking works with sync handlers""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] def sync_parent_handler(event: BaseEvent[str]) -> str: # Sync handler that dispatches child child = ChildEvent(data='from_sync') - eventbus.dispatch(child) - child_events.append(child) + eventbus.emit(child) + event_children.append(child) return 'sync_handled' eventbus.on('ParentEvent', sync_parent_handler) parent = ParentEvent(message='sync_test') - eventbus.dispatch(parent) + eventbus.emit(parent) await eventbus.wait_until_idle() # Parent tracking should work even with sync handlers - assert len(child_events) == 1 - assert child_events[0].event_parent_id == parent.event_id + assert 
len(event_children) == 1 + assert event_children[0].event_parent_id == parent.event_id async def test_error_handler_parent_tracking(self, eventbus: EventBus): """Test parent tracking when handler errors occur""" - child_events: list[BaseEvent[Any]] = [] + event_children: list[BaseEvent[Any]] = [] async def failing_handler(event: BaseEvent[str]) -> str: # Dispatch child before failing child = ChildEvent(data='before_error') - eventbus.dispatch(child) - child_events.append(child) + eventbus.emit(child) + event_children.append(child) raise ValueError( 'Handler error - expected to fail - testing that parent event tracking works even when handlers error' ) @@ -276,21 +276,21 @@ async def failing_handler(event: BaseEvent[str]) -> str: async def success_handler(event: BaseEvent[str]) -> str: # This should still run child = ChildEvent(data='after_error') - eventbus.dispatch(child) - child_events.append(child) + eventbus.emit(child) + event_children.append(child) return 'success' eventbus.on('ParentEvent', failing_handler) eventbus.on('ParentEvent', success_handler) parent = ParentEvent(message='error_test') - eventbus.dispatch(parent) + eventbus.emit(parent) await eventbus.wait_until_idle() # Both children should have event_parent_id despite error - assert len(child_events) == 2 - for child in child_events: + assert len(event_children) == 2 + for child in event_children: assert child.event_parent_id == parent.event_id async def test_event_children_tracking(self, eventbus: EventBus): @@ -300,7 +300,7 @@ async def parent_handler(event: ParentEvent) -> str: # Dispatch multiple child events for i in range(3): child = ChildEvent(data=f'child_{i}') - eventbus.dispatch(child) + eventbus.emit(child) return 'parent_done' async def child_handler(event: ChildEvent) -> str: @@ -312,7 +312,7 @@ async def child_handler(event: ChildEvent) -> str: # Dispatch parent event parent = ParentEvent(message='test_children_tracking') - parent_event = eventbus.dispatch(parent) + parent_event = 
eventbus.emit(parent) # Wait for all events to be processed await eventbus.wait_until_idle() @@ -332,12 +332,12 @@ async def test_nested_event_children_tracking(self, eventbus: EventBus): async def parent_handler(event: ParentEvent) -> str: child = ChildEvent(data='level1') - eventbus.dispatch(child) + eventbus.emit(child) return 'parent' async def child_handler(event: ChildEvent) -> str: grandchild = GrandchildEvent(value=42) - eventbus.dispatch(grandchild) + eventbus.emit(grandchild) return 'child' async def grandchild_handler(event: GrandchildEvent) -> str: @@ -348,7 +348,7 @@ async def grandchild_handler(event: GrandchildEvent) -> str: eventbus.on('GrandchildEvent', grandchild_handler) parent = ParentEvent(message='nested_test') - parent_event = eventbus.dispatch(parent) + parent_event = eventbus.emit(parent) await eventbus.wait_until_idle() await parent_event @@ -368,15 +368,15 @@ async def test_multiple_handlers_event_children(self, eventbus: EventBus): async def handler1(event: ParentEvent) -> str: child1 = ChildEvent(data='from_handler1') - eventbus.dispatch(child1) + eventbus.emit(child1) return 'h1' async def handler2(event: ParentEvent) -> str: # Dispatch 2 children from this handler child2 = ChildEvent(data='from_handler2_a') child3 = ChildEvent(data='from_handler2_b') - eventbus.dispatch(child2) - eventbus.dispatch(child3) + eventbus.emit(child2) + eventbus.emit(child3) return 'h2' async def child_handler(event: ChildEvent) -> str: @@ -387,7 +387,7 @@ async def child_handler(event: ChildEvent) -> str: eventbus.on('ChildEvent', child_handler) parent = ParentEvent(message='multi_handler_test') - parent_event = eventbus.dispatch(parent) + parent_event = eventbus.emit(parent) await eventbus.wait_until_idle() await parent_event @@ -408,7 +408,7 @@ async def handler(event: ParentEvent) -> str: eventbus.on('ParentEvent', handler) parent = ParentEvent(message='no_children_test') - parent_event = eventbus.dispatch(parent) + parent_event = eventbus.emit(parent) 
await eventbus.wait_until_idle() await parent_event @@ -421,10 +421,10 @@ async def test_forwarded_events_not_counted_as_children(self, eventbus: EventBus try: # Forward all events from bus1 to bus2 - eventbus.on('*', bus2.dispatch) + eventbus.on('*', bus2.emit) parent = ParentEvent(message='forward_test') - parent_event = eventbus.dispatch(parent) + parent_event = eventbus.emit(parent) await eventbus.wait_until_idle() await bus2.wait_until_idle() await parent_event @@ -435,20 +435,23 @@ async def test_forwarded_events_not_counted_as_children(self, eventbus: EventBus finally: await bus2.stop(clear=True) - async def test_event_are_all_children_complete(self, eventbus: EventBus): - """Test the event_are_all_children_complete method""" + async def test_parent_completion_waits_for_all_children(self, eventbus: EventBus): + """Parent event completion should wait until all dispatched children complete.""" completion_order: list[str] = [] + child_started = asyncio.Event() + release_children = asyncio.Event() async def parent_handler(event: ParentEvent) -> str: child1 = ChildEvent(data='child1') child2 = ChildEvent(data='child2') - eventbus.dispatch(child1) - eventbus.dispatch(child2) + eventbus.emit(child1) + eventbus.emit(child2) completion_order.append('parent_handler') return 'parent' async def child_handler(event: ChildEvent) -> str: - await asyncio.sleep(0.01) # Simulate work + child_started.set() + await release_children.wait() completion_order.append(f'child_handler_{event.data}') return f'handled_{event.data}' @@ -456,18 +459,19 @@ async def child_handler(event: ChildEvent) -> str: eventbus.on('ChildEvent', child_handler) parent = ParentEvent(message='completion_test') - parent_event = eventbus.dispatch(parent) + parent_event = eventbus.emit(parent) - # Check completion status during processing - # At this point, parent handler hasn't run yet, so no children exist - print(f'Children immediately after dispatch: {len(parent.event_children)}') - assert 
parent.event_are_all_children_complete() # No children yet, so technically complete + # Wait until at least one child handler has started and is blocked. + await child_started.wait() + assert len(parent.event_children) >= 1 + assert parent.event_completed_at is None + assert parent.event_status != 'completed' - # Wait for all processing + release_children.set() await parent_event # Now all children should be complete - assert parent.event_are_all_children_complete() + assert parent.event_status == 'completed' assert len(parent.event_children) == 2 for child in parent.event_children: assert child.event_status == 'completed' diff --git a/tests/test_eventbus_edge_cases.py b/tests/test_eventbus_edge_cases.py new file mode 100644 index 0000000..86c270f --- /dev/null +++ b/tests/test_eventbus_edge_cases.py @@ -0,0 +1,103 @@ +import asyncio +import time + +import pytest + +from bubus import BaseEvent, EventBus, EventStatus + + +class ResetCoverageEvent(BaseEvent[None]): + label: str + + +class IdleTimeoutCoverageEvent(BaseEvent[None]): + label: str = 'slow' + + +class StopCoverageEvent(BaseEvent[None]): + label: str = 'stop' + + +@pytest.mark.asyncio +async def test_event_reset_creates_fresh_pending_event_for_cross_bus_dispatch(): + bus_a = EventBus(name='ResetCoverageBusA') + bus_b = EventBus(name='ResetCoverageBusB') + seen_a: list[str] = [] + seen_b: list[str] = [] + + bus_a.on(ResetCoverageEvent, lambda event: seen_a.append(event.label)) + bus_b.on(ResetCoverageEvent, lambda event: seen_b.append(event.label)) + + completed = await bus_a.emit(ResetCoverageEvent(label='hello')) + assert completed.event_status == EventStatus.COMPLETED + assert len(completed.event_results) == 1 + + fresh = completed.event_reset() + assert fresh.event_id != completed.event_id + assert fresh.event_status == EventStatus.PENDING + assert fresh.event_completed_at is None + assert fresh.event_results == {} + + forwarded = await bus_b.emit(fresh) + assert forwarded.event_status == 
EventStatus.COMPLETED + assert seen_a == ['hello'] + assert seen_b == ['hello'] + assert any(path.startswith('ResetCoverageBusA#') for path in forwarded.event_path) + assert any(path.startswith('ResetCoverageBusB#') for path in forwarded.event_path) + + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + + +@pytest.mark.asyncio +async def test_wait_until_idle_timeout_path_recovers_after_inflight_handler_finishes(): + bus = EventBus(name='IdleTimeoutCoverageBus') + handler_started = asyncio.Event() + release_handler = asyncio.Event() + + async def slow_handler(event: IdleTimeoutCoverageEvent) -> None: + handler_started.set() + await release_handler.wait() + + bus.on(IdleTimeoutCoverageEvent, slow_handler) + pending = bus.emit(IdleTimeoutCoverageEvent()) + await handler_started.wait() + + start = time.perf_counter() + await bus.wait_until_idle(timeout=0.01) + elapsed = time.perf_counter() - start + assert elapsed < 0.5 + assert pending.event_status != EventStatus.COMPLETED + + release_handler.set() + await pending + await bus.wait_until_idle(timeout=1.0) + assert pending.event_status == EventStatus.COMPLETED + + await bus.stop(timeout=0, clear=True) + + +@pytest.mark.asyncio +async def test_stop_timeout_zero_clears_running_bus_and_releases_name(): + bus_name = 'StopCoverageBus' + bus = EventBus(name=bus_name) + + async def slow_handler(event: StopCoverageEvent) -> None: + await asyncio.sleep(0.2) + + bus.on(StopCoverageEvent, slow_handler) + _pending = bus.emit(StopCoverageEvent()) + await asyncio.sleep(0) + + start = time.perf_counter() + await bus.stop(timeout=0, clear=True) + elapsed = time.perf_counter() - start + + assert elapsed < 0.5 + assert bus.name.startswith('_stopped_') + assert all(instance is not bus for instance in list(EventBus.all_instances)) + + replacement = EventBus(name=bus_name) + replacement.on(StopCoverageEvent, lambda event: None) + await replacement.emit(StopCoverageEvent()) + await replacement.stop(timeout=0, 
clear=True) diff --git a/tests/test_eventbus_error_handling.py b/tests/test_eventbus_error_handling.py new file mode 100644 index 0000000..463660f --- /dev/null +++ b/tests/test_eventbus_error_handling.py @@ -0,0 +1,123 @@ +import asyncio + +from bubus import ( + BaseEvent, + EventBus, + EventHandlerAbortedError, + EventHandlerCancelledError, + EventHandlerCompletionMode, + EventHandlerConcurrencyMode, + EventHandlerResultSchemaError, + EventHandlerTimeoutError, +) + + +class TaxonomyEvent(BaseEvent[str]): + pass + + +class IntTaxonomyEvent(BaseEvent[int]): + pass + + +async def test_result_schema_mismatch_uses_event_handler_result_schema_error() -> None: + bus = EventBus(name='TaxonomySchemaBus') + + def wrong_type(_event: IntTaxonomyEvent) -> str: + return 'not-an-int' + + bus.on(IntTaxonomyEvent, wrong_type) + + try: + event = await bus.emit(IntTaxonomyEvent()) + await bus.wait_until_idle() + result = next(iter(event.event_results.values())) + assert result.status == 'error' + assert isinstance(result.error, EventHandlerResultSchemaError) + finally: + await bus.stop() + + +async def test_handler_timeout_uses_event_handler_timeout_error() -> None: + bus = EventBus(name='TaxonomyTimeoutBus') + + class TimeoutEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + event_handler_timeout: float | None = 0.01 + + async def slow_handler(_event: TimeoutEvent) -> str: + await asyncio.sleep(0.05) + return 'slow' + + bus.on(TimeoutEvent, slow_handler) + + try: + event = await bus.emit(TimeoutEvent()) + await bus.wait_until_idle() + result = next(iter(event.event_results.values())) + assert result.status == 'error' + assert isinstance(result.error, EventHandlerTimeoutError) + assert isinstance(result.error, TimeoutError) + finally: + await bus.stop() + + +async def test_first_mode_pending_non_winner_uses_cancelled_error_class() -> None: + bus = EventBus( + name='TaxonomyFirstPendingBus', + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + 
event_handler_completion=EventHandlerCompletionMode.FIRST, + ) + + async def winner(_event: TaxonomyEvent) -> str: + return 'winner' + + async def never_runs(_event: TaxonomyEvent) -> str: + await asyncio.sleep(0.1) + return 'loser' + + bus.on(TaxonomyEvent, winner) + bus.on(TaxonomyEvent, never_runs) + + try: + event = await bus.emit(TaxonomyEvent()) + await bus.wait_until_idle() + + loser_result = next(result for result in event.event_results.values() if result.handler_name.endswith('never_runs')) + assert loser_result.status == 'error' + assert isinstance(loser_result.error, EventHandlerCancelledError) + assert isinstance(loser_result.error, asyncio.CancelledError) + finally: + await bus.stop() + + +async def test_parallel_first_started_loser_uses_aborted_error_class() -> None: + bus = EventBus( + name='TaxonomyFirstParallelBus', + event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL, + event_handler_completion=EventHandlerCompletionMode.FIRST, + ) + slow_started = asyncio.Event() + + async def slow_loser(_event: TaxonomyEvent) -> str: + slow_started.set() + await asyncio.sleep(5) + return 'slow' + + async def fast_winner(_event: TaxonomyEvent) -> str: + await slow_started.wait() + return 'winner' + + bus.on(TaxonomyEvent, slow_loser) + bus.on(TaxonomyEvent, fast_winner) + + try: + event = await bus.emit(TaxonomyEvent()) + await bus.wait_until_idle() + + slow_result = next(result for result in event.event_results.values() if result.handler_name.endswith('slow_loser')) + assert slow_result.status == 'error' + assert isinstance(slow_result.error, EventHandlerAbortedError) + assert isinstance(slow_result.error, asyncio.CancelledError) + finally: + await bus.stop() diff --git a/tests/test_eventbus_find.py b/tests/test_eventbus_find.py new file mode 100644 index 0000000..8dc7b9d --- /dev/null +++ b/tests/test_eventbus_find.py @@ -0,0 +1,1606 @@ +""" +Tests for the unified find() method and tree traversal helpers. 
+ +Addresses GitHub Issues #10 (debouncing) and #15 (past + child_of lookup). +""" + +# pyright: reportUnknownMemberType=false +# pyright: reportUnknownLambdaType=false +# pyright: reportAttributeAccessIssue=false +# pyright: reportUnknownVariableType=false +# pyright: reportUnusedVariable=false + +import asyncio +from datetime import UTC, datetime + +import pytest + +from bubus import BaseEvent, EventBus + + +# Test event types +class ParentEvent(BaseEvent[str]): + pass + + +class ChildEvent(BaseEvent[str]): + pass + + +class GrandchildEvent(BaseEvent[str]): + pass + + +class UnrelatedEvent(BaseEvent[str]): + pass + + +class ScreenshotEvent(BaseEvent[str]): + """Example event for debouncing tests.""" + + target_id: str = '' + full_page: bool = False + + +class NavigateEvent(BaseEvent[str]): + """Example event for race condition tests.""" + + url: str = '' + + +class TabCreatedEvent(BaseEvent[str]): + """Example event that fires as result of navigation.""" + + tab_id: str = '' + + +class SystemEvent(BaseEvent[str]): + pass + + +class UserActionEvent(BaseEvent[str]): + action: str = '' + user_id: str = '' + + +class NumberedEvent(BaseEvent[str]): + value: int = 0 + + +TARGET_ID_1 = '9b447756-908c-7b75-8a51-4a2c2b4d9b14' +TARGET_ID_2 = '194870e1-fa02-70a4-8101-d10d57c3449c' +TARGET_ID_3 = '7d787f06-07fd-7406-8be7-0255fb41f459' +TARGET_ID_4 = 'a2c7f40b-a8a7-78b2-84ef-9f8c60c40a24' +TARGET_ID_CHILD = '12f38f3d-d8a7-7ae2-8778-bc27e285ea34' + + +# ============================================================================= +# Tree Traversal Helper Tests +# ============================================================================= + + +class TestEventIsChildOf: + """Tests for event_is_child_of() method.""" + + async def test_direct_child_returns_true(self): + """event_is_child_of returns True for direct parent-child relationship.""" + bus = EventBus() + + try: + # Create parent-child relationship via dispatch inside handler + child_event_ref: list[BaseEvent] = [] + + 
async def parent_handler(event: ParentEvent) -> str: + child = await bus.emit(ChildEvent()) + child_event_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + child = child_event_ref[0] + + # Verify the relationship + assert bus.event_is_child_of(child, parent) is True + + finally: + await bus.stop(clear=True) + + async def test_grandchild_returns_true(self): + """event_is_child_of returns True for grandparent relationship.""" + bus = EventBus() + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.emit(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.emit(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + grandchild = grandchild_ref[0] + + # Grandchild should be descendant of parent + assert bus.event_is_child_of(grandchild, parent) is True + + finally: + await bus.stop(clear=True) + + async def test_unrelated_events_returns_false(self): + """event_is_child_of returns False for unrelated events.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'parent_done') + bus.on(UnrelatedEvent, lambda e: 'unrelated_done') + + parent = await bus.emit(ParentEvent()) + unrelated = await bus.emit(UnrelatedEvent()) + + assert bus.event_is_child_of(unrelated, parent) is False + + finally: + await bus.stop(clear=True) + + async def test_same_event_returns_false(self): + """event_is_child_of returns False when checking event against itself.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + event = await bus.emit(ParentEvent()) + + assert 
bus.event_is_child_of(event, event) is False + + finally: + await bus.stop(clear=True) + + async def test_reversed_relationship_returns_false(self): + """event_is_child_of returns False when parent/child are reversed.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.emit(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + child = child_ref[0] + + # Parent is NOT a child of child + assert bus.event_is_child_of(parent, child) is False + + finally: + await bus.stop(clear=True) + + +class TestEventIsParentOf: + """Tests for event_is_parent_of() method.""" + + async def test_direct_parent_returns_true(self): + """event_is_parent_of returns True for direct parent-child relationship.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.emit(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + child = child_ref[0] + + # Parent IS parent of child + assert bus.event_is_parent_of(parent, child) is True + + finally: + await bus.stop(clear=True) + + async def test_grandparent_returns_true(self): + """event_is_parent_of returns True for grandparent relationship.""" + bus = EventBus() + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.emit(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.emit(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, 
child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + grandchild = grandchild_ref[0] + + # Parent IS ancestor of grandchild + assert bus.event_is_parent_of(parent, grandchild) is True + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# find() Basic Functionality Tests +# ============================================================================= + + +class TestFindPastOnly: + """Tests for find(past=True, future=False) history lookup behavior.""" + + async def test_max_history_zero_disables_past_but_future_still_works(self): + """With max_history_size=0, future find resolves on dispatch but completed events are not searchable in past.""" + bus = EventBus(max_history_size=0) + + try: + bus.on(ParentEvent, lambda e: 'done') + + find_future_task = asyncio.create_task(bus.find(ParentEvent, past=False, future=1)) + await asyncio.sleep(0) + + dispatched = bus.emit(ParentEvent()) + found_future = await find_future_task + assert found_future is not None + assert found_future.event_id == dispatched.event_id + + await dispatched + assert dispatched.event_id not in bus.event_history + + found_past = await bus.find(ParentEvent, past=True, future=False) + assert found_past is None + finally: + await bus.stop(clear=True) + + async def test_returns_matching_event_from_history(self): + """find(past=True, future=False) returns event from history.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.emit(ParentEvent()) + + # Find it in history (past=True = search all history) + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_history_lookup_is_bus_scoped(self): + """find(past=True, future=False) 
only searches this bus history.""" + bus_a = EventBus(name='FindScopeA') + bus_b = EventBus(name='FindScopeB') + + try: + bus_b.on(NumberedEvent, lambda e: 'done') + await bus_b.emit(NumberedEvent(value=10)) + + found_on_a = await bus_a.find(NumberedEvent, past=True, future=False) + found_on_b = await bus_b.find(NumberedEvent, past=True, future=False) + + assert found_on_a is None + assert found_on_b is not None + assert found_on_b.value == 10 + finally: + await bus_a.stop(clear=True) + await bus_b.stop(clear=True) + + async def test_found_event_retains_origin_bus_label(self): + """Events returned by find() keep the bus label in event_path.""" + bus = EventBus(name='FindBusRef') + + try: + bus.on(NumberedEvent, lambda e: 'done') + await bus.emit(NumberedEvent(value=7)) + + found = await bus.find(NumberedEvent, past=True, future=False) + assert found is not None + assert found.event_path + assert found.event_path[-1] == bus.label + finally: + await bus.stop(clear=True) + + async def test_past_float_filters_by_time_window(self): + """find(past=0.1) only returns events from last 0.1 seconds.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an event + _old_event = await bus.emit(ParentEvent()) + + # Wait a bit + await asyncio.sleep(0.15) + + # Dispatch another event + new_event = await bus.emit(ParentEvent()) + + # With a very short past window, should only find the new event + found = await bus.find(ParentEvent, past=0.1, future=False) + assert found is not None + assert found.event_id == new_event.event_id + + # With a longer past window, should still find new event (most recent first) + found = await bus.find(ParentEvent, past=1.0, future=False) + assert found is not None + assert found.event_id == new_event.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_returns_none_when_all_events_too_old(self): + """find(past=0.05) returns None if all events are older than 0.05 seconds.""" + bus = EventBus() + 
+ try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an event + await bus.emit(ParentEvent()) + + # Wait longer than our window + await asyncio.sleep(0.15) + + # With very short past window, should find nothing + found = await bus.find(ParentEvent, past=0.05, future=False) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_returns_none_when_no_match(self): + """find(past=True, future=False) returns None when no matching event.""" + bus = EventBus() + + try: + # No events dispatched + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_respects_where_filter(self): + """find() applies where filter correctly.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch two events with different target_ids + await bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + event2 = await bus.emit(ScreenshotEvent(target_id=TARGET_ID_2)) + + # Find only the one with target_id='194870e1-fa02-70a4-8101-d10d57c3449c' + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_2, + past=True, + future=False, + ) + + assert found is not None + assert found.event_id == event2.event_id + + finally: + await bus.stop(clear=True) + + async def test_returns_most_recent_match(self): + """find() returns most recent matching event from history.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch multiple events + await bus.emit(ParentEvent()) + await asyncio.sleep(0.01) # Ensure different timestamps + event2 = await bus.emit(ParentEvent()) + + # Should return the most recent + found = await bus.find(ParentEvent, past=True, future=False) + + assert found is not None + assert found.event_id == event2.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_includes_in_progress_events(self): + """History search should include pending/started events, 
matching TS semantics.""" + bus = EventBus() + + try: + release_handler = asyncio.Event() + + async def slow_handler(event: ParentEvent) -> str: + await release_handler.wait() + return 'done' + + bus.on(ParentEvent, slow_handler) + + dispatched = bus.emit(ParentEvent()) + await asyncio.sleep(0.02) # Let handler start. + + found_while_running = await bus.find(ParentEvent, past=True, future=False) + assert found_while_running is not None + assert found_while_running.event_id == dispatched.event_id + assert found_while_running.event_status in ('pending', 'started') + + release_handler.set() + await dispatched + await bus.wait_until_idle() + + found_after_completion = await bus.find(ParentEvent, past=True, future=False) + assert found_after_completion is not None + assert found_after_completion.event_id == dispatched.event_id + finally: + await bus.stop(clear=True) + + async def test_find_default_is_past_only_no_future_wait(self): + """find() with no windows defaults to past=True, future=False.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + start = datetime.now(UTC) + found = await bus.find(ParentEvent) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert elapsed < 0.05 + finally: + await bus.stop(clear=True) + + async def test_find_supports_event_field_keyword_filters(self): + """find(..., **kwargs) applies metadata equality filters.""" + bus = EventBus() + + try: + release = asyncio.Event() + + async def slow_handler(event: ParentEvent) -> str: + await release.wait() + return 'done' + + bus.on(ParentEvent, slow_handler) + + in_flight = bus.emit(ParentEvent()) + await asyncio.sleep(0.02) + + pending_or_started = await bus.find(ParentEvent, past=True, future=False, event_status='started') + if pending_or_started is None: + pending_or_started = await bus.find(ParentEvent, past=True, future=False, event_status='pending') + + assert pending_or_started is not None + assert pending_or_started.event_id == 
in_flight.event_id + + release.set() + await in_flight + completed = await bus.find(ParentEvent, past=True, future=False, event_status='completed') + assert completed is not None + assert completed.event_id == in_flight.event_id + finally: + await bus.stop(clear=True) + + async def test_find_supports_event_id_and_event_timeout_filters(self): + """find(..., **kwargs) supports exact-match metadata equality filters.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + event_a = await bus.emit(ParentEvent(event_timeout=11)) + await bus.emit(ParentEvent(event_timeout=22)) + + found = await bus.find( + ParentEvent, + past=True, + future=False, + event_id=event_a.event_id, + event_timeout=11, + ) + assert found is not None + assert found.event_id == event_a.event_id + + mismatch = await bus.find( + ParentEvent, + past=True, + future=False, + event_id=event_a.event_id, + event_timeout=22, + ) + assert mismatch is None + finally: + await bus.stop(clear=True) + + async def test_find_supports_non_event_data_field_filters(self): + """find(..., **kwargs) supports exact-match filters for non event_* fields too.""" + bus = EventBus() + + try: + bus.on(UserActionEvent, lambda e: 'done') + + await bus.emit(UserActionEvent(action='logout', user_id='28536f9b-4031-7f53-827f-98c24c1b3839')) + expected = await bus.emit(UserActionEvent(action='login', user_id='b57fcb67-faeb-7a56-8907-116d8cbb1472')) + + found = await bus.find( + UserActionEvent, past=True, future=False, action='login', user_id='b57fcb67-faeb-7a56-8907-116d8cbb1472' + ) + assert found is not None + assert found.event_id == expected.event_id + + not_found = await bus.find(UserActionEvent, past=True, future=False, action='signup') + assert not_found is None + finally: + await bus.stop(clear=True) + + async def test_find_wildcard_with_where_filter_matches_history(self): + """find('*', where=..., past=True) matches across event types in history.""" + bus = EventBus() + + try: + bus.on(UserActionEvent, 
lambda e: 'done') + bus.on(SystemEvent, lambda e: 'done') + + expected = await bus.emit(UserActionEvent(action='login', user_id='b57fcb67-faeb-7a56-8907-116d8cbb1472')) + await bus.emit(SystemEvent()) + + found = await bus.find( + '*', + where=lambda event: ( + isinstance(event, UserActionEvent) and event.user_id == 'b57fcb67-faeb-7a56-8907-116d8cbb1472' + ), + past=True, + future=False, + ) + + assert found is not None + assert found.event_id == expected.event_id + assert found.event_type == 'UserActionEvent' + finally: + await bus.stop(clear=True) + + +class TestFindFutureOnly: + """Tests for find(past=False, future=...) future wait behavior.""" + + async def test_waits_for_future_event(self): + """find(past=False, future=1) waits for event to be dispatched.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Start waiting for event + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.emit(ParentEvent()) + + find_task = asyncio.create_task(bus.find(ParentEvent, past=False, future=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_future_float_timeout(self): + """find(future=0.01) times out quickly when no event.""" + bus = EventBus() + + try: + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=0.01) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert elapsed < 0.1 # Should timeout quickly + + finally: + await bus.stop(clear=True) + + async def test_ignores_past_events(self): + """find(past=False, future=...) 
ignores events already in history.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + await bus.emit(ParentEvent()) + + # Should NOT find it (past=False), and timeout quickly + found = await bus.find(ParentEvent, past=False, future=0.01) + + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_ignores_inflight_events_dispatched_before_find(self): + """find(past=False, future=...) ignores already-dispatched in-flight events.""" + bus = EventBus() + + try: + release = asyncio.Event() + + async def slow_handler(event: ParentEvent) -> str: + await release.wait() + return 'done' + + bus.on(ParentEvent, slow_handler) + + in_flight = bus.emit(ParentEvent()) + await asyncio.sleep(0.01) + + found = await bus.find(ParentEvent, past=False, future=0.05) + assert found is None + + release.set() + await in_flight + finally: + await bus.stop(clear=True) + + async def test_future_works_with_string_event_type(self): + """find('EventName', ...) 
resolves using string keys, not just model classes.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.emit(ParentEvent()) + + find_task = asyncio.create_task(bus.find('ParentEvent', past=False, future=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + assert found.event_type == 'ParentEvent' + finally: + await bus.stop(clear=True) + + async def test_find_wildcard_with_where_filter_waits_for_future_match(self): + """find('*', where=..., past=False) waits for matching future event only.""" + bus = EventBus() + + try: + bus.on(SystemEvent, lambda e: 'done') + bus.on(UserActionEvent, lambda e: 'done') + + find_task = asyncio.create_task( + bus.find( + '*', + where=lambda event: event.event_type == 'UserActionEvent' and getattr(event, 'action', None) == 'special', + past=False, + future=0.3, + ) + ) + + await asyncio.sleep(0.02) + await bus.emit(SystemEvent()) + await bus.emit(UserActionEvent(action='normal', user_id='16ced2b3-de40-7d9b-85c8-c02241a00354')) + expected = await bus.emit(UserActionEvent(action='special', user_id='391ce6ed-aa72-73d6-87c4-5e20f3c6fc63')) + + found = await find_task + assert found is not None + assert found.event_id == expected.event_id + assert found.event_type == 'UserActionEvent' + finally: + await bus.stop(clear=True) + + async def test_future_class_pattern_matches_generic_base_event_by_event_type(self): + """find(SomeEventClass) should match BaseEvent(event_type='SomeEventClass').""" + bus = EventBus() + + try: + + class DifferentNameFromClass(BaseEvent[str]): + pass + + bus.on('DifferentNameFromClass', lambda e: 'done') + + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.emit(BaseEvent(event_type='DifferentNameFromClass')) + + find_task = 
asyncio.create_task(bus.find(DifferentNameFromClass, past=False, future=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + assert found.event_type == 'DifferentNameFromClass' + finally: + await bus.stop(clear=True) + + async def test_multiple_concurrent_find_waiters_resolve_correct_events(self): + """Concurrent find() waiters should each resolve to the correct event.""" + bus = EventBus() + + try: + # Keep one permanent handler so we can assert temporary find handlers are cleaned up. + bus.on(ScreenshotEvent, lambda e: 'done') + baseline_handler_count = len(bus.handlers_by_key.get('ScreenshotEvent', [])) + + wait_for_a = asyncio.create_task( + bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_3, + past=False, + future=1, + ) + ) + wait_for_b = asyncio.create_task( + bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_4, + past=False, + future=1, + ) + ) + + await asyncio.sleep(0.02) + event_a = await bus.emit(ScreenshotEvent(target_id=TARGET_ID_3)) + event_b = await bus.emit(ScreenshotEvent(target_id=TARGET_ID_4)) + + found_a, found_b = await asyncio.gather(wait_for_a, wait_for_b) + + assert found_a is not None + assert found_b is not None + assert found_a.event_id == event_a.event_id + assert found_b.event_id == event_b.event_id + + # All temporary find handlers should be removed. + assert len(bus.handlers_by_key.get('ScreenshotEvent', [])) == baseline_handler_count + finally: + await bus.stop(clear=True) + + async def test_find_future_resolves_before_handlers_complete(self): + """find(future=...) 
resolves on dispatch, before slow handlers complete.""" + bus = EventBus() + + try: + processing_complete = False + + async def slow_handler(event: ParentEvent) -> str: + nonlocal processing_complete + await asyncio.sleep(0.1) + processing_complete = True + return 'done' + + bus.on(ParentEvent, slow_handler) + + find_task = asyncio.create_task(bus.find(ParentEvent, past=False, future=1)) + await asyncio.sleep(0.01) + + dispatched = bus.emit(ParentEvent()) + found = await find_task + + assert found is not None + assert found.event_id == dispatched.event_id + assert processing_complete is False + assert found.event_status in ('pending', 'started') + + await bus.wait_until_idle() + assert processing_complete is True + finally: + await bus.stop(clear=True) + + async def test_find_returns_coroutine_that_can_be_awaited_later(self): + """A started find(...) coroutine should resolve later after dispatch.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + find_task = asyncio.create_task( + bus.find( + ParentEvent, + where=lambda e: e.event_type == 'ParentEvent', + past=False, + future=1, + ) + ) + + await asyncio.sleep(0.05) + dispatched = await bus.emit(ParentEvent()) + + found = await find_task + assert found is not None + assert found.event_id == dispatched.event_id + finally: + await bus.stop(clear=True) + + +class TestFindNeitherPastNorFuture: + """Tests for find(past=False, future=False) - should return None.""" + + async def test_returns_none_immediately(self): + """find(past=False, future=False) returns None immediately.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event + await bus.emit(ParentEvent()) + + # With both past and future disabled, should return None + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=False) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert elapsed < 0.1 # Should be instant + + finally: + await 
bus.stop(clear=True) + + +class TestFindPastAndFuture: + """Tests for find(past=..., future=...) - combined search.""" + + async def test_returns_past_event_immediately(self): + """find(past=True, future=5) returns past event without waiting.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.emit(ParentEvent()) + + # Should find it immediately from history + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=True, future=5) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is not None + assert found.event_id == dispatched.event_id + assert elapsed < 0.1 # Should be nearly instant + + finally: + await bus.stop(clear=True) + + async def test_waits_for_future_when_no_past_match(self): + """find(past=True, future=1) waits for future if no past match.""" + bus = EventBus() + + try: + bus.on(ChildEvent, lambda e: 'done') + + # Different event type in history + bus.on(ParentEvent, lambda e: 'done') + await bus.emit(ParentEvent()) + + # Start waiting for ChildEvent (not in history) + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.emit(ChildEvent()) + + find_task = asyncio.create_task(bus.find(ChildEvent, past=True, future=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_and_future_independent_control(self): + """past=0.05, future=0.05 uses different windows for each.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + await bus.emit(ParentEvent()) + await asyncio.sleep(0.15) + + # With short past window (0.05s), old event won't be found + # With short future window (0.05s), will timeout + start = datetime.now(UTC) + found = await bus.find(ParentEvent, 
past=0.05, future=0.05) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + # Should have waited ~0.05s for future + assert 0.04 < elapsed < 0.15 + + finally: + await bus.stop(clear=True) + + async def test_past_true_future_float(self): + """past=True searches all history, future=0.1 waits up to 0.1s.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + dispatched = await bus.emit(ParentEvent()) + await asyncio.sleep(0.15) + + # past=True should find the old event (no time window) + found = await bus.find(ParentEvent, past=True, future=0.1) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_future_true_would_wait_forever(self): + """past=0.05 with old events + future=True - verify past window works.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + await bus.emit(ParentEvent()) + await asyncio.sleep(0.15) + + # past=0.05 won't find old event, but we dispatch a new one + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.emit(ParentEvent()) + + find_task = asyncio.create_task(bus.find(ParentEvent, past=0.05, future=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + # Should find the new event from future wait + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_most_recent_wins_across_completed_and_inflight(self): + """find(past=True, future=True) returns newest event even when it is in-flight.""" + bus = EventBus() + + try: + release = asyncio.Event() + + async def numbered_handler(event: NumberedEvent) -> str: + if event.value == 2: + await release.wait() + return f'handled-{event.value}' + + bus.on(NumberedEvent, numbered_handler) + + await 
bus.emit(NumberedEvent(value=1)) + in_flight = bus.emit(NumberedEvent(value=2)) + await asyncio.sleep(0.01) + + found = await bus.find(NumberedEvent, past=True, future=True) + assert found is not None + assert found.event_id == in_flight.event_id + assert found.event_status in ('pending', 'started') + + release.set() + await in_flight + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# find() with child_of Tests +# ============================================================================= + + +class TestFindWithChildOf: + """Tests for find() with child_of parameter.""" + + async def test_returns_child_of_specified_parent(self): + """find(child_of=parent) returns event that is child of parent.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.emit(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + # Find child of parent + found = await bus.find(ChildEvent, child_of=parent, past=True, future=False) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_returns_none_for_non_child(self): + """find(child_of=parent) returns None if event is not a child.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'parent_done') + bus.on(UnrelatedEvent, lambda e: 'unrelated_done') + + parent = await bus.emit(ParentEvent()) + await bus.emit(UnrelatedEvent()) + + # Should not find UnrelatedEvent as child of parent + found = await bus.find(UnrelatedEvent, child_of=parent, past=True, future=False) + + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_finds_grandchild(self): + """find(child_of=grandparent) returns 
grandchild event.""" + bus = EventBus() + + try: + grandchild_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + await bus.emit(ChildEvent()) + return 'parent_done' + + async def child_handler(event: ChildEvent) -> str: + grandchild = await bus.emit(GrandchildEvent()) + grandchild_ref.append(grandchild) + return 'child_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(GrandchildEvent, lambda e: 'grandchild_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + # Find grandchild of parent + found = await bus.find(GrandchildEvent, child_of=parent, past=True, future=False) + + assert found is not None + assert found.event_id == grandchild_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_child_of_works_across_forwarded_buses(self): + """find(child_of=parent) works when events are forwarded across buses.""" + main_bus = EventBus(name='MainBus') + auth_bus = EventBus(name='AuthBus') + + try: + child_ref: list[BaseEvent] = [] + + # Forward ParentEvent from main_bus to auth_bus + main_bus.on(ParentEvent, auth_bus.emit) + + # auth_bus handles ParentEvent and dispatches a ChildEvent + async def auth_handler(event: ParentEvent) -> str: + child = await auth_bus.emit(ChildEvent()) + child_ref.append(child) + return 'auth_done' + + auth_bus.on(ParentEvent, auth_handler) + auth_bus.on(ChildEvent, lambda e: 'child_done') + + # Dispatch on main_bus, which forwards to auth_bus + parent = await main_bus.emit(ParentEvent()) + await main_bus.wait_until_idle() + await auth_bus.wait_until_idle() + + # Find child event on auth_bus using parent from main_bus + found = await auth_bus.find(ChildEvent, child_of=parent, past=5, future=5) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await main_bus.stop(clear=True) + await auth_bus.stop(clear=True) + + async def test_future_wait_with_child_of(self): + 
"""find(child_of=..., past=False, future=...) waits for future matching child.""" + bus = EventBus() + + try: + + async def parent_handler(event: ParentEvent) -> str: + await asyncio.sleep(0.03) + await bus.emit(ChildEvent()) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = bus.emit(ParentEvent()) + + found = await bus.find( + ChildEvent, + child_of=parent, + past=False, + future=0.3, + ) + assert found is not None + assert found.event_parent_id == parent.event_id + + await parent + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# find() coverage for historical lookup/wait patterns +# ============================================================================= + + +class TestFindLegacyPatternCoverage: + """Tests that find() covers all historical lookup/wait patterns.""" + + async def test_find_waits_for_future_event(self): + """find(past=False, future=...) waits for future events.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + async def dispatch_after_delay(): + await asyncio.sleep(0.05) + return await bus.emit(ParentEvent()) + + find_task = asyncio.create_task(bus.find(ParentEvent, past=False, future=1)) + dispatch_task = asyncio.create_task(dispatch_after_delay()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_find_with_include_style_filter(self): + """find(where=...) 
supports include-style filters.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + async def dispatch_events(): + await asyncio.sleep(0.02) + await bus.emit(ScreenshotEvent(target_id='32b90140-a7ee-7ae7-830c-71a099e93cb3')) + await asyncio.sleep(0.02) + return await bus.emit(ScreenshotEvent(target_id='519664bf-c9fa-7654-896b-fb0cc5b6adab')) + + find_task = asyncio.create_task( + bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == '519664bf-c9fa-7654-896b-fb0cc5b6adab', + past=False, + future=1, + ) + ) + dispatch_task = asyncio.create_task(dispatch_events()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.target_id == '519664bf-c9fa-7654-896b-fb0cc5b6adab' + + finally: + await bus.stop(clear=True) + + async def test_find_with_exclude_style_filter(self): + """find(where=...) supports exclude-style filters.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + async def dispatch_events(): + await asyncio.sleep(0.02) + await bus.emit(ScreenshotEvent(target_id='1556eff9-dea5-78ae-8219-7bb92f787370')) + await asyncio.sleep(0.02) + return await bus.emit(ScreenshotEvent(target_id='45c2761f-3475-72aa-8dd8-b3cf4a4923e2')) + + find_task = asyncio.create_task( + bus.find( + ScreenshotEvent, + where=lambda e: e.target_id != '1556eff9-dea5-78ae-8219-7bb92f787370', + past=False, + future=1, + ) + ) + dispatch_task = asyncio.create_task(dispatch_events()) + + found, dispatched = await asyncio.gather(find_task, dispatch_task) + + assert found is not None + assert found.target_id == '45c2761f-3475-72aa-8dd8-b3cf4a4923e2' + + finally: + await bus.stop(clear=True) + + async def test_find_with_past_true_and_future_timeout(self): + """find(past=True, future=...) 
finds already-dispatched events.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.emit(ParentEvent()) + + found = await bus.find(ParentEvent, past=True, future=5) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_find_with_past_float_and_future_timeout(self): + """find(past=5.0, future=...) searches recent history first.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event first + dispatched = await bus.emit(ParentEvent()) + + found = await bus.find(ParentEvent, past=5.0, future=1) + + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_find_with_child_of_and_future_timeout(self): + """find(child_of=parent) filters by parent relationship.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.emit(ChildEvent()) + child_ref.append(child) + return 'parent_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'child_done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + found = await bus.find(ChildEvent, child_of=parent, past=True, future=5) + + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# Race Condition Fix Tests (Issue #15) +# ============================================================================= + + +class TestRaceConditionFix: + """Tests for race conditions where events fire before lookup starts.""" + + async def test_find_catches_already_fired_event(self): + """find(past=True) catches event that fired before the call.""" + bus = EventBus() + + try: + tab_ref: list[BaseEvent] = [] + + 
async def navigate_handler(event: NavigateEvent) -> str: + # This synchronously creates the tab event + tab = await bus.emit(TabCreatedEvent(tab_id='06bee4cf-9f51-7e5d-82d3-65f35169329c')) + tab_ref.append(tab) + return 'navigate_done' + + bus.on(NavigateEvent, navigate_handler) + bus.on(TabCreatedEvent, lambda e: 'tab_created') + + # Dispatch navigation - tab event fires during handler + nav_event = await bus.emit(NavigateEvent(url='https://example.com')) + + # By now TabCreatedEvent has already fired + # Using find(past=True) should catch it + found = await bus.find(TabCreatedEvent, child_of=nav_event, past=True, future=False) + + assert found is not None + assert found.event_id == tab_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_child_of_filters_to_correct_parent(self): + """child_of correctly filters to events from the right parent.""" + bus = EventBus() + + try: + + async def navigate_handler(event: NavigateEvent) -> str: + await bus.emit(TabCreatedEvent(tab_id=f'tab_for_{event.url}')) + return 'navigate_done' + + bus.on(NavigateEvent, navigate_handler) + bus.on(TabCreatedEvent, lambda e: 'tab_created') + + # Two navigations, each creates a tab + nav1 = await bus.emit(NavigateEvent(url='site1')) + nav2 = await bus.emit(NavigateEvent(url='site2')) + + # Find tab created by nav1 specifically + tab1 = await bus.find(TabCreatedEvent, child_of=nav1, past=True, future=False) + + # Find tab created by nav2 specifically + tab2 = await bus.find(TabCreatedEvent, child_of=nav2, past=True, future=False) + + assert tab1 is not None + assert tab2 is not None + assert tab1.tab_id == 'tab_for_site1' + assert tab2.tab_id == 'tab_for_site2' + + finally: + await bus.stop(clear=True) + + +# ============================================================================= +# New Parameter Combination Tests +# ============================================================================= + + +class TestNewParameterCombinations: + """Tests for the new bool 
| float parameter combinations.""" + + async def test_past_true_future_false_searches_all_history(self): + """past=True, future=False searches all history instantly.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event and wait + dispatched = await bus.emit(ParentEvent()) + await asyncio.sleep(0.1) + + # Should find old event with past=True + found = await bus.find(ParentEvent, past=True, future=False) + assert found is not None + assert found.event_id == dispatched.event_id + + finally: + await bus.stop(clear=True) + + async def test_past_float_future_false_filters_by_age(self): + """past=0.05, future=False only searches last 0.05 seconds.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch event + await bus.emit(ParentEvent()) + await asyncio.sleep(0.1) # Make it old + + # past=0.05 means "events in last 0.05 seconds" = nothing old + found = await bus.find(ParentEvent, past=0.05, future=False) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_past_false_future_float_waits_for_timeout(self): + """past=False, future=0.05 waits up to 0.05 seconds.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=False, future=0.05) + elapsed = (datetime.now(UTC) - start).total_seconds() + + assert found is None + assert 0.04 < elapsed < 0.15 # Should wait ~0.05s + + finally: + await bus.stop(clear=True) + + async def test_past_true_future_true_searches_all_and_waits_forever(self): + """past=True, future=True searches all history, would wait forever.""" + bus = EventBus() + + try: + bus.on(ParentEvent, lambda e: 'done') + + # Dispatch an old event + dispatched = await bus.emit(ParentEvent()) + await asyncio.sleep(0.1) + + # past=True should find the old event immediately + start = datetime.now(UTC) + found = await bus.find(ParentEvent, past=True, future=True) + elapsed = 
(datetime.now(UTC) - start).total_seconds() + + assert found is not None + assert found.event_id == dispatched.event_id + assert elapsed < 0.1 # Should be instant (found in past) + + finally: + await bus.stop(clear=True) + + async def test_find_with_where_and_past_float(self): + """where filter combined with past=float works correctly.""" + bus = EventBus() + + try: + bus.on(ScreenshotEvent, lambda e: 'done') + + # Dispatch events with different target_ids + await bus.emit(ScreenshotEvent(target_id=TARGET_ID_1)) + await asyncio.sleep(0.15) + event2 = await bus.emit(ScreenshotEvent(target_id=TARGET_ID_2)) + + # Find with both where filter and past window + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_2, + past=0.1, # Only search last 0.1 seconds + future=False, + ) + assert found is not None + assert found.event_id == event2.event_id + + # tab1 is too old for the past window + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_1, + past=0.1, + future=False, + ) + assert found is None + + finally: + await bus.stop(clear=True) + + async def test_find_with_child_of_and_past_float(self): + """child_of filter combined with past=float works correctly.""" + bus = EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.emit(ChildEvent()) + child_ref.append(child) + return 'done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, lambda e: 'done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + # Find child with past window - should work since event is fresh + found = await bus.find( + ChildEvent, + child_of=parent, + past=5, # 5 second window + future=False, + ) + assert found is not None + assert found.event_id == child_ref[0].event_id + + finally: + await bus.stop(clear=True) + + async def test_find_with_all_parameters(self): + """All parameters combined work correctly.""" + bus = 
EventBus() + + try: + child_ref: list[BaseEvent] = [] + + async def parent_handler(event: ParentEvent) -> str: + child = await bus.emit(ScreenshotEvent(target_id=TARGET_ID_CHILD)) + child_ref.append(child) + return 'done' + + bus.on(ParentEvent, parent_handler) + bus.on(ScreenshotEvent, lambda e: 'done') + + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + # Find with all parameters + found = await bus.find( + ScreenshotEvent, + where=lambda e: e.target_id == TARGET_ID_CHILD, + child_of=parent, + past=5, + future=False, + ) + assert found is not None + assert found.event_id == child_ref[0].event_id + assert found.target_id == TARGET_ID_CHILD + + finally: + await bus.stop(clear=True) + + +if __name__ == '__main__': + pytest.main([__file__, '-v', '-s']) diff --git a/tests/test_eventbus_forwarding.py b/tests/test_eventbus_forwarding.py new file mode 100644 index 0000000..6989152 --- /dev/null +++ b/tests/test_eventbus_forwarding.py @@ -0,0 +1,222 @@ +import asyncio + +import pytest + +from bubus import BaseEvent, EventBus, EventHandlerConcurrencyMode + + +class RelayEvent(BaseEvent[str]): + """Minimal event used for forwarding completion race regression coverage.""" + + +class SelfParentForwardEvent(BaseEvent[str]): + """Event used to guard against self-parent cycles during forwarding.""" + + +class ForwardedDefaultsTriggerEvent(BaseEvent[None]): + """Event that emits forwarded children to validate per-bus default resolution.""" + + +class ForwardedDefaultsChildEvent(BaseEvent[str]): + """Forwarded child event used to validate local-default vs explicit-override behavior.""" + + mode: str + + +class ForwardedFirstDefaultsEvent(BaseEvent[str]): + """Forwarded event used to validate first-mode behavior against processing-bus defaults.""" + + +def _dump_bus_state(buses: list[EventBus]) -> str: + lines: list[str] = [] + for bus in buses: + queue_size = bus.pending_event_queue.qsize() if bus.pending_event_queue else 0 + lines.append( + f'{bus.label} 
queue={queue_size} active={len(bus.in_flight_event_ids)} ' + f'processing={len(bus.processing_event_ids)} history={len(bus.event_history)}' + ) + for bus in buses: + lines.append(f'--- {bus.label}.log_tree() ---') + lines.append(bus.log_tree()) + return '\n'.join(lines) + + +@pytest.mark.asyncio +async def test_forwarded_event_does_not_leave_stale_active_ids(): + """ + Regression test for the original forwarding completion race: + an event could be marked completed while another bus still retained its + event_id in in_flight_event_ids, causing wait_until_idle() to hang. + """ + peer1 = EventBus(name='RacePeer1') + peer2 = EventBus(name='RacePeer2') + peer3 = EventBus(name='RacePeer3') + buses = [peer1, peer2, peer3] + + async def local_handler(_event: BaseEvent[str]) -> str: + return 'ok' + + peer1.on('*', local_handler) + peer2.on('*', local_handler) + peer3.on('*', local_handler) + + # Circular forwarding: peer1 -> peer2 -> peer3 -> peer1 + peer1.on('*', peer2.emit) + peer2.on('*', peer3.emit) + peer3.on('*', peer1.emit) + + async def wait_all_idle(timeout: float = 5.0) -> None: + for bus in buses: + await asyncio.wait_for(bus.wait_until_idle(), timeout=timeout) + + try: + # Warm-up propagation (this setup made the original bug deterministic on + # the immediately-following dispatch from peer2). 
+ peer1.emit(RelayEvent()) + await asyncio.sleep(0.2) + await wait_all_idle() + + second = peer2.emit(RelayEvent()) + await asyncio.sleep(0.2) + try: + await wait_all_idle() + except TimeoutError: + pytest.fail(f'Forwarding completion race left bus(es) non-idle.\n{_dump_bus_state(buses)}') + + assert second.event_status == 'completed' + for bus in buses: + assert second.event_id not in bus.in_flight_event_ids + assert second.event_id not in bus.processing_event_ids + + finally: + await peer1.stop(clear=True) + await peer2.stop(clear=True) + await peer3.stop(clear=True) + + +@pytest.mark.asyncio +async def test_forwarding_same_event_does_not_set_self_parent_id(): + origin = EventBus(name='SelfParentOrigin') + target = EventBus(name='SelfParentTarget') + + async def on_origin(_event: SelfParentForwardEvent) -> str: + return 'origin-ok' + + async def on_target(_event: SelfParentForwardEvent) -> str: + return 'target-ok' + + origin.on(SelfParentForwardEvent, on_origin) + target.on(SelfParentForwardEvent, on_target) + origin.on('*', target.emit) + + try: + event = origin.emit(SelfParentForwardEvent()) + await event + await asyncio.gather(origin.wait_until_idle(), target.wait_until_idle()) + + assert event.event_parent_id is None + assert event.event_path == [origin.label, target.label] + finally: + await origin.stop(clear=True) + await target.stop(clear=True) + + +@pytest.mark.asyncio +async def test_forwarded_event_uses_processing_bus_defaults_unless_overridden(): + bus_a = EventBus(name='ForwardDefaultsA', event_handler_concurrency='serial') + bus_b = EventBus(name='ForwardDefaultsB', event_handler_concurrency='parallel') + log: list[str] = [] + + async def handler_1(event: ForwardedDefaultsChildEvent) -> str: + log.append(f'{event.mode}:b1_start') + await asyncio.sleep(0.015) + log.append(f'{event.mode}:b1_end') + return 'b1' + + async def handler_2(event: ForwardedDefaultsChildEvent) -> str: + log.append(f'{event.mode}:b2_start') + await asyncio.sleep(0.005) + 
log.append(f'{event.mode}:b2_end') + return 'b2' + + async def trigger(event: ForwardedDefaultsTriggerEvent) -> None: + assert event.event_bus is not None + inherited = event.event_bus.emit(ForwardedDefaultsChildEvent(mode='inherited', event_timeout=None)) + bus_b.emit(inherited) + await inherited + + overridden = event.event_bus.emit( + ForwardedDefaultsChildEvent( + mode='override', + event_timeout=None, + event_handler_concurrency=EventHandlerConcurrencyMode.SERIAL, + ) + ) + bus_b.emit(overridden) + await overridden + + bus_b.on(ForwardedDefaultsChildEvent, handler_1) + bus_b.on(ForwardedDefaultsChildEvent, handler_2) + bus_a.on(ForwardedDefaultsTriggerEvent, trigger) + + try: + top = bus_a.emit(ForwardedDefaultsTriggerEvent()) + await top + await asyncio.gather(bus_a.wait_until_idle(), bus_b.wait_until_idle()) + + inherited_b1_end = log.index('inherited:b1_end') + inherited_b2_start = log.index('inherited:b2_start') + assert inherited_b2_start < inherited_b1_end, ( + f'inherited defaults should use bus_b parallel handler concurrency; got log: {log}' + ) + + override_b1_end = log.index('override:b1_end') + override_b2_start = log.index('override:b2_start') + assert override_b1_end < override_b2_start, ( + f'explicit event override should force serial handler concurrency; got log: {log}' + ) + finally: + await bus_a.stop(clear=True) + await bus_b.stop(clear=True) + + +@pytest.mark.asyncio +async def test_forwarded_first_mode_uses_processing_bus_handler_concurrency_defaults(): + bus_a = EventBus( + name='ForwardedFirstDefaultsA', + event_handler_concurrency='serial', + event_handler_completion='all', + ) + bus_b = EventBus( + name='ForwardedFirstDefaultsB', + event_handler_concurrency='parallel', + event_handler_completion='first', + ) + log: list[str] = [] + + async def slow_handler(_event: ForwardedFirstDefaultsEvent) -> str: + log.append('slow_start') + await asyncio.sleep(0.02) + log.append('slow_end') + return 'slow' + + async def fast_handler(_event: 
ForwardedFirstDefaultsEvent) -> str: + log.append('fast_start') + await asyncio.sleep(0.001) + log.append('fast_end') + return 'fast' + + bus_a.on('*', bus_b.emit) + bus_b.on(ForwardedFirstDefaultsEvent, slow_handler) + bus_b.on(ForwardedFirstDefaultsEvent, fast_handler) + + try: + result = await bus_a.emit(ForwardedFirstDefaultsEvent(event_timeout=None)).first() + await asyncio.gather(bus_a.wait_until_idle(), bus_b.wait_until_idle()) + + assert result == 'fast', f'first-mode on processing bus should pick fast handler; got result={result!r} log={log}' + assert 'slow_start' in log, f'slow handler should start under parallel first-mode; log={log}' + assert 'fast_start' in log, f'fast handler should start under parallel first-mode; log={log}' + finally: + await bus_a.stop(clear=True) + await bus_b.stop(clear=True) diff --git a/tests/test_eventbus_locking.py b/tests/test_eventbus_locking.py new file mode 100644 index 0000000..b84d0c5 --- /dev/null +++ b/tests/test_eventbus_locking.py @@ -0,0 +1,339 @@ +import asyncio + +import pytest + +from bubus import BaseEvent, EventBus, EventConcurrencyMode +from bubus.retry import retry + + +class GlobalSerialEvent(BaseEvent[str]): + order: int = 0 + source: str = 'a' + + +class PerBusSerialEvent(BaseEvent[str]): + order: int = 0 + source: str = 'a' + + +class ParallelEvent(BaseEvent[str]): + order: int = 0 + + +class ParallelHandlerEvent(BaseEvent[str]): + pass + + +class OverrideParallelEvent(BaseEvent[str]): + order: int = 0 + event_concurrency: EventConcurrencyMode | None = EventConcurrencyMode.PARALLEL + + +class OverrideSerialEvent(BaseEvent[str]): + order: int = 0 + event_concurrency: EventConcurrencyMode | None = EventConcurrencyMode.BUS_SERIAL + + +class ParentEvent(BaseEvent[str]): + pass + + +class ChildEvent(BaseEvent[str]): + pass + + +class SiblingEvent(BaseEvent[str]): + pass + + +class HandlerLockEvent(BaseEvent[str]): + order: int = 0 + source: str = 'a' + + +@pytest.mark.asyncio +async def 
test_event_concurrency_global_serial_allows_only_one_inflight_across_buses() -> None: + bus_a = EventBus(name='GlobalSerialA', event_concurrency='global-serial') + bus_b = EventBus(name='GlobalSerialB', event_concurrency='global-serial') + in_flight = 0 + max_in_flight = 0 + starts: list[str] = [] + + async def handler(event: GlobalSerialEvent) -> None: + nonlocal in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + starts.append(f'{event.source}:{event.order}') + await asyncio.sleep(0.01) + in_flight -= 1 + + bus_a.on(GlobalSerialEvent, handler) + bus_b.on(GlobalSerialEvent, handler) + + try: + for i in range(3): + bus_a.emit(GlobalSerialEvent(order=i, source='a')) + bus_b.emit(GlobalSerialEvent(order=i, source='b')) + + await asyncio.gather(bus_a.wait_until_idle(), bus_b.wait_until_idle()) + + starts_a = [int(value.split(':')[1]) for value in starts if value.startswith('a:')] + starts_b = [int(value.split(':')[1]) for value in starts if value.startswith('b:')] + assert max_in_flight == 1 + assert starts_a == [0, 1, 2] + assert starts_b == [0, 1, 2] + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_event_concurrency_bus_serial_serializes_per_bus_but_overlaps_across_buses() -> None: + bus_a = EventBus(name='BusSerialA', event_concurrency='bus-serial') + bus_b = EventBus(name='BusSerialB', event_concurrency='bus-serial') + b_started = asyncio.Event() + + in_flight_global = 0 + max_in_flight_global = 0 + in_flight_a = 0 + in_flight_b = 0 + max_in_flight_a = 0 + max_in_flight_b = 0 + + async def on_a(_event: PerBusSerialEvent) -> None: + nonlocal in_flight_global, max_in_flight_global, in_flight_a, max_in_flight_a + in_flight_global += 1 + in_flight_a += 1 + max_in_flight_global = max(max_in_flight_global, in_flight_global) + max_in_flight_a = max(max_in_flight_a, in_flight_a) + await b_started.wait() + await asyncio.sleep(0.01) + 
in_flight_global -= 1 + in_flight_a -= 1 + + async def on_b(_event: PerBusSerialEvent) -> None: + nonlocal in_flight_global, max_in_flight_global, in_flight_b, max_in_flight_b + in_flight_global += 1 + in_flight_b += 1 + max_in_flight_global = max(max_in_flight_global, in_flight_global) + max_in_flight_b = max(max_in_flight_b, in_flight_b) + b_started.set() + await asyncio.sleep(0.01) + in_flight_global -= 1 + in_flight_b -= 1 + + bus_a.on(PerBusSerialEvent, on_a) + bus_b.on(PerBusSerialEvent, on_b) + + try: + bus_a.emit(PerBusSerialEvent(order=0, source='a')) + bus_b.emit(PerBusSerialEvent(order=0, source='b')) + await asyncio.gather(bus_a.wait_until_idle(), bus_b.wait_until_idle()) + + assert max_in_flight_a == 1 + assert max_in_flight_b == 1 + assert max_in_flight_global >= 2 + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_event_concurrency_parallel_allows_same_bus_events_to_overlap() -> None: + bus = EventBus(name='ParallelEventBus', event_concurrency='parallel', event_handler_concurrency='parallel') + release = asyncio.Event() + overlap_seen = asyncio.Event() + + in_flight = 0 + max_in_flight = 0 + + async def handler(_event: ParallelEvent) -> None: + nonlocal in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + if in_flight >= 2: + overlap_seen.set() + await release.wait() + await asyncio.sleep(0.005) + in_flight -= 1 + + bus.on(ParallelEvent, handler) + + try: + first = bus.emit(ParallelEvent(order=0)) + second = bus.emit(ParallelEvent(order=1)) + await asyncio.wait_for(overlap_seen.wait(), timeout=1.0) + release.set() + await asyncio.gather(first, second) + await bus.wait_until_idle() + + assert max_in_flight >= 2 + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_event_handler_concurrency_parallel_runs_handlers_for_same_event_concurrently() -> None: + bus = 
EventBus(name='ParallelHandlerBus', event_concurrency='bus-serial', event_handler_concurrency='parallel') + release = asyncio.Event() + overlap_seen = asyncio.Event() + in_flight = 0 + max_in_flight = 0 + + async def handler_a(_event: ParallelHandlerEvent) -> None: + nonlocal in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + if in_flight >= 2: + overlap_seen.set() + await release.wait() + in_flight -= 1 + + async def handler_b(_event: ParallelHandlerEvent) -> None: + nonlocal in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + if in_flight >= 2: + overlap_seen.set() + await release.wait() + in_flight -= 1 + + bus.on(ParallelHandlerEvent, handler_a) + bus.on(ParallelHandlerEvent, handler_b) + + try: + event = bus.emit(ParallelHandlerEvent()) + await asyncio.wait_for(overlap_seen.wait(), timeout=1.0) + release.set() + await event + await bus.wait_until_idle() + + assert max_in_flight >= 2 + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_event_concurrency_override_parallel_beats_bus_serial_default() -> None: + bus = EventBus(name='OverrideParallelBus', event_concurrency='bus-serial', event_handler_concurrency='parallel') + release = asyncio.Event() + overlap_seen = asyncio.Event() + in_flight = 0 + max_in_flight = 0 + + async def handler(_event: OverrideParallelEvent) -> None: + nonlocal in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + if in_flight >= 2: + overlap_seen.set() + await release.wait() + in_flight -= 1 + + bus.on(OverrideParallelEvent, handler) + + try: + first = bus.emit(OverrideParallelEvent(order=0, event_concurrency=EventConcurrencyMode.PARALLEL)) + second = bus.emit(OverrideParallelEvent(order=1, event_concurrency=EventConcurrencyMode.PARALLEL)) + await asyncio.wait_for(overlap_seen.wait(), timeout=1.0) + release.set() + await asyncio.gather(first, second) + await bus.wait_until_idle() 
+ + assert max_in_flight >= 2 + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_event_concurrency_override_bus_serial_beats_bus_parallel_default() -> None: + bus = EventBus(name='OverrideBusSerialBus', event_concurrency='parallel', event_handler_concurrency='parallel') + release = asyncio.Event() + in_flight = 0 + max_in_flight = 0 + + async def handler(_event: OverrideSerialEvent) -> None: + nonlocal in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + await release.wait() + in_flight -= 1 + + bus.on(OverrideSerialEvent, handler) + + try: + first = bus.emit(OverrideSerialEvent(order=0, event_concurrency=EventConcurrencyMode.BUS_SERIAL)) + second = bus.emit(OverrideSerialEvent(order=1, event_concurrency=EventConcurrencyMode.BUS_SERIAL)) + await asyncio.sleep(0.02) + assert max_in_flight == 1 + + release.set() + await asyncio.gather(first, second) + await bus.wait_until_idle() + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_queue_jump_awaited_child_preempts_queued_sibling_on_same_bus() -> None: + bus = EventBus(name='QueueJumpBus', event_concurrency='bus-serial', event_handler_concurrency='serial') + order: list[str] = [] + + async def on_parent(event: ParentEvent) -> None: + order.append('parent_start') + child = event.event_bus.emit(ChildEvent()) + await child + order.append('parent_end') + + async def on_child(_event: ChildEvent) -> None: + order.append('child_start') + await asyncio.sleep(0.005) + order.append('child_end') + + async def on_sibling(_event: SiblingEvent) -> None: + order.append('sibling') + + bus.on(ParentEvent, on_parent) + bus.on(ChildEvent, on_child) + bus.on(SiblingEvent, on_sibling) + + try: + parent = bus.emit(ParentEvent()) + sibling = bus.emit(SiblingEvent()) + await asyncio.gather(parent, sibling) + await bus.wait_until_idle() + + assert order == ['parent_start', 'child_start', 'child_end', 'parent_end', 
'sibling'] + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_retry_global_handler_lock_serializes_handlers_across_buses() -> None: + bus_a = EventBus(name='GlobalHandlerA', event_concurrency='parallel', event_handler_concurrency='serial') + bus_b = EventBus(name='GlobalHandlerB', event_concurrency='parallel', event_handler_concurrency='serial') + + in_flight = 0 + max_in_flight = 0 + + @retry(semaphore_scope='global', semaphore_name='eventbus_locking_global_handler', semaphore_limit=1) + async def locked_handler(_event: HandlerLockEvent) -> None: + nonlocal in_flight, max_in_flight + in_flight += 1 + max_in_flight = max(max_in_flight, in_flight) + await asyncio.sleep(0.005) + in_flight -= 1 + + bus_a.on(HandlerLockEvent, locked_handler) + bus_b.on(HandlerLockEvent, locked_handler) + + try: + for i in range(4): + bus_a.emit(HandlerLockEvent(order=i, source='a')) + bus_b.emit(HandlerLockEvent(order=i, source='b')) + + await asyncio.gather(bus_a.wait_until_idle(), bus_b.wait_until_idle()) + assert max_in_flight == 1 + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) diff --git a/tests/test_log_history_tree.py b/tests/test_eventbus_log_tree.py similarity index 59% rename from tests/test_log_history_tree.py rename to tests/test_eventbus_log_tree.py index ede0b48..3d50654 100644 --- a/tests/test_log_history_tree.py +++ b/tests/test_eventbus_log_tree.py @@ -1,9 +1,9 @@ """Test the EventBus.log_tree() method""" -from datetime import UTC, datetime -from typing import Any +from typing import Any, Literal -from bubus import BaseEvent, EventBus, EventResult +from bubus import BaseEvent, EventBus, EventHandler, EventResult +from bubus.helpers import monotonic_datetime class RootEvent(BaseEvent[str]): @@ -15,25 +15,53 @@ class ChildEvent(BaseEvent[list[int]]): class GrandchildEvent(BaseEvent[str]): - event_result_type: Any = str - nested: dict[str, int] = {'level': 3} +def 
_result_with_handler( + *, + bus: EventBus, + event_id: str, + handler_id: str, + handler_name: str, + status: Literal['pending', 'started', 'completed', 'error'], + started_at: str | None = None, + completed_at: str | None = None, + result: Any = None, + error: BaseException | None = None, +) -> EventResult[Any]: + handler = EventHandler( + id=handler_id, + handler_name=handler_name, + eventbus_id=bus.id, + eventbus_name=bus.name, + event_pattern='*', + ) + return EventResult[Any]( + event_id=event_id, + handler=handler, + status=status, + started_at=started_at, + completed_at=completed_at, + result=result, + error=error, + ) + + def test_log_history_tree_single_event(capsys: Any) -> None: """Test tree output with a single event""" bus = EventBus(name='SingleBus') # Create and add event to history event = RootEvent(data='test') - event.event_processed_at = datetime.now(UTC) + event.event_completed_at = monotonic_datetime() bus.event_history[event.event_id] = event captured_str = bus.log_tree() # captured = capsys.readouterr() # captured_str = captured.out + captured.err - assert '└──' in captured_str and 'βœ…' in captured_str and 'RootEvent' in captured_str + assert '└──' in captured_str and 'RootEvent' in captured_str # Should show start time and duration assert '[' in captured_str and ']' in captured_str @@ -44,27 +72,26 @@ def test_log_history_tree_with_handlers(capsys: Any) -> None: # Create event with handler results event = RootEvent(data='test') - event.event_processed_at = datetime.now(UTC) + event.event_completed_at = monotonic_datetime() # Add handler result - handler_id = f'{id(bus)}.123456' - event.event_results[handler_id] = EventResult[str]( + handler_id = '018f8e40-1234-7000-8000-000000000101' + event.event_results[handler_id] = _result_with_handler( + bus=bus, event_id=event.event_id, handler_id=handler_id, handler_name='test_handler', - eventbus_id=str(id(bus)), - eventbus_name='HandlerBus', status='completed', - started_at=datetime.now(UTC), - 
completed_at=datetime.now(UTC), + started_at=monotonic_datetime(), + completed_at=monotonic_datetime(), result='status: success', ) bus.event_history[event.event_id] = event captured_str = bus.log_tree() - assert '└── βœ… RootEvent#' in captured_str - assert '└── βœ… HandlerBus.test_handler#' in captured_str + assert '└── RootEvent#' in captured_str + assert f'└── βœ… {bus.label}.test_handler#' in captured_str assert "'status: success'" in captured_str @@ -73,26 +100,25 @@ def test_log_history_tree_with_errors(capsys: Any) -> None: bus = EventBus(name='ErrorBus') event = RootEvent() - event.event_processed_at = datetime.now(UTC) + event.event_completed_at = monotonic_datetime() # Add error result - handler_id = f'{id(bus)}.789' - event.event_results[handler_id] = EventResult[str]( + handler_id = '018f8e40-1234-7000-8000-000000000102' + event.event_results[handler_id] = _result_with_handler( + bus=bus, event_id=event.event_id, handler_id=handler_id, handler_name='error_handler', - eventbus_id=str(id(bus)), - eventbus_name='ErrorBus', status='error', - started_at=datetime.now(UTC), - completed_at=datetime.now(UTC), + started_at=monotonic_datetime(), + completed_at=monotonic_datetime(), error=ValueError('Test error message'), ) bus.event_history[event.event_id] = event captured_str = bus.log_tree() - assert 'ErrorBus.error_handler#' in captured_str + assert f'{bus.label}.error_handler#' in captured_str assert 'ValueError: Test error message' in captured_str @@ -102,63 +128,60 @@ def test_log_history_tree_complex_nested() -> None: # Create root event root = RootEvent(data='root_data') - root.event_processed_at = datetime.now(UTC) + root.event_completed_at = monotonic_datetime() # Add root handler with child events - root_handler_id = f'{id(bus)}.1001' - root.event_results[root_handler_id] = EventResult[str]( + root_handler_id = '018f8e40-1234-7000-8000-000000000103' + root.event_results[root_handler_id] = _result_with_handler( + bus=bus, event_id=root.event_id, 
handler_id=root_handler_id, handler_name='root_handler', - eventbus_id=str(id(bus)), - eventbus_name='ComplexBus', status='completed', - started_at=datetime.now(UTC), - completed_at=datetime.now(UTC), + started_at=monotonic_datetime(), + completed_at=monotonic_datetime(), result='Root processed', ) # Create child event child = ChildEvent(value=100) child.event_parent_id = root.event_id - child.event_processed_at = datetime.now(UTC) + child.event_completed_at = monotonic_datetime() # Add child to root handler's event_children root.event_results[root_handler_id].event_children.append(child) # Add child handler with grandchild - child_handler_id = f'{id(bus)}.2001' - child.event_results[child_handler_id] = EventResult[list[int]]( + child_handler_id = '018f8e40-1234-7000-8000-000000000104' + child.event_results[child_handler_id] = _result_with_handler( + bus=bus, event_id=child.event_id, handler_id=child_handler_id, handler_name='child_handler', - eventbus_id=str(id(bus)), - eventbus_name='ComplexBus', status='completed', - started_at=datetime.now(UTC), - completed_at=datetime.now(UTC), + started_at=monotonic_datetime(), + completed_at=monotonic_datetime(), result=[1, 2, 3], ) # Create grandchild grandchild = GrandchildEvent() grandchild.event_parent_id = child.event_id - grandchild.event_processed_at = datetime.now(UTC) + grandchild.event_completed_at = monotonic_datetime() # Add grandchild to child handler's event_children child.event_results[child_handler_id].event_children.append(grandchild) # Add grandchild handler - grandchild_handler_id = f'{id(bus)}.3001' - grandchild.event_results[grandchild_handler_id] = EventResult[str]( + grandchild_handler_id = '018f8e40-1234-7000-8000-000000000105' + grandchild.event_results[grandchild_handler_id] = _result_with_handler( + bus=bus, event_id=grandchild.event_id, handler_id=grandchild_handler_id, handler_name='grandchild_handler', - eventbus_id=str(id(bus)), - eventbus_name='ComplexBus', status='completed', - 
started_at=datetime.now(UTC), - completed_at=datetime.now(UTC), + started_at=monotonic_datetime(), + completed_at=monotonic_datetime(), result=None, ) @@ -170,12 +193,12 @@ def test_log_history_tree_complex_nested() -> None: output = bus.log_tree() # Check structure - note that events may appear both as handler children and in parent mapping - assert '└── βœ… RootEvent#' in output - assert 'βœ… ComplexBus.root_handler#' in output - assert 'βœ… ChildEvent#' in output - assert 'βœ… ComplexBus.child_handler#' in output - assert 'βœ… GrandchildEvent#' in output - assert 'βœ… ComplexBus.grandchild_handler#' in output + assert '└── RootEvent#' in output + assert f'βœ… {bus.label}.root_handler#' in output + assert 'ChildEvent#' in output + assert f'βœ… {bus.label}.child_handler#' in output + assert 'GrandchildEvent#' in output + assert f'βœ… {bus.label}.grandchild_handler#' in output # Check result formatting assert "'Root processed'" in output @@ -189,10 +212,10 @@ def test_log_history_tree_multiple_roots(capsys: Any) -> None: # Create multiple root events root1 = RootEvent(data='first') - root1.event_processed_at = datetime.now(UTC) + root1.event_completed_at = monotonic_datetime() root2 = RootEvent(data='second') - root2.event_processed_at = datetime.now(UTC) + root2.event_completed_at = monotonic_datetime() bus.event_history[root1.event_id] = root1 bus.event_history[root2.event_id] = root2 @@ -200,8 +223,8 @@ def test_log_history_tree_multiple_roots(capsys: Any) -> None: captured_str = bus.log_tree() # Both roots should be shown - assert captured_str.count('β”œβ”€β”€ βœ… RootEvent#') == 1 # First root - assert captured_str.count('└── βœ… RootEvent#') == 1 # Last root + assert captured_str.count('β”œβ”€β”€ RootEvent#') == 1 # First root + assert captured_str.count('└── RootEvent#') == 1 # Last root def test_log_history_tree_timing_info(capsys: Any) -> None: @@ -209,19 +232,18 @@ def test_log_history_tree_timing_info(capsys: Any) -> None: bus = 
EventBus(name='TimingBus') event = RootEvent() - event.event_processed_at = datetime.now(UTC) + event.event_completed_at = monotonic_datetime() # Add handler with timing - start_time = datetime.now(UTC) - end_time = datetime.now(UTC) + start_time = monotonic_datetime() + end_time = monotonic_datetime() - handler_id = f'{id(bus)}.999' - event.event_results[handler_id] = EventResult[str]( + handler_id = '018f8e40-1234-7000-8000-000000000106' + event.event_results[handler_id] = _result_with_handler( + bus=bus, event_id=event.event_id, handler_id=handler_id, handler_name='timed_handler', - eventbus_id=str(id(bus)), - eventbus_name='TimingBus', status='completed', started_at=start_time, completed_at=end_time, @@ -243,20 +265,19 @@ def test_log_history_tree_running_handler(capsys: Any) -> None: event = RootEvent() # Add running handler (started but not completed) - handler_id = f'{id(bus)}.555' - event.event_results[handler_id] = EventResult[str]( + handler_id = '018f8e40-1234-7000-8000-000000000107' + event.event_results[handler_id] = _result_with_handler( + bus=bus, event_id=event.event_id, handler_id=handler_id, handler_name='running_handler', - eventbus_id=str(id(bus)), - eventbus_name='RunningBus', status='started', - started_at=datetime.now(UTC), + started_at=monotonic_datetime(), completed_at=None, ) bus.event_history[event.event_id] = event captured_str = bus.log_tree() - assert 'RunningBus.running_handler#' in captured_str + assert f'{bus.label}.running_handler#' in captured_str assert 'RootEvent#' in captured_str # Event should also show as running diff --git a/tests/test_eventbus_name_conflict_gc.py b/tests/test_eventbus_name_conflict_gc.py new file mode 100644 index 0000000..e04b396 --- /dev/null +++ b/tests/test_eventbus_name_conflict_gc.py @@ -0,0 +1,303 @@ +# pyright: basic +""" +Tests for EventBus name conflict resolution with garbage collection. 
+ +Tests that EventBus instances that would be garbage collected don't cause +name conflicts when creating new instances with the same name. +""" + +import asyncio +import gc +import weakref + +import pytest + +from bubus import BaseEvent, EventBus + + +class TestNameConflictGC: + """Test EventBus name conflict resolution with garbage collection""" + + def test_name_conflict_with_live_reference(self): + """Test that name conflict generates a warning and auto-generates a unique name""" + # Create an EventBus with a specific name + bus1 = EventBus(name='GCTestConflict') + + # Try to create another with the same name - should warn and auto-generate unique name + with pytest.warns(UserWarning, match='EventBus with name "GCTestConflict" already exists'): + bus2 = EventBus(name='GCTestConflict') + + # The second bus should have a unique name + assert bus2.name.startswith('GCTestConflict_') + assert bus2.name != 'GCTestConflict' + assert len(bus2.name) == len('GCTestConflict_') + 8 # Original name + underscore + 8 char suffix + + def test_name_no_conflict_after_deletion(self): + """Test that name conflict is NOT raised after the existing bus is deleted and GC runs""" + import gc + + # Create an EventBus with a specific name + bus1 = EventBus(name='GCTestBus1') + + # Delete the reference and force GC + del bus1 + gc.collect() # Force garbage collection to release the WeakSet reference + + # Creating another with the same name should work since the first one was collected + bus2 = EventBus(name='GCTestBus1') + assert bus2.name == 'GCTestBus1' + + def test_name_no_conflict_with_no_reference(self): + """Test that name conflict is NOT raised when the existing bus was never assigned""" + import gc + + # Create an EventBus with a specific name but don't keep a reference + EventBus(name='GCTestBus2') # No assignment, will be garbage collected + gc.collect() # Force garbage collection + + # Creating another with the same name should work since the first one is gone + bus2 = 
EventBus(name='GCTestBus2') + assert bus2.name == 'GCTestBus2' + + def test_name_conflict_with_weak_reference_only(self): + """Test that name conflict is NOT raised when only weak references exist""" + import gc + + # Create an EventBus and keep only a weak reference + bus1 = EventBus(name='GCTestBus3') + weak_ref = weakref.ref(bus1) + + # Verify the weak reference works + assert weak_ref() is bus1 + + # Delete the strong reference and force GC + del bus1 + gc.collect() # Force garbage collection + + # At this point, only the weak reference exists (and the WeakSet reference) + # Creating another with the same name should work + bus2 = EventBus(name='GCTestBus3') + assert bus2.name == 'GCTestBus3' + + # The weak reference should now return None + assert weak_ref() is None + + def test_multiple_buses_with_gc(self): + """Test multiple EventBus instances with some being garbage collected""" + import gc + + # Create multiple buses, some with strong refs, some without + bus1 = EventBus(name='GCMulti1') + EventBus(name='GCMulti2') # Will be GC'd + bus3 = EventBus(name='GCMulti3') + EventBus(name='GCMulti4') # Will be GC'd + + gc.collect() # Force garbage collection + + # Should be able to create new buses with the names of GC'd buses + bus2_new = EventBus(name='GCMulti2') + bus4_new = EventBus(name='GCMulti4') + + # But not with names of buses that still exist - they get auto-generated names + with pytest.warns(UserWarning, match='EventBus with name "GCMulti1" already exists'): + bus1_conflict = EventBus(name='GCMulti1') + assert bus1_conflict.name.startswith('GCMulti1_') + + with pytest.warns(UserWarning, match='EventBus with name "GCMulti3" already exists'): + bus3_conflict = EventBus(name='GCMulti3') + assert bus3_conflict.name.startswith('GCMulti3_') + + @pytest.mark.asyncio + async def test_name_conflict_after_stop_and_clear(self): + """Test that clearing an EventBus allows reusing its name""" + import gc + + # Create an EventBus + bus1 = EventBus(name='GCStopClear') 
+ + # Stop and clear it (this renames the bus to _stopped_* and removes from all_instances) + await bus1.stop(clear=True) + + # Delete the reference and force GC + del bus1 + gc.collect() + + # Now we should be able to create a new one with the same name + bus2 = EventBus(name='GCStopClear') + assert bus2.name == 'GCStopClear' + + def test_weakset_behavior(self): + """Test that the WeakSet properly tracks EventBus instances""" + initial_count = len(EventBus.all_instances) + + # Create some buses + bus1 = EventBus(name='WeakTest1') + bus2 = EventBus(name='WeakTest2') + bus3 = EventBus(name='WeakTest3') + + # Check they're tracked + assert len(EventBus.all_instances) == initial_count + 3 + + # Delete one + del bus2 + + # The WeakSet should automatically remove it (no gc.collect needed) + # But we need to check the actual buses in the set, not just the count + names = {bus.name for bus in EventBus.all_instances if hasattr(bus, 'name') and bus.name.startswith('WeakTest')} + assert 'WeakTest1' in names + assert 'WeakTest3' in names + # WeakTest2 might still be there until the next iteration + + def test_eventbus_removed_from_weakset(self): + """Test that dead EventBus instances are removed from WeakSet after GC""" + import gc + + # Create a bus that will be "dead" (no strong references) + EventBus(name='GCDeadBus') + gc.collect() # Force garbage collection + + # When we try to create a new bus with the same name, it should work + bus = EventBus(name='GCDeadBus') + assert bus.name == 'GCDeadBus' + + # The dead bus should have been removed from all_instances + names = [b.name for b in EventBus.all_instances if hasattr(b, 'name') and b.name == 'GCDeadBus'] + assert len(names) == 1 # Only the new one + + def test_concurrent_name_creation(self): + """Test that concurrent creation with same name generates warning and unique name""" + # This tests the edge case where two buses might be created nearly simultaneously + bus1 = EventBus(name='ConcurrentTest') + + # Even if we're 
in the middle of checking, the second one should get a unique name + with pytest.warns(UserWarning, match='EventBus with name "ConcurrentTest" already exists'): + bus2 = EventBus(name='ConcurrentTest') + + assert bus1.name == 'ConcurrentTest' + assert bus2.name.startswith('ConcurrentTest_') + assert bus2.name != bus1.name + + @pytest.mark.asyncio + async def test_unreferenced_buses_with_history_can_be_cleaned_without_instance_leak(self): + """ + Buses with populated history may outlive local scope while runloops are still active, + but they must be releasable via explicit cleanup without leaking all_instances. + """ + import gc + + class GcHistoryEvent(BaseEvent[str]): + pass + + baseline_instances = len(EventBus.all_instances) + refs: list[weakref.ReferenceType[EventBus]] = [] + + async def create_and_fill_bus(index: int) -> weakref.ReferenceType[EventBus]: + bus = EventBus(name=f'GCNoStopBus_{index}') + bus.on(GcHistoryEvent, lambda e: 'ok') + for _ in range(40): + await bus.emit(GcHistoryEvent()) + await bus.wait_until_idle() + return weakref.ref(bus) + + for i in range(30): + refs.append(await create_and_fill_bus(i)) + + # Encourage GC/finalization first (best effort without explicit stop()). + for _ in range(20): + gc.collect() + await asyncio.sleep(0.02) + + alive_buses = [ref() for ref in refs if ref() is not None] + still_live = [bus for bus in alive_buses if bus is not None] + + # Deterministically clean up anything still alive. + for bus in still_live: + await bus.stop(clear=True, timeout=0) + # Loop variable keeps a strong ref to the last bus in CPython. + if still_live: + del bus + del still_live + del alive_buses + + # Final GC and WeakSet purge. 
+ for _ in range(10): + gc.collect() + await asyncio.sleep(0.01) + _ = list(EventBus.all_instances) + + assert all(ref() is None for ref in refs), 'all buses should be collectable after cleanup' + assert len(EventBus.all_instances) <= baseline_instances + + @pytest.mark.asyncio + async def test_unreferenced_buses_with_history_are_collected_without_stop(self): + """ + Unreferenced buses should be collectable without explicit stop(clear=True), + even after processing events and populating history. + """ + import gc + + class GcImplicitEvent(BaseEvent[str]): + pass + + baseline_instances = len(EventBus.all_instances) + refs: list[weakref.ReferenceType[EventBus]] = [] + + async def create_and_fill_bus(index: int) -> weakref.ReferenceType[EventBus]: + bus = EventBus(name=f'GCImplicitNoStop_{index}') + bus.on(GcImplicitEvent, lambda e: 'ok') + for _ in range(30): + await bus.emit(GcImplicitEvent()) + await bus.wait_until_idle() + return weakref.ref(bus) + + for i in range(20): + refs.append(await create_and_fill_bus(i)) + + for _ in range(80): + gc.collect() + await asyncio.sleep(0.02) + if all(ref() is None for ref in refs): + break + + # Force WeakSet iteration to purge any dead refs. + _ = list(EventBus.all_instances) + + assert all(ref() is None for ref in refs), 'all unreferenced buses should be collected without stop()' + assert len(EventBus.all_instances) <= baseline_instances + + def test_subclass_registry_and_global_lock_are_collected_with_subclass(self): + """ + When a temporary EventBus subclass goes out of scope, its class-scoped + all_instances registry and global-serial lock should be collectable too. 
+ """ + subclass_ref = None + registry_ref = None + lock_ref = None + bus_ref = None + + def create_scoped_subclass() -> None: + class ScopedSubclassBus(EventBus): + pass + + bus = ScopedSubclassBus(name='ScopedSubclassBus', event_concurrency='global-serial') + nonlocal subclass_ref, registry_ref, lock_ref, bus_ref + subclass_ref = weakref.ref(ScopedSubclassBus) + registry_ref = weakref.ref(ScopedSubclassBus.all_instances) + lock_ref = weakref.ref(bus.event_global_serial_lock) + bus_ref = weakref.ref(bus) + + create_scoped_subclass() + assert subclass_ref is not None + assert registry_ref is not None + assert lock_ref is not None + assert bus_ref is not None + + for _ in range(500): + gc.collect() + if subclass_ref() is None and registry_ref() is None and lock_ref() is None and bus_ref() is None: + break + + assert bus_ref() is None, 'subclass bus instance should be collectable' + assert subclass_ref() is None, 'subclass type should be collectable' + assert registry_ref() is None, 'subclass all_instances registry should be collectable' + assert lock_ref() is None, 'subclass global-serial lock should be collectable' diff --git a/tests/test_eventbus_on_off.py b/tests/test_eventbus_on_off.py new file mode 100644 index 0000000..c8555d3 --- /dev/null +++ b/tests/test_eventbus_on_off.py @@ -0,0 +1,196 @@ +import inspect +from collections.abc import Callable, Coroutine +from typing import TYPE_CHECKING, Any, assert_type + +import pytest + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus +from bubus.event_handler import EventHandler, _normalize_handler_callable # pyright: ignore[reportPrivateUsage] + +if TYPE_CHECKING: + + class _RegistryTypingEvent(BaseEvent[str]): + pass + + def _typed_sync_handler(event: _RegistryTypingEvent) -> str: + return event.event_type + + async def _typed_async_handler(event: _RegistryTypingEvent) -> str: ... 
+ + _wrapped_sync = _normalize_handler_callable(_typed_sync_handler) + _wrapped_async = _normalize_handler_callable(_typed_async_handler) + assert_type(_wrapped_sync, Callable[[_RegistryTypingEvent], Coroutine[Any, Any, str]]) + assert_type(_wrapped_async, Callable[[_RegistryTypingEvent], Coroutine[Any, Any, str]]) + + +@pytest.mark.asyncio +async def test_on_stores_eventhandler_entry_and_index() -> None: + bus = EventBus(name='RegistryBus') + + def handler(event: BaseEvent[Any]) -> str: + return event.event_type + + entry = bus.on('RegistryEvent', handler) + + assert isinstance(entry, EventHandler) + assert entry.id is not None + assert entry.id in bus.handlers + assert bus.handlers[entry.id] is entry + assert 'RegistryEvent' in bus.handlers_by_key + assert entry.id in bus.handlers_by_key['RegistryEvent'] + + dispatched = bus.emit(BaseEvent(event_type='RegistryEvent')) + completed = await dispatched + assert entry.id in completed.event_results + assert completed.event_results[entry.id].handler.id == entry.id + + await bus.stop(clear=True) + + +@pytest.mark.asyncio +async def test_off_removes_by_callable_id_entry_or_all() -> None: + bus = EventBus(name='RegistryOffBus') + + def handler_a(event: BaseEvent[Any]) -> None: + return None + + def handler_b(event: BaseEvent[Any]) -> None: + return None + + def handler_c(event: BaseEvent[Any]) -> None: + return None + + entry_a = bus.on('RegistryEvent', handler_a) + entry_b = bus.on('RegistryEvent', handler_b) + entry_c = bus.on('RegistryEvent', handler_c) + assert entry_a.id and entry_b.id and entry_c.id + + bus.off('RegistryEvent', handler_a) + assert entry_a.id not in bus.handlers + assert entry_a.id not in bus.handlers_by_key['RegistryEvent'] + assert entry_b.id in bus.handlers + + bus.off('RegistryEvent', entry_b.id) + assert entry_b.id not in bus.handlers + assert entry_b.id not in bus.handlers_by_key['RegistryEvent'] + assert entry_c.id in bus.handlers + + bus.off('RegistryEvent', entry_c) + assert entry_c.id not in 
bus.handlers + assert 'RegistryEvent' not in bus.handlers_by_key + + bus.on('RegistryEvent', handler_a) + bus.on('RegistryEvent', handler_b) + bus.off('RegistryEvent') + assert 'RegistryEvent' not in bus.handlers_by_key + assert all(entry.event_pattern != 'RegistryEvent' for entry in bus.handlers.values()) + + await bus.stop(clear=True) + + +@pytest.mark.asyncio +async def test_on_normalizes_sync_handler_to_async_callable() -> None: + bus = EventBus(name='RegistryNormalizeBus') + + class RegistryNormalizeEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + calls: list[str] = [] + + def sync_handler(event: RegistryNormalizeEvent) -> str: + calls.append(event.event_id) + return 'normalized' + + entry = bus.on(RegistryNormalizeEvent, sync_handler) + + assert entry.handler is sync_handler + assert entry._handler_async is not None # pyright: ignore[reportPrivateUsage] + assert inspect.iscoroutinefunction(entry._handler_async) # pyright: ignore[reportPrivateUsage] + assert entry.handler_name.endswith('sync_handler') + + direct_result = await entry._handler_async(RegistryNormalizeEvent()) # pyright: ignore[reportPrivateUsage] + assert direct_result == 'normalized' + + dispatched = bus.emit(RegistryNormalizeEvent()) + completed = await dispatched + result = completed.event_results[entry.id] + + assert result.status == 'completed' + assert result.result == 'normalized' + assert len(calls) == 2 + + await bus.stop(clear=True) + + +@pytest.mark.asyncio +async def test_on_keeps_async_handlers_normalized_through_handler_async() -> None: + bus = EventBus(name='RegistryAsyncNormalizeBus') + + class RegistryAsyncNormalizeEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + calls: list[str] = [] + + async def async_handler(event: RegistryAsyncNormalizeEvent) -> str: + calls.append(event.event_id) + return 'async_normalized' + + entry = bus.on(RegistryAsyncNormalizeEvent, async_handler) + + assert entry.handler is async_handler + assert entry._handler_async is 
async_handler # pyright: ignore[reportPrivateUsage] + assert inspect.iscoroutinefunction(entry._handler_async) # pyright: ignore[reportPrivateUsage] + + direct_result = await entry._handler_async(RegistryAsyncNormalizeEvent()) # pyright: ignore[reportPrivateUsage] + assert direct_result == 'async_normalized' + + dispatched = bus.emit(RegistryAsyncNormalizeEvent()) + completed = await dispatched + result = completed.event_results[entry.id] + + assert result.status == 'completed' + assert result.result == 'async_normalized' + assert len(calls) == 2 + + await bus.stop(clear=True) + + +@pytest.mark.asyncio +async def test_handler_async_preserves_typed_arg_return_contracts_for_sync_handlers() -> None: + bus = EventBus(name='RegistryTypingSyncBus') + + class RegistryTypingEvent(BaseEvent[str]): + required_token: str + + def typed_sync_handler(event: RegistryTypingEvent) -> str: + return event.required_token + + entry = bus.on(RegistryTypingEvent, typed_sync_handler) + handler_async = entry._handler_async # pyright: ignore[reportPrivateUsage] + assert handler_async is not None + result = await handler_async(RegistryTypingEvent(required_token='sync')) + assert isinstance(result, str) + assert result == 'sync' + + await bus.stop(clear=True) + + +@pytest.mark.asyncio +async def test_handler_async_preserves_typed_arg_return_contracts_for_async_handlers() -> None: + bus = EventBus(name='RegistryTypingAsyncBus') + + class RegistryTypingEvent(BaseEvent[str]): + required_token: str + + async def typed_async_handler(event: RegistryTypingEvent) -> str: + return event.required_token + + entry = bus.on(RegistryTypingEvent, typed_async_handler) + handler_async = entry._handler_async # pyright: ignore[reportPrivateUsage] + assert handler_async is not None + result = await handler_async(RegistryTypingEvent(required_token='async')) + assert isinstance(result, str) + assert result == 'async' + + await bus.stop(clear=True) diff --git a/tests/test_eventbus_performance.py 
b/tests/test_eventbus_performance.py new file mode 100644 index 0000000..3809e44 --- /dev/null +++ b/tests/test_eventbus_performance.py @@ -0,0 +1,1435 @@ +import asyncio +import functools +import gc +import inspect +import logging +import math +import os +import time +from collections.abc import Callable +from contextlib import contextmanager +from typing import Any, Literal + +import psutil +import pytest + +import bubus.base_event as base_event_module +import bubus.event_bus as event_bus_module +from bubus import BaseEvent, EventBus, EventHandlerAbortedError, EventHandlerCancelledError, EventHandlerTimeoutError + +pytestmark = pytest.mark.timeout(120, method='thread') + + +@contextmanager +def suppress_bubus_warning_logs() -> Any: + """Reduce intentional timeout warning spam during stress scenarios.""" + + bubus_logger = logging.getLogger('bubus') + previous_level = bubus_logger.level + bubus_logger.setLevel(logging.ERROR) + try: + yield + finally: + bubus_logger.setLevel(previous_level) + + +def get_memory_usage_mb(): + """Get current process memory usage in MB""" + process = psutil.Process(os.getpid()) + return process.memory_info().rss / 1024 / 1024 + + +def percentile(values: list[float], q: float) -> float: + """Simple percentile helper without numpy dependency.""" + if not values: + return 0.0 + sorted_values = sorted(values) + pos = (len(sorted_values) - 1) * q + low = math.floor(pos) + high = math.ceil(pos) + if low == high: + return sorted_values[int(pos)] + return sorted_values[low] + (sorted_values[high] - sorted_values[low]) * (pos - low) + + +async def dispatch_and_measure( + bus: EventBus, + event_factory: Callable[[], BaseEvent[Any]], + total_events: int, + batch_size: int = 40, +) -> tuple[float, float, float, float, float]: + """ + Dispatch many events and return: + (throughput_events_per_sec, dispatch_p50_ms, dispatch_p95_ms, done_p50_ms, done_p95_ms) + """ + dispatch_latencies_ms: list[float] = [] + done_latencies_ms: list[float] = [] + 
pending: list[tuple[BaseEvent[Any], float]] = [] + + async def wait_one(item: tuple[BaseEvent[Any], float]) -> None: + event, t_dispatch_done = item + await event + done_latencies_ms.append((time.perf_counter() - t_dispatch_done) * 1000) + + start = time.perf_counter() + for _ in range(total_events): + t0 = time.perf_counter() + event = bus.emit(event_factory()) + dispatch_latencies_ms.append((time.perf_counter() - t0) * 1000) + pending.append((event, time.perf_counter())) + if len(pending) >= batch_size: + await asyncio.gather(*(wait_one(item) for item in pending)) + pending.clear() + + if pending: + await asyncio.gather(*(wait_one(item) for item in pending)) + await bus.wait_until_idle() + + elapsed = time.perf_counter() - start + throughput = total_events / max(elapsed, 1e-9) + return ( + throughput, + percentile(dispatch_latencies_ms, 0.50), + percentile(dispatch_latencies_ms, 0.95), + percentile(done_latencies_ms, 0.50), + percentile(done_latencies_ms, 0.95), + ) + + +async def run_mode_throughput_benchmark( + *, + event_handler_concurrency: Literal['serial', 'parallel'], + total_events: int = 5_000, + batch_size: int = 50, +) -> tuple[int, float]: + """Run a basic no-op throughput benchmark for one handler mode.""" + bus = EventBus( + name=f'ThroughputFloor_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + middlewares=[], + max_history_drop=True, + ) + + processed = 0 + + async def handler(event: SimpleEvent) -> None: + nonlocal processed + processed += 1 + + bus.on(SimpleEvent, handler) + + pending: list[BaseEvent[Any]] = [] + start = time.time() + try: + for _ in range(total_events): + pending.append(bus.emit(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + + await bus.wait_until_idle() + finally: + await bus.stop(timeout=0, clear=True) + + duration = time.time() - start + throughput = total_events / max(duration, 1e-9) 
+ return processed, throughput + + +async def run_io_fanout_benchmark( + *, + event_handler_concurrency: Literal['serial', 'parallel'], + total_events: int = 800, + handlers_per_event: int = 4, + sleep_seconds: float = 0.0015, + batch_size: int = 40, +) -> tuple[int, float]: + """Benchmark I/O-bound fanout to compare serial vs parallel handler mode.""" + bus = EventBus( + name=f'Fanout_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + middlewares=[], + max_history_drop=True, + ) + + handled = 0 + + for index in range(handlers_per_event): + + async def handler(event: SimpleEvent) -> None: + nonlocal handled + await asyncio.sleep(sleep_seconds) + handled += 1 + + handler.__name__ = f'fanout_handler_{index}' + bus.on(SimpleEvent, handler) + + pending: list[BaseEvent[Any]] = [] + start = time.time() + try: + for _ in range(total_events): + pending.append(bus.emit(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + + await bus.wait_until_idle() + finally: + await bus.stop(timeout=0, clear=True) + + duration = time.time() - start + return handled, duration + + +def throughput_floor_for_mode(event_handler_concurrency: Literal['serial', 'parallel']) -> int: + """ + Conservative per-mode floor to catch severe regressions while avoiding CI flakiness. + """ + if event_handler_concurrency == 'parallel': + return 500 + return 600 + + +def throughput_regression_floor( + first_run_throughput: float, + *, + min_fraction: float, + hard_floor: float, +) -> float: + """ + Scenario+mode regression threshold using same-run baseline + absolute safety floor. 
+ """ + return max(hard_floor, first_run_throughput * min_fraction) + + +class MethodProfiler: + """Lightweight monkeypatch profiler for selected class methods.""" + + def __init__(self) -> None: + self.stats: dict[str, dict[str, float]] = {} + self._restore: list[tuple[type[Any], str, Any]] = [] + + def instrument( + self, + owner: type[Any], + method_name_or_ref: str | Callable[..., Any], + label: str | None = None, + ) -> None: + if isinstance(method_name_or_ref, str): + method_name = method_name_or_ref + else: + method_name = getattr(method_name_or_ref, '__name__', '') + if not method_name: + raise ValueError('method_name_or_ref callable must define __name__') + original = getattr(owner, method_name) + metric_name = label or f'{owner.__name__}.{method_name}' + wrapped_method: Any + + if inspect.iscoroutinefunction(original): + + @functools.wraps(original) + async def wrapped_async(*args: Any, **kwargs: Any) -> Any: + started = time.perf_counter() + try: + return await original(*args, **kwargs) + finally: + elapsed = time.perf_counter() - started + metric = self.stats.setdefault(metric_name, {'calls': 0.0, 'total_s': 0.0}) + metric['calls'] += 1.0 + metric['total_s'] += elapsed + + wrapped_method = wrapped_async + else: + + @functools.wraps(original) + def wrapped_sync(*args: Any, **kwargs: Any) -> Any: + started = time.perf_counter() + try: + return original(*args, **kwargs) + finally: + elapsed = time.perf_counter() - started + metric = self.stats.setdefault(metric_name, {'calls': 0.0, 'total_s': 0.0}) + metric['calls'] += 1.0 + metric['total_s'] += elapsed + + wrapped_method = wrapped_sync + + self._restore.append((owner, method_name, original)) + setattr(owner, method_name, wrapped_method) + + def restore(self) -> None: + for owner, method_name, original in reversed(self._restore): + setattr(owner, method_name, original) + self._restore.clear() + + def top_lines(self, limit: int = 12) -> list[str]: + ranked = sorted(self.stats.items(), key=lambda item: 
item[1]['total_s'], reverse=True) + lines: list[str] = [] + for name, metric in ranked[:limit]: + calls = int(metric['calls']) + total_s = metric['total_s'] + avg_us = (total_s * 1_000_000.0) / max(calls, 1) + lines.append(f'{name}: calls={calls:,} total={total_s:.3f}s avg={avg_us:.1f}us') + return lines + + +async def run_contention_round( + *, + event_handler_concurrency: Literal['serial', 'parallel'], + bus_count: int = 10, + events_per_bus: int = 120, + batch_size: int = 20, +) -> dict[str, float]: + """ + Concurrently dispatch on many buses to stress global lock contention. + """ + buses = [ + EventBus( + name=f'LockContention_{i}_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + middlewares=[], + max_history_drop=True, + ) + for i in range(bus_count) + ] + counters = [0 for _ in range(bus_count)] + dispatch_latencies_ms: list[float] = [] + done_latencies_ms: list[float] = [] + + for index, bus in enumerate(buses): + + def make_handler(handler_index: int): + async def handler(event: SimpleEvent) -> None: + counters[handler_index] += 1 + + handler.__name__ = f'contention_handler_{handler_index}' + return handler + + bus.on(SimpleEvent, make_handler(index)) + + async def wait_batch(batch: list[tuple[BaseEvent[Any], float]]) -> None: + async def wait_one(item: tuple[BaseEvent[Any], float]) -> None: + event, dispatch_done_at = item + await event + done_latencies_ms.append((time.perf_counter() - dispatch_done_at) * 1000) + + await asyncio.gather(*(wait_one(item) for item in batch)) + + async def producer(bus: EventBus) -> None: + pending: list[tuple[BaseEvent[Any], float]] = [] + for _ in range(events_per_bus): + t0 = time.perf_counter() + event = bus.emit(SimpleEvent()) + dispatch_latencies_ms.append((time.perf_counter() - t0) * 1000) + pending.append((event, time.perf_counter())) + if len(pending) >= batch_size: + await wait_batch(pending) + pending.clear() + + if pending: + await wait_batch(pending) + await 
bus.wait_until_idle() + + total_events = bus_count * events_per_bus + start = time.perf_counter() + try: + await asyncio.gather(*(producer(bus) for bus in buses)) + finally: + await asyncio.gather(*(bus.stop(timeout=0, clear=True) for bus in buses)) + + duration = time.perf_counter() - start + return { + 'throughput': total_events / max(duration, 1e-9), + 'dispatch_p50_ms': percentile(dispatch_latencies_ms, 0.50), + 'dispatch_p95_ms': percentile(dispatch_latencies_ms, 0.95), + 'done_p50_ms': percentile(done_latencies_ms, 0.50), + 'done_p95_ms': percentile(done_latencies_ms, 0.95), + 'fairness_min': float(min(counters)), + 'fairness_max': float(max(counters)), + } + + +class SimpleEvent(BaseEvent): + """Simple event without Generic for performance testing""" + + pass + + +@pytest.mark.asyncio +async def test_20k_events_with_memory_control(): + """Test processing 20k events with no memory leaks""" + + # Record initial memory + gc.collect() + initial_memory = get_memory_usage_mb() + print(f'\nInitial memory: {initial_memory:.1f} MB') + + # Use bounded history with drop enabled to allow sustained flooding. + bus = EventBus(name='ManyEvents', middlewares=[], max_history_drop=True) + + print('EventBus settings:') + print(f' max_history_size: {bus.event_history.max_history_size}') + print(f' queue maxsize: {bus.pending_event_queue.maxsize if bus.pending_event_queue else "not created"}') + print('Starting event dispatch...') + + processed_count = 0 + + async def handler(event: SimpleEvent) -> None: + nonlocal processed_count + processed_count += 1 + + bus.on('SimpleEvent', handler) + + total_events = 20_000 # Reduced for faster tests + + start_time = time.time() + memory_samples: list[float] = [] + max_memory = initial_memory + + # Dispatch all events as fast as possible (naive flood). 
+ dispatched = 0 + pending_events: list[BaseEvent[Any]] = [] + + while dispatched < total_events: + event = bus.emit(SimpleEvent()) + pending_events.append(event) + dispatched += 1 + if dispatched <= 5: + print(f'Dispatched event {dispatched}') + + # Sample memory every 10k events + if dispatched % 10_000 == 0 and dispatched > 0: + gc.collect() + current_memory = get_memory_usage_mb() + memory_samples.append(current_memory) + max_memory = max(max_memory, current_memory) + elapsed = time.time() - start_time + rate = dispatched / elapsed + print( + f'Progress: {dispatched:,} events, ' + f'Memory: {current_memory:.1f} MB (+{current_memory - initial_memory:.1f} MB), ' + f'History: {len(bus.event_history)}, ' + f'Rate: {rate:.0f} events/sec' + ) + + # Wait for all remaining events to complete + if pending_events: + await asyncio.gather(*pending_events) + + # Final wait + await bus.wait_until_idle() + + duration = time.time() - start_time + + # Final memory check + gc.collect() + final_memory = get_memory_usage_mb() + memory_growth = final_memory - initial_memory + peak_growth = max_memory - initial_memory + + print('\nFinal Results:') + print(f'Processed: {processed_count:,} events') + print(f'Duration: {duration:.2f} seconds') + print(f'Rate: {processed_count / duration:,.0f} events/sec') + print(f'Initial memory: {initial_memory:.1f} MB') + print(f'Peak memory: {max_memory:.1f} MB (+{peak_growth:.1f} MB)') + print(f'Final memory: {final_memory:.1f} MB (+{memory_growth:.1f} MB)') + + # Debug: Check if dispatch pipeline still has work + queue_size = bus.pending_event_queue.qsize() if bus.pending_event_queue else 0 + print(f'DEBUG: Queue size: {queue_size}') + print(f'DEBUG: In-flight event ids: {len(bus.in_flight_event_ids)}') + + # Safely get event history size without iterating + try: + history_size = len(bus.event_history) + print(f'Event history size: {history_size} (capped at {bus.event_history.max_history_size})') + except Exception as e: + print(f'ERROR getting 
event history size: {type(e).__name__}: {e}') + + # Verify results + print('DEBUG: About to check processed_count assertion...') + assert processed_count == total_events, f'Only processed {processed_count} of {total_events}' + print('DEBUG: About to check duration assertion...') + assert duration < 360.0, f'Took {duration:.2f}s, should be < 360s' + + # Check memory usage stayed reasonable + print('DEBUG: About to check memory assertion...') + assert peak_growth < 300.0, f'Memory grew by {peak_growth:.1f} MB at peak, indicates memory leak' + + # Check event history is properly limited + print('DEBUG: About to check history size assertions...') + assert bus.event_history.max_history_size is not None + assert len(bus.event_history) <= bus.event_history.max_history_size, ( + f'Event history has {len(bus.event_history)} events, should be <= {bus.event_history.max_history_size}' + ) + + # Explicitly clean up the bus to prevent hanging + print('\nCleaning up EventBus...') + queue_size_before_stop = bus.pending_event_queue.qsize() if bus.pending_event_queue else 0 + print(f'Before stop - Queue size: {queue_size_before_stop}') + print(f'Before stop - In-flight event ids: {len(bus.in_flight_event_ids)}') + + await bus.stop(timeout=0, clear=True) + print('EventBus stopped successfully') + + +@pytest.mark.asyncio +async def test_hard_limit_enforcement(): + """Test that max_history_drop=False rejects dispatches at max_history_size.""" + bus = EventBus( + name='HardLimitTest', + max_history_size=100, + max_history_drop=False, + middlewares=[], + ) + + try: + # Create a slow handler to keep events pending + async def slow_handler(event: SimpleEvent) -> None: + await asyncio.sleep(0.5) # Reduced from 10s to 0.5s + + bus.on('SimpleEvent', slow_handler) + + # Try to dispatch more than the configured history limit + events_dispatched = 0 + errors = 0 + + for _ in range(200): + try: + bus.emit(SimpleEvent()) + events_dispatched += 1 + except RuntimeError as e: + if 'history limit 
reached' in str(e): + errors += 1 + else: + raise + + print(f'\nDispatched {events_dispatched} events') + print(f'Hit history-limit error {errors} times') + + # Should reject once limit is reached + assert bus.event_history.max_history_size is not None + assert events_dispatched <= bus.event_history.max_history_size + assert errors > 0 + + finally: + # Properly stop the bus to clean up pending tasks + await bus.stop(timeout=0, clear=True) # Don't wait, just force cleanup + + +@pytest.mark.asyncio +async def test_cleanup_prioritizes_pending(): + """Test that cleanup keeps pending events and removes completed ones""" + bus = EventBus(name='CleanupTest', max_history_size=10, max_history_drop=True, middlewares=[]) + + try: + # Process some events to completion + completed_events: list[BaseEvent[Any]] = [] + for _ in range(5): + event = bus.emit(BaseEvent(event_type='QuickEvent')) + completed_events.append(event) + + await asyncio.gather(*completed_events) + + # Add pending events with slow handler (reduced sleep time) + async def slow_handler(event: BaseEvent) -> None: + if event.event_type == 'SlowEvent': + await asyncio.sleep(0.5) # Reduced from 10s to 0.5s + + bus.on('*', slow_handler) + + pending_events: list[BaseEvent[Any]] = [] + for _ in range(10): + event = bus.emit(BaseEvent(event_type='SlowEvent')) + pending_events.append(event) + + # Give them time to start + await asyncio.sleep(0.1) + + # Check history - should prioritize keeping pending events + history_types: dict[str, int] = {} + for event in bus.event_history.values(): + status = event.event_status + history_types[status] = history_types.get(status, 0) + 1 + + print('\nHistory after cleanup:') + print(f' Total: {len(bus.event_history)} (max: {bus.event_history.max_history_size})') + print(f' By status: {history_types}') + + # Should have removed completed events to make room for pending + assert bus.event_history.max_history_size is not None + assert ( + len(bus.event_history) <= 
bus.event_history.max_history_size * 1.2 + ) # allow for some overhead to avoid frequent gc pausing + assert history_types.get('pending', 0) + history_types.get('started', 0) >= 5 + + finally: + # Properly stop the bus to clean up pending tasks + await bus.stop(timeout=0, clear=True) # Don't wait, just force cleanup + + +@pytest.mark.asyncio +async def test_ephemeral_buses_with_forwarding_churn(): + """ + Closest Python equivalent to request-scoped bus churn: + create short-lived buses, forward between them, process events, then clear. + """ + total_bus_pairs = 60 + events_per_pair = 20 + total_events = total_bus_pairs * events_per_pair + initial_instances = len(EventBus.all_instances) + + handled_a = 0 + handled_b = 0 + + start = time.time() + + for idx in range(total_bus_pairs): + bus_a = EventBus(name=f'EphemeralA_{idx}_{os.getpid()}', middlewares=[], max_history_drop=True) + bus_b = EventBus(name=f'EphemeralB_{idx}_{os.getpid()}', middlewares=[], max_history_drop=True) + + async def handler_a(event: SimpleEvent) -> None: + nonlocal handled_a + handled_a += 1 + + async def handler_b(event: SimpleEvent) -> None: + nonlocal handled_b + handled_b += 1 + + bus_a.on(SimpleEvent, handler_a) + bus_b.on(SimpleEvent, handler_b) + bus_a.on('*', bus_b.emit) + + try: + pending = [bus_a.emit(SimpleEvent()) for _ in range(events_per_pair)] + await asyncio.gather(*pending) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + assert ( + bus_a.event_history.max_history_size is None or len(bus_a.event_history) <= bus_a.event_history.max_history_size + ) + assert ( + bus_b.event_history.max_history_size is None or len(bus_b.event_history) <= bus_b.event_history.max_history_size + ) + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + + duration = time.time() - start + gc.collect() + + assert handled_a == total_events + assert handled_b == total_events + assert len(EventBus.all_instances) <= initial_instances + assert 
duration < 180.0, f'Ephemeral bus churn took too long: {duration:.2f}s' + + +@pytest.mark.asyncio +async def test_forwarding_queue_jump_timeout_mix_stays_stable(): + """ + Stress a mixed path in Python: + parent handler awaits forwarded child events, with intermittent child timeouts. + """ + + class MixedParentEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + class MixedChildEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.05 + + history_limit = 500 + total_iterations = 300 + + bus_a = EventBus(name='MixedPathA', max_history_size=history_limit, max_history_drop=True, middlewares=[]) + bus_b = EventBus(name='MixedPathB', max_history_size=history_limit, max_history_drop=True, middlewares=[]) + + parent_handled = 0 + child_handled = 0 + child_events: list[MixedChildEvent] = [] + + async def child_handler(event: MixedChildEvent) -> str: + nonlocal child_handled + child_handled += 1 + if event.iteration % 7 == 0: + await asyncio.sleep(0.01) + else: + await asyncio.sleep(0.0005) + return 'child_done' + + async def parent_handler(event: MixedParentEvent) -> str: + nonlocal parent_handled + parent_handled += 1 + + child_timeout = 0.001 if event.iteration % 7 == 0 else 0.05 + child = bus_a.emit(MixedChildEvent(iteration=event.iteration, event_timeout=child_timeout)) + bus_b.emit(child) + child_events.append(child) + await child + return 'parent_done' + + bus_a.on(MixedParentEvent, parent_handler) + bus_b.on(MixedChildEvent, child_handler) + + start = time.time() + try: + with suppress_bubus_warning_logs(): + for i in range(total_iterations): + await bus_a.emit(MixedParentEvent(iteration=i)) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + + duration = time.time() - start + + assert parent_handled == total_iterations + assert child_handled == total_iterations + timeout_count = sum( + 1 + for child in child_events 
+ if any( + isinstance( + result.error, + (TimeoutError, EventHandlerTimeoutError, EventHandlerAbortedError, EventHandlerCancelledError), + ) + for result in child.event_results.values() + ) + ) + assert timeout_count > 0 + assert len(bus_a.event_history) <= history_limit + assert len(bus_b.event_history) <= history_limit + assert duration < 180.0, f'Mixed forwarding/queue-jump/timeout path took too long: {duration:.2f}s' + + +@pytest.mark.asyncio +async def test_history_bound_is_strict_after_idle(): + """After steady-state processing, history should stay within max_history_size.""" + bus = EventBus(name='StrictHistoryBound', max_history_size=25, max_history_drop=True, middlewares=[]) + + async def handler(event: SimpleEvent) -> None: + return None + + bus.on(SimpleEvent, handler) + + try: + for _ in range(200): + await bus.emit(SimpleEvent()) + + await bus.wait_until_idle() + assert len(bus.event_history) <= 25 + finally: + await bus.stop(timeout=0, clear=True) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def test_basic_throughput_floor_regression_guard(event_handler_concurrency: Literal['serial', 'parallel']): + """ + Throughput regression guard across Python's handler concurrency modes. + Keeps threshold conservative to avoid CI flakiness while still catching + severe slowdowns. 
+ """ + processed, rate = await run_mode_throughput_benchmark(event_handler_concurrency=event_handler_concurrency) + + assert processed == 5_000 + minimum_rate = throughput_floor_for_mode(event_handler_concurrency) + mode = event_handler_concurrency + assert rate >= minimum_rate, f'{mode} throughput regression: {rate:.0f} events/sec (expected >= {minimum_rate} events/sec)' + + +@pytest.mark.asyncio +async def test_event_handler_concurrency_mode_improves_io_bound_fanout(): + """ + For I/O-bound workloads with multiple handlers per event, parallel mode should + provide a meaningful speedup versus serial mode. + """ + serial_handled, serial_duration = await run_io_fanout_benchmark(event_handler_concurrency='serial') + parallel_handled, parallel_duration = await run_io_fanout_benchmark(event_handler_concurrency='parallel') + + expected_total = 800 * 4 + assert serial_handled == expected_total + assert parallel_handled == expected_total + assert parallel_duration < serial_duration * 0.8, ( + f'Expected parallel handler mode to be faster for I/O fanout; ' + f'serial={serial_duration:.2f}s parallel={parallel_duration:.2f}s' + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def test_forwarding_throughput_floor_across_modes(event_handler_concurrency: Literal['serial', 'parallel']): + """ + Regression guard for forwarding path in both handler execution modes. 
+ """ + source_bus = EventBus( + name=f'ForwardSource_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + middlewares=[], + max_history_drop=True, + ) + target_bus = EventBus( + name=f'ForwardTarget_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + middlewares=[], + max_history_drop=True, + ) + + handled = 0 + + async def sink_handler(event: SimpleEvent) -> None: + nonlocal handled + handled += 1 + + source_bus.on('*', target_bus.emit) + target_bus.on(SimpleEvent, sink_handler) + + total_events = 3_000 + pending: list[BaseEvent[Any]] = [] + batch_size = 40 + start = time.time() + try: + for _ in range(total_events): + pending.append(source_bus.emit(SimpleEvent())) + if len(pending) >= batch_size: + await asyncio.gather(*pending) + pending.clear() + + if pending: + await asyncio.gather(*pending) + await source_bus.wait_until_idle() + await target_bus.wait_until_idle() + finally: + await source_bus.stop(timeout=0, clear=True) + await target_bus.stop(timeout=0, clear=True) + + duration = time.time() - start + throughput = total_events / max(duration, 1e-9) + floor = 200 + + assert handled == total_events + mode = event_handler_concurrency + assert throughput >= floor, ( + f'{mode} forwarding throughput regression: {throughput:.0f} events/sec (expected >= {floor} events/sec)' + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def test_global_lock_contention_multi_bus_matrix(event_handler_concurrency: Literal['serial', 'parallel']): + """ + High-contention benchmark: many buses dispatching concurrently under global lock. 
+ """ + phase1 = await run_contention_round(event_handler_concurrency=event_handler_concurrency) + phase2 = await run_contention_round(event_handler_concurrency=event_handler_concurrency) + + expected_per_bus = 120.0 + hard_floor = 120.0 + regression_floor = throughput_regression_floor( + phase1['throughput'], + min_fraction=0.55, + hard_floor=90.0, + ) + + assert phase1['fairness_min'] == expected_per_bus + assert phase1['fairness_max'] == expected_per_bus + assert phase2['fairness_min'] == expected_per_bus + assert phase2['fairness_max'] == expected_per_bus + assert phase1['throughput'] >= hard_floor, ( + f'lock-contention throughput too low: {phase1["throughput"]:.0f} events/sec (expected >= {hard_floor:.0f})' + ) + assert phase2['throughput'] >= regression_floor, ( + f'lock-contention regression: phase1={phase1["throughput"]:.0f} ' + f'phase2={phase2["throughput"]:.0f} ' + f'(required >= {regression_floor:.0f})' + ) + assert phase2['dispatch_p95_ms'] < 75.0 + assert phase2['done_p95_ms'] < 750.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'handlers_per_event', + [10, 30], + ids=['fanout_10_handlers', 'fanout_30_handlers'], +) +async def test_event_handler_concurrency_mode_scales_with_high_fanout(handlers_per_event: int): + """ + High fanout benchmark to catch regressions in parallel handler scheduling. 
+ """ + serial_handled, serial_duration = await run_io_fanout_benchmark( + event_handler_concurrency='serial', + total_events=400, + handlers_per_event=handlers_per_event, + sleep_seconds=0.001, + batch_size=25, + ) + parallel_handled, parallel_duration = await run_io_fanout_benchmark( + event_handler_concurrency='parallel', + total_events=400, + handlers_per_event=handlers_per_event, + sleep_seconds=0.001, + batch_size=25, + ) + + expected_total = 400 * handlers_per_event + speedup = serial_duration / max(parallel_duration, 1e-9) + minimum_speedup = 1.2 if handlers_per_event == 10 else 1.5 + + assert serial_handled == expected_total + assert parallel_handled == expected_total + assert speedup >= minimum_speedup, ( + f'Parallel fanout speedup too small for {handlers_per_event} handlers/event: ' + f'{speedup:.2f}x (expected >= {minimum_speedup:.2f}x)' + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def test_queue_jump_perf_matrix_by_mode(event_handler_concurrency: Literal['serial', 'parallel']): + """ + Queue-jump throughput/latency matrix (parent awaits child on same bus) by mode. 
+ """ + + class QueueJumpParentEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + class QueueJumpChildEvent(BaseEvent): + iteration: int = 0 + event_timeout: float | None = 0.2 + + bus = EventBus( + name=f'QueueJump_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + middlewares=[], + max_history_drop=True, + ) + + parent_count = 0 + child_count = 0 + phase_counter = 0 + + async def child_handler(event: QueueJumpChildEvent) -> None: + nonlocal child_count + child_count += 1 + await asyncio.sleep(0.0005) + + async def parent_handler(event: QueueJumpParentEvent) -> None: + nonlocal parent_count + parent_count += 1 + child = bus.emit(QueueJumpChildEvent(iteration=event.iteration)) + await child + + bus.on(QueueJumpParentEvent, parent_handler) + bus.on(QueueJumpChildEvent, child_handler) + + def parent_factory() -> QueueJumpParentEvent: + nonlocal phase_counter + event = QueueJumpParentEvent(iteration=phase_counter) + phase_counter += 1 + return event + + try: + phase1 = await dispatch_and_measure(bus, parent_factory, total_events=500, batch_size=20) + phase2 = await dispatch_and_measure(bus, parent_factory, total_events=500, batch_size=20) + finally: + await bus.stop(timeout=0, clear=True) + + hard_floor = 60.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.50, hard_floor=50.0) + + assert parent_count == 1_000 + assert child_count == 1_000 + assert phase1[0] >= hard_floor, f'queue-jump throughput too low: {phase1[0]:.0f} events/sec (expected >= {hard_floor:.0f})' + assert phase2[0] >= regression_floor, ( + f'queue-jump regression: phase1={phase1[0]:.0f} phase2={phase2[0]:.0f} (required >= {regression_floor:.0f})' + ) + assert phase2[2] < 45.0 + assert phase2[4] < 360.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def 
test_forwarding_chain_perf_matrix_by_mode(event_handler_concurrency: Literal['serial', 'parallel']): + """ + Forwarding chain A -> B -> C throughput/latency matrix by mode. + """ + source_bus = EventBus( + name=f'ChainSource_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + max_history_size=120, + middlewares=[], + max_history_drop=True, + ) + middle_bus = EventBus( + name=f'ChainMiddle_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + max_history_size=120, + middlewares=[], + max_history_drop=True, + ) + sink_bus = EventBus( + name=f'ChainSink_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + max_history_size=120, + middlewares=[], + max_history_drop=True, + ) + + sink_count = 0 + + async def sink_handler(event: SimpleEvent) -> None: + nonlocal sink_count + sink_count += 1 + + async def forward_to_middle(event: BaseEvent[Any]) -> None: + while True: + try: + middle_bus.emit(event) + return + except asyncio.QueueFull: + await asyncio.sleep(0) + except RuntimeError as exc: + if 'history limit reached' not in str(exc): + raise + await asyncio.sleep(0) + + async def forward_to_sink(event: BaseEvent[Any]) -> None: + while True: + try: + sink_bus.emit(event) + return + except asyncio.QueueFull: + await asyncio.sleep(0) + except RuntimeError as exc: + if 'history limit reached' not in str(exc): + raise + await asyncio.sleep(0) + + source_bus.on('*', forward_to_middle) + middle_bus.on('*', forward_to_sink) + sink_bus.on(SimpleEvent, sink_handler) + + try: + phase1 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=500, batch_size=5) + phase2 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=500, batch_size=5) + await source_bus.wait_until_idle() + await middle_bus.wait_until_idle() + await sink_bus.wait_until_idle() + finally: + await source_bus.stop(timeout=0, clear=True) + await middle_bus.stop(timeout=0, clear=True) + await 
sink_bus.stop(timeout=0, clear=True) + + hard_floor = 35.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.45, hard_floor=20.0) + + assert sink_count == 1_000 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 120.0 + assert phase2[4] < 1050.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def test_timeout_churn_perf_matrix_by_mode(event_handler_concurrency: Literal['serial', 'parallel']): + """ + Timeout-heavy phase followed by healthy phase should keep throughput healthy. + """ + + class TimeoutChurnEvent(BaseEvent): + mode: str = 'slow' + iteration: int = 0 + event_timeout: float | None = 0.01 + + bus = EventBus( + name=f'TimeoutChurn_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + middlewares=[], + max_history_drop=True, + ) + + timeout_phase_events: list[TimeoutChurnEvent] = [] + recovery_phase_events: list[TimeoutChurnEvent] = [] + timeout_counter = 0 + recovery_counter = 0 + + async def handler(event: TimeoutChurnEvent) -> None: + if event.mode == 'slow': + await asyncio.sleep(0.006) + else: + await asyncio.sleep(0) + + bus.on(TimeoutChurnEvent, handler) + + def timeout_factory() -> TimeoutChurnEvent: + nonlocal timeout_counter + is_slow = (timeout_counter % 3) != 0 + event = TimeoutChurnEvent( + mode='slow' if is_slow else 'fast', + iteration=timeout_counter, + event_timeout=0.001 if is_slow else 0.02, + ) + timeout_phase_events.append(event) + timeout_counter += 1 + return event + + def recovery_factory() -> TimeoutChurnEvent: + nonlocal recovery_counter + event = TimeoutChurnEvent( + mode='fast', + iteration=10_000 + recovery_counter, + # Keep recovery timeout comfortably above scheduler jitter so any + # recovery error signals a real lock/cancellation bug, not timing noise. 
+ event_timeout=0.05, + ) + recovery_phase_events.append(event) + recovery_counter += 1 + return event + + try: + with suppress_bubus_warning_logs(): + timeout_phase = await dispatch_and_measure(bus, timeout_factory, total_events=180, batch_size=20) + recovery_phase = await dispatch_and_measure(bus, recovery_factory, total_events=500, batch_size=25) + finally: + await bus.stop(timeout=0, clear=True) + + timeout_count = sum( + 1 + for event in timeout_phase_events + if event.mode == 'slow' + and any( + isinstance( + result.error, + (TimeoutError, EventHandlerTimeoutError, EventHandlerAbortedError, EventHandlerCancelledError), + ) + for result in event.event_results.values() + ) + ) + recovery_errors = sum( + 1 for event in recovery_phase_events if any(result.error is not None for result in event.event_results.values()) + ) + hard_floor = 120.0 + regression_floor = throughput_regression_floor( + timeout_phase[0], + min_fraction=0.45, + hard_floor=100.0, + ) + + assert timeout_count > 0 + assert recovery_errors == 0 + assert recovery_phase[0] >= hard_floor + assert recovery_phase[0] >= regression_floor + assert recovery_phase[2] < 36.0 + assert recovery_phase[4] < 70.0 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def test_memory_envelope_by_mode_for_capped_history(event_handler_concurrency: Literal['serial', 'parallel']): + """ + Mode-specific memory slope/envelope check with capped history. 
+ """ + bus = EventBus( + name=f'MemoryEnvelope_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + max_history_size=60, + middlewares=[], + max_history_drop=True, + ) + + async def handler(event: SimpleEvent) -> None: + return None + + bus.on(SimpleEvent, handler) + + gc.collect() + before_mb = get_memory_usage_mb() + + try: + metrics = await dispatch_and_measure(bus, SimpleEvent, total_events=6_000, batch_size=40) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + retained = len(bus.event_history) + finally: + await bus.stop(timeout=0, clear=True) + + done_delta = done_mb - before_mb + gc_delta = gc_mb - before_mb + per_dispatched_kb = (max(done_delta, 0.0) * 1024.0) / 6_000 + per_retained_mb = max(gc_delta, 0.0) / max(retained, 1) + done_budget = 130.0 if event_handler_concurrency == 'parallel' else 110.0 + gc_budget = 70.0 if event_handler_concurrency == 'parallel' else 60.0 + + assert retained <= 60 + assert metrics[0] >= 450.0 + assert metrics[2] < 30.0 + assert metrics[4] < 60.0 + assert done_delta < done_budget + assert gc_delta < gc_budget + assert per_dispatched_kb < 32.0 + assert per_retained_mb < 1.5 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def test_max_history_none_single_bus_stress_matrix(event_handler_concurrency: Literal['serial', 'parallel']): + """ + Unlimited-history mode stress for single bus: throughput + memory envelope. 
+ """ + bus = EventBus( + name=f'UnlimitedSingle_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + max_history_size=None, + middlewares=[], + max_history_drop=True, + ) + processed = 0 + + async def handler(event: SimpleEvent) -> None: + nonlocal processed + processed += 1 + + bus.on(SimpleEvent, handler) + + gc.collect() + before_mb = get_memory_usage_mb() + try: + phase1 = await dispatch_and_measure(bus, SimpleEvent, total_events=1_500, batch_size=120) + phase2 = await dispatch_and_measure(bus, SimpleEvent, total_events=1_500, batch_size=120) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + history_size = len(bus.event_history) + finally: + await bus.stop(timeout=0, clear=True) + + done_delta = done_mb - before_mb + gc_delta = gc_mb - before_mb + per_event_mb = max(gc_delta, 0.0) / 3_000 + hard_floor = 220.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.55, hard_floor=170.0) + + assert processed == 3_000 + assert history_size == 3_000 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 36.0 + assert phase2[4] < 240.0 + assert done_delta < 260.0 + assert gc_delta < 220.0 + assert per_event_mb < 0.08 + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + 'event_handler_concurrency', + ['serial', 'parallel'], + ids=['serial_handler_concurrency', 'parallel_handler_concurrency'], +) +async def test_max_history_none_forwarding_chain_stress_matrix(event_handler_concurrency: Literal['serial', 'parallel']): + """ + Unlimited-history forwarding chain (A -> B -> C) stress by mode. 
+ """ + source_bus = EventBus( + name=f'UnlimitedChainSource_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + max_history_size=None, + middlewares=[], + max_history_drop=True, + ) + middle_bus = EventBus( + name=f'UnlimitedChainMiddle_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + max_history_size=None, + middlewares=[], + max_history_drop=True, + ) + sink_bus = EventBus( + name=f'UnlimitedChainSink_{event_handler_concurrency}', + event_handler_concurrency=event_handler_concurrency, + max_history_size=None, + middlewares=[], + max_history_drop=True, + ) + + sink_count = 0 + + async def sink_handler(event: SimpleEvent) -> None: + nonlocal sink_count + sink_count += 1 + + source_bus.on('*', middle_bus.emit) + middle_bus.on('*', sink_bus.emit) + sink_bus.on(SimpleEvent, sink_handler) + + gc.collect() + before_mb = get_memory_usage_mb() + try: + phase1 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=900, batch_size=100) + phase2 = await dispatch_and_measure(source_bus, SimpleEvent, total_events=900, batch_size=100) + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + source_hist = len(source_bus.event_history) + middle_hist = len(middle_bus.event_history) + sink_hist = len(sink_bus.event_history) + finally: + await source_bus.stop(timeout=0, clear=True) + await middle_bus.stop(timeout=0, clear=True) + await sink_bus.stop(timeout=0, clear=True) + + gc_delta = gc_mb - before_mb + done_delta = done_mb - before_mb + hard_floor = 170.0 + regression_floor = throughput_regression_floor(phase1[0], min_fraction=0.55, hard_floor=130.0) + + assert sink_count == 1_800 + assert source_hist == 1_800 + assert middle_hist == 1_800 + assert sink_hist == 1_800 + assert phase1[0] >= hard_floor + assert phase2[0] >= regression_floor + assert phase2[2] < 45.0 + assert phase2[4] < 300.0 + assert done_delta < 320.0 + assert gc_delta < 280.0 + + +@pytest.mark.asyncio 
+async def test_perf_debug_hot_path_breakdown() -> None: + """ + Debug-only perf test: + profiles key hot-path methods to confirm where time is spent before optimizing. + """ + profiler = MethodProfiler() + instrumented: list[tuple[type[Any], str]] = [ + (event_bus_module.ReentrantLock, '__aenter__'), + (event_bus_module.ReentrantLock, '__aexit__'), + (event_bus_module.EventBus, '_get_handlers_for_event'), + (event_bus_module.EventBus, '_process_event'), + (event_bus_module.EventBus, '_run_handler'), + (event_bus_module.EventHistory, 'trim_event_history'), + (base_event_module.BaseEvent, '_create_pending_handler_results'), + ] + for owner, method_ref in instrumented: + profiler.instrument(owner, method_ref) + + class DebugParentEvent(BaseEvent): + idx: int = 0 + event_timeout: float | None = 0.2 + + class DebugChildEvent(BaseEvent): + idx: int = 0 + event_timeout: float | None = 0.2 + + bus_a = EventBus(name='PerfDebugA', middlewares=[], max_history_drop=True) + bus_b = EventBus(name='PerfDebugB', middlewares=[], max_history_drop=True) + + forwarded_simple_count = 0 + child_count = 0 + parent_counter = 0 + + async def forwarded_simple_handler(event: SimpleEvent) -> None: + nonlocal forwarded_simple_count + forwarded_simple_count += 1 + + async def child_handler(event: DebugChildEvent) -> None: + nonlocal child_count + child_count += 1 + await asyncio.sleep(0) + + async def parent_handler(event: DebugParentEvent) -> None: + child = bus_a.emit(DebugChildEvent(idx=event.idx)) + bus_b.emit(child) + await child + + bus_a.on('*', bus_b.emit) + bus_b.on(SimpleEvent, forwarded_simple_handler) + bus_a.on(DebugParentEvent, parent_handler) + bus_b.on(DebugChildEvent, child_handler) + + def parent_factory() -> DebugParentEvent: + nonlocal parent_counter + event = DebugParentEvent(idx=parent_counter) + parent_counter += 1 + return event + + gc.collect() + before_mb = get_memory_usage_mb() + start = time.perf_counter() + try: + simple_metrics = await dispatch_and_measure(bus_a, 
SimpleEvent, total_events=2_000, batch_size=50) + parent_metrics = await dispatch_and_measure(bus_a, parent_factory, total_events=600, batch_size=20) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + finally: + await bus_a.stop(timeout=0, clear=True) + await bus_b.stop(timeout=0, clear=True) + profiler.restore() + elapsed = time.perf_counter() - start + done_mb = get_memory_usage_mb() + gc.collect() + gc_mb = get_memory_usage_mb() + + print('\n[perf-debug] scenario=global_fifo_forwarding_queue_jump') + print(f'[perf-debug] elapsed_s={elapsed:.3f}') + print( + f'[perf-debug] simple throughput={simple_metrics[0]:.0f}/s dispatch_p95={simple_metrics[2]:.3f}ms done_p95={simple_metrics[4]:.3f}ms' + ) + print( + f'[perf-debug] queue_jump throughput={parent_metrics[0]:.0f}/s dispatch_p95={parent_metrics[2]:.3f}ms done_p95={parent_metrics[4]:.3f}ms' + ) + print(f'[perf-debug] memory_mb before={before_mb:.1f} done={done_mb:.1f} gc={gc_mb:.1f}') + print(f'[perf-debug] forwarded_simple_count={forwarded_simple_count:,} child_count={child_count:,}') + print('[perf-debug] hot_path_top_total_time:') + for line in profiler.top_lines(limit=14): + print(f'[perf-debug] {line}') + + assert forwarded_simple_count == 2_000 + assert child_count == 600 diff --git a/tests/test_eventbus_retry_integration.py b/tests/test_eventbus_retry_integration.py new file mode 100644 index 0000000..1001f38 --- /dev/null +++ b/tests/test_eventbus_retry_integration.py @@ -0,0 +1,404 @@ +import asyncio +import time + +from bubus import BaseEvent, EventBus +from bubus.retry import retry + + +class TestRetryWithEventBus: + """Test @retry decorator with EventBus handlers.""" + + async def test_retry_decorator_on_eventbus_handler(self): + """Test that @retry decorator works correctly when applied to EventBus handlers.""" + handler_calls: list[tuple[str, float]] = [] + + class TestEvent(BaseEvent[str]): + """Simple test event.""" + + message: str + + bus = EventBus(name='test_retry_bus') + + 
@retry( + max_attempts=3, + retry_after=0.1, + timeout=1.0, + semaphore_limit=1, + semaphore_scope='global', + ) + async def retrying_handler(event: TestEvent) -> str: + call_time = time.time() + handler_calls.append(('called', call_time)) + + if len(handler_calls) < 3: + raise ValueError(f'Attempt {len(handler_calls)} failed') + + return f'Success: {event.message}' + + bus.on('TestEvent', retrying_handler) + + event = TestEvent(message='Hello retry!') + completed_event = await bus.emit(event) + await bus.wait_until_idle(timeout=5) + + assert len(handler_calls) == 3, f'Expected 3 attempts, got {len(handler_calls)}' + for i in range(1, len(handler_calls)): + delay = handler_calls[i][1] - handler_calls[i - 1][1] + assert delay >= 0.08, f'Retry delay {i} was {delay:.3f}s, expected >= 0.08s' + + assert completed_event.event_status == 'completed' + handler_result = await completed_event.event_result() + assert handler_result == 'Success: Hello retry!' + + await bus.stop() + + async def test_retry_with_semaphore_on_multiple_handlers(self): + """Test @retry decorator with semaphore limiting concurrent handler executions.""" + active_handlers: list[int] = [] + max_concurrent = 0 + handler_results: dict[int, list[tuple[str, float]]] = {1: [], 2: [], 3: [], 4: []} + + class WorkEvent(BaseEvent[str]): + """Event that triggers work.""" + + work_id: int + + bus = EventBus(name='test_concurrent_bus', event_handler_concurrency='parallel') + + def create_handler(handler_id: int): + @retry( + max_attempts=1, + timeout=5.0, + semaphore_limit=2, + semaphore_name='test_handler_sem', + semaphore_scope='global', + ) + async def limited_handler(event: WorkEvent) -> str: + nonlocal max_concurrent + active_handlers.append(handler_id) + handler_results[handler_id].append(('started', time.time())) + + current_concurrent = len(active_handlers) + max_concurrent = max(max_concurrent, current_concurrent) + await asyncio.sleep(0.2) + + active_handlers.remove(handler_id) + 
handler_results[handler_id].append(('completed', time.time())) + return f'Handler {handler_id} processed work {event.work_id}' + + limited_handler.__name__ = f'limited_handler_{handler_id}' + return limited_handler + + for i in range(1, 5): + handler = create_handler(i) + bus.on('WorkEvent', handler) + + event = WorkEvent(work_id=1) + await bus.emit(event) + await bus.wait_until_idle(timeout=3) + + assert max_concurrent == 2, f'Max concurrent was {max_concurrent}, expected exactly 2 with semaphore_limit=2' + for handler_id in range(1, 5): + assert len(handler_results[handler_id]) == 2, f'Handler {handler_id} should have started and completed' + + await bus.stop() + + async def test_retry_timeout_with_eventbus_handler(self): + """Test that retry timeout works correctly with EventBus handlers.""" + + class TimeoutEvent(BaseEvent[str]): + """Event for timeout testing.""" + + test_id: str + event_timeout: float | None = 1 + + bus = EventBus(name='test_timeout_bus') + handler_started = False + + @retry( + max_attempts=1, + timeout=0.2, + ) + async def wrapped_handler(event: TimeoutEvent) -> str: + nonlocal handler_started + handler_started = True + await asyncio.sleep(5) + return 'Should not reach here' + + bus.on(TimeoutEvent, wrapped_handler) + + event = TimeoutEvent(test_id='7ebbd9f4-755a-7f13-828a-183dfe2d4302') + await bus.emit(event) + await bus.wait_until_idle(timeout=2) + + assert handler_started, 'Handler should have started' + assert len(event.event_results) == 1 + result = next(iter(event.event_results.values())) + assert result.status == 'error' + assert result.error is not None + assert isinstance(result.error, TimeoutError) + + await bus.stop() + + async def test_retry_with_event_type_filter(self): + """Test retry decorator with specific exception types.""" + + class RetryTestEvent(BaseEvent[str]): + """Event for testing retry on specific exceptions.""" + + attempt_limit: int + + bus = EventBus(name='test_exception_filter_bus') + attempt_count = 0 + + 
@retry( + max_attempts=4, + retry_after=0.05, + timeout=1.0, + retry_on_errors=[ValueError, RuntimeError], + ) + async def selective_retry_handler(event: RetryTestEvent) -> str: + nonlocal attempt_count + attempt_count += 1 + + if attempt_count == 1: + raise ValueError('This should be retried') + if attempt_count == 2: + raise RuntimeError('This should also be retried') + if attempt_count == 3: + raise TypeError('This should NOT be retried') + + return 'Success' + + bus.on('RetryTestEvent', selective_retry_handler) + + event = RetryTestEvent(attempt_limit=3) + await bus.emit(event) + await bus.wait_until_idle(timeout=2) + + assert attempt_count == 3, f'Expected 3 attempts, got {attempt_count}' + handler_id = list(event.event_results.keys())[0] + result = event.event_results[handler_id] + assert result.status == 'error' + assert isinstance(result.error, TypeError) + assert 'This should NOT be retried' in str(result.error) + + await bus.stop() + + async def test_retry_decorated_method_class_scope_serializes_across_instances(self): + """Class scope semaphore should serialize bound method handlers across instances.""" + + class ScopeClassEvent(BaseEvent[str]): + pass + + bus = EventBus(name='test_scope_class_bus', event_handler_concurrency='parallel') + active = 0 + max_active = 0 + + class SomeService: + @retry( + max_attempts=1, + semaphore_scope='class', + semaphore_limit=1, + semaphore_name='on_scope_class_event', + ) + async def on_scope_class_event(self, _event: ScopeClassEvent) -> str: + nonlocal active, max_active + active += 1 + max_active = max(max_active, active) + await asyncio.sleep(0.05) + active -= 1 + return 'ok' + + service_a = SomeService() + service_b = SomeService() + bus.on(ScopeClassEvent, service_a.on_scope_class_event) + bus.on(ScopeClassEvent, service_b.on_scope_class_event) + + event = await bus.emit(ScopeClassEvent()) + await event.event_completed() + + assert max_active == 1, f'class scope should serialize across instances, got 
max_active={max_active}' + await bus.stop() + + async def test_retry_decorated_method_instance_scope_allows_parallel_across_instances(self): + """Instance scope semaphore should allow bound handlers from different instances to overlap.""" + + class ScopeInstanceEvent(BaseEvent[str]): + pass + + bus = EventBus(name='test_scope_instance_bus', event_handler_concurrency='parallel') + active = 0 + max_active = 0 + calls = 0 + + class SomeService: + @retry( + max_attempts=1, + semaphore_scope='instance', + semaphore_limit=1, + semaphore_name='on_scope_instance_event', + ) + async def on_scope_instance_event(self, _event: ScopeInstanceEvent) -> str: + nonlocal active, max_active, calls + active += 1 + max_active = max(max_active, active) + calls += 1 + await asyncio.sleep(0.05) + active -= 1 + return 'ok' + + service_a = SomeService() + service_b = SomeService() + bus.on(ScopeInstanceEvent, service_a.on_scope_instance_event) + bus.on(ScopeInstanceEvent, service_b.on_scope_instance_event) + + event = await bus.emit(ScopeInstanceEvent()) + await event.event_completed() + + assert calls == 2, f'expected both handlers to run, got calls={calls}' + assert max_active == 2, f'instance scope should allow overlap across instances, got max_active={max_active}' + await bus.stop() + + async def test_retry_decorated_method_global_scope_serializes_all_bound_handlers(self): + """Global scope semaphore should serialize bound method handlers across all instances.""" + + class ScopeGlobalEvent(BaseEvent[str]): + pass + + bus = EventBus(name='test_scope_global_bus', event_handler_concurrency='parallel') + active = 0 + max_active = 0 + + class SomeService: + @retry( + max_attempts=1, + semaphore_scope='global', + semaphore_limit=1, + semaphore_name='on_scope_global_event', + ) + async def on_scope_global_event(self, _event: ScopeGlobalEvent) -> str: + nonlocal active, max_active + active += 1 + max_active = max(max_active, active) + await asyncio.sleep(0.05) + active -= 1 + return 'ok' + + 
service_a = SomeService() + service_b = SomeService() + bus.on(ScopeGlobalEvent, service_a.on_scope_global_event) + bus.on(ScopeGlobalEvent, service_b.on_scope_global_event) + + event = await bus.emit(ScopeGlobalEvent()) + await event.event_completed() + + assert max_active == 1, f'global scope should serialize all handlers, got max_active={max_active}' + await bus.stop() + + async def test_retry_hof_bind_after_wrap_instance_scope_preserves_instance_isolation(self): + """HOF pattern retry(...)(fn) then bind to instances should keep instance-scope isolation.""" + + class HofBindEvent(BaseEvent[str]): + pass + + bus = EventBus(name='test_hof_bind_bus', event_handler_concurrency='parallel') + active = 0 + max_active = 0 + + @retry( + max_attempts=1, + semaphore_scope='instance', + semaphore_limit=1, + semaphore_name='hof_bind_handler', + ) + async def handler(self: object, _event: HofBindEvent) -> str: + nonlocal active, max_active + active += 1 + max_active = max(max_active, active) + await asyncio.sleep(0.05) + active -= 1 + return 'ok' + + class Holder: + pass + + holder_a = Holder() + holder_b = Holder() + bus.on(HofBindEvent, handler.__get__(holder_a, Holder)) + bus.on(HofBindEvent, handler.__get__(holder_b, Holder)) + + event = await bus.emit(HofBindEvent()) + await event.event_completed() + + assert max_active == 2, f'bind-after-wrap instance scope should allow overlap, got max_active={max_active}' + await bus.stop() + + async def test_retry_wrapping_emit_retries_full_dispatch_cycle(self): + """Retry wrapper around emit+event_completed should retry full event dispatch when handler errors.""" + + class TabsEvent(BaseEvent[str]): + pass + + class DOMEvent(BaseEvent[str]): + pass + + class ScreenshotEvent(BaseEvent[str]): + pass + + bus = EventBus(name='test_retry_emit_bus', event_handler_concurrency='parallel') + tabs_attempts = 0 + dom_calls = 0 + screenshot_calls = 0 + + async def tabs_handler(_event: TabsEvent) -> str: + nonlocal tabs_attempts + tabs_attempts 
+= 1 + if tabs_attempts < 3: + raise RuntimeError(f'tabs fail attempt {tabs_attempts}') + return 'tabs ok' + + async def dom_handler(_event: DOMEvent) -> str: + nonlocal dom_calls + dom_calls += 1 + return 'dom ok' + + async def screenshot_handler(_event: ScreenshotEvent) -> str: + nonlocal screenshot_calls + screenshot_calls += 1 + return 'screenshot ok' + + bus.on(TabsEvent, tabs_handler) + bus.on(DOMEvent, dom_handler) + bus.on(ScreenshotEvent, screenshot_handler) + + @retry(max_attempts=4) + async def emit_tabs_with_retry() -> TabsEvent: + tabs_event = await bus.emit(TabsEvent()) + await tabs_event.event_completed() + failed_results = [result for result in tabs_event.event_results.values() if result.status == 'error'] + if failed_results: + first_error = failed_results[0].error + if isinstance(first_error, Exception): + raise first_error + raise RuntimeError(f'tabs emit failed with non-exception error payload: {first_error!r}') + return tabs_event + + async def emit_and_wait(event: BaseEvent[str]): + emitted = await bus.emit(event) + await emitted.event_completed() + return emitted + + tabs_event, dom_event, screenshot_event = await asyncio.gather( + emit_tabs_with_retry(), + emit_and_wait(DOMEvent()), + emit_and_wait(ScreenshotEvent()), + ) + + assert tabs_attempts == 3, f'expected 3 attempts for tabs flow, got {tabs_attempts}' + assert tabs_event.event_status == 'completed' + assert dom_calls == 1 + assert screenshot_calls == 1 + assert dom_event.event_status == 'completed' + assert screenshot_event.event_status == 'completed' + await bus.stop() diff --git a/tests/test_eventbus_serialization.py b/tests/test_eventbus_serialization.py new file mode 100644 index 0000000..fb04f6f --- /dev/null +++ b/tests/test_eventbus_serialization.py @@ -0,0 +1,114 @@ +from collections import deque +from typing import Any, cast + +from bubus.base_event import BaseEvent, EventResult +from bubus.event_bus import EventBus +from bubus.helpers import CleanShutdownQueue + + +class 
SerializableEvent(BaseEvent[str]): + value: str = 'payload' + + +def _make_bus_with_pending_event() -> tuple[EventBus, SerializableEvent, str]: + bus = EventBus( + name='SerializableBus', + id='018f8e40-1234-7000-8000-000000001234', + max_history_size=500, + max_history_drop=False, + event_concurrency='parallel', + event_handler_concurrency='parallel', + event_handler_completion='first', + event_timeout=None, + event_slow_timeout=34.0, + event_handler_slow_timeout=12.0, + event_handler_detect_file_paths=False, + ) + + def handler(event: SerializableEvent) -> str: + return event.value + + handler_entry = bus.on(SerializableEvent, handler) + assert handler_entry.id is not None + handler_id = handler_entry.id + + event = SerializableEvent(value='roundtrip') + event_result = EventResult[str]( + event_id=event.event_id, + handler=handler_entry, + status='completed', + result='ok', + ) + event.event_results[handler_id] = event_result + bus.event_history[event.event_id] = event + + queue = CleanShutdownQueue[BaseEvent[Any]](maxsize=0) + queue.put_nowait(event) + bus.pending_event_queue = queue + return bus, event, handler_id + + +def test_eventbus_model_dump_json_roundtrip_uses_id_keyed_structures() -> None: + bus, event, handler_id = _make_bus_with_pending_event() + + payload = bus.model_dump() + assert payload['handlers'].keys() == {handler_id} + assert payload['handlers_by_key'].keys() == {'SerializableEvent'} + assert payload['event_history'].keys() == {event.event_id} + assert payload['pending_event_queue'] == [event.event_id] + assert all(event_id in payload['event_history'] for event_id in payload['pending_event_queue']) + + restored = EventBus.validate(bus.model_dump_json()) + assert restored.id == bus.id + assert restored.name == bus.name + assert restored.event_history.max_history_size == bus.event_history.max_history_size + assert restored.event_history.max_history_drop == bus.event_history.max_history_drop + assert str(restored.event_concurrency) == 
str(bus.event_concurrency) + assert str(restored.event_handler_concurrency) == str(bus.event_handler_concurrency) + assert str(restored.event_handler_completion) == str(bus.event_handler_completion) + assert restored.event_timeout == bus.event_timeout + assert restored.event_slow_timeout == bus.event_slow_timeout + assert restored.event_handler_slow_timeout == bus.event_handler_slow_timeout + assert restored.event_handler_detect_file_paths == bus.event_handler_detect_file_paths + + restored_event = restored.event_history[event.event_id] + restored_result = restored_event.event_results[handler_id] + assert restored_result.handler is restored.handlers[handler_id] + assert restored_result.handler.handler is not None + assert restored_result.handler(restored_event) is None + + assert restored.pending_event_queue is not None + queue = cast(deque[BaseEvent[Any]], getattr(restored.pending_event_queue, '_queue')) + assert len(queue) == 1 + assert queue[0] is restored_event + + +def test_eventbus_validate_creates_missing_handler_entries_from_event_results() -> None: + bus, event, handler_id = _make_bus_with_pending_event() + payload = bus.model_dump() + + payload['handlers'] = {} + payload['handlers_by_key'] = {} + + restored = EventBus.validate(payload) + assert handler_id in restored.handlers + assert 'SerializableEvent' in restored.handlers_by_key + assert handler_id in restored.handlers_by_key['SerializableEvent'] + + restored_result = restored.event_history[event.event_id].event_results[handler_id] + assert restored_result.handler is restored.handlers[handler_id] + assert restored_result.handler.handler is not None + assert restored_result.handler(restored.event_history[event.event_id]) is None + + +def test_eventbus_model_dump_promotes_pending_events_into_event_history() -> None: + bus = EventBus(name='QueueOnlyBus', event_handler_detect_file_paths=False) + event = SerializableEvent(value='queued-only') + + queue = CleanShutdownQueue[BaseEvent[Any]](maxsize=0) + 
queue.put_nowait(event) + bus.pending_event_queue = queue + + payload = bus.model_dump() + assert payload['pending_event_queue'] == [event.event_id] + assert event.event_id in payload['event_history'] diff --git a/tests/test_eventbus_subclass_isolation.py b/tests/test_eventbus_subclass_isolation.py new file mode 100644 index 0000000..80f180a --- /dev/null +++ b/tests/test_eventbus_subclass_isolation.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from bubus import BaseEvent, EventBus + + +def test_eventbus_subclasses_isolate_registries_and_global_serial_locks() -> None: + class IsolatedBusA(EventBus): + pass + + class IsolatedBusB(EventBus): + pass + + bus_a1 = IsolatedBusA('IsolatedBusA1', event_concurrency='global-serial') + bus_a2 = IsolatedBusA('IsolatedBusA2', event_concurrency='global-serial') + bus_b1 = IsolatedBusB('IsolatedBusB1', event_concurrency='global-serial') + + assert bus_a1 in IsolatedBusA.all_instances + assert bus_a2 in IsolatedBusA.all_instances + assert bus_b1 not in IsolatedBusA.all_instances + assert bus_b1 in IsolatedBusB.all_instances + assert bus_a1 not in IsolatedBusB.all_instances + assert bus_a1 not in EventBus.all_instances + assert bus_b1 not in EventBus.all_instances + + lock_a1 = bus_a1.locks.get_lock_for_event(bus_a1, BaseEvent()) + lock_a2 = bus_a2.locks.get_lock_for_event(bus_a2, BaseEvent()) + lock_b1 = bus_b1.locks.get_lock_for_event(bus_b1, BaseEvent()) + + assert lock_a1 is not None + assert lock_a2 is not None + assert lock_b1 is not None + assert lock_a1 is lock_a2 + assert lock_a1 is not lock_b1 diff --git a/tests/test_eventbus_timeout.py b/tests/test_eventbus_timeout.py new file mode 100644 index 0000000..e8093d1 --- /dev/null +++ b/tests/test_eventbus_timeout.py @@ -0,0 +1,691 @@ +"""Test for per-handler timeout enforcement matching the exact scenario from the issue""" + +import asyncio +import logging + +import pytest + +from bubus import ( + BaseEvent, + EventBus, + EventHandlerAbortedError, + 
EventHandlerCancelledError, + EventHandlerTimeoutError, +) +from bubus.retry import retry + + +# Event definitions +class TopmostEvent(BaseEvent[str]): + """Event for navigating to a URL""" + + url: str = 'https://example.com' + + event_timeout: float | None = 5.0 + + +class ChildEvent(BaseEvent[str]): + """Event for tab creation""" + + tab_id: str = 'tab-123' + + event_timeout: float | None = 2 + + +class GrandchildEvent(BaseEvent[str]): + """Event for navigation completion""" + + success: bool = True + + event_timeout: float | None = 1 + + +# Watchdog classes +class HandlerClass1: + async def on_TopmostEvent(self, event: TopmostEvent) -> str: + """Completes quickly - 1 second""" + await asyncio.sleep(0.1) + return 'HandlerClass1.on_TopmostEvent completed after 0.1s' + + async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: + """Starts but gets interrupted after 1 second by parent timeout""" + await asyncio.sleep(5) # Would take 5 seconds but will be interrupted + return 'HandlerClass1.on_GrandchildEvent completed after 5s' + + +class HandlerClass2: + async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: + """Completes instantly""" + # No sleep - completes immediately + return 'HandlerClass2.on_GrandchildEvent completed immediately' + + +class HandlerClass3: + async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: + """Never gets to run - pending when timeout occurs""" + await asyncio.sleep(0.2) + return 'HandlerClass3.on_GrandchildEvent completed after 0.2s' + + +class HandlerClass4: + async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: + """Never gets to run - pending when timeout occurs""" + await asyncio.sleep(0.1) + return 'HandlerClass4.on_GrandchildEvent completed after 0.1s' + + +class MainClass0: + def __init__(self, bus: EventBus): + self.bus = bus + + async def on_TopmostEvent(self, event: TopmostEvent) -> str: + """Takes 11 seconds total - dispatches ChildEvent""" + # Do some work + await 
asyncio.sleep(1) + + # Dispatch and wait for ChildEvent + child_event = self.bus.emit(ChildEvent()) + try: + await child_event # This will timeout after 10s + except Exception as e: + print(f'DEBUG: Parent caught child error: {type(e).__name__}: {e}') + + import threading + + all_tasks = asyncio.all_tasks() + print(f'\nOutstanding asyncio tasks ({len(all_tasks)}):') + for task in all_tasks: + print(f' - {task.get_name()}: {task._state} - {task.get_coro()}') + + # List all threads + all_threads = threading.enumerate() + print(f'\nActive threads ({len(all_threads)}):') + for thread in all_threads: + print(f' - {thread.name}: {thread.is_alive()}') + + raise + + # Would continue but won't get here due to timeout + return 'MainClass0.on_TopmostEvent completed after all child events' + + async def on_ChildEvent(self, event: ChildEvent) -> str: + """Takes 10 seconds - will timeout, dispatches GrandchildEvent""" + # Dispatch GrandchildEvent immediately + grandchild_event = self.bus.emit(GrandchildEvent()) + + # Wait for GrandchildEvent to complete + # This will take 9s (MainClass0) + 0s (AboutBlank) + partial HandlerClass1 time + # Since handlers run serially and we have a 10s timeout, we'll timeout while + # HandlerClass1 is still running (after about 1s of its 5s execution) + await grandchild_event # .event_result(raise_if_any=False, raise_if_none=True, timeout=15) + + # Would continue but we timeout first + return 'MainClass0.on_ChildEvent completed after GrandchildEvent() finished processing' + + async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: + """Completes in 5 seconds""" + # print('GRANDCHILD EVENT HANDLING STARTED') + await asyncio.sleep(2) + return 'MainClass0.on_GrandchildEvent completed after 2s' + + +@pytest.mark.asyncio +async def test_nested_timeout_scenario_from_issue(): + """Test the exact timeout scenario described in the issue + + This tests: + 1. TopmostEvent with 30s timeout dispatches ChildEvent + 2. 
ChildEvent with 10s timeout times out after 10s + 3. GrandchildEvent is dispatched from ChildEvent handler + 4. Some handlers complete, some are interrupted, some never run + 5. The timeout tree logging shows the complete hierarchy + """ + # Create single event bus + bus = EventBus(name='MainClass0EventBus') + + # Create instances + handlerclass1 = HandlerClass1() + handlerclass2 = HandlerClass2() + handlerclass3 = HandlerClass3() + handlerclass4 = HandlerClass4() + mainclass0 = MainClass0(bus) + + # Register handlers for TopmostEvent + bus.on('TopmostEvent', handlerclass1.on_TopmostEvent) + bus.on('TopmostEvent', mainclass0.on_TopmostEvent) + + # Register handlers for ChildEvent + bus.on('ChildEvent', mainclass0.on_ChildEvent) + + # Register handlers for GrandchildEvent (order matters for the test) + bus.on('GrandchildEvent', mainclass0.on_GrandchildEvent) + bus.on('GrandchildEvent', handlerclass2.on_GrandchildEvent) + bus.on('GrandchildEvent', handlerclass1.on_GrandchildEvent) + bus.on('GrandchildEvent', handlerclass3.on_GrandchildEvent) + bus.on('GrandchildEvent', handlerclass4.on_GrandchildEvent) + + # Dispatch the root event + navigate_event = bus.emit(TopmostEvent()) + + # Wait for it to complete (will fail due to timeout) + # with pytest.raises((RuntimeError, TimeoutError)) as exc_info: + try: + await ( + navigate_event + ) # .event_result(raise_if_any=True, raise_if_none=True, timeout=20) # The event should complete with an error + except Exception as e: + print(f'Exception caught: {type(e).__name__}: {e}') + raise + + # import ipdb; ipdb.set_trace() + + # print('-----------------------------------------------------') + # print(f"Exception caught: {type(exc_info.value).__name__}: {exc_info.value}") + # # assert 'ChildEvent' in str(exc_info.value) or 'ChildEvent' in str(exc_info.value) + + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_handler_timeout_marks_error_and_other_handlers_still_complete(): + """Focused timeout 
behavior: one handler times out, another still completes.""" + bus = EventBus(name='TimeoutFocusedBus') + + class TimeoutFocusedEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + event_handler_timeout: float | None = 0.01 + + execution_order: list[str] = [] + + async def slow_handler(event: TimeoutFocusedEvent) -> str: + execution_order.append('slow_start') + await asyncio.sleep(0.05) + execution_order.append('slow_end') + return 'slow' + + async def fast_handler(event: TimeoutFocusedEvent) -> str: + execution_order.append('fast_start') + return 'fast' + + bus.on(TimeoutFocusedEvent, slow_handler) + bus.on(TimeoutFocusedEvent, fast_handler) + + try: + event = await bus.emit(TimeoutFocusedEvent()) + await bus.wait_until_idle() + + slow_result = next((r for r in event.event_results.values() if r.handler_name.endswith('slow_handler')), None) + fast_result = next((r for r in event.event_results.values() if r.handler_name.endswith('fast_handler')), None) + + assert slow_result is not None + assert slow_result.status == 'error' + assert isinstance(slow_result.error, TimeoutError) + + assert fast_result is not None + assert fast_result.status == 'completed' + assert fast_result.result == 'fast' + assert 'fast_start' in execution_order + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_event_timeout_is_hard_cap_across_serial_handlers(): + bus = EventBus(name='EventHardCapBus') + + class HardCapEvent(BaseEvent[str]): + event_timeout: float | None = 0.05 + + async def first_handler(_event: HardCapEvent) -> str: + await asyncio.sleep(0.03) + return 'first' + + async def second_handler(_event: HardCapEvent) -> str: + await asyncio.sleep(0.03) + return 'second' + + async def pending_handler(_event: HardCapEvent) -> str: + return 'pending' + + bus.on(HardCapEvent, first_handler) + bus.on(HardCapEvent, second_handler) + bus.on(HardCapEvent, pending_handler) + + try: + event = await bus.emit(HardCapEvent()) + await 
bus.wait_until_idle() + + first_result = next(result for result in event.event_results.values() if result.handler_name.endswith('first_handler')) + second_result = next(result for result in event.event_results.values() if result.handler_name.endswith('second_handler')) + pending_result = next( + result for result in event.event_results.values() if result.handler_name.endswith('pending_handler') + ) + + assert first_result.status == 'completed' + assert first_result.result == 'first' + assert second_result.status == 'error' + assert isinstance(second_result.error, EventHandlerAbortedError) + assert pending_result.status == 'error' + assert isinstance(pending_result.error, EventHandlerCancelledError) + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_event_timeout_is_hard_cap_in_parallel_mode() -> None: + bus = EventBus(name='EventHardCapParallelBus', event_handler_concurrency='parallel') + + class HardCapParallelEvent(BaseEvent[str]): + event_timeout: float | None = 0.03 + + async def slow_a(_event: HardCapParallelEvent) -> str: + await asyncio.sleep(0.1) + return 'a' + + async def slow_b(_event: HardCapParallelEvent) -> str: + await asyncio.sleep(0.1) + return 'b' + + bus.on(HardCapParallelEvent, slow_a) + bus.on(HardCapParallelEvent, slow_b) + + try: + event = await bus.emit(HardCapParallelEvent()) + await bus.wait_until_idle() + + assert len(event.event_results) == 2 + assert all(result.status == 'error' for result in event.event_results.values()) + assert all( + isinstance(result.error, (EventHandlerAbortedError, EventHandlerCancelledError, EventHandlerTimeoutError)) + for result in event.event_results.values() + ) + assert any( + isinstance(result.error, (EventHandlerAbortedError, EventHandlerTimeoutError)) + for result in event.event_results.values() + ) + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def 
test_event_level_timeout_marks_started_parallel_handlers_as_aborted_or_timed_out() -> None: + bus = EventBus(name='EventHardCapParallelAbortedOnlyBus', event_handler_concurrency='parallel') + + class HardCapParallelAbortOnlyEvent(BaseEvent[str]): + event_timeout: float | None = 0.03 + + started_a = asyncio.Event() + started_b = asyncio.Event() + both_started = asyncio.Event() + + async def slow_a(_event: HardCapParallelAbortOnlyEvent) -> str: + started_a.set() + if started_b.is_set(): + both_started.set() + await both_started.wait() + await asyncio.sleep(0.2) + return 'a' + + async def slow_b(_event: HardCapParallelAbortOnlyEvent) -> str: + started_b.set() + if started_a.is_set(): + both_started.set() + await both_started.wait() + await asyncio.sleep(0.2) + return 'b' + + bus.on(HardCapParallelAbortOnlyEvent, slow_a) + bus.on(HardCapParallelAbortOnlyEvent, slow_b) + + try: + event = bus.emit(HardCapParallelAbortOnlyEvent()) + await asyncio.wait_for(asyncio.gather(started_a.wait(), started_b.wait()), timeout=0.2) + both_started.set() + await event + await bus.wait_until_idle() + + assert len(event.event_results) == 2 + assert all(result.status == 'error' for result in event.event_results.values()) + assert all( + isinstance(result.error, (EventHandlerAbortedError, EventHandlerTimeoutError)) + for result in event.event_results.values() + ) + assert not any(isinstance(result.error, EventHandlerCancelledError) for result in event.event_results.values()) + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_event_timeout_does_not_relabel_preexisting_handler_timeout() -> None: + bus = EventBus(name='EventTimeoutPreservesHandlerTimeoutBus', event_handler_concurrency='parallel') + + class MixedTimeoutEvent(BaseEvent[str]): + event_timeout: float | None = 0.05 + + @retry(max_attempts=1, timeout=0.01) + async def handler_with_own_timeout(_event: MixedTimeoutEvent) -> str: + await asyncio.sleep(0.05) + return 'own-timeout' + + async def 
long_running_handler(_event: MixedTimeoutEvent) -> str: + await asyncio.sleep(0.2) + return 'long-running' + + bus.on(MixedTimeoutEvent, handler_with_own_timeout) + bus.on(MixedTimeoutEvent, long_running_handler) + + try: + event = bus.emit(MixedTimeoutEvent()) + await event + await bus.wait_until_idle() + + results = list(event.event_results.values()) + assert len(results) == 2 + assert any(isinstance(result.error, EventHandlerTimeoutError) for result in results) + assert any(isinstance(result.error, EventHandlerAbortedError) for result in results) + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_multi_bus_timeout_is_recorded_on_target_bus(): + """Closest Python equivalent: same event dispatched to two buses, timeout on target bus is captured.""" + bus_a = EventBus(name='MultiTimeoutA') + bus_b = EventBus(name='MultiTimeoutB') + + class MultiBusTimeoutEvent(BaseEvent[str]): + event_timeout: float | None = 0.01 + + async def slow_target_handler(event: MultiBusTimeoutEvent) -> str: + await asyncio.sleep(0.05) + return 'slow' + + bus_b.on(MultiBusTimeoutEvent, slow_target_handler) + + try: + event = MultiBusTimeoutEvent() + bus_a.emit(event) + bus_b.emit(event) + await bus_b.wait_until_idle() + + bus_b_result = next((r for r in event.event_results.values() if r.eventbus_name == bus_b.name), None) + assert bus_b_result is not None + assert bus_b_result.status == 'error' + assert isinstance(bus_b_result.error, EventHandlerAbortedError) + assert event.event_path == [bus_a.label, bus_b.label] + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_followup_event_runs_after_parent_timeout_in_queue_jump_path(): + """ + Regression guard: timeout in a handler that awaited a child event should not + stall subsequent events on the same bus. 
+ """ + bus = EventBus(name='TimeoutQueueJumpFollowupBus') + + class ParentEvent(BaseEvent[str]): + event_timeout: float | None = 0.02 + + class ChildEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + class TailEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + tail_runs = 0 + + async def child_handler(event: ChildEvent) -> str: + await asyncio.sleep(0.001) + return 'child_done' + + async def parent_handler(event: ParentEvent) -> str: + child = bus.emit(ChildEvent()) + await child + await asyncio.sleep(0.05) # Exceeds parent timeout + return 'parent_done' + + async def tail_handler(event: TailEvent) -> str: + nonlocal tail_runs + tail_runs += 1 + return 'tail_done' + + bus.on(ParentEvent, parent_handler) + bus.on(ChildEvent, child_handler) + bus.on(TailEvent, tail_handler) + + try: + parent = await bus.emit(ParentEvent()) + await bus.wait_until_idle() + + parent_result = next(iter(parent.event_results.values())) + assert parent_result.status == 'error' + assert isinstance(parent_result.error, EventHandlerAbortedError) + + tail = bus.emit(TailEvent()) + completed_tail = await asyncio.wait_for(tail, timeout=1.0) + assert completed_tail.event_status == 'completed' + assert tail_runs == 1 + finally: + await bus.stop(clear=True, timeout=0) + + +@pytest.mark.asyncio +async def test_forwarded_timeout_path_does_not_stall_followup_events(): + """ + Regression guard: if a forwarded awaited child times out, subsequent events + should still run on both source and target buses. + """ + bus_a = EventBus(name='TimeoutForwardA') + bus_b = EventBus(name='TimeoutForwardB') + + class ParentEvent(BaseEvent[str]): + # This test validates child-timeout recovery, not parent-timeout behavior. + # Keep parent timeout well above observed queue/lock jitter in full-suite runs. 
+ event_timeout: float | None = 1.0 + + class ChildEvent(BaseEvent[str]): + event_timeout: float | None = 0.01 + + class TailEvent(BaseEvent[str]): + event_timeout: float | None = 0.2 + + bus_a_tail_runs = 0 + bus_b_tail_runs = 0 + child_ref: ChildEvent | None = None + + async def parent_handler(event: ParentEvent) -> str: + nonlocal child_ref + child = bus_a.emit(ChildEvent()) + child_ref = child + await child + return 'parent_done' + + async def slow_child_handler(event: ChildEvent) -> str: + await asyncio.sleep(0.05) # Guaranteed timeout on child. + return 'child_done' + + async def tail_handler_a(event: TailEvent) -> str: + nonlocal bus_a_tail_runs + bus_a_tail_runs += 1 + return 'tail_a' + + async def tail_handler_b(event: TailEvent) -> str: + nonlocal bus_b_tail_runs + bus_b_tail_runs += 1 + return 'tail_b' + + bus_a.on(ParentEvent, parent_handler) + bus_a.on(TailEvent, tail_handler_a) + bus_a.on('*', bus_b.emit) + bus_b.on(ChildEvent, slow_child_handler) + bus_b.on(TailEvent, tail_handler_b) + + try: + parent = await bus_a.emit(ParentEvent()) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + parent_result = next(result for result in parent.event_results.values() if result.handler_name.endswith('parent_handler')) + assert parent_result.status == 'completed' + + assert child_ref is not None + assert any( + isinstance(result.error, (EventHandlerTimeoutError, EventHandlerAbortedError)) + for result in child_ref.event_results.values() + ), child_ref.event_results + + # Lock/queue state should remain healthy after timeout. 
+ tail = bus_a.emit(TailEvent()) + completed_tail = await asyncio.wait_for(tail, timeout=1.0) + await bus_a.wait_until_idle() + await bus_b.wait_until_idle() + + assert completed_tail.event_status == 'completed' + assert bus_a_tail_runs == 1 + assert bus_b_tail_runs == 1 + finally: + await bus_a.stop(clear=True, timeout=0) + await bus_b.stop(clear=True, timeout=0) + + +# Consolidated from tests/test_event_timeout_defaults.py + + +class TimeoutDefaultsEvent(BaseEvent[str]): + pass + + +@pytest.mark.asyncio +async def test_processing_time_timeout_defaults_do_not_mutate_event_fields() -> None: + bus = EventBus( + name='TimeoutDefaultsCopyBus', + event_timeout=12.0, + event_slow_timeout=34.0, + event_handler_slow_timeout=56.0, + ) + + async def handler(_event: TimeoutDefaultsEvent) -> str: + return 'ok' + + bus.on(TimeoutDefaultsEvent, handler) + + try: + event = bus.emit(TimeoutDefaultsEvent()) + assert event.event_timeout is None + assert event.event_handler_timeout is None + assert event.event_handler_slow_timeout is None + assert getattr(event, 'event_slow_timeout', None) is None + + completed = await event + handler_result = next(iter(completed.event_results.values())) + assert handler_result.timeout is not None and abs(handler_result.timeout - 12.0) < 1e-9 + finally: + await bus.stop() + + +@pytest.mark.asyncio +async def test_handler_timeout_resolution_matches_ts_precedence() -> None: + bus = EventBus(name='TimeoutPrecedenceBus', event_timeout=0.2) + + async def default_handler(_event: TimeoutDefaultsEvent) -> str: + await asyncio.sleep(0.001) + return 'default' + + async def overridden_handler(_event: TimeoutDefaultsEvent) -> str: + await asyncio.sleep(0.001) + return 'override' + + bus.on(TimeoutDefaultsEvent, default_handler) + overridden_entry = bus.on(TimeoutDefaultsEvent, overridden_handler) + overridden_entry.handler_timeout = 0.12 + + try: + event = await bus.emit(TimeoutDefaultsEvent(event_timeout=0.2, event_handler_timeout=0.05)) + + default_result = 
next( + result for result in event.event_results.values() if result.handler_name.endswith('default_handler') + ) + overridden_result = next( + result for result in event.event_results.values() if result.handler_name.endswith('overridden_handler') + ) + + assert default_result.timeout is not None and abs(default_result.timeout - 0.05) < 1e-9 + assert overridden_result.timeout is not None and abs(overridden_result.timeout - 0.12) < 1e-9 + + tighter_event_timeout = await bus.emit(TimeoutDefaultsEvent(event_timeout=0.08, event_handler_timeout=0.2)) + tighter_default = next( + result for result in tighter_event_timeout.event_results.values() if result.handler_name.endswith('default_handler') + ) + tighter_overridden = next( + result + for result in tighter_event_timeout.event_results.values() + if result.handler_name.endswith('overridden_handler') + ) + + assert tighter_default.timeout is not None and abs(tighter_default.timeout - 0.08) < 1e-9 + assert tighter_overridden.timeout is not None and abs(tighter_overridden.timeout - 0.08) < 1e-9 + finally: + await bus.stop() + + +@pytest.mark.asyncio +async def test_event_handler_detect_file_paths_toggle() -> None: + bus = EventBus(name='NoDetectPathsBus', event_handler_detect_file_paths=False) + + async def handler(_event: TimeoutDefaultsEvent) -> str: + return 'ok' + + try: + entry = bus.on(TimeoutDefaultsEvent, handler) + assert entry.handler_file_path is None + finally: + await bus.stop() + + +@pytest.mark.asyncio +async def test_handler_slow_warning_uses_event_handler_slow_timeout(caplog: pytest.LogCaptureFixture) -> None: + caplog.set_level(logging.WARNING, logger='bubus') + bus = EventBus( + name='SlowHandlerWarnBus', + event_timeout=0.5, + event_slow_timeout=None, + event_handler_slow_timeout=0.01, + ) + + async def slow_handler(_event: TimeoutDefaultsEvent) -> str: + await asyncio.sleep(0.03) + return 'ok' + + bus.on(TimeoutDefaultsEvent, slow_handler) + + try: + await bus.emit(TimeoutDefaultsEvent()) + assert 
any('Slow event handler:' in record.message for record in caplog.records) + finally: + await bus.stop() + + +@pytest.mark.asyncio +async def test_event_slow_warning_uses_event_slow_timeout(caplog: pytest.LogCaptureFixture) -> None: + caplog.set_level(logging.WARNING, logger='bubus') + bus = EventBus( + name='SlowEventWarnBus', + event_timeout=0.5, + event_slow_timeout=0.01, + event_handler_slow_timeout=None, + ) + + async def slow_event_handler(_event: TimeoutDefaultsEvent) -> str: + await asyncio.sleep(0.03) + return 'ok' + + bus.on(TimeoutDefaultsEvent, slow_event_handler) + + try: + await bus.emit(TimeoutDefaultsEvent()) + assert any('Slow event processing:' in record.message for record in caplog.records) + finally: + await bus.stop() diff --git a/tests/test_events_suck.py b/tests/test_events_suck.py new file mode 100644 index 0000000..278ab89 --- /dev/null +++ b/tests/test_events_suck.py @@ -0,0 +1,143 @@ +import inspect +from typing import Any + +from bubus import BaseEvent, EventBus, events_suck + + +class CreateUserEvent(BaseEvent[str]): + id: str | None = None + name: str + age: int + nickname: str | None = None + + +class UpdateUserEvent(BaseEvent[bool]): + id: str + name: str | None = None + age: int | None = None + source: str | None = None + + +class SomeLegacyImperativeClass: + def __init__(self): + self.calls: list[tuple[str, dict[str, Any]]] = [] + + def create(self, id: str | None, name: str, age: int) -> str: + self.calls.append(('create', {'id': id, 'name': name, 'age': age})) + return f'{name}-{age}' + + def update(self, id: str, name: str | None = None, age: int | None = None, **extra: Any) -> bool: + self.calls.append(('update', {'id': id, 'name': name, 'age': age, **extra})) + return bool(id) + + +def ping_user(user_id: str) -> str: + return f'pong:{user_id}' + + +async def test_events_suck_wrap_emits_and_returns_first_result(): + bus = EventBus('EventsSuckBus') + seen_payloads: list[dict[str, Any]] = [] + + async def on_create(event: 
CreateUserEvent) -> str: + seen_payloads.append( + { + 'id': event.id, + 'name': event.name, + 'age': event.age, + 'nickname': event.nickname, + } + ) + return 'user-123' + + async def on_update(event: UpdateUserEvent) -> bool: + seen_payloads.append( + { + 'id': event.id, + 'name': event.name, + 'age': event.age, + 'source': event.source, + } + ) + return event.age == 46 + + bus.on(CreateUserEvent, on_create) + bus.on(UpdateUserEvent, on_update) + + MySDKClient = events_suck.wrap( + 'MySDKClient', + { + 'create': CreateUserEvent, + 'update': UpdateUserEvent, + }, + ) + client = MySDKClient(bus=bus) + + created_id = await client.create(name='bob', age=45, nickname='bobby') + updated = await client.update(id=created_id, age=46, source='sync') + + assert created_id == 'user-123' + assert updated is True + assert seen_payloads == [ + {'id': None, 'name': 'bob', 'age': 45, 'nickname': 'bobby'}, + {'id': created_id, 'name': None, 'age': 46, 'source': 'sync'}, + ] + + await bus.stop(clear=True) + + +def test_events_suck_wrap_builds_typed_method_signature(): + TestClient = events_suck.wrap('TestClient', {'create': CreateUserEvent}) + signature = inspect.signature(TestClient.create) + params = signature.parameters + + assert list(params) == ['self', 'id', 'name', 'age', 'nickname', 'extra'] + assert params['id'].annotation == str | None + assert params['id'].default is None + assert params['name'].annotation is str + assert params['name'].default is inspect.Parameter.empty + assert params['age'].annotation is int + assert params['nickname'].annotation == str | None + assert params['nickname'].default is None + assert params['extra'].kind == inspect.Parameter.VAR_KEYWORD + assert signature.return_annotation is str + + +async def test_events_suck_make_events_and_make_handler_runtime_binding(): + events = events_suck.make_events( + { + 'FooBarAPICreateEvent': SomeLegacyImperativeClass.create, + 'FooBarAPIUpdateEvent': SomeLegacyImperativeClass.update, + 'FooBarAPIPingEvent': 
ping_user, + } + ) + FooBarAPICreateEvent = events.FooBarAPICreateEvent + FooBarAPIUpdateEvent = events.FooBarAPIUpdateEvent + FooBarAPIPingEvent = events.FooBarAPIPingEvent + + assert FooBarAPICreateEvent.model_fields['id'].annotation == str | None + assert FooBarAPICreateEvent.model_fields['name'].annotation is str + assert FooBarAPICreateEvent.model_fields['age'].annotation is int + assert FooBarAPICreateEvent.model_fields['event_result_type'].default is str + + bus = EventBus('LegacyBus') + impl = SomeLegacyImperativeClass() + bus.on(FooBarAPICreateEvent, events_suck.make_handler(impl.create)) + bus.on(FooBarAPIUpdateEvent, events_suck.make_handler(impl.update)) + bus.on(FooBarAPIPingEvent, events_suck.make_handler(ping_user)) + + create_result = await bus.emit(FooBarAPICreateEvent(name='bob', age=45)).event_result() + update_result = await bus.emit( + FooBarAPIUpdateEvent(id='4ddee2b7-782f-7bbf-84ff-6aad2693982e', age=46, source='sync') + ).event_result() + ping_result = await bus.emit(FooBarAPIPingEvent(user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')).event_result() + + assert create_result == 'bob-45' + assert update_result is True + assert ping_result == 'pong:e692b6cb-ae63-773b-8557-3218f7ce5ced' + assert impl.calls == [ + ('create', {'id': None, 'name': 'bob', 'age': 45}), + ('update', {'id': '4ddee2b7-782f-7bbf-84ff-6aad2693982e', 'name': None, 'age': 46, 'source': 'sync'}), + ] + + await bus.stop(clear=True) diff --git a/tests/test_handler_timeout.py b/tests/test_handler_timeout.py deleted file mode 100644 index 3952429..0000000 --- a/tests/test_handler_timeout.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Test for per-handler timeout enforcement matching the exact scenario from the issue""" - -import asyncio - -import pytest - -from bubus import BaseEvent, EventBus - - -# Event definitions -class TopmostEvent(BaseEvent[str]): - """Event for navigating to a URL""" - - url: str = 'https://example.com' - - event_timeout: float | None = 5.0 - - -class 
ChildEvent(BaseEvent[str]): - """Event for tab creation""" - - tab_id: str = 'tab-123' - - event_timeout: float | None = 2 - - -class GrandchildEvent(BaseEvent[str]): - """Event for navigation completion""" - - success: bool = True - - event_timeout: float | None = 1 - - -# Watchdog classes -class HandlerClass1: - async def on_TopmostEvent(self, event: TopmostEvent) -> str: - """Completes quickly - 1 second""" - await asyncio.sleep(0.1) - return 'HandlerClass1.on_TopmostEvent completed after 0.1s' - - async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: - """Starts but gets interrupted after 1 second by parent timeout""" - await asyncio.sleep(5) # Would take 5 seconds but will be interrupted - return 'HandlerClass1.on_GrandchildEvent completed after 5s' - - -class HandlerClass2: - async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: - """Completes instantly""" - # No sleep - completes immediately - return 'HandlerClass2.on_GrandchildEvent completed immediately' - - -class HandlerClass3: - async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: - """Never gets to run - pending when timeout occurs""" - await asyncio.sleep(0.2) - return 'HandlerClass3.on_GrandchildEvent completed after 0.2s' - - -class HandlerClass4: - async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: - """Never gets to run - pending when timeout occurs""" - await asyncio.sleep(0.1) - return 'HandlerClass4.on_GrandchildEvent completed after 0.1s' - - -class MainClass0: - def __init__(self, bus: EventBus): - self.bus = bus - - async def on_TopmostEvent(self, event: TopmostEvent) -> str: - """Takes 11 seconds total - dispatches ChildEvent""" - # Do some work - await asyncio.sleep(1) - - # Dispatch and wait for ChildEvent - child_event = self.bus.dispatch(ChildEvent()) - try: - await child_event # This will timeout after 10s - except Exception as e: - print(f'DEBUG: Parent caught child error: {type(e).__name__}: {e}') - - import threading - - 
all_tasks = asyncio.all_tasks() - print(f'\nOutstanding asyncio tasks ({len(all_tasks)}):') - for task in all_tasks: - print(f' - {task.get_name()}: {task._state} - {task.get_coro()}') - - # List all threads - all_threads = threading.enumerate() - print(f'\nActive threads ({len(all_threads)}):') - for thread in all_threads: - print(f' - {thread.name}: {thread.is_alive()}') - - raise - - # Would continue but won't get here due to timeout - return 'MainClass0.on_TopmostEvent completed after all child events' - - async def on_ChildEvent(self, event: ChildEvent) -> str: - """Takes 10 seconds - will timeout, dispatches GrandchildEvent""" - # Dispatch GrandchildEvent immediately - grandchild_event = self.bus.dispatch(GrandchildEvent()) - - # Wait for GrandchildEvent to complete - # This will take 9s (MainClass0) + 0s (AboutBlank) + partial HandlerClass1 time - # Since handlers run serially and we have a 10s timeout, we'll timeout while - # HandlerClass1 is still running (after about 1s of its 5s execution) - await grandchild_event # .event_result(raise_if_any=False, raise_if_none=True, timeout=15) - - # Would continue but we timeout first - return 'MainClass0.on_ChildEvent completed after GrandchildEvent() finished processing' - - async def on_GrandchildEvent(self, event: GrandchildEvent) -> str: - """Completes in 5 seconds""" - # print('GRANDCHILD EVENT HANDLING STARTED') - await asyncio.sleep(2) - return 'MainClass0.on_GrandchildEvent completed after 2s' - - -@pytest.mark.asyncio -async def test_nested_timeout_scenario_from_issue(): - """Test the exact timeout scenario described in the issue - - This tests: - 1. TopmostEvent with 30s timeout dispatches ChildEvent - 2. ChildEvent with 10s timeout times out after 10s - 3. GrandchildEvent is dispatched from ChildEvent handler - 4. Some handlers complete, some are interrupted, some never run - 5. 
The timeout tree logging shows the complete hierarchy - """ - # Create single event bus - bus = EventBus(name='MainClass0EventBus') - - # Create instances - handlerclass1 = HandlerClass1() - handlerclass2 = HandlerClass2() - handlerclass3 = HandlerClass3() - handlerclass4 = HandlerClass4() - mainclass0 = MainClass0(bus) - - # Register handlers for TopmostEvent - bus.on('TopmostEvent', handlerclass1.on_TopmostEvent) - bus.on('TopmostEvent', mainclass0.on_TopmostEvent) - - # Register handlers for ChildEvent - bus.on('ChildEvent', mainclass0.on_ChildEvent) - - # Register handlers for GrandchildEvent (order matters for the test) - bus.on('GrandchildEvent', mainclass0.on_GrandchildEvent) - bus.on('GrandchildEvent', handlerclass2.on_GrandchildEvent) - bus.on('GrandchildEvent', handlerclass1.on_GrandchildEvent) - bus.on('GrandchildEvent', handlerclass3.on_GrandchildEvent) - bus.on('GrandchildEvent', handlerclass4.on_GrandchildEvent) - - # Dispatch the root event - navigate_event = bus.dispatch(TopmostEvent()) - - # Wait for it to complete (will fail due to timeout) - # with pytest.raises((RuntimeError, TimeoutError)) as exc_info: - try: - await ( - navigate_event - ) # .event_result(raise_if_any=True, raise_if_none=True, timeout=20) # The event should complete with an error - except Exception as e: - print(f'Exception caught: {type(e).__name__}: {e}') - raise - - # import ipdb; ipdb.set_trace() - - # print('-----------------------------------------------------') - # print(f"Exception caught: {type(exc_info.value).__name__}: {exc_info.value}") - # # assert 'ChildEvent' in str(exc_info.value) or 'ChildEvent' in str(exc_info.value) - - await bus.stop(clear=True, timeout=0) diff --git a/tests/test_lock_manager.py b/tests/test_lock_manager.py new file mode 100644 index 0000000..f588128 --- /dev/null +++ b/tests/test_lock_manager.py @@ -0,0 +1,186 @@ +import asyncio + +# pyright: reportPrivateUsage=false +import pytest + +from bubus import BaseEvent, EventBus, 
EventConcurrencyMode, EventHandlerConcurrencyMode +from bubus.lock_manager import ReentrantLock + + +async def test_reentrant_lock_nested_context_reuses_single_permit() -> None: + lock = ReentrantLock() + + assert lock.locked() is False + async with lock: + assert lock.locked() is True + async with lock: + assert lock.locked() is True + assert lock.locked() is True + assert lock.locked() is False + + +async def test_reentrant_lock_serializes_across_tasks() -> None: + lock = ReentrantLock() + active = 0 + max_active = 0 + + async def worker() -> None: + nonlocal active, max_active + async with lock: + active += 1 + max_active = max(max_active, active) + await asyncio.sleep(0.01) + active -= 1 + + await asyncio.gather(*(worker() for _ in range(4))) + assert max_active == 1 + + +async def test_lock_manager_get_lock_for_event_modes() -> None: + bus = EventBus(name='LockManagerEventModesBus', event_concurrency='bus-serial') + event = BaseEvent(event_type='LockModesEvent') + + assert bus.locks.get_lock_for_event(bus, event) is bus.event_bus_serial_lock + + event.event_concurrency = EventConcurrencyMode.GLOBAL_SERIAL + assert bus.locks.get_lock_for_event(bus, event) is bus.event_global_serial_lock + + event.event_concurrency = EventConcurrencyMode.PARALLEL + assert bus.locks.get_lock_for_event(bus, event) is None + + await bus.stop() + + +async def test_lock_manager_get_lock_for_event_handler_modes() -> None: + bus = EventBus(name='LockManagerHandlerModesBus', event_handler_concurrency='serial') + event = BaseEvent(event_type='LockHandlerModesEvent') + + assert event._get_handler_lock() is None # pyright: ignore[reportPrivateUsage] + event_result = event.event_result_update(handler=lambda _event: None) + handler_lock = bus.locks.get_lock_for_event_handler(bus, event, event_result) + assert handler_lock is not None + assert event._get_handler_lock() is handler_lock # pyright: ignore[reportPrivateUsage] + + event.event_handler_concurrency = 
EventHandlerConcurrencyMode.PARALLEL + handler_result = event.event_result_update(handler=lambda _event: None) + assert bus.locks.get_lock_for_event_handler(bus, event, handler_result) is None + + await bus.stop() + + +async def test_run_with_event_lock_and_handler_lock_respect_parallel_bypass() -> None: + bus = EventBus( + name='LockManagerBypassBus', + event_concurrency='bus-serial', + event_handler_concurrency='serial', + ) + + parallel_event = BaseEvent( + event_type='ParallelBypassEvent', + event_concurrency=EventConcurrencyMode.PARALLEL, + event_handler_concurrency=EventHandlerConcurrencyMode.PARALLEL, + ) + + async with bus.locks._run_with_event_lock(bus, parallel_event): + assert bus.event_bus_serial_lock.locked() is False + + parallel_result = parallel_event.event_result_update(handler=lambda _event: None) + async with bus.locks._run_with_handler_lock(bus, parallel_event, parallel_result): + assert parallel_event._get_handler_lock() is None # pyright: ignore[reportPrivateUsage] + + serial_event = BaseEvent(event_type='SerialAcquireEvent') + async with bus.locks._run_with_event_lock(bus, serial_event): + assert bus.event_bus_serial_lock.locked() is True + + serial_result = serial_event.event_result_update(handler=lambda _event: None) + async with bus.locks._run_with_handler_lock(bus, serial_event, serial_result): + lock = serial_event._get_handler_lock() # pyright: ignore[reportPrivateUsage] + assert lock is not None + assert lock.locked() is True + + lock = serial_event._get_handler_lock() # pyright: ignore[reportPrivateUsage] + assert lock is not None + assert lock.locked() is False + + await bus.stop() + + +async def test_handler_dispatch_context_marks_and_restores_lock_depth() -> None: + bus = EventBus(name='LockDispatchContextBus', event_concurrency='bus-serial') + event = BaseEvent(event_type='DispatchContextEvent') + + lock = bus.locks.get_lock_for_event(bus, event) + assert lock is not None + assert lock._depth() == 0 # pyright: 
ignore[reportPrivateUsage] + + with bus.locks._run_with_handler_dispatch_context(bus, event): + assert lock._depth() == 1 # pyright: ignore[reportPrivateUsage] + async with lock: + assert lock._depth() == 2 # pyright: ignore[reportPrivateUsage] + assert lock._depth() == 1 # pyright: ignore[reportPrivateUsage] + + assert lock._depth() == 0 # pyright: ignore[reportPrivateUsage] + await bus.stop() + + +async def test_reentrant_lock_releases_and_reraises_on_exception() -> None: + lock = ReentrantLock() + + with pytest.raises(RuntimeError, match='reentrant-lock-error'): + async with lock: + assert lock.locked() is True + raise RuntimeError('reentrant-lock-error') + + assert lock.locked() is False + + +async def test_run_with_event_lock_releases_and_reraises_on_exception() -> None: + bus = EventBus(name='LockManagerEventErrorBus', event_concurrency='bus-serial') + event = BaseEvent(event_type='EventLockErrorEvent') + + lock = bus.locks.get_lock_for_event(bus, event) + assert lock is not None + assert lock.locked() is False + + with pytest.raises(RuntimeError, match='event-lock-error'): + async with bus.locks._run_with_event_lock(bus, event): + assert lock.locked() is True + raise RuntimeError('event-lock-error') + + assert lock.locked() is False + await bus.stop() + + +async def test_run_with_handler_lock_releases_and_reraises_on_exception() -> None: + bus = EventBus(name='LockManagerHandlerErrorBus', event_handler_concurrency='serial') + event = BaseEvent(event_type='HandlerLockErrorEvent') + event_result = event.event_result_update(handler=lambda _event: None) + + with pytest.raises(RuntimeError, match='handler-lock-error'): + async with bus.locks._run_with_handler_lock(bus, event, event_result): + lock = event._get_handler_lock() # pyright: ignore[reportPrivateUsage] + assert lock is not None + assert lock.locked() is True + raise RuntimeError('handler-lock-error') + + lock = event._get_handler_lock() # pyright: ignore[reportPrivateUsage] + assert lock is not None + 
assert lock.locked() is False + await bus.stop() + + +async def test_handler_dispatch_context_restores_depth_and_reraises_on_exception() -> None: + bus = EventBus(name='LockDispatchContextErrorBus', event_concurrency='bus-serial') + event = BaseEvent(event_type='DispatchContextErrorEvent') + + lock = bus.locks.get_lock_for_event(bus, event) + assert lock is not None + assert lock._depth() == 0 # pyright: ignore[reportPrivateUsage] + + with pytest.raises(RuntimeError, match='dispatch-context-error'): + with bus.locks._run_with_handler_dispatch_context(bus, event): + assert lock._depth() == 1 # pyright: ignore[reportPrivateUsage] + raise RuntimeError('dispatch-context-error') + + assert lock._depth() == 0 # pyright: ignore[reportPrivateUsage] + await bus.stop() diff --git a/tests/test_middleware.py b/tests/test_middleware.py new file mode 100644 index 0000000..67968e3 --- /dev/null +++ b/tests/test_middleware.py @@ -0,0 +1,971 @@ +# pyright: basic +"""Consolidated middleware tests.""" + +from __future__ import annotations + +import asyncio +import json +import multiprocessing +import sqlite3 +from collections.abc import Sequence +from datetime import datetime +from pathlib import Path +from typing import Any + +import pytest +from pydantic import Field + +from bubus import BaseEvent, EventBus, SQLiteHistoryMirrorMiddleware +from bubus.middlewares import ( + AutoErrorEventMiddleware, + AutoHandlerChangeEventMiddleware, + AutoReturnEventMiddleware, + BusHandlerRegisteredEvent, + BusHandlerUnregisteredEvent, + EventBusMiddleware, + LoggerEventBusMiddleware, + OtelTracingMiddleware, + WALEventBusMiddleware, +) + + +class UserActionEvent(BaseEvent): + """Test event model for user actions.""" + + action: str + user_id: str + metadata: dict[str, Any] = Field(default_factory=dict) + + +class TestWALPersistence: + """Test automatic WAL persistence functionality""" + + async def test_wal_persistence_handler(self, tmp_path): + """Test that events are automatically persisted to 
WAL file""" + # Create event bus with WAL path + wal_path = tmp_path / 'test_events.jsonl' + bus = EventBus(name='TestBus', middlewares=[WALEventBusMiddleware(wal_path)]) + + try: + # Emit some events + events = [] + for i in range(3): + event = UserActionEvent(action=f'action_{i}', user_id=f'user_{i}') + emitted_event = bus.emit(event) + completed_event = await emitted_event + events.append(completed_event) + + # Wait for processing + await bus.wait_until_idle() + + # Check WAL file exists + assert wal_path.exists() + + # Read and verify JSONL content + lines = wal_path.read_text().strip().split('\n') + assert len(lines) == 3 + + # Parse each line as JSON + for i, line in enumerate(lines): + data = json.loads(line) + assert data['action'] == f'action_{i}' + assert data['user_id'] == f'user_{i}' + assert data['event_type'] == 'UserActionEvent' + assert isinstance(data['event_created_at'], str) + datetime.fromisoformat(data['event_created_at']) + + finally: + await bus.stop() + + async def test_wal_persistence_creates_parent_dir(self, tmp_path): + """Test that WAL persistence creates parent directories""" + # Use a nested path that doesn't exist + wal_path = tmp_path / 'nested' / 'dirs' / 'events.jsonl' + assert not wal_path.parent.exists() + + # Create event bus + bus = EventBus(name='TestBus', middlewares=[WALEventBusMiddleware(wal_path)]) + + try: + # Emit an event + event = bus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + await event + + # Wait for WAL persistence to complete + await bus.wait_until_idle() + + # Parent directory should be created after event is processed + assert wal_path.parent.exists() + + # Check file was created + assert wal_path.exists() + finally: + await bus.stop() + + async def test_wal_persistence_skips_incomplete_events(self, tmp_path): + """Test that WAL persistence only writes completed events""" + wal_path = tmp_path / 'incomplete_events.jsonl' + bus = EventBus(name='TestBus', 
middlewares=[WALEventBusMiddleware(wal_path)]) + + try: + # Add a slow handler that will delay completion + async def slow_handler(event: BaseEvent) -> str: + await asyncio.sleep(0.1) + return 'slow' + + bus.on('UserActionEvent', slow_handler) + + # Emit event without waiting + event = bus.emit(UserActionEvent(action='test', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + + # Check file doesn't exist yet (event not completed) + assert not wal_path.exists() + + # Wait for completion + event = await event + await bus.wait_until_idle() + + # Now file should exist with completed event + assert wal_path.exists() + lines = wal_path.read_text().strip().split('\n') + assert len(lines) == 1 + data = json.loads(lines[0]) + assert data['event_type'] == 'UserActionEvent' + # The WAL should have been written after the event completed + assert data['action'] == 'test' + assert data['user_id'] == 'e692b6cb-ae63-773b-8557-3218f7ce5ced' + + finally: + await bus.stop() + + +class TestHandlerMiddleware: + """Tests for the handler middleware pipeline.""" + + async def test_middleware_constructor_auto_inits_classes_and_keeps_hook_order(self): + calls: list[str] = [] + + class ClassMiddleware(EventBusMiddleware): + def __init__(self): + calls.append('class:init') + + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + if status == 'started': + calls.append('class:started') + elif status == 'completed': + calls.append('class:completed') + + class InstanceMiddleware(EventBusMiddleware): + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + if status == 'started': + calls.append('instance:started') + elif status == 'completed': + calls.append('instance:completed') + + instance_middleware = InstanceMiddleware() + bus = EventBus(middlewares=[ClassMiddleware, instance_middleware]) + bus.on('UserActionEvent', lambda event: 'ok') + + try: + completed = await 
bus.emit(UserActionEvent(action='test', user_id='d592b79f-4dd9-7d4d-88b1-0d0db7d84fcf')) + await bus.wait_until_idle() + + assert isinstance(bus.middlewares[0], ClassMiddleware) + assert bus.middlewares[1] is instance_middleware + assert completed.event_results + assert calls == [ + 'class:init', + 'class:started', + 'instance:started', + 'class:completed', + 'instance:completed', + ] + finally: + await bus.stop() + + async def test_middleware_wraps_successful_handler(self): + calls: list[tuple[str, str]] = [] + + class TrackingMiddleware(EventBusMiddleware): + def __init__(self, call_log: list[tuple[str, str]]): + self.call_log = call_log + + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + if status == 'started': + self.call_log.append(('before', event_result.status)) + elif status == 'completed': + self.call_log.append(('after', event_result.status)) + + bus = EventBus(middlewares=[TrackingMiddleware(calls)]) + bus.on('UserActionEvent', lambda event: 'ok') + + try: + completed = await bus.emit(UserActionEvent(action='test', user_id='d592b79f-4dd9-7d4d-88b1-0d0db7d84fcf')) + await bus.wait_until_idle() + + assert completed.event_results + result = next(iter(completed.event_results.values())) + assert result.status == 'completed' + assert result.result == 'ok' + assert calls == [('before', 'started'), ('after', 'completed')] + finally: + await bus.stop() + + async def test_middleware_observes_handler_errors(self): + observations: list[tuple[str, str]] = [] + + class ErrorMiddleware(EventBusMiddleware): + def __init__(self, log: list[tuple[str, str]]): + self.log = log + + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + if status == 'started': + self.log.append(('before', event_result.status)) + elif status == 'completed' and event_result.error: + self.log.append(('error', type(event_result.error).__name__)) + + async def failing_handler(event: BaseEvent) -> 
None: + raise ValueError('boom') + + bus = EventBus(middlewares=[ErrorMiddleware(observations)]) + bus.on('UserActionEvent', failing_handler) + + try: + event = await bus.emit(UserActionEvent(action='fail', user_id='16599da2-bf1d-7a5d-8e6e-ba01f216519a')) + await bus.wait_until_idle() + + result = next(iter(event.event_results.values())) + assert result.status == 'error' + assert isinstance(result.error, ValueError) + assert observations == [('before', 'started'), ('error', 'ValueError')] + finally: + await bus.stop() + + async def test_middleware_hook_statuses_never_emit_error(self): + observed_event_statuses: list[str] = [] + observed_result_hook_statuses: list[str] = [] + observed_result_runtime_statuses: list[str] = [] + + class LifecycleMiddleware(EventBusMiddleware): + async def on_event_change(self, eventbus: EventBus, event: BaseEvent, status): + observed_event_statuses.append(str(status)) + + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + observed_result_hook_statuses.append(str(status)) + observed_result_runtime_statuses.append(event_result.status) + + async def failing_handler(event: BaseEvent) -> None: + raise ValueError('boom') + + bus = EventBus(middlewares=[LifecycleMiddleware()], max_history_size=None) + bus.on(UserActionEvent, failing_handler) + + try: + event = await bus.emit(UserActionEvent(action='fail', user_id='2a312e4d-3035-7883-86b9-578ce47046b2')) + await bus.wait_until_idle() + + result = next(iter(event.event_results.values())) + assert result.status == 'error' + assert isinstance(result.error, ValueError) + + assert observed_event_statuses == ['pending', 'started', 'completed'] + assert observed_result_hook_statuses == ['pending', 'started', 'completed'] + assert observed_result_runtime_statuses[-1] == 'error' + assert 'error' not in observed_event_statuses + assert 'error' not in observed_result_hook_statuses + finally: + await bus.stop() + + async def 
test_middleware_event_status_order_is_deterministic_for_each_event(self): + event_statuses_by_id: dict[str, list[str]] = {} + + class LifecycleMiddleware(EventBusMiddleware): + async def on_event_change(self, eventbus: EventBus, event: BaseEvent, status): + event_statuses_by_id.setdefault(event.event_id, []).append(str(status)) + + async def handler(_event: UserActionEvent) -> str: + await asyncio.sleep(0) + return 'ok' + + bus = EventBus(middlewares=[LifecycleMiddleware()], max_history_size=None) + bus.on(UserActionEvent, handler) + + batch_count = 5 + events_per_batch = 50 + try: + for batch_index in range(batch_count): + events = [ + bus.emit( + UserActionEvent( + action='deterministic', + user_id=f'u-{batch_index}-{event_index}', + ) + ) + for event_index in range(events_per_batch) + ] + await asyncio.gather(*events) + await bus.wait_until_idle() + + for event in events: + assert event_statuses_by_id[event.event_id] == ['pending', 'started', 'completed'] + + assert len(event_statuses_by_id) == batch_count * events_per_batch + finally: + await bus.stop() + + async def test_middleware_event_and_result_lifecycle_remains_monotonic_on_timeout(self): + observed_event_statuses: list[str] = [] + observed_result_transitions: list[tuple[str, str, str]] = [] + + class LifecycleMiddleware(EventBusMiddleware): + async def on_event_change(self, eventbus: EventBus, event: BaseEvent, status): + observed_event_statuses.append(str(status)) + + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent, event_result, status): + observed_result_transitions.append((event_result.handler_name, str(status), event_result.status)) + + class TimeoutLifecycleEvent(BaseEvent[str]): + event_timeout: float | None = 0.02 + + async def slow_handler(_event: TimeoutLifecycleEvent) -> str: + await asyncio.sleep(0.05) + return 'slow' + + async def pending_handler(_event: TimeoutLifecycleEvent) -> str: + return 'pending' + + bus = EventBus(middlewares=[LifecycleMiddleware()]) + 
bus.on(TimeoutLifecycleEvent, slow_handler) + bus.on(TimeoutLifecycleEvent, pending_handler) + + try: + await bus.emit(TimeoutLifecycleEvent()) + await bus.wait_until_idle() + + assert observed_event_statuses == ['pending', 'started', 'completed'] + + slow_transitions = [entry for entry in observed_result_transitions if entry[0].endswith('slow_handler')] + pending_transitions = [entry for entry in observed_result_transitions if entry[0].endswith('pending_handler')] + + assert [status for _, status, _ in slow_transitions] == ['pending', 'started', 'completed'] + assert [result_status for _, _, result_status in slow_transitions] == ['pending', 'started', 'error'] + + assert [status for _, status, _ in pending_transitions] == ['pending', 'completed'] + assert [result_status for _, _, result_status in pending_transitions] == ['pending', 'error'] + finally: + await bus.stop() + + async def test_auto_error_event_middleware_emits_and_guards_recursion(self): + seen: list[tuple[str, str]] = [] + bus = EventBus(middlewares=[AutoErrorEventMiddleware()]) + + class UserActionEventErrorEvent(BaseEvent[None]): + error_type: str + + async def fail_handler(event: BaseEvent) -> None: + raise ValueError('boom') + + async def fail_auto(event: UserActionEventErrorEvent) -> None: + raise RuntimeError('nested') + + async def on_auto_error_event(event: UserActionEventErrorEvent) -> None: + seen.append((event.event_type, event.error_type)) + + bus.on(UserActionEvent, fail_handler) + bus.on(UserActionEventErrorEvent, on_auto_error_event) + bus.on(UserActionEventErrorEvent, fail_auto) + + try: + await bus.emit(UserActionEvent(action='fail', user_id='e692b6cb-ae63-773b-8557-3218f7ce5ced')) + await bus.wait_until_idle() + assert seen == [('UserActionEventErrorEvent', 'ValueError')] + assert await bus.find('UserActionEventErrorEventErrorEvent', past=True, future=False) is None + finally: + await bus.stop() + + async def test_auto_return_event_middleware_emits_and_guards_recursion(self): + seen: 
list[tuple[str, Any]] = [] + bus = EventBus(middlewares=[AutoReturnEventMiddleware()]) + + class UserActionEventResultEvent(BaseEvent[None]): + data: Any + + async def ok_handler(event: BaseEvent) -> int: + return 123 + + async def non_none_auto(event: UserActionEventResultEvent) -> str: + return 'nested' + + async def on_auto_result_event(event: UserActionEventResultEvent) -> None: + seen.append((event.event_type, event.data)) + + bus.on(UserActionEvent, ok_handler) + bus.on(UserActionEventResultEvent, on_auto_result_event) + bus.on(UserActionEventResultEvent, non_none_auto) + + try: + await bus.emit(UserActionEvent(action='ok', user_id='2a312e4d-3035-7883-86b9-578ce47046b2')) + await bus.wait_until_idle() + assert seen == [('UserActionEventResultEvent', 123)] + assert await bus.find('UserActionEventResultEventResultEvent', past=True, future=False) is None + finally: + await bus.stop() + + async def test_auto_return_event_middleware_skips_baseevent_returns(self): + seen: list[tuple[str, Any]] = [] + bus = EventBus(middlewares=[AutoReturnEventMiddleware()]) + + class UserActionEventResultEvent(BaseEvent[None]): + data: Any + + class ReturnedEvent(BaseEvent): + value: int + + async def returns_event(event: BaseEvent) -> ReturnedEvent: + return ReturnedEvent(value=7) + + async def on_auto_result_event(event: UserActionEventResultEvent) -> None: + seen.append((event.event_type, event.data)) + + bus.on(UserActionEvent, returns_event) + bus.on(UserActionEventResultEvent, on_auto_result_event) + + try: + parent = await bus.emit(UserActionEvent(action='ok', user_id='6eb8a717-e19d-728b-8905-97f7e20c002e')) + await bus.wait_until_idle() + assert len(parent.event_results) == 1 + only_result = next(iter(parent.event_results.values())) + assert isinstance(only_result.result, ReturnedEvent) + assert seen == [] + assert await bus.find('UserActionEventResultEvent', past=True, future=False) is None + finally: + await bus.stop() + + async def 
test_auto_handler_change_event_middleware_emits_registered_and_unregistered(self): + registered: list[BusHandlerRegisteredEvent] = [] + unregistered: list[BusHandlerUnregisteredEvent] = [] + bus = EventBus(middlewares=[AutoHandlerChangeEventMiddleware()]) + + def on_registered(event: BusHandlerRegisteredEvent) -> None: + registered.append(event) + + def on_unregistered(event: BusHandlerUnregisteredEvent) -> None: + unregistered.append(event) + + bus.on(BusHandlerRegisteredEvent, on_registered) + bus.on(BusHandlerUnregisteredEvent, on_unregistered) + + async def target_handler(event: UserActionEvent) -> None: + return None + + try: + handler_entry = bus.on(UserActionEvent, target_handler) + await bus.wait_until_idle() + + bus.off(UserActionEvent, handler_entry) + await bus.wait_until_idle() + + matching_registered = [event for event in registered if event.handler.id == handler_entry.id] + matching_unregistered = [event for event in unregistered if event.handler.id == handler_entry.id] + assert matching_registered + assert matching_unregistered + assert matching_registered[-1].handler.eventbus_id == bus.id + assert matching_registered[-1].handler.eventbus_name == bus.name + assert matching_registered[-1].handler.event_pattern == 'UserActionEvent' + assert matching_unregistered[-1].handler.event_pattern == 'UserActionEvent' + finally: + await bus.stop() + + async def test_otel_tracing_middleware_tracks_parent_event_and_handler_spans(self): + class RootEvent(BaseEvent): + pass + + class ChildEvent(BaseEvent): + pass + + class FakeSpan: + def __init__(self, name: str, context: Any = None): + self.name = name + self.context = context + self.attrs: dict[str, Any] = {} + self.errors: list[str] = [] + self.ended = False + + def set_attribute(self, key: str, value: Any): + self.attrs[key] = value + + def record_exception(self, error: BaseException): + self.errors.append(type(error).__name__) + + def end(self): + self.ended = True + + class FakeTracer: + def __init__(self): + 
self.spans: list[FakeSpan] = [] + + def start_span(self, name: str, context: Any = None): + span = FakeSpan(name, context=context) + self.spans.append(span) + return span + + class FakeTraceAPI: + @staticmethod + def set_span_in_context(span: FakeSpan): + return {'parent': span} + + tracer = FakeTracer() + bus = EventBus(middlewares=[OtelTracingMiddleware(tracer=tracer, trace_api=FakeTraceAPI())], name='TraceBus') + + async def child_handler(event: ChildEvent) -> None: + return None + + async def root_handler(event: RootEvent) -> None: + child = event.event_bus.emit(ChildEvent()) + await child + + bus.on(RootEvent, root_handler) + bus.on(ChildEvent, child_handler) + + try: + await bus.emit(RootEvent()) + await bus.wait_until_idle() + + root_event_span = next(span for span in tracer.spans if span.attrs.get('bubus.event_type') == 'RootEvent') + root_handler_span = next( + span for span in tracer.spans if str(span.attrs.get('bubus.handler_name', '')).endswith('root_handler') + ) + child_event_span = next(span for span in tracer.spans if span.attrs.get('bubus.event_type') == 'ChildEvent') + child_handler_span = next( + span for span in tracer.spans if str(span.attrs.get('bubus.handler_name', '')).endswith('child_handler') + ) + + assert root_handler_span.context['parent'] is root_event_span + assert child_event_span.context['parent'] is root_handler_span + assert child_handler_span.context['parent'] is child_event_span + assert root_event_span.attrs.get('bubus.bus_name') == bus.label + assert root_handler_span.attrs.get('bubus.bus_name') == bus.label + assert child_event_span.attrs.get('bubus.bus_name') == bus.label + assert child_handler_span.attrs.get('bubus.bus_name') == bus.label + assert all(span.ended for span in tracer.spans) + finally: + await bus.stop() + + +class TestSQLiteHistoryMirror: + async def test_sqlite_history_persists_events_and_results(self, tmp_path): + db_path = tmp_path / 'events.sqlite' + middleware = SQLiteHistoryMirrorMiddleware(db_path) + 
bus = EventBus(middlewares=[middleware]) + + async def handler(event: BaseEvent) -> str: + return 'ok' + + bus.on('UserActionEvent', handler) + + try: + await bus.emit(UserActionEvent(action='ping', user_id='b57fcb67-faeb-7a56-8907-116d8cbb1472')) + await bus.wait_until_idle() + + conn = sqlite3.connect(db_path) + events = conn.execute('SELECT phase, event_status FROM events_log ORDER BY id').fetchall() + assert [phase for phase, _ in events] == ['pending', 'started', 'completed'] + assert [status for _, status in events] == ['pending', 'started', 'completed'] + + result_rows = conn.execute( + 'SELECT phase, status, result_repr, error_repr FROM event_results_log ORDER BY id' + ).fetchall() + conn.close() + + assert [phase for phase, *_ in result_rows] == ['pending', 'started', 'completed'] + assert [status for _, status, *_ in result_rows] == ['pending', 'started', 'completed'] + assert result_rows[-1][2] == "'ok'" + assert result_rows[-1][3] is None + finally: + await bus.stop() + + def test_sqlite_history_close_is_idempotent(self, tmp_path): + db_path = tmp_path / 'events.sqlite' + middleware = SQLiteHistoryMirrorMiddleware(db_path) + + middleware.close() + middleware.close() + + with pytest.raises(sqlite3.ProgrammingError): + middleware._conn.execute('SELECT 1') + + +class TestLoggerMiddleware: + async def test_logger_middleware_writes_file(self, tmp_path): + log_path = tmp_path / 'events.log' + bus = EventBus(middlewares=[LoggerEventBusMiddleware(log_path)]) + + async def handler(event: BaseEvent) -> str: + return 'logged' + + bus.on('UserActionEvent', handler) + + try: + await bus.emit(UserActionEvent(action='log', user_id='1d4087d7-e791-702f-80b9-0fb09b726bc6')) + await bus.wait_until_idle() + + assert log_path.exists() + contents = log_path.read_text().strip().splitlines() + assert contents + assert 'UserActionEvent' in contents[-1] + finally: + await bus.stop() + + async def test_logger_middleware_stdout_only(self, capsys): + bus = 
EventBus(middlewares=[LoggerEventBusMiddleware()]) + + async def handler(event: BaseEvent) -> str: + return 'stdout' + + bus.on('UserActionEvent', handler) + + try: + await bus.emit(UserActionEvent(action='log', user_id='1d4087d7-e791-702f-80b9-0fb09b726bc6')) + await bus.wait_until_idle() + + captured = capsys.readouterr() + assert 'UserActionEvent' in captured.out + assert 'stdout' not in captured.err + finally: + await bus.stop() + + async def test_sqlite_history_records_errors(self, tmp_path): + db_path = tmp_path / 'events.sqlite' + middleware = SQLiteHistoryMirrorMiddleware(db_path) + bus = EventBus(middlewares=[middleware]) + + async def failing_handler(event: BaseEvent) -> None: + raise RuntimeError('handler boom') + + bus.on('UserActionEvent', failing_handler) + + try: + await bus.emit(UserActionEvent(action='boom', user_id='28536f9b-4031-7f53-827f-98c24c1b3839')) + await bus.wait_until_idle() + + conn = sqlite3.connect(db_path) + result_rows = conn.execute('SELECT phase, status, error_repr FROM event_results_log ORDER BY id').fetchall() + events = conn.execute('SELECT phase, event_status FROM events_log ORDER BY id').fetchall() + conn.close() + + assert [phase for phase, *_ in result_rows] == ['pending', 'started', 'completed'] + assert [status for _, status, *_ in result_rows] == ['pending', 'started', 'error'] + assert 'RuntimeError' in result_rows[-1][2] + assert [phase for phase, _ in events] == ['pending', 'started', 'completed'] + assert [status for _, status in events] == ['pending', 'started', 'completed'] + finally: + await bus.stop() + + +class MiddlewarePatternEvent(BaseEvent[str]): + pass + + +async def _flush_hook_tasks(ticks: int = 6) -> None: + for _ in range(ticks): + await asyncio.sleep(0) + + +async def test_middleware_hooks_cover_class_string_and_wildcard_patterns() -> None: + event_statuses_by_id: dict[str, list[str]] = {} + result_hook_statuses_by_handler: dict[str, list[str]] = {} + result_runtime_statuses_by_handler: dict[str, 
list[str]] = {} + handler_change_records: list[dict[str, Any]] = [] + + class RecordingMiddleware(EventBusMiddleware): + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status) -> None: + event_statuses_by_id.setdefault(event.event_id, []).append(str(status)) + + async def on_event_result_change(self, eventbus: EventBus, event: BaseEvent[Any], event_result, status) -> None: + handler_id = event_result.handler_id + result_hook_statuses_by_handler.setdefault(handler_id, []).append(str(status)) + result_runtime_statuses_by_handler.setdefault(handler_id, []).append(event_result.status) + + async def on_bus_handlers_change(self, eventbus: EventBus, handler, registered: bool) -> None: + handler_change_records.append( + { + 'handler_id': handler.id, + 'event_pattern': handler.event_pattern, + 'registered': registered, + 'eventbus_id': handler.eventbus_id, + } + ) + + bus = EventBus(name='MiddlewareHookPatternParityBus', middlewares=[RecordingMiddleware()]) + + async def class_handler(event: MiddlewarePatternEvent) -> str: + return 'class-result' + + async def string_handler(event: BaseEvent[Any]) -> str: + assert event.event_type == 'MiddlewarePatternEvent' + return 'string-result' + + async def wildcard_handler(event: BaseEvent[Any]) -> str: + return f'wildcard:{event.event_type}' + + class_entry = bus.on(MiddlewarePatternEvent, class_handler) + string_entry = bus.on('MiddlewarePatternEvent', string_handler) + wildcard_entry = bus.on('*', wildcard_handler) + + try: + await _flush_hook_tasks() + + registered_records = [record for record in handler_change_records if record['registered'] is True] + assert len(registered_records) == 3 + + expected_patterns = { + class_entry.id: 'MiddlewarePatternEvent', + string_entry.id: 'MiddlewarePatternEvent', + wildcard_entry.id: '*', + } + assert {record['handler_id'] for record in registered_records} == set(expected_patterns) + for record in registered_records: + assert record['event_pattern'] == 
expected_patterns[record['handler_id']] + assert record['eventbus_id'] == bus.id + + event = await bus.emit(MiddlewarePatternEvent(event_timeout=0.2)) + await bus.wait_until_idle() + + assert str(event.event_status) == 'completed' + assert event_statuses_by_id[event.event_id] == ['pending', 'started', 'completed'] + assert set(event.event_results) == set(expected_patterns) + + for handler_id in expected_patterns: + assert result_hook_statuses_by_handler[handler_id] == ['pending', 'started', 'completed'] + assert result_runtime_statuses_by_handler[handler_id] == ['pending', 'started', 'completed'] + + assert event.event_results[class_entry.id].result == 'class-result' + assert event.event_results[string_entry.id].result == 'string-result' + assert event.event_results[wildcard_entry.id].result == 'wildcard:MiddlewarePatternEvent' + + bus.off(MiddlewarePatternEvent, class_entry) + bus.off('MiddlewarePatternEvent', string_entry) + bus.off('*', wildcard_entry) + await _flush_hook_tasks() + + unregistered_records = [record for record in handler_change_records if record['registered'] is False] + assert len(unregistered_records) == 3 + assert {record['handler_id'] for record in unregistered_records} == set(expected_patterns) + for record in unregistered_records: + assert record['event_pattern'] == expected_patterns[record['handler_id']] + finally: + await bus.stop() + + +async def test_middleware_hooks_cover_string_and_wildcard_patterns_for_ad_hoc_baseevent() -> None: + event_statuses_by_id: dict[str, list[str]] = {} + result_hook_statuses_by_handler: dict[str, list[str]] = {} + result_runtime_statuses_by_handler: dict[str, list[str]] = {} + handler_change_records: list[dict[str, Any]] = [] + + class RecordingMiddleware(EventBusMiddleware): + async def on_event_change(self, eventbus: EventBus, event: BaseEvent[Any], status) -> None: + event_statuses_by_id.setdefault(event.event_id, []).append(str(status)) + + async def on_event_result_change(self, eventbus: EventBus, 
event: BaseEvent[Any], event_result, status) -> None: + handler_id = event_result.handler_id + result_hook_statuses_by_handler.setdefault(handler_id, []).append(str(status)) + result_runtime_statuses_by_handler.setdefault(handler_id, []).append(event_result.status) + + async def on_bus_handlers_change(self, eventbus: EventBus, handler, registered: bool) -> None: + handler_change_records.append( + { + 'handler_id': handler.id, + 'event_pattern': handler.event_pattern, + 'registered': registered, + 'eventbus_id': handler.eventbus_id, + } + ) + + bus = EventBus(name='MiddlewareHookStringPatternParityBus', middlewares=[RecordingMiddleware()]) + ad_hoc_event_type = 'AdHocPatternEvent' + + async def string_handler(event: BaseEvent[Any]) -> str: + assert event.event_type == ad_hoc_event_type + return f'string:{event.event_type}' + + async def wildcard_handler(event: BaseEvent[Any]) -> str: + return f'wildcard:{event.event_type}' + + string_entry = bus.on(ad_hoc_event_type, string_handler) + wildcard_entry = bus.on('*', wildcard_handler) + + try: + await _flush_hook_tasks() + + registered_records = [record for record in handler_change_records if record['registered'] is True] + assert len(registered_records) == 2 + + expected_patterns = { + string_entry.id: ad_hoc_event_type, + wildcard_entry.id: '*', + } + assert {record['handler_id'] for record in registered_records} == set(expected_patterns) + for record in registered_records: + assert record['event_pattern'] == expected_patterns[record['handler_id']] + assert record['eventbus_id'] == bus.id + + event = await bus.emit(BaseEvent(event_type=ad_hoc_event_type, event_timeout=0.2)) + await bus.wait_until_idle() + + assert str(event.event_status) == 'completed' + assert event_statuses_by_id[event.event_id] == ['pending', 'started', 'completed'] + assert set(event.event_results) == set(expected_patterns) + + for handler_id in expected_patterns: + assert result_hook_statuses_by_handler[handler_id] == ['pending', 'started', 
'completed'] + assert result_runtime_statuses_by_handler[handler_id] == ['pending', 'started', 'completed'] + + assert event.event_results[string_entry.id].result == f'string:{ad_hoc_event_type}' + assert event.event_results[wildcard_entry.id].result == f'wildcard:{ad_hoc_event_type}' + + bus.off(ad_hoc_event_type, string_entry) + bus.off('*', wildcard_entry) + await _flush_hook_tasks() + + unregistered_records = [record for record in handler_change_records if record['registered'] is False] + assert len(unregistered_records) == 2 + assert {record['handler_id'] for record in unregistered_records} == set(expected_patterns) + for record in unregistered_records: + assert record['event_pattern'] == expected_patterns[record['handler_id']] + finally: + await bus.stop() + + +class HistoryTestEvent(BaseEvent): + """Event for verifying middleware mirroring behaviour.""" + + payload: str + should_fail: bool = False + + +def _summarize_history(history: dict[str, BaseEvent[Any]]) -> list[dict[str, Any]]: + """Collect comparable information about events stored in history.""" + summary: list[dict[str, Any]] = [] + for event in history.values(): + handler_results = [ + { + 'handler_name': result.handler_name.rsplit('.', 1)[-1], + 'status': result.status, + 'result': result.result, + 'error': repr(result.error) if result.error else None, + } + for result in sorted(event.event_results.values(), key=lambda r: r.handler_name) + ] + summary.append( + { + 'event_type': event.event_type, + 'event_status': event.event_status, + 'event_path_length': len(event.event_path), + 'children': sorted(child.event_type for child in event.event_children), + 'handler_results': handler_results, + } + ) + return sorted(summary, key=lambda record: record['event_type']) + + +async def _run_scenario( + *, + middlewares: Sequence[Any] = (), + should_fail: bool = False, +) -> list[dict[str, Any]]: + """Execute a simple scenario and return the history summary.""" + bus = 
EventBus(middlewares=list(middlewares)) + + async def ok_handler(event: HistoryTestEvent) -> str: + return f'ok-{event.payload}' + + async def conditional_handler(event: HistoryTestEvent) -> str: + if event.should_fail: + raise RuntimeError('boom') + return 'fine' + + bus.on('HistoryTestEvent', ok_handler) + bus.on('HistoryTestEvent', conditional_handler) + + try: + await bus.emit(HistoryTestEvent(payload='payload', should_fail=should_fail)) + await bus.wait_until_idle() + finally: + summary = _summarize_history(bus.event_history) + await bus.stop() + + return summary + + +@pytest.mark.asyncio +async def test_sqlite_mirror_matches_inmemory_success(tmp_path: Path) -> None: + db_path = tmp_path / 'events_success.sqlite' + in_memory_result = await _run_scenario() + sqlite_result = await _run_scenario(middlewares=[SQLiteHistoryMirrorMiddleware(db_path)]) + assert sqlite_result == in_memory_result + + conn = sqlite3.connect(db_path) + event_phases = conn.execute('SELECT phase FROM events_log ORDER BY id').fetchall() + conn.close() + assert {phase for (phase,) in event_phases} >= {'pending', 'started', 'completed'} + + +@pytest.mark.asyncio +async def test_sqlite_mirror_matches_inmemory_error(tmp_path: Path) -> None: + db_path = tmp_path / 'events_error.sqlite' + in_memory_result = await _run_scenario(should_fail=True) + sqlite_result = await _run_scenario( + middlewares=[SQLiteHistoryMirrorMiddleware(db_path)], + should_fail=True, + ) + assert sqlite_result == in_memory_result + + conn = sqlite3.connect(db_path) + phases = conn.execute('SELECT DISTINCT phase FROM events_log').fetchall() + conn.close() + assert {phase for (phase,) in phases} >= {'pending', 'started', 'completed'} + + +def _worker_dispatch(db_path: str, worker_id: int) -> None: + """Process entrypoint for exercising concurrent writes.""" + + async def run() -> None: + middleware = SQLiteHistoryMirrorMiddleware(Path(db_path)) + bus = EventBus(name=f'WorkerBus{worker_id}', middlewares=[middleware]) + + 
async def handler(event: HistoryTestEvent) -> str: + return f'worker-{worker_id}' + + bus.on('HistoryTestEvent', handler) + try: + await bus.emit(HistoryTestEvent(payload=f'worker-{worker_id}')) + await bus.wait_until_idle() + finally: + await bus.stop() + + asyncio.run(run()) + + +def test_sqlite_mirror_supports_concurrent_processes(tmp_path: Path) -> None: + db_path = tmp_path / 'shared_history.sqlite' + ctx = multiprocessing.get_context('spawn') + processes = [ctx.Process(target=_worker_dispatch, args=(str(db_path), idx)) for idx in range(3)] + for proc in processes: + proc.start() + for proc in processes: + proc.join(timeout=20) + assert proc.exitcode == 0 + + conn = sqlite3.connect(db_path) + events = conn.execute('SELECT DISTINCT eventbus_name FROM events_log').fetchall() + results_count = conn.execute('SELECT COUNT(*) FROM event_results_log').fetchone() + conn.close() + + bus_labels = {name for (name,) in events} + assert len(bus_labels) == 3 + for idx in range(3): + assert any(label.startswith(f'WorkerBus{idx}#') and len(label.rsplit('#', 1)[-1]) == 4 for label in bus_labels) + assert results_count is not None + # Each worker records pending/started/completed for its single handler + assert results_count[0] == 9 diff --git a/tests/test_name_conflict_gc.py b/tests/test_name_conflict_gc.py deleted file mode 100644 index 0e42655..0000000 --- a/tests/test_name_conflict_gc.py +++ /dev/null @@ -1,158 +0,0 @@ -# pyright: basic -""" -Tests for EventBus name conflict resolution with garbage collection. - -Tests that EventBus instances that would be garbage collected don't cause -name conflicts when creating new instances with the same name. 
-""" - -import weakref - -import pytest - -from bubus import EventBus - - -class TestNameConflictGC: - """Test EventBus name conflict resolution with garbage collection""" - - def test_name_conflict_with_live_reference(self): - """Test that name conflict generates a warning and auto-generates a unique name""" - # Create an EventBus with a specific name - bus1 = EventBus(name='TestBus') - - # Try to create another with the same name - should warn and auto-generate unique name - with pytest.warns(UserWarning, match='EventBus with name "TestBus" already exists'): - bus2 = EventBus(name='TestBus') - - # The second bus should have a unique name - assert bus2.name.startswith('TestBus_') - assert bus2.name != 'TestBus' - assert len(bus2.name) == len('TestBus_') + 8 # Original name + underscore + 8 char suffix - - def test_name_no_conflict_after_deletion(self): - """Test that name conflict is NOT raised after the existing bus is deleted""" - # Create an EventBus with a specific name - bus1 = EventBus(name='TestBus') - - # Delete the reference - del bus1 - - # Creating another with the same name should work since the first one has no references - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' - - def test_name_no_conflict_with_no_reference(self): - """Test that name conflict is NOT raised when the existing bus was never assigned""" - # Create an EventBus with a specific name but don't keep a reference - EventBus(name='TestBus') # No assignment, will be garbage collected - - # Creating another with the same name should work since the first one is gone - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' - - def test_name_conflict_with_weak_reference_only(self): - """Test that name conflict is NOT raised when only weak references exist""" - # Create an EventBus and keep only a weak reference - bus1 = EventBus(name='TestBus') - weak_ref = weakref.ref(bus1) - - # Verify the weak reference works - assert weak_ref() is bus1 - - # Delete the strong 
reference - del bus1 - - # At this point, only the weak reference exists (and the WeakSet reference) - # Creating another with the same name should work - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' - - # The weak reference should now return None - assert weak_ref() is None - - def test_multiple_buses_with_gc(self): - """Test multiple EventBus instances with some being garbage collected""" - # Create multiple buses, some with strong refs, some without - bus1 = EventBus(name='Bus1') - EventBus(name='Bus2') # Will be GC'd - bus3 = EventBus(name='Bus3') - EventBus(name='Bus4') # Will be GC'd - - # Should be able to create new buses with the names of GC'd buses - bus2_new = EventBus(name='Bus2') - bus4_new = EventBus(name='Bus4') - - # But not with names of buses that still exist - they get auto-generated names - with pytest.warns(UserWarning, match='EventBus with name "Bus1" already exists'): - bus1_conflict = EventBus(name='Bus1') - assert bus1_conflict.name.startswith('Bus1_') - - with pytest.warns(UserWarning, match='EventBus with name "Bus3" already exists'): - bus3_conflict = EventBus(name='Bus3') - assert bus3_conflict.name.startswith('Bus3_') - - @pytest.mark.asyncio - async def test_name_conflict_after_stop_and_clear(self): - """Test that clearing an EventBus allows reusing its name""" - # Create an EventBus - bus1 = EventBus(name='TestBus') - - # Stop and clear it - await bus1.stop(clear=True) - - # Delete the reference to allow garbage collection - del bus1 - - # Now we should be able to create a new one with the same name - bus2 = EventBus(name='TestBus') - assert bus2.name == 'TestBus' - - def test_weakset_behavior(self): - """Test that the WeakSet properly tracks EventBus instances""" - initial_count = len(EventBus.all_instances) - - # Create some buses - bus1 = EventBus(name='WeakTest1') - bus2 = EventBus(name='WeakTest2') - bus3 = EventBus(name='WeakTest3') - - # Check they're tracked - assert len(EventBus.all_instances) == 
initial_count + 3 - - # Delete one - del bus2 - - # The WeakSet should automatically remove it (no gc.collect needed) - # But we need to check the actual buses in the set, not just the count - names = {bus.name for bus in EventBus.all_instances if hasattr(bus, 'name') and bus.name.startswith('WeakTest')} - assert 'WeakTest1' in names - assert 'WeakTest3' in names - # WeakTest2 might still be there until the next iteration - - def test_eventbus_removed_from_weakset(self): - """Test that our implementation removes dead EventBus from WeakSet during conflict check""" - # Create a bus that will be "dead" (no strong references) - EventBus(name='DeadBus') - - # When we try to create a new bus with the same name, the conflict check - # should detect the dead bus and remove it from the WeakSet - bus = EventBus(name='DeadBus') - assert bus.name == 'DeadBus' - - # The dead bus should have been removed from all_instances - names = [b.name for b in EventBus.all_instances if hasattr(b, 'name') and b.name == 'DeadBus'] - assert len(names) == 1 # Only the new one - - def test_concurrent_name_creation(self): - """Test that concurrent creation with same name generates warning and unique name""" - # This tests the edge case where two buses might be created nearly simultaneously - bus1 = EventBus(name='ConcurrentTest') - - # Even if we're in the middle of checking, the second one should get a unique name - with pytest.warns(UserWarning, match='EventBus with name "ConcurrentTest" already exists'): - bus2 = EventBus(name='ConcurrentTest') - - assert bus1.name == 'ConcurrentTest' - assert bus2.name.startswith('ConcurrentTest_') - assert bus2.name != bus1.name diff --git a/tests/test_optional_dependencies.py b/tests/test_optional_dependencies.py new file mode 100644 index 0000000..98cd572 --- /dev/null +++ b/tests/test_optional_dependencies.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +import ast +from pathlib import Path + +_ROOT = Path(__file__).resolve().parents[1] + + 
+def _ast_import_roots(path: Path) -> set[str]: + parsed = ast.parse(path.read_text(encoding='utf-8'), filename=str(path)) + roots: set[str] = set() + for node in ast.walk(parsed): + if isinstance(node, ast.Import): + for alias in node.names: + roots.add(alias.name.split('.')[0]) + elif isinstance(node, ast.ImportFrom) and node.module is not None: + roots.add(node.module.split('.')[0]) + return roots + + +def test_bridge_modules_do_not_eager_import_optional_packages() -> None: + bridge_modules = { + _ROOT / 'bubus' / 'bridge_postgres.py': {'asyncpg'}, + _ROOT / 'bubus' / 'bridge_nats.py': {'nats'}, + _ROOT / 'bubus' / 'bridge_redis.py': {'redis'}, + } + + for path, forbidden_roots in bridge_modules.items(): + imported_roots = _ast_import_roots(path) + assert forbidden_roots.isdisjoint(imported_roots), f'{path} eagerly imports {forbidden_roots & imported_roots}' diff --git a/tests/test_semaphores.py b/tests/test_retry.py similarity index 70% rename from tests/test_semaphores.py rename to tests/test_retry.py index 02cdcfd..43a9f49 100644 --- a/tests/test_semaphores.py +++ b/tests/test_retry.py @@ -1,12 +1,15 @@ import asyncio +import inspect import multiprocessing import os +import re import time from typing import Any import pytest -from bubus.helpers import retry +import bubus.retry as retry_helpers +from bubus.retry import retry def worker_acquire_semaphore( @@ -23,7 +26,7 @@ def worker_acquire_semaphore( # Define a function decorated with multiprocess semaphore @retry( - retries=0, + max_attempts=1, timeout=10, semaphore_limit=3, # Only 3 concurrent processes allowed semaphore_name='test_multiprocess_sem', @@ -71,7 +74,7 @@ def worker_that_dies( try: @retry( - retries=0, + max_attempts=1, timeout=10, semaphore_limit=2, # Only 2 concurrent processes semaphore_name='test_death_sem', @@ -87,7 +90,7 @@ async def semaphore_protected_function(): await asyncio.sleep(die_after) # Simulate unexpected death - os._exit(1) # Hard exit without cleanup # type: 
ignore[attr-defined] + os._exit(1) # Hard exit without cleanup asyncio.run(semaphore_protected_function()) @@ -104,7 +107,7 @@ def worker_death_test_normal( """Worker for death test that uses the same semaphore.""" @retry( - retries=0, + max_attempts=1, timeout=10, semaphore_limit=2, semaphore_name='test_death_sem', @@ -141,7 +144,7 @@ def worker_with_custom_limit( try: @retry( - retries=0, + max_attempts=1, timeout=10, semaphore_limit=semaphore_limit, semaphore_name=semaphore_name, @@ -201,7 +204,7 @@ def test_basic_multiprocess_semaphore(self): p.join(timeout=10) # Collect results - results: list[tuple[str, int, float]] = [] + results: list[tuple[Any, ...]] = [] while not results_queue.empty(): results.append(results_queue.get()) @@ -229,8 +232,12 @@ def test_basic_multiprocess_semaphore(self): # Verify semaphore is actually limiting concurrency # Check that no more than 3 workers held the semaphore simultaneously active_workers: list[int] = [] - # Filter out events that don't have timing information - timed_events: list[tuple[str, int, float]] = [e for e in results if len(e) >= 3 and isinstance(e[2], (int, float))] # type: ignore[arg-type] + # Keep only acquire/release events with numeric timestamps. 
+ timed_events: list[tuple[str, int, float]] = [ + (str(e[0]), int(e[1]), float(e[2])) + for e in results + if len(e) >= 3 and e[0] in ('acquired', 'released') and isinstance(e[2], (int, float)) + ] for event in sorted(timed_events, key=lambda x: x[2]): # Sort all events by time if event[0] == 'acquired': active_workers.append(event[1]) @@ -470,21 +477,19 @@ async def test_semaphore_file_disappears(self): import tempfile from pathlib import Path - from bubus import helpers - # Use a custom directory for this test test_dir = Path(tempfile.gettempdir()) / 'test_semaphore_disappear' test_dir.mkdir(exist_ok=True) - original_dir = helpers.MULTIPROCESS_SEMAPHORE_DIR + original_dir = retry_helpers.MULTIPROCESS_SEMAPHORE_DIR try: # Monkey patch the directory for this test - helpers.MULTIPROCESS_SEMAPHORE_DIR = test_dir + retry_helpers.MULTIPROCESS_SEMAPHORE_DIR = test_dir acquired_count = 0 @retry( - retries=0, + max_attempts=1, timeout=5, semaphore_limit=2, semaphore_name='disappearing_sem', @@ -512,9 +517,7 @@ async def test_function(): finally: # Restore original directory - from bubus import helpers - - helpers.MULTIPROCESS_SEMAPHORE_DIR = original_dir + retry_helpers.MULTIPROCESS_SEMAPHORE_DIR = original_dir # Clean up test directory shutil.rmtree(test_dir, ignore_errors=True) @@ -527,7 +530,7 @@ async def test_global_scope(self): results: list[tuple[str, int, float]] = [] @retry( - retries=0, + max_attempts=1, timeout=1, semaphore_limit=2, semaphore_scope='global', @@ -561,7 +564,7 @@ def __init__(self): self.results: list[tuple[str, int, float]] = [] @retry( - retries=0, + max_attempts=1, timeout=1, semaphore_limit=1, semaphore_scope='class', @@ -597,10 +600,10 @@ def __init__(self): self.results: list[tuple[str, int, float]] = [] @retry( - retries=0, + max_attempts=1, timeout=1, semaphore_limit=1, - semaphore_scope='self', + semaphore_scope='instance', semaphore_name='test_method', ) async def test_method(self, worker_id: int): @@ -622,272 +625,76 @@ async def 
test_method(self, worker_id: int): ) end_time = time.time() - # Should take ~0.1s (parallel) not ~0.2s (sequential) - assert end_time - start_time < 0.15 - - -class TestRetryWithEventBus: - """Test @retry decorator with EventBus handlers.""" - - async def test_retry_decorator_on_eventbus_handler(self): - """Test that @retry decorator works correctly when applied to EventBus handlers.""" - from bubus import BaseEvent, EventBus - - # Track handler execution details - handler_calls: list[tuple[str, float]] = [] - # results: list[Any] = [] # Unused variable - - class TestEvent(BaseEvent[str]): - """Simple test event.""" - - event_result_type: Any = str - - message: str - - # Create an EventBus - bus = EventBus(name='test_retry_bus') - - # Define a handler with retry decorator - @retry( - retries=2, - wait=0.1, - timeout=1.0, - semaphore_limit=1, - semaphore_scope='global', - ) - async def retrying_handler(event: TestEvent) -> str: - call_time = time.time() - handler_calls.append(('called', call_time)) - - # Fail the first 2 attempts, succeed on the 3rd - if len(handler_calls) < 3: - raise ValueError(f'Attempt {len(handler_calls)} failed') - - return f'Success: {event.message}' - - # Register the handler - bus.on('TestEvent', retrying_handler) - - # Dispatch an event - # start_time = time.time() # Unused variable - event = TestEvent(message='Hello retry!') - completed_event = await bus.dispatch(event) - - # Wait for completion - await bus.wait_until_idle(timeout=5) - - # Check results - assert len(handler_calls) == 3, f'Expected 3 attempts, got {len(handler_calls)}' - - # Verify the handler was retried with appropriate delays - for i in range(1, len(handler_calls)): - delay = handler_calls[i][1] - handler_calls[i - 1][1] - assert delay >= 0.08, f'Retry delay {i} was {delay:.3f}s, expected >= 0.08s' - - # Check that the event completed successfully - assert completed_event.event_status == 'completed' - - # Check the result - handler_result = await 
completed_event.event_result() - assert handler_result == 'Success: Hello retry!' - - await bus.stop() - - async def test_retry_with_semaphore_on_multiple_handlers(self): - """Test @retry decorator with semaphore limiting concurrent handler executions.""" - from bubus import BaseEvent, EventBus - - # Track handler execution - active_handlers: list[int] = [] - max_concurrent = 0 - handler_results: dict[int, list[tuple[str, float]]] = {1: [], 2: [], 3: [], 4: []} - - class WorkEvent(BaseEvent[str]): - """Event that triggers work.""" - - event_result_type: Any = str - - work_id: int - - bus = EventBus(name='test_concurrent_bus', parallel_handlers=True) - - # Create handlers with semaphore limit - async def create_handler(handler_id: int): - @retry( - retries=0, - timeout=5.0, - semaphore_limit=2, # Only 2 handlers can run concurrently - semaphore_name='test_handler_sem', - semaphore_scope='global', - ) - async def limited_handler(event: WorkEvent) -> str: - nonlocal max_concurrent - - # Track entry - active_handlers.append(handler_id) - handler_results[handler_id].append(('started', time.time())) - - # Update max concurrent - current_concurrent = len(active_handlers) - max_concurrent = max(max_concurrent, current_concurrent) - - # Simulate work - await asyncio.sleep(0.2) - - # Track exit - active_handlers.remove(handler_id) - handler_results[handler_id].append(('completed', time.time())) + # Should be closer to parallel execution (~0.1s) than strict serialization (~0.2s). + # Allow overhead from periodic overload checks. 
+ assert end_time - start_time < 0.25 - return f'Handler {handler_id} processed work {event.work_id}' - # Give each handler a unique name - limited_handler.__name__ = f'limited_handler_{handler_id}' - return limited_handler +class TestRetryApiParity: + async def test_defaults_match_typescript(self): + params = inspect.signature(retry).parameters + assert params['max_attempts'].default == 1 + assert params['timeout'].default is None - # Register multiple handlers - for i in range(1, 5): # 4 handlers - handler = await create_handler(i) - bus.on('WorkEvent', handler) - - # Dispatch event (all 4 handlers will try to process it) - event = WorkEvent(work_id=1) - await bus.dispatch(event) - - # Wait for completion - await bus.wait_until_idle(timeout=3) - - # Verify semaphore limited concurrency to 2 - assert max_concurrent <= 2, f'Max concurrent was {max_concurrent}, expected <= 2' - - # Verify all handlers executed - for handler_id in range(1, 5): - assert len(handler_results[handler_id]) == 2, f'Handler {handler_id} should have started and completed' - - # Verify timing - with limit of 2 and 0.2s work, should take ~0.4s total - all_starts = [r[1] for results in handler_results.values() for r in results if r[0] == 'started'] - all_ends = [r[1] for results in handler_results.values() for r in results if r[0] == 'completed'] - - total_time = max(all_ends) - min(all_starts) - assert 0.35 < total_time < 0.6, f'Total execution time was {total_time:.3f}s, expected ~0.4s' - - await bus.stop() - - async def test_retry_timeout_with_eventbus_handler(self): - """Test that retry timeout works correctly with EventBus handlers.""" - from bubus import BaseEvent, EventBus - - class TimeoutEvent(BaseEvent[str]): - """Event for timeout testing.""" - - test_id: str - event_timeout: float | None = 1 - - bus = EventBus(name='test_timeout_bus') - - handler_started = False - # handler_error = None # Unused variable - - async def slow_handler(event: TimeoutEvent) -> str: - nonlocal 
handler_started - handler_started = True - - # This will timeout - await asyncio.sleep(5) - return 'Should not reach here' - - @retry( - retries=0, # No retries - timeout=0.2, # 200ms timeout - ) - async def wrapped_handler(event: TimeoutEvent) -> str: - nonlocal handler_started - handler_started = True - - # This will timeout - await asyncio.sleep(5) - return 'Should not reach here' - - # Register handler - bus.on(TimeoutEvent, slow_handler) - bus.on(TimeoutEvent, wrapped_handler) - - # Dispatch event - event = TimeoutEvent(test_id='timeout-test') - await bus.dispatch(event) - - # Wait for completion - await bus.wait_until_idle(timeout=2) - - # Check that handler started but timed out - assert handler_started, 'Handler should have started' - - # Check event results for timeout error - handler_id = list(event.event_results.keys())[0] - result = event.event_results[handler_id] - - assert result.status == 'error' - assert result.error is not None - assert isinstance(result.error, TimeoutError) - - await bus.stop() - - async def test_retry_with_event_type_filter(self): - """Test retry decorator with specific exception types.""" - from bubus import BaseEvent, EventBus - - class RetryTestEvent(BaseEvent[str]): - """Event for testing retry on specific exceptions.""" + async def test_max_attempts_counts_total_attempts(self): + attempt_count = 0 - event_result_type: Any = str + @retry(max_attempts=3) + async def flaky(): + nonlocal attempt_count + attempt_count += 1 + raise ValueError('always fails') - attempt_limit: int + with pytest.raises(ValueError): + await flaky() - bus = EventBus(name='test_exception_filter_bus') + assert attempt_count == 3 + async def test_retry_on_errors_supports_exception_classes_and_regex(self): attempt_count = 0 @retry( - retries=3, - wait=0.05, - timeout=1.0, - retry_on=(ValueError, RuntimeError), # Only retry these exceptions + max_attempts=4, + retry_after=0.01, + retry_on_errors=[re.compile(r'^ValueError: temporary failure$'), 
RuntimeError], ) - async def selective_retry_handler(event: RetryTestEvent) -> str: + async def flaky(): nonlocal attempt_count attempt_count += 1 + if attempt_count < 3: + raise ValueError('temporary failure') + return 'ok' - if attempt_count == 1: - raise ValueError('This should be retried') - elif attempt_count == 2: - raise RuntimeError('This should also be retried') - elif attempt_count == 3: - raise TypeError('This should NOT be retried') # Not in retry_on + assert await flaky() == 'ok' + assert attempt_count == 3 - return 'Success' + async def test_semaphore_name_callable_uses_call_args_for_keying(self): + active = 0 + max_active = 0 - # Register handler - bus.on('RetryTestEvent', selective_retry_handler) + def _semaphore_key(a: str, b: str) -> str: + return f'{a}-{b}' - # Dispatch event - event = RetryTestEvent(attempt_limit=3) - await bus.dispatch(event) - - # Wait for completion - await bus.wait_until_idle(timeout=2) - - # Should have attempted 3 times (initial + 2 retries for ValueError and RuntimeError) - # Then failed with TypeError which is not retried - assert attempt_count == 3, f'Expected 3 attempts, got {attempt_count}' - - # Check the final error is TypeError - handler_id = list(event.event_results.keys())[0] - result = event.event_results[handler_id] - - assert result.status == 'error' - assert isinstance(result.error, TypeError) - assert 'This should NOT be retried' in str(result.error) - - await bus.stop() + @retry( + max_attempts=1, + semaphore_limit=1, + semaphore_scope='global', + semaphore_name=_semaphore_key, + ) + async def keyed(a: str, b: str): + nonlocal active, max_active + active += 1 + max_active = max(max_active, active) + await asyncio.sleep(0.05) + active -= 1 + + max_active = 0 + await asyncio.gather(keyed('same', 'key'), keyed('same', 'key')) + assert max_active == 1 + + max_active = 0 + await asyncio.gather(keyed('a', '1'), keyed('b', '2')) + assert max_active >= 2 if __name__ == '__main__': diff --git 
a/tests/test_simple_typed_results.py b/tests/test_simple_typed_results.py deleted file mode 100644 index ab21db1..0000000 --- a/tests/test_simple_typed_results.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Simple test for typed event results.""" - -import asyncio -from typing import Any - -from pydantic import BaseModel - -from bubus import BaseEvent, EventBus - - -class MyResult(BaseModel): - value: str - count: int - - -class TypedEvent(BaseEvent[MyResult]): - event_result_type: Any = MyResult - - -async def test_simple_typed_result(): - """Test a simple typed result.""" - bus = EventBus(name='simple_test') - - def handler(event: TypedEvent) -> MyResult: - return MyResult.model_validate({'value': 'hello', 'count': 42}) - - bus.on('TypedEvent', handler) - - event = TypedEvent() - completed_event = await bus.dispatch(event) - - # Check the result was cast to the correct type - handler_ids = list(completed_event.event_results.keys()) - if handler_ids: - result_obj = completed_event.event_results[handler_ids[0]] - print(f'Result type: {type(result_obj.result)}') - print(f'Result: {result_obj.result}') - print(f'Status: {result_obj.status}') - print(f'Result type setting: {result_obj.result_type}') - if result_obj.error: - print(f'Error: {result_obj.error}') - - # Let's test different constructor approaches - import pydantic - - print(f'Pydantic version: {pydantic.VERSION}') - - try: - test_result1 = MyResult.model_validate({'value': 'hello', 'count': 42}) - print(f'Constructor with dict: {test_result1}') - except Exception as e: - print(f'Constructor with dict fails: {e}') - - else: - print('No results found') - - await bus.stop(clear=True) - print('βœ… Simple typed result test completed') - - -if __name__ == '__main__': - asyncio.run(test_simple_typed_result()) diff --git a/tests/test_stress_20k_events.py b/tests/test_stress_20k_events.py deleted file mode 100644 index 3a75be3..0000000 --- a/tests/test_stress_20k_events.py +++ /dev/null @@ -1,242 +0,0 @@ -import asyncio 
-import gc -import os -import time -from typing import Any - -import psutil -import pytest - -from bubus import BaseEvent, EventBus - - -def get_memory_usage_mb(): - """Get current process memory usage in MB""" - process = psutil.Process(os.getpid()) - return process.memory_info().rss / 1024 / 1024 - - -class SimpleEvent(BaseEvent): - """Simple event without Generic for performance testing""" - - pass - - -@pytest.mark.asyncio -async def test_20k_events_with_memory_control(): - """Test processing 20k events with no memory leaks""" - - # Record initial memory - gc.collect() - initial_memory = get_memory_usage_mb() - print(f'\nInitial memory: {initial_memory:.1f} MB') - - # Create EventBus with proper limits (now default) - bus = EventBus(name='ManyEvents') - - print('EventBus settings:') - print(f' max_history_size: {bus.max_history_size}') - print(f' queue maxsize: {bus.event_queue.maxsize if bus.event_queue else "not created"}') - print('Starting event dispatch...') - - processed_count = 0 - - async def handler(event: SimpleEvent) -> None: - nonlocal processed_count - processed_count += 1 - - bus.on('SimpleEvent', handler) - - total_events = 20_000 # Reduced for faster tests - - start_time = time.time() - memory_samples: list[float] = [] - max_memory = initial_memory - - # Dispatch all events as fast as possible - dispatched = 0 - pending_events: list[BaseEvent[Any]] = [] - - while dispatched < total_events: - try: - event = bus.dispatch(SimpleEvent()) - pending_events.append(event) - dispatched += 1 - if dispatched <= 5: - print(f'Dispatched event {dispatched}') - except RuntimeError as e: - if 'EventBus at capacity' in str(e): - # Queue is full, complete the oldest pending events to make room - # Complete first 10 events to free up space - if pending_events: - to_complete = pending_events[:10] - await asyncio.gather(*to_complete) - pending_events = pending_events[10:] - else: - raise - - # Sample memory every 10k events - if dispatched % 10_000 == 0 and 
dispatched > 0: - gc.collect() - current_memory = get_memory_usage_mb() - memory_samples.append(current_memory) - max_memory = max(max_memory, current_memory) - elapsed = time.time() - start_time - rate = dispatched / elapsed - print( - f'Progress: {dispatched:,} events, ' - f'Memory: {current_memory:.1f} MB (+{current_memory - initial_memory:.1f} MB), ' - f'History: {len(bus.event_history)}, ' - f'Rate: {rate:.0f} events/sec' - ) - - # Wait for all remaining events to complete - if pending_events: - await asyncio.gather(*pending_events) - - # Final wait - await bus.wait_until_idle() - - duration = time.time() - start_time - - # Final memory check - gc.collect() - final_memory = get_memory_usage_mb() - memory_growth = final_memory - initial_memory - peak_growth = max_memory - initial_memory - - print('\nFinal Results:') - print(f'Processed: {processed_count:,} events') - print(f'Duration: {duration:.2f} seconds') - print(f'Rate: {processed_count / duration:,.0f} events/sec') - print(f'Initial memory: {initial_memory:.1f} MB') - print(f'Peak memory: {max_memory:.1f} MB (+{peak_growth:.1f} MB)') - print(f'Final memory: {final_memory:.1f} MB (+{memory_growth:.1f} MB)') - - # Debug: Check if event loop is still processing - print(f'DEBUG: Bus is running: {bus._is_running}') # type: ignore - print(f'DEBUG: Runloop task: {bus._runloop_task}') # type: ignore - if bus._runloop_task: # type: ignore - print(f'DEBUG: Runloop task done: {bus._runloop_task.done()}') # type: ignore - - # Safely get event history size without iterating - try: - history_size = len(bus.event_history) - print(f'Event history size: {history_size} (capped at {bus.max_history_size})') - except Exception as e: - print(f'ERROR getting event history size: {type(e).__name__}: {e}') - - # Verify results - print('DEBUG: About to check processed_count assertion...') - assert processed_count == total_events, f'Only processed {processed_count} of {total_events}' - print('DEBUG: About to check duration 
assertion...') - assert duration < 120, f'Took {duration:.2f}s, should be < 120s' # Allow more time for CI - - # Check memory usage stayed reasonable - print('DEBUG: About to check memory assertion...') - assert peak_growth < 100, f'Memory grew by {peak_growth:.1f} MB at peak, indicates memory leak' - - # Check event history is properly limited - print('DEBUG: About to check history size assertions...') - assert bus.max_history_size is not None - assert len(bus.event_history) <= bus.max_history_size, ( - f'Event history has {len(bus.event_history)} events, should be <= {bus.max_history_size}' - ) - - # Explicitly clean up the bus to prevent hanging - print('\nCleaning up EventBus...') - print(f'Before stop - Running: {bus._is_running}') # type: ignore - print(f'Before stop - Runloop task: {bus._runloop_task}') # type: ignore - if bus._runloop_task: # type: ignore - print(f' - Done: {bus._runloop_task.done()}') # type: ignore - print(f' - Cancelled: {bus._runloop_task.cancelled()}') # type: ignore - - await bus.stop(timeout=0, clear=True) - print('EventBus stopped successfully') - - -@pytest.mark.asyncio -async def test_hard_limit_enforcement(): - """Test that hard limit of 100 pending events is enforced""" - bus = EventBus(name='HardLimitTest') - - try: - # Create a slow handler to keep events pending - async def slow_handler(event: SimpleEvent) -> None: - await asyncio.sleep(0.5) # Reduced from 10s to 0.5s - - bus.on('SimpleEvent', slow_handler) - - # Try to dispatch more than 100 events - events_dispatched = 0 - errors = 0 - - for _ in range(150): - try: - bus.dispatch(SimpleEvent()) - events_dispatched += 1 - except RuntimeError as e: - if 'EventBus at capacity' in str(e): - errors += 1 - else: - raise - - print(f'\nDispatched {events_dispatched} events') - print(f'Hit capacity error {errors} times') - - # Should hit the limit - assert events_dispatched <= 100 - assert errors > 0 - - finally: - # Properly stop the bus to clean up pending tasks - await 
bus.stop(timeout=0, clear=True) # Don't wait, just force cleanup - - -@pytest.mark.asyncio -async def test_cleanup_prioritizes_pending(): - """Test that cleanup keeps pending events and removes completed ones""" - bus = EventBus(name='CleanupTest', max_history_size=10) - - try: - # Process some events to completion - completed_events: list[BaseEvent[Any]] = [] - for _ in range(5): - event = bus.dispatch(BaseEvent(event_type='QuickEvent')) - completed_events.append(event) - - await asyncio.gather(*completed_events) - - # Add pending events with slow handler (reduced sleep time) - async def slow_handler(event: BaseEvent) -> None: - if event.event_type == 'SlowEvent': - await asyncio.sleep(0.5) # Reduced from 10s to 0.5s - - bus.on('*', slow_handler) - - pending_events: list[BaseEvent[Any]] = [] - for _ in range(10): - event = bus.dispatch(BaseEvent(event_type='SlowEvent')) - pending_events.append(event) - - # Give them time to start - await asyncio.sleep(0.1) - - # Check history - should prioritize keeping pending events - history_types: dict[str, int] = {} - for event in bus.event_history.values(): - status = event.event_status - history_types[status] = history_types.get(status, 0) + 1 - - print('\nHistory after cleanup:') - print(f' Total: {len(bus.event_history)} (max: {bus.max_history_size})') - print(f' By status: {history_types}') - - # Should have removed completed events to make room for pending - assert bus.max_history_size is not None - assert len(bus.event_history) <= bus.max_history_size - assert history_types.get('pending', 0) + history_types.get('started', 0) >= 5 - - finally: - # Properly stop the bus to clean up pending tasks - await bus.stop(timeout=0, clear=True) # Don't wait, just force cleanup diff --git a/tests/test_typed_event_results.py b/tests/test_typed_event_results.py deleted file mode 100644 index ee9833d..0000000 --- a/tests/test_typed_event_results.py +++ /dev/null @@ -1,306 +0,0 @@ -"""Test typed event results with automatic casting.""" 
- -import asyncio -from typing import Any, assert_type - -from pydantic import BaseModel - -from bubus import BaseEvent, EventBus - - -class ScreenshotEventResult(BaseModel): - screenshot_base64: bytes | None = None - error: str | None = None - - -class ScreenshotEvent(BaseEvent[ScreenshotEventResult]): - screenshot_width: int = 1080 - screenshot_height: int = 900 - - -class StringEvent(BaseEvent[str]): - pass - - -class IntEvent(BaseEvent[int]): - pass - - -async def test_pydantic_model_result_casting(): - """Test that handler results are automatically cast to Pydantic models.""" - print('\n=== Test Pydantic Model Result Casting ===') - - bus = EventBus(name='pydantic_test_bus') - - def screenshot_handler(event: ScreenshotEvent): - # Return a dict that should be cast to ScreenshotEventResult - return {'screenshot_base64': b'fake_screenshot_data', 'error': None} - - bus.on('ScreenshotEvent', screenshot_handler) - - event = ScreenshotEvent(screenshot_width=1920, screenshot_height=1080) - await bus.dispatch(event) - - # Get the result - result = await event.event_result() - - # Verify it was cast to the correct type - assert isinstance(result, ScreenshotEventResult) - assert result.screenshot_base64 == b'fake_screenshot_data' - assert result.error is None - - print(f'βœ… Result correctly cast to {type(result).__name__}: {result}') - await bus.stop(clear=True) - - -async def test_builtin_type_casting(): - """Test that handler results are automatically cast to built-in types.""" - print('\n=== Test Built-in Type Casting ===') - - bus = EventBus(name='builtin_test_bus') - - def string_handler(event: StringEvent): - return '42' # Return a proper string - - def int_handler(event: IntEvent): - return 123 # Return a proper int - - bus.on('StringEvent', string_handler) - bus.on('IntEvent', int_handler) - - # Test string validation - string_event = StringEvent() - await bus.dispatch(string_event) - string_result = await string_event.event_result() - assert 
isinstance(string_result, str) - assert string_result == '42' - print(f'βœ… String "42" validated as str: "{string_result}"') - - # Test int validation - int_event = IntEvent() - await bus.dispatch(int_event) - int_result = await int_event.event_result() - assert isinstance(int_result, int) - assert int_result == 123 - print(f'βœ… Int 123 validated as int: {int_result}') - await bus.stop(clear=True) - - -async def test_casting_failure_handling(): - """Test that casting failures are handled gracefully.""" - print('\n=== Test Casting Failure Handling ===') - - bus = EventBus(name='failure_test_bus') - - def bad_handler(event: IntEvent): - return 'not_a_number' # Should fail validation as int - - bus.on('IntEvent', bad_handler) - - event = IntEvent() - await bus.dispatch(event) - - # The event should complete but the result should be an error - try: - await event.event_results_by_handler_id(raise_if_any=False) - handler_id = list(event.event_results.keys())[0] - event_result = event.event_results[handler_id] - except Exception: - # If event_results_by_handler_id raises, get the result directly - handler_id = list(event.event_results.keys())[0] - event_result = event.event_results[handler_id] - - assert event_result.status == 'error' - assert isinstance(event_result.error, ValueError) - assert 'expected event_result_type' in str(event_result.error) - - print(f'βœ… Casting failure handled: {event_result.error}') - await bus.stop(clear=True) - - -async def test_no_casting_when_no_result_type(): - """Test that events without result_type work normally.""" - print('\n=== Test No Casting When No Result Type ===') - - bus = EventBus(name='normal_test_bus') - - class NormalEvent(BaseEvent[None]): - pass # No event_result_type specified - - def normal_handler(event: NormalEvent): - return {'raw': 'data'} - - bus.on('NormalEvent', normal_handler) - - event = NormalEvent() - await bus.dispatch(event) - - result = await event.event_result() - - # Should remain as original dict, no 
casting - assert isinstance(result, dict) - assert result == {'raw': 'data'} - - print(f'βœ… No casting applied: {result}') - await bus.stop(clear=True) - - -async def test_result_type_stored_in_event_result(): - """Test that result_type is stored in EventResult for inspection.""" - print('\n=== Test Result Type Stored in EventResult ===') - - bus = EventBus(name='storage_test_bus') - - def handler(event: StringEvent): - return '123' # Already a string, will validate successfully - - bus.on('StringEvent', handler) - - event = StringEvent() - await bus.dispatch(event) - - # Check that result_type is accessible - handler_id = list(event.event_results.keys())[0] - event_result = event.event_results[handler_id] - - assert event_result.result_type is str - assert isinstance(event_result.result, str) - assert event_result.result == '123' - - print(f'βœ… Result type stored: {event_result.result_type}') - await bus.stop(clear=True) - - -async def test_expect_type_inference(): - """Test that EventBus.expect() returns the correct typed event.""" - print('\n=== Test Expect Type Inference ===') - - bus = EventBus(name='expect_type_test_bus') - - class CustomResult(BaseModel): - data: str - - class SpecificEvent(BaseEvent[CustomResult]): - request_id: str = 'test123' - - # Start a task that will dispatch the event - async def dispatch_later(): - await asyncio.sleep(0.01) - bus.dispatch(SpecificEvent(request_id='req456')) - - dispatch_task = asyncio.create_task(dispatch_later()) - - # Use expect with the event class - should return SpecificEvent type - expected_event = await bus.expect(SpecificEvent, timeout=1.0) - - # Type checking - this should work without cast - assert_type(expected_event, SpecificEvent) # Verify type is SpecificEvent, not BaseEvent[Any] - - # Runtime check - assert type(expected_event) is SpecificEvent - assert expected_event.request_id == 'req456' - - # Test with filters - type should still be preserved - async def dispatch_multiple(): - await 
asyncio.sleep(0.01) - bus.dispatch(SpecificEvent(request_id='wrong')) - bus.dispatch(SpecificEvent(request_id='correct')) - - dispatch_task2 = asyncio.create_task(dispatch_multiple()) - - # Expect with include filter - filtered_event = await bus.expect( - SpecificEvent, - include=lambda e: e.request_id == 'correct', # type: ignore - timeout=1.0, - ) - - assert_type(filtered_event, SpecificEvent) # Should still be SpecificEvent - assert type(filtered_event) is SpecificEvent - assert filtered_event.request_id == 'correct' - - # Test with string event type - returns BaseEvent[Any] - async def dispatch_string_event(): - await asyncio.sleep(0.01) - bus.dispatch(BaseEvent(event_type='StringEvent')) - - dispatch_task3 = asyncio.create_task(dispatch_string_event()) - string_event = await bus.expect('StringEvent', timeout=1.0) - - assert_type(string_event, BaseEvent[Any]) # Should be BaseEvent[Any] - assert string_event.event_type == 'StringEvent' - - await dispatch_task - await dispatch_task2 - await dispatch_task3 - - print(f'βœ… Expect correctly preserved type: {type(expected_event).__name__}') - print(f'βœ… Expect with filter preserved type: {type(filtered_event).__name__}') - print('βœ… No cast() needed for expect() - type inference works!') - await bus.stop(clear=True) - - -async def test_dispatch_type_inference(): - """Test that EventBus.dispatch() returns the same type as its input.""" - print('\n=== Test Dispatch Type Inference ===') - - bus = EventBus(name='type_inference_test_bus') - - class CustomResult(BaseModel): - value: str - - class CustomEvent(BaseEvent[CustomResult]): - pass - - # Create an event instance - original_event = CustomEvent() - - # Dispatch should return the same type WITHOUT needing cast() - dispatched_event = bus.dispatch(original_event) - - # Type checking - this should work without cast - assert_type(dispatched_event, CustomEvent) # Should be CustomEvent, not BaseEvent[Any] - - # Runtime check - assert type(dispatched_event) is CustomEvent 
- assert dispatched_event is original_event # Should be the same object - - # The returned event should be fully typed - async def handler(event: CustomEvent) -> CustomResult: - return CustomResult(value='test') - - bus.on('CustomEvent', handler) - - # We should be able to use it without casting - result = await dispatched_event.event_result() - - # Type checking for the result - assert_type(result, CustomResult | None) # Should be CustomResult | None - - # Test that we can access type-specific attributes without cast - # This would fail type checking if dispatched_event was BaseEvent[Any] - assert dispatched_event.event_type == 'CustomEvent' - - # Demonstrate the improvement - no cast needed! - # Before: event = cast(CustomEvent, bus.dispatch(CustomEvent())) - # After: event = bus.dispatch(CustomEvent()) # Type is preserved! - - print(f'βœ… Dispatch correctly preserved type: {type(dispatched_event).__name__}') - print('βœ… No cast() needed - type inference works!') - await bus.stop(clear=True) - - -async def test_typed_event_results(): - """Run all typed event result tests.""" - await test_pydantic_model_result_casting() - await test_builtin_type_casting() - await test_casting_failure_handling() - await test_no_casting_when_no_result_type() - await test_result_type_stored_in_event_result() - await test_expect_type_inference() - await test_dispatch_type_inference() - print('\nπŸŽ‰ All typed event result tests passed!') - - -if __name__ == '__main__': - asyncio.run(test_pydantic_model_result_casting()) diff --git a/tests/test_typing_contracts.py b/tests/test_typing_contracts.py new file mode 100644 index 0000000..4acea1c --- /dev/null +++ b/tests/test_typing_contracts.py @@ -0,0 +1,142 @@ +"""Static typing contracts for the event execution pipeline. + +This module is never imported by runtime code. It exists so strict type checks +(`pyright`, `ty`) fail if the end-to-end event handler pipeline is weakened. 
+""" + +from typing import Any, assert_type + +from pydantic import BaseModel + +from bubus.base_event import BaseEvent, EventResult +from bubus.event_bus import EventBus +from bubus.event_handler import EventHandler + + +class TypeContractResult(BaseModel): + message: str + + +class TypeContractEvent(BaseEvent[TypeContractResult]): + pass + + +async def _contract_handler(event: TypeContractEvent) -> TypeContractResult: + return TypeContractResult(message=event.event_type) + + +async def _assert_pipeline_types(bus: EventBus, event: TypeContractEvent) -> None: + handler_entry = bus.on(TypeContractEvent, _contract_handler) + assert_type(handler_entry, EventHandler) + + dispatched_event = bus.emit(event) + assert_type(dispatched_event, TypeContractEvent) + + typed_pending_result = dispatched_event.event_result_update(handler_entry, eventbus=bus, status='pending') + assert_type(typed_pending_result, EventResult[TypeContractResult]) + result_run_value = await typed_pending_result.run_handler(dispatched_event, eventbus=bus, timeout=event.event_timeout) + assert_type(result_run_value, TypeContractResult | BaseEvent[Any] | None) + assert_type(typed_pending_result.result, TypeContractResult | BaseEvent[Any] | None) + + emitted_event = bus.emit(TypeContractEvent()) + assert_type(emitted_event, TypeContractEvent) + completed_event = await emitted_event.event_completed() + assert_type(completed_event, TypeContractEvent) + + first_result = await completed_event.first() + assert_type(first_result, TypeContractResult | None) + + aggregated_result = await completed_event.event_result() + assert_type(aggregated_result, TypeContractResult | None) + + all_values = await completed_event.event_results_list() + assert_type(all_values, list[TypeContractResult | None]) + for handler_result in completed_event.event_results.values(): + assert_type(handler_result, EventResult[TypeContractResult]) + + +def test_typing_contracts_module_loads() -> None: + """Runtime no-op so this file is a 
valid pytest module.""" + assert callable(_assert_pipeline_types) + + +# Consolidated from tests/test_handler_registration_typing.py + +"""Static typing contracts for EventBus.on overload behavior. + +This file is for static type checking only (pyright/ty), not runtime pytest execution. +""" + +# pyright: strict, reportUnnecessaryTypeIgnoreComment=true + +from typing import TYPE_CHECKING + +from bubus.base_event import BaseEvent +from bubus.event_bus import EventBus + + +class _SomeEventClass(BaseEvent[str]): + pass + + +class _OtherEventClass(BaseEvent[str]): + pass + + +class _EventTypeA(BaseEvent[int]): + field_a: int = 1234 + + +class _EventTypeB(BaseEvent[int]): + field_b: int = 5678 + + +class _EventTypeSubclassOfA(_EventTypeA): + field_sub: float = 123.123 + + +def _some_handler(event: _SomeEventClass) -> str: + return 'ok' + + +def _base_handler(event: BaseEvent[Any]) -> str: + return 'ok' + + +def _other_handler(event: _OtherEventClass) -> str: + return 'ok' + + +def _handler_for_a(event: _EventTypeA) -> int: + return event.field_a + + +def _handler_for_specific_subclass(event: _EventTypeSubclassOfA) -> int: + return int(event.field_sub) + + +if TYPE_CHECKING: + _bus = EventBus() + + # Class pattern should preserve strict subclass typing. + _class_entry = _bus.on(_SomeEventClass, _some_handler) + assert_type(_class_entry, EventHandler) + + # String pattern is intentionally looser: BaseEvent handlers and subclass handlers are both accepted. 
+ _string_base_entry = _bus.on('SomeEventClass', _base_handler) + assert_type(_string_base_entry, EventHandler) + _string_subclass_entry = _bus.on('SomeEventClass', _some_handler) + assert_type(_string_subclass_entry, EventHandler) + + # Expected static type errors: + # 1) class pattern should reject a mismatched event subclass handler + _bus.on(_SomeEventClass, _other_handler) # pyright: ignore[reportCallIssue, reportArgumentType] # ty: ignore[no-matching-overload] + + # Variance contracts for class patterns: + # 2) unrelated class pattern should reject handler expecting a different event class + _bus.on(_EventTypeB, _handler_for_a) # type: ignore + # 3) subclass pattern accepts base-class handler (contravariant safe) + _subclass_ok = _bus.on(_EventTypeSubclassOfA, _handler_for_a) + assert_type(_subclass_ok, EventHandler) + # 4) base-class pattern rejects subclass-only handler + _bus.on(_EventTypeA, _handler_for_specific_subclass) # type: ignore diff --git a/typings/uuid_extensions/__init__.pyi b/typings/uuid_extensions/__init__.pyi new file mode 100644 index 0000000..8338c68 --- /dev/null +++ b/typings/uuid_extensions/__init__.pyi @@ -0,0 +1 @@ +def uuid7str() -> str: ... diff --git a/ui/README.md b/ui/README.md new file mode 100644 index 0000000..c9e50ad --- /dev/null +++ b/ui/README.md @@ -0,0 +1,39 @@ +# bubus Monitoring Dashboard UI + +Minimal FastAPI Web UI application that reads the `events_log` and `event_results_log` tables produced by the `SQLiteHistoryMirrorMiddleware` and exposes them over HTTP/WebSocket for live monitoring by an administrator / developer. +For local debugging, this middleware-backed history is the most complete source because it includes lifecycle snapshots and handler result metadata. 
+ +## Quick start + +```bash +git clone https://github.com/pirate/bbus.git +cd bbus +uv venv +uv pip install fastapi 'uvicorn[standard]' +``` + +```bash +# generate and save a live stream of test events (creates/appends to ./events.sqlite) +export EVENT_HISTORY_DB=./events.sqlite +uv run python -m ui.test_events & +``` + +```bash +# run the UI backend server and then open the UI in your browser +uv run uvicorn ui.main:app --reload +open http://localhost:8000 +``` + +You should now see on [http://localhost:8000](http://localhost:8000) a simple dashboard that shows recent events and handler results in real-time (via WebSocket). + +Replace `events.sqlite` with any db matching that schema to use in other codebases. + +## Endpoints + +- `GET /events?limit=20` – latest events (JSON) +- `GET /results?limit=20` – latest handler results (JSON) +- `GET /meta` – database path + table readiness flags +- `GET /` – minimal HTML dashboard +- `WS /ws/events` – pushes new rows as they arrive (`{"events": [...], "results": [...]}`) + +This app is intentionally small so you can vibecode-extend it with additional metrics, authentication, or richer UI as needed. 
diff --git a/ui/__init__.py b/ui/__init__.py new file mode 100644 index 0000000..908674f --- /dev/null +++ b/ui/__init__.py @@ -0,0 +1,18 @@ +"""Minimal FastAPI app for monitoring bubus SQLite event history.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from .main import app as app + +__all__ = ['app'] + + +def __getattr__(name: str) -> Any: + if name != 'app': + raise AttributeError(name) + from .main import app + + return app diff --git a/ui/config.py b/ui/config.py new file mode 100644 index 0000000..b4165f2 --- /dev/null +++ b/ui/config.py @@ -0,0 +1,19 @@ +"""Configuration helpers for the monitoring app.""" + +import os +from pathlib import Path + +DEFAULT_DB_PATH = Path(os.getenv('EVENT_HISTORY_DB', 'events.sqlite')) + + +def resolve_db_path() -> Path: + """ + Resolve the path to the SQLite history database. + + The path can be overridden via the EVENT_HISTORY_DB environment variable. + """ + db_path = Path(os.getenv('EVENT_HISTORY_DB', DEFAULT_DB_PATH)) + if not db_path.is_absolute(): + # Resolve relative to repository root (parent directory of ui) + db_path = Path(__file__).resolve().parent.parent / db_path + return db_path diff --git a/ui/db.py b/ui/db.py new file mode 100644 index 0000000..30d3cc4 --- /dev/null +++ b/ui/db.py @@ -0,0 +1,146 @@ +"""Async helpers for reading the SQLite event history.""" + +from __future__ import annotations + +import asyncio +import sqlite3 +from dataclasses import dataclass +from typing import Any + +from .config import resolve_db_path + + +def _connect() -> sqlite3.Connection: + conn = sqlite3.connect(resolve_db_path(), check_same_thread=False) + conn.row_factory = sqlite3.Row + return conn + + +def _table_exists(conn: sqlite3.Connection, table_name: str) -> bool: + row = conn.execute( + "SELECT 1 FROM sqlite_master WHERE type='table' AND name=? 
LIMIT 1", + (table_name,), + ).fetchone() + return row is not None + + +@dataclass +class HistorySchemaStatus: + events_table_exists: bool + results_table_exists: bool + + +async def fetch_schema_status() -> HistorySchemaStatus: + return await asyncio.to_thread(_fetch_schema_status_sync) + + +def _fetch_schema_status_sync() -> HistorySchemaStatus: + conn = _connect() + try: + return HistorySchemaStatus( + events_table_exists=_table_exists(conn, 'events_log'), + results_table_exists=_table_exists(conn, 'event_results_log'), + ) + finally: + conn.close() + + +async def fetch_events(limit: int = 50) -> list[dict[str, Any]]: + return await asyncio.to_thread(_fetch_events_sync, limit) + + +def _fetch_events_sync(limit: int) -> list[dict[str, Any]]: + conn = _connect() + try: + if not _table_exists(conn, 'events_log'): + return [] + rows = conn.execute( + """ + SELECT id, event_id, event_type, event_status, eventbus_id, eventbus_name, phase, event_json, inserted_at + FROM events_log + ORDER BY id DESC + LIMIT ? + """, + (limit,), + ).fetchall() + return [dict(row) for row in rows] + finally: + conn.close() + + +async def fetch_results(limit: int = 50) -> list[dict[str, Any]]: + return await asyncio.to_thread(_fetch_results_sync, limit) + + +def _fetch_results_sync(limit: int) -> list[dict[str, Any]]: + conn = _connect() + try: + if not _table_exists(conn, 'event_results_log'): + return [] + rows = conn.execute( + """ + SELECT id, event_id, event_result_id, handler_name, status, phase, result_repr, error_repr, + eventbus_id, eventbus_name, event_type, event_result_json, inserted_at + FROM event_results_log + ORDER BY id DESC + LIMIT ? 
+ """, + (limit,), + ).fetchall() + return [dict(row) for row in rows] + finally: + conn.close() + + +@dataclass +class HistoryStreamState: + last_event_id: int = 0 + last_result_id: int = 0 + + +async def stream_new_rows(state: HistoryStreamState) -> dict[str, list[dict[str, Any]]]: + """Return new rows added since the last call.""" + updates = await asyncio.to_thread(_stream_new_rows_sync, state) + return updates + + +def _stream_new_rows_sync(state: HistoryStreamState) -> dict[str, list[dict[str, Any]]]: + conn = _connect() + try: + events: list[sqlite3.Row] = [] + results: list[sqlite3.Row] = [] + + if _table_exists(conn, 'events_log'): + events = conn.execute( + """ + SELECT id, event_id, event_type, event_status, eventbus_id, eventbus_name, phase, event_json, inserted_at + FROM events_log + WHERE id > ? + ORDER BY id ASC + """, + (state.last_event_id,), + ).fetchall() + + if _table_exists(conn, 'event_results_log'): + results = conn.execute( + """ + SELECT id, event_id, event_result_id, handler_name, status, phase, result_repr, error_repr, + eventbus_id, eventbus_name, event_type, event_result_json, inserted_at + FROM event_results_log + WHERE id > ? 
+ ORDER BY id ASC + """, + (state.last_result_id,), + ).fetchall() + + if events: + state.last_event_id = int(events[-1]['id']) + if results: + state.last_result_id = int(results[-1]['id']) + + return { + 'events': [dict(row) for row in events], + 'results': [dict(row) for row in results], + } + finally: + conn.close() diff --git a/ui/main.py b/ui/main.py new file mode 100644 index 0000000..03d7e68 --- /dev/null +++ b/ui/main.py @@ -0,0 +1,610 @@ +from __future__ import annotations + +import asyncio +import importlib +import json +from datetime import datetime +from typing import Annotated, Any + +try: + _fastapi = importlib.import_module('fastapi') + _fastapi_responses = importlib.import_module('fastapi.responses') +except ModuleNotFoundError as exc: # pragma: no cover - optional UI dependency + raise ModuleNotFoundError("Install 'fastapi' to run the bubus UI module.") from exc + +FastAPI = getattr(_fastapi, 'FastAPI') +Query = getattr(_fastapi, 'Query') +WebSocket = getattr(_fastapi, 'WebSocket') +WebSocketDisconnect = getattr(_fastapi, 'WebSocketDisconnect') +HTMLResponse = getattr(_fastapi_responses, 'HTMLResponse') +JSONResponse = getattr(_fastapi_responses, 'JSONResponse') + +from . 
import db +from .config import resolve_db_path + +app = FastAPI(title='bubus event monitor', version='0.1.0') + + +def _format_timestamp(value: str | None) -> str | None: + if not value: + return None + # SQLite timestamp string -> ISO 8601 + try: + return datetime.fromisoformat(value.replace('Z', '+00:00')).isoformat() + except ValueError: + return value + + +async def _fetch_events(limit: int) -> list[dict[str, Any]]: + rows = await db.fetch_events(limit) + for row in rows: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + return rows + + +async def _fetch_results(limit: int) -> list[dict[str, Any]]: + rows = await db.fetch_results(limit) + for row in rows: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + return rows + + +def _parse_nonnegative_int(value: str | None, default: int = 0) -> int: + if value is None: + return default + try: + parsed = int(value) + except (TypeError, ValueError): + return default + return max(0, parsed) + + +@app.get('/', response_class=HTMLResponse) +async def index() -> str: + return """ + + + + + bubus Event Monitor + + + +
+

bubus Event Monitor

+
+ Database: + connecting… + +
+
+
+ +
+ + + +
+
+
+ + + + """ + + +@app.get('/events') +async def list_events(limit: Annotated[int, Query(ge=1, le=200)] = 20) -> JSONResponse: + rows = await _fetch_events(limit) + return JSONResponse(rows) + + +@app.get('/results') +async def list_results(limit: Annotated[int, Query(ge=1, le=200)] = 20) -> JSONResponse: + rows = await _fetch_results(limit) + return JSONResponse(rows) + + +@app.get('/meta') +async def meta() -> dict[str, Any]: + db_path = resolve_db_path() + exists = db_path.exists() + schema = await db.fetch_schema_status() + return { + 'db_path': str(db_path), + 'db_exists': exists, + 'events_table_exists': schema.events_table_exists, + 'results_table_exists': schema.results_table_exists, + } + + +@app.websocket('/ws/events') +async def websocket_events(socket: WebSocket) -> None: + await socket.accept() + state = db.HistoryStreamState( + last_event_id=_parse_nonnegative_int(socket.query_params.get('since_event_id')), + last_result_id=_parse_nonnegative_int(socket.query_params.get('since_result_id')), + ) + try: + while True: + updates = await db.stream_new_rows(state) + if updates['events'] or updates['results']: + for key in ('events', 'results'): + for row in updates[key]: + row['inserted_at'] = _format_timestamp(row.get('inserted_at')) + await socket.send_text(json.dumps(updates)) + await asyncio.sleep(1.0) + except WebSocketDisconnect: + return + except Exception as exc: # pragma: no cover - surface to client + await socket.send_text(json.dumps({'error': str(exc)})) + await asyncio.sleep(0.5) diff --git a/ui/test_events.py b/ui/test_events.py new file mode 100644 index 0000000..fcac0d1 --- /dev/null +++ b/ui/test_events.py @@ -0,0 +1,171 @@ +"""Utility script to generate auto events for the monitor app.""" + +from __future__ import annotations + +import argparse +import asyncio +import random +import string +import sys +from collections.abc import Sequence +from pathlib import Path + +from bubus import BaseEvent, EventBus, SQLiteHistoryMirrorMiddleware + +if 
__package__ in (None, ''): + repo_root = Path(__file__).resolve().parents[1] + if str(repo_root) not in sys.path: + sys.path.insert(0, str(repo_root)) + from ui.config import resolve_db_path +else: + from .config import resolve_db_path + + +class RandomTestEvent(BaseEvent): + abc_payload_field: str + xyz_category_field: str + route_hint: str | None = None + + +class FollowUpEvent(BaseEvent): + abc_parent_payload_field: str + xyz_detail_field: str + depth: int + + +class AuditTrailEvent(BaseEvent): + source_event_id: str + handler_name: str + message: str + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description='Generate random events for the bubus monitor.') + parser.add_argument('--min-delay', type=float, default=0.2, help='Minimum delay between root events (seconds).') + parser.add_argument('--max-delay', type=float, default=1.0, help='Maximum delay between root events (seconds).') + parser.add_argument('--error-rate', type=float, default=0.2, help='Fraction of handlers that should raise an error.') + parser.add_argument('--child-rate', type=float, default=0.4, help='Probability of dispatching follow-up events.') + parser.add_argument('--audit-rate', type=float, default=0.5, help='Probability of emitting audit trail events.') + parser.add_argument('--max-depth', type=int, default=2, help='Maximum nested follow-up depth.') + parser.add_argument('--burst-size', type=int, default=4, help='Number of root events per burst.') + parser.add_argument('--categories', nargs='*', default=['alpha', 'beta', 'gamma'], help='Event categories to sample.') + parser.add_argument('--concurrent', type=int, default=3, help='Number of concurrent root event producers.') + parser.add_argument('--events', type=int, default=0, help='Optional count. 
0 = run forever.') + return parser.parse_args() + + +def _random_text(length: int = 8) -> str: + return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) + + +async def run_generator(args: argparse.Namespace) -> None: + db_path = resolve_db_path() + db_path.parent.mkdir(parents=True, exist_ok=True) + middleware = SQLiteHistoryMirrorMiddleware(db_path) + bus = EventBus( + name='MonitorGenerator', + middlewares=[middleware], + event_handler_concurrency='parallel', + max_history_size=0, + ) + + categories: Sequence[str] = args.categories or ['default'] + + async def random_handler(event: RandomTestEvent) -> str: + await asyncio.sleep(random.uniform(0.35, 0.7)) + if random.random() < args.child_rate: + depth = random.randint(1, max(1, args.max_depth)) + await emit_followups(event, depth) + if random.random() < args.audit_rate: + bus.emit( + AuditTrailEvent( + source_event_id=event.event_id, + handler_name='random_handler', + message=f'Processed payload {event.abc_payload_field}', + ) + ) + if random.random() < args.error_rate: + raise RuntimeError(f'Flaky handler failed for payload={event.abc_payload_field}') + return event.abc_payload_field[::-1] + + async def analytics_handler(event: RandomTestEvent) -> None: + await asyncio.sleep(random.uniform(0.2, 0.5)) + if random.random() < args.audit_rate: + bus.emit( + AuditTrailEvent( + source_event_id=event.event_id, + handler_name='analytics_handler', + message=f'Category {event.xyz_category_field}', + ) + ) + + async def auditing_handler(event: RandomTestEvent) -> str: + await asyncio.sleep(random.uniform(0.25, 0.6)) + return f'route:{event.route_hint or "default"}|category:{event.xyz_category_field}' + + async def followup_handler(event: FollowUpEvent) -> str: + await asyncio.sleep(random.uniform(0.3, 0.65)) + if random.random() < 0.3 and event.depth < args.max_depth: + await emit_followups(event, args.max_depth - event.depth) + return f'followup:{event.xyz_detail_field}' + + async def 
audit_handler(event: AuditTrailEvent) -> None: + await asyncio.sleep(random.uniform(0.2, 0.4)) + + bus.on('RandomTestEvent', random_handler) + bus.on('RandomTestEvent', analytics_handler) + bus.on('RandomTestEvent', auditing_handler) + bus.on('FollowUpEvent', followup_handler) + bus.on('AuditTrailEvent', audit_handler) + + print(f'🟒 Streaming events to {db_path}') + + async def producer_task(task_id: int) -> None: + emitted = 0 + while args.events == 0 or emitted < args.events: + burst = random.randint(1, max(1, args.burst_size)) + for _ in range(burst): + payload = _random_text(10) + event = RandomTestEvent( + abc_payload_field=payload, + xyz_category_field=random.choice(list(categories)), + route_hint=f'route-{task_id}-{random.randint(1, 3)}', + event_result_type=str, + ) + bus.emit(event) + emitted += 1 + if args.events and emitted >= args.events: + break + await asyncio.sleep(random.uniform(args.min_delay, args.max_delay)) + await asyncio.sleep(random.uniform(args.min_delay, args.max_delay)) + + async def emit_followups(parent_event: BaseEvent, remaining_depth: int) -> None: + depth = getattr(parent_event, 'depth', 0) + 1 + followup_count = random.randint(1, 2) + for _ in range(followup_count): + follow_up = FollowUpEvent( + abc_parent_payload_field=getattr(parent_event, 'abc_payload_field', parent_event.event_id), + xyz_detail_field=_random_text(6), + depth=depth, + event_result_type=str, + ) + bus.emit(follow_up) + if remaining_depth > 1 and random.random() < 0.6: + await asyncio.sleep(random.uniform(0.2, 0.4)) + await emit_followups(parent_event, remaining_depth - 1) + + try: + producers = [asyncio.create_task(producer_task(idx)) for idx in range(max(1, args.concurrent))] + await asyncio.gather(*producers) + await bus.wait_until_idle() + finally: + await bus.stop() + + +def main() -> None: + args = parse_args() + asyncio.run(run_generator(args)) + + +if __name__ == '__main__': + main()