Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 15 additions & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,23 @@ name: CI

on:
push:
branches: [ main, devel ]
branches: [ main, devel, 'release/**' ]
paths:
- 'wildedge/**'
- 'tests/**'
- 'scripts/**'
- 'pyproject.toml'
- 'uv.lock'
- '.github/workflows/ci.yml'
pull_request:
branches: [ main, devel ]
paths:
- 'wildedge/**'
- 'tests/**'
- 'scripts/**'
- 'pyproject.toml'
- 'uv.lock'
- '.github/workflows/ci.yml'

jobs:
test:
Expand Down
56 changes: 48 additions & 8 deletions .github/workflows/compat.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,35 @@ name: Compatibility

on:
push:
branches: [ main, devel ]
branches: [ main, 'release/**' ]
paths:
- 'wildedge/**'
- 'tests/compat/**'
- 'scripts/compat_matrix.py'
- 'pyproject.toml'
- '.github/workflows/compat.yml'
pull_request:
branches: [ main, devel ]
types: [opened, synchronize, reopened, labeled]
branches: [ main ]
types: [opened, synchronize, reopened]
paths:
- 'wildedge/**'
- 'tests/compat/**'
- 'scripts/compat_matrix.py'
- 'pyproject.toml'
- '.github/workflows/compat.yml'
schedule:
- cron: "0 3 * * *"
workflow_dispatch:

jobs:
compat:
if: github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'compat')
if: github.event_name != 'pull_request' || startsWith(github.head_ref, 'release/')
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
python-version: ["3.10", "3.11", "3.12", "3.13"]
integration: ["onnx", "torch", "timm", "tensorflow"]
integration: ["onnx", "torch", "timm", "tensorflow", "gguf", "openai", "transformers", "ultralytics"]
version-set: ["min", "current"]
exclude:
- python-version: "3.13"
Expand All @@ -32,7 +44,7 @@ jobs:
- name: Install project
run: |
python -m pip install --upgrade pip
pip install -e . pytest
pip install -e . pytest pytest-asyncio pytest-mock
- name: Install compatibility dependencies
run: |
python scripts/compat_matrix.py deps \
Expand All @@ -43,8 +55,36 @@ jobs:
- name: Run compatibility test
run: python -m pytest "tests/compat/test_${{ matrix.integration }}_compat.py" -q

compat-mlx:
if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
runs-on: macos-14
strategy:
fail-fast: false
matrix:
python-version: ["3.12", "3.13"]
version-set: ["current"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install project
run: |
python -m pip install --upgrade pip
pip install -e . pytest pytest-asyncio pytest-mock
- name: Install compatibility dependencies
run: |
python scripts/compat_matrix.py deps \
--integration "mlx" \
--version-set "${{ matrix.version-set }}" \
--python-version "${{ matrix.python-version }}" > compat-requirements.txt
pip install -r compat-requirements.txt
- name: Run compatibility test
run: python -m pytest tests/compat/test_mlx_compat.py -q

compat-canary-314:
if: github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'compat')
if: github.event_name != 'pull_request' || startsWith(github.head_ref, 'release/')
runs-on: ubuntu-latest
continue-on-error: true
strategy:
Expand All @@ -63,7 +103,7 @@ jobs:
- name: Install project
run: |
python -m pip install --upgrade pip
pip install -e . pytest
pip install -e . pytest pytest-asyncio pytest-mock
- name: Install compatibility dependencies
run: |
python scripts/compat_matrix.py deps \
Expand Down
30 changes: 30 additions & 0 deletions .github/workflows/release-pr.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
---
# Posts a changelog preview comment on release/* pull requests into main.
name: Release PR

on:
  pull_request:
    branches: [ main ]
    types: [opened, synchronize, reopened]

jobs:
  changelog-preview:
    # Only run for release branches; other PRs into main are skipped.
    if: startsWith(github.head_ref, 'release/')
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      - uses: actions/checkout@v4

      # The script reads its inputs from the environment and writes the
      # rendered markdown to $OUTPUT.
      - name: Generate changelog preview
        env:
          GH_TOKEN: ${{ github.token }}
          HEAD_REF: ${{ github.head_ref }}
          REPO: ${{ github.repository }}
          OUTPUT: /tmp/changelog-preview.md
        run: python3 scripts/build_changelog_comment.py

      # Sticky comment: re-runs update the same comment (keyed by header)
      # instead of posting a new one on every push.
      - name: Post changelog preview comment
        uses: marocchino/sticky-pull-request-comment@v2
        with:
          header: changelog-preview
          path: /tmp/changelog-preview.md
12 changes: 11 additions & 1 deletion .github/workflows/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,12 +38,22 @@ jobs:
- name: Check package metadata
run: twine check dist/*

- name: Generate release notes
if: github.ref_type == 'tag'
env:
GH_TOKEN: ${{ github.token }}
REPO: ${{ github.repository }}
HEAD_REF: ${{ github.ref_name }}
TAG_NAME: ${{ github.ref_name }}
OUTPUT: /tmp/release-notes.md
run: python3 scripts/build_changelog_comment.py

- name: Create GitHub release
if: github.ref_type == 'tag'
uses: softprops/action-gh-release@v2
with:
files: dist/*
generate_release_notes: true
body_path: /tmp/release-notes.md

- name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
32 changes: 0 additions & 32 deletions CHANGELOG.md

This file was deleted.

11 changes: 10 additions & 1 deletion CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,16 @@
1. Fork the repository and create a feature branch off `devel`.
2. Make your changes and ensure tests pass.
3. Update documentation if needed.
4. Submit a pull request with a clear description of the changes.
4. Submit a pull request targeting `devel` with a clear description of the changes.

## Release process

Releases follow a `release/<version>` branching flow:

1. Cut a `release/<version>` branch from `devel`.
2. Bump the version in `pyproject.toml`.
3. Open a pull request targeting `main`. CI will automatically run the full compatibility matrix and post a changelog preview comment.
4. Once merged, tag the commit as `v<version>`. The release workflow publishes to PyPI and creates a GitHub release.

## Reporting issues

Expand Down
13 changes: 10 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -47,12 +47,19 @@ Useful flags:
```python
import wildedge

client = wildedge.WildEdge(dsn="...") # or WILDEDGE_DSN env var
client.instrument("transformers", hubs=["huggingface"])
client = wildedge.init(
dsn="...", # or WILDEDGE_DSN env var
integrations=["transformers"],
hubs=["huggingface"],
)

# models loaded after this point are tracked automatically
```

If no DSN is configured, the client becomes a no-op and logs a warning.

`init(...)` is a convenience wrapper for `WildEdge(...)` + `instrument(...)`.

## Supported integrations

**On-device**
Expand Down Expand Up @@ -90,7 +97,7 @@ For unsupported frameworks, see [Manual tracking](https://github.com/wild-edge/w

| Parameter | Default | Description |
|---|---|---|
| `dsn` | - | `https://<secret>@ingest.wildedge.dev/<key>` (or `WILDEDGE_DSN`) |
| `dsn` | - | `https://<secret>@ingest.wildedge.dev/<key>` (or `WILDEDGE_DSN`). If unset, the client is a no-op. |
| `app_version` | `None` | Your app's version string |
| `app_identity` | `<project_key>` | Namespace for offline persistence. Set per-app in multi-process workloads (or `WILDEDGE_APP_IDENTITY`) |
| `enable_offline_persistence` | `true` | Persist unsent events to disk and replay on restart |
Expand Down
2 changes: 1 addition & 1 deletion docs/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ Full reference for all `WildEdge` client parameters.

| Parameter | Default | Env var | Description |
|---|---|---|---|
| `dsn` | - | `WILDEDGE_DSN` | `https://<secret>@ingest.wildedge.dev/<key>` |
| `dsn` | - | `WILDEDGE_DSN` | `https://<secret>@ingest.wildedge.dev/<key>`. If unset, the client is a no-op. |
| `app_version` | `None` | - | Your app's version string |
| `app_identity` | `<project_key>` | `WILDEDGE_APP_IDENTITY` | Namespace for offline persistence. Set per-app in multi-process workloads |
| `enable_offline_persistence` | `true` | - | Persist unsent events to disk and replay on restart |
Expand Down
5 changes: 4 additions & 1 deletion docs/manual-tracking.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,10 @@ Every model needs a handle before you can track events against it. Pass the mode
```python
import wildedge

client = wildedge.WildEdge() # set WILDEDGE_DSN env var
client = wildedge.init() # uses WILDEDGE_DSN if set; otherwise no-op

# Optional: enable auto-instrumentation alongside manual tracking.
# client = wildedge.init(integrations=["transformers"], hubs=["huggingface"])

handle = client.register_model(
my_model,
Expand Down
10 changes: 9 additions & 1 deletion examples/cli/uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 2 additions & 1 deletion examples/django_gemma/uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Empty file.
54 changes: 54 additions & 0 deletions examples/fastapi_openai/app/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
"""FastAPI + OpenRouter example.

WildEdge is injected via `wildedge run` before the app starts, so inference
tracking happens automatically for every chat.completions.create call.

Run with: see demo.sh
Requires: WILDEDGE_DSN and OPENROUTER_API_KEY environment variables.
"""

import os
import pathlib

from fastapi import FastAPI
from fastapi.responses import FileResponse
from openai import OpenAI
from pydantic import BaseModel

STATIC = pathlib.Path(__file__).parent / "static"

app = FastAPI()


@app.get("/")
def index() -> FileResponse:
return FileResponse(STATIC / "index.html")


client = OpenAI(
api_key=os.environ["OPENROUTER_API_KEY"],
base_url="https://openrouter.ai/api/v1",
)


class ChatRequest(BaseModel):
prompt: str
model: str = "openai/gpt-4o-mini"


class ChatResponse(BaseModel):
response: str
model: str


@app.post("/chat", response_model=ChatResponse)
def chat(req: ChatRequest) -> ChatResponse:
completion = client.chat.completions.create(
model=req.model,
messages=[{"role": "user", "content": req.prompt}],
max_tokens=512,
)
return ChatResponse(
response=completion.choices[0].message.content,
model=completion.model,
)
Loading
Loading