Jaunt
Reference

Configuration (jaunt.toml)

Config keys and behavior.

Jaunt discovers the project root by walking upward from the current working directory until it finds jaunt.toml.

Minimal config:

version = 1

[paths]
source_roots = ["src"]
test_roots = ["tests"]
generated_dir = "__generated__"

Full config (all keys optional except version):

version = 1

[paths]
# Directories to scan for build specs (relative to project root).
source_roots = ["src", "."]
# Directories to scan for test specs (relative to project root).
test_roots = ["tests"]
# Directory name inserted into import paths and used on disk.
# Must be a valid Python identifier. Recommended: "__generated__".
generated_dir = "__generated__"

[llm]
# Supported by the CLI: "openai", "anthropic", or "cerebras".
provider = "openai"
# Model name passed to the backend (default: "gpt-5.2").
model = "gpt-5.2"
# Environment variable used for the API key (default: "OPENAI_API_KEY").
api_key_env = "OPENAI_API_KEY"
# Optional hard ceiling for accumulated API spend during a build/test command.
# max_cost_per_build = 5.0
# Optional pass-through for OpenAI/Cerebras.
# Common values: "low", "medium", "high".
# reasoning_effort = "medium"
# Optional Anthropic thinking budget. If set, Jaunt sends:
# thinking = { type = "enabled", budget_tokens = ... }.
# anthropic_thinking_budget_tokens = 1024
# Enable OpenAI prompt caching support when available.
# prompt_cache = true
# Optional explicit cache key prefix.
# prompt_cache_key = "jaunt:shared-builds"

[build]
# Max parallel workers for build generation.
jobs = 8
# Best-effort dependency inference (explicit deps always apply).
infer_deps = true
# Extra ty-based repair passes after the normal retry loop.
ty_retry_attempts = 1
# Pytest marker backend for generated async tests: "asyncio" or "anyio".
async_runner = "asyncio"
# Include targeted test-spec source in build prompts.
include_target_tests = false
# Optional project-wide steering appended to build prompts.
# instructions = ["Prefer small composable helpers."]

[test]
# Max parallel workers for test generation.
jobs = 4
# Best-effort dependency inference for tests.
infer_deps = true
# Passed through to pytest as repeated "--pytest-args" flags.
pytest_args = ["-q"]

[agent]
# Internal generation runtime.
# "aider" is the default task-runner runtime; "legacy" uses Jaunt's direct SDK backends.
engine = "aider"

[aider]
# Mode for build-module generation.
build_mode = "architect"
# Mode for generated pytest modules and skill workflows.
test_mode = "code"
skill_mode = "code"
# Optional separate editor model for architect mode.
editor_model = ""
# Aider repository map token budget.
map_tokens = 0
# Keep temp trace workspaces instead of auto-cleaning them.
save_traces = false

[prompts]
# If set, these are treated as file paths and read at runtime by the backend.
# Leave empty to use packaged defaults under src/jaunt/prompts/.
build_system = ""
build_module = ""
test_system = ""
test_module = ""

Notes:

  • paths.source_roots: the CLI picks the first existing source root as the output base for generated build modules.
  • prompts.* are treated as file paths and read at runtime by the configured backend.
  • Each provider extra also installs the default Aider runtime. Install jaunt[openai], jaunt[anthropic], or jaunt[cerebras] (or jaunt[all]).
  • jaunt[aider] is still available as a runtime-only extra for custom install layouts.
  • There is no [mcp] config section; MCP is started explicitly via jaunt mcp serve.
  • build.async_runner controls the pytest marker selected for generated async tests.
  • For Aider, using the provider's canonical API key env var name (OPENAI_API_KEY, ANTHROPIC_API_KEY, or CEREBRAS_API_KEY) avoids an internal env-remap lock. If you set llm.api_key_env to a different name, Jaunt currently preserves correctness by serializing those Aider tasks.

Next: Output Locations.