Back to Docs
Python SDK
Full-featured Python client with sync and async support, streaming, and automatic retries.
Installation
pip install consilium
Client Configuration
from consilium import ConsiliumClient, AsyncConsiliumClient
# Synchronous client
client = ConsiliumClient(
api_url="http://localhost:4000/api/v1",
api_key="your-api-key",
timeout=120, # seconds (default: 120)
max_retries=2, # retry attempts (default: 2)
retry_delay=1.0 # base delay in seconds (default: 1.0)
)
# Asynchronous client
async_client = AsyncConsiliumClient(
api_url="http://localhost:4000/api/v1",
api_key="your-api-key"
)
# Context manager support (auto-cleanup)
with ConsiliumClient(...) as client:
result = client.deliberate(...)
async with AsyncConsiliumClient(...) as client:
result = await client.deliberate(...)
Methods
| Method | Returns |
|---|---|
| health_check() | HealthStatus |
| deliberate(topic, mode, models, ...) | DeliberationResult |
| red_team(topic, models, ...) | RedTeamResult |
| blind_eval(topic, models, ...) | EvalResult |
| estimate_cost(topic, mode, models) | CostEstimate |
| stream_deliberation(topic, mode, models, ...) | Iterator[Event] |
Types (Pydantic)
| Type | Fields |
|---|---|
| DeliberationMode | Enum: quick, council, deep, blind, redteam, jury, market, auto, prediction-market, adversarial, delphi |
| DeliberationResult | golden_prompt, dissent_report, cost, audit_trail[], votes, confidence_scores |
| RedTeamResult | attacks[], defenses[], judgments[], overall_score, vulnerability_count |
| EvalResult | rankings[], scores, method |
| CostEstimate | estimated_cost, breakdown[CostBreakdownEntry], rounds, mode |
| CostBreakdownEntry | model, role, estimated_cost |
| HealthStatus | status, version, uptime |
| SemanticExtractionResult | decisions, action_items, key_disagreements, consensus_level |
All types use model_config = {"populate_by_name": True} for flexible field mapping.
Error Handling
Exponential backoff — min(BASE * 2^attempt, 30s) between retries
Automatic retries — On 5xx server errors and 429 rate limit responses
Connection pooling — HTTP connections managed by httpx for efficiency
Full Example
from consilium import ConsiliumClient
client = ConsiliumClient(
api_url="http://localhost:4000/api/v1",
api_key="your-key"
)
# 1. Check health
health = client.health_check()
print(f"API Status: {health.status}")
# 2. Estimate cost before running
estimate = client.estimate_cost(
topic="Should we migrate from REST to GraphQL?",
mode="council",
models=["claude-sonnet-4-6", "gpt-5.4", "gemini-3-flash-preview"]
)
print(f"Estimated cost: ${estimate.estimated_cost:.4f}")
# 3. Run deliberation
result = client.deliberate(
topic="Should we migrate from REST to GraphQL?",
mode="council",
models=["claude-sonnet-4-6", "gpt-5.4", "gemini-3-flash-preview"],
max_rounds=3
)
print(f"Synthesis: {result.golden_prompt}")
print(f"Actual cost: ${result.cost:.4f}")
print(f"Dissent: {result.dissent_report}")
# 4. Red team assessment
red = client.red_team(
topic="Review this auth middleware for vulnerabilities",
models=["claude-sonnet-4-6", "gpt-5.4"]
)
print(f"Vulnerabilities found: {red.vulnerability_count}")
print(f"Overall score: {red.overall_score}")
# 5. Stream events in real-time
for event in client.stream_deliberation(
topic="Is Kubernetes overkill for our startup?",
mode="jury",
models=["claude-sonnet-4-6", "gpt-5.4", "gemini-3-flash-preview"]
):
if event.event == "agent:chunk":
print(event.chunk, end="")
elif event.event == "phase:voting":
print("\nVoting phase started")
elif event.event == "convergence:detected":
print("\nConsensus reached!")
elif event.event == "dissent:report":
print(f"\nDissent detected: {event.data}")
Async Example
import asyncio
from consilium import AsyncConsiliumClient
async def main():
async with AsyncConsiliumClient(
api_url="http://localhost:4000/api/v1",
api_key="your-key"
) as client:
# Run multiple deliberations concurrently
results = await asyncio.gather(
client.deliberate(
topic="Best database for our use case?",
mode="council",
models=["claude-sonnet-4-6", "gpt-5.4"]
),
client.deliberate(
topic="Should we add caching?",
mode="quick",
models=["gpt-5.4-mini"]
),
)
for r in results:
print(r.golden_prompt[:200])
asyncio.run(main())