# Python SDK Reference
The DuraGraph Python SDK provides a decorator-based interface for building AI agents and workflows. Build graphs with simple decorators, run them locally, or deploy to the control plane.
## Installation

```bash
# Core SDK
pip install duragraph

# With LLM providers
pip install duragraph[openai]     # OpenAI support
pip install duragraph[anthropic]  # Anthropic support

# With vector stores
pip install duragraph[chroma]     # Chroma vector store
pip install duragraph[pinecone]   # Pinecone vector store
pip install duragraph[qdrant]     # Qdrant vector store

# All features
pip install duragraph[all]
```

## Quick Start
```python
from duragraph import Graph, llm_node, entrypoint

@Graph(id="customer_support")
class CustomerSupportAgent:
    """A customer support agent that classifies and responds to queries."""

    @entrypoint
    @llm_node(model="gpt-4o-mini")
    def classify(self, state):
        """Classify the customer intent."""
        return {"intent": "billing"}

    @llm_node(model="gpt-4o-mini")
    def respond(self, state):
        """Generate a response based on intent."""
        return {"response": f"I'll help you with {state['intent']}."}

    # Define flow with edge operator
    classify >> respond

# Run locally
agent = CustomerSupportAgent()
result = agent.run({"message": "I have a billing question"})
print(result)

# Or deploy to control plane
agent.serve("http://localhost:8081")
```

## Core Concepts
### Graph Decorator

The `@Graph` decorator turns a Python class into a workflow graph.
```python
from duragraph import Graph

@Graph(
    id="my_graph",              # Unique graph identifier
    state_schema=MyStateClass,  # Optional state schema
    checkpointer=None,          # Optional checkpointer for persistence
)
class MyGraph:
    pass
```

**Parameters:**

- `id` (str, required) - Unique identifier for the graph
- `state_schema` (Type, optional) - Pydantic model or TypedDict for state validation
- `checkpointer` (Checkpointer, optional) - State persistence implementation
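As a sketch of how these parameters combine, the example below attaches a TypedDict schema and a checkpointer. The `duragraph.checkpointers` module and `MemoryCheckpointer` class are assumptions for illustration only; consult your installed version for the actual checkpointer implementations.

```python
from typing import TypedDict
from duragraph import Graph, entrypoint
# Hypothetical import -- the SDK's real checkpointer classes may
# live under a different module or name.
from duragraph.checkpointers import MemoryCheckpointer

class SupportState(TypedDict):
    message: str
    intent: str

@Graph(
    id="persistent_support",
    state_schema=SupportState,          # Validate state against the schema
    checkpointer=MemoryCheckpointer(),  # Hypothetical in-memory persistence
)
class PersistentSupportGraph:
    @entrypoint
    def start(self, state: SupportState) -> SupportState:
        return state
```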
### Node Decorators

Node decorators define executable steps in your graph.
#### @entrypoint

Marks the starting node of the graph.

```python
@entrypoint
def start(self, state):
    return state
```

#### @llm_node
Creates a node that calls an LLM.

```python
@llm_node(
    model="gpt-4o-mini",         # Model name
    temperature=0.7,             # Optional: sampling temperature
    max_tokens=1000,             # Optional: max response tokens
    system_prompt="You are...",  # Optional: system message
)
def generate(self, state):
    # Return messages or prompt
    return {"messages": [{"role": "user", "content": "Hello"}]}
```

**Supported Providers (v0.2.0):**

- OpenAI (GPT-4, GPT-4o, GPT-3.5-turbo)
- Anthropic (Claude 3.5 Sonnet, Claude 3 Opus, Claude 3 Haiku)
#### @tool_node

Defines a tool that can be called by LLMs or other nodes.

```python
from duragraph import tool_node

@tool_node(
    name="web_search",
    description="Search the web for information",
)
def search(self, query: str, max_results: int = 5):
    """Execute a web search."""
    results = perform_search(query, limit=max_results)
    return {"results": results}
```

#### @router_node
Creates a conditional routing node.

```python
from duragraph import router_node

@router_node
def route_by_intent(self, state):
    """Route based on classified intent."""
    intent = state.get("intent")

    if intent == "billing":
        return "billing_handler"
    elif intent == "support":
        return "support_handler"
    else:
        return "general_handler"
```

#### @human_node
Creates a human-in-the-loop node that pauses for human input.

```python
from duragraph import human_node

@human_node(
    prompt="Please review and approve the following:",
    timeout=3600,  # 1 hour timeout
)
def review(self, state):
    """Wait for human approval."""
    return state
```

### Edge Operators
The `>>` operator defines transitions between nodes.

```python
@Graph(id="my_graph")
class MyGraph:
    @entrypoint
    def start(self, state):
        return state

    def process(self, state):
        return state

    def end(self, state):
        return state

    # Sequential flow
    start >> process >> end
```

Conditional edges:
```python
from duragraph import conditional_edge

@Graph(id="conditional_graph")
class ConditionalGraph:
    @entrypoint
    def start(self, state):
        return state

    @router_node
    def decide(self, state):
        return "path_a" if state["condition"] else "path_b"

    def path_a(self, state):
        return state

    def path_b(self, state):
        return state

    # Conditional routing
    start >> decide
    decide >> {"path_a": path_a, "path_b": path_b}
```
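At run time, the string returned by `decide` selects the matching key in the edge mapping. A minimal usage sketch (the input key is illustrative):

```python
# With "condition" truthy, decide() returns "path_a", so the
# run continues through the path_a node.
graph = ConditionalGraph()
result = graph.run({"condition": True})
```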
## Async Execution

Full async/await support for parallel execution.

```python
import asyncio
from duragraph import Graph, llm_node, entrypoint

@Graph(id="async_agent")
class AsyncAgent:
    @entrypoint
    @llm_node(model="gpt-4o-mini")
    async def think(self, state):
        """Async LLM call."""
        return state

    async def custom_async(self, state):
        """Custom async node."""
        result = await some_async_operation()
        return {"result": result}

    think >> custom_async

# Run async
async def main():
    agent = AsyncAgent()
    result = await agent.arun({"input": "Hello"})
    print(result)

asyncio.run(main())
```

## Worker Runtime
Deploy your graph as a worker that connects to the control plane.

```python
from duragraph import Graph, Worker

@Graph(id="production_agent")
class ProductionAgent:
    # ... define nodes ...
    pass

# Create worker
worker = Worker(
    graph=ProductionAgent,
    control_plane_url="http://localhost:8081",
    heartbeat_interval=30,  # Send heartbeat every 30 seconds
    max_concurrent_tasks=10,
)

# Start worker (blocking)
worker.start()

# Or async
async def run_worker():
    await worker.astart()

asyncio.run(run_worker())
```

**Graceful Shutdown:**
```python
import signal

def shutdown_handler(signum, frame):
    print("Shutting down gracefully...")
    worker.stop()

signal.signal(signal.SIGINT, shutdown_handler)
signal.signal(signal.SIGTERM, shutdown_handler)

worker.start()
```

## LLM Providers
### OpenAI

```python
from duragraph.llm import OpenAIProvider

provider = OpenAIProvider(
    api_key="sk-...",                      # Or set OPENAI_API_KEY env var
    organization="org-...",                # Optional
    base_url="https://api.openai.com/v1",  # Optional custom endpoint
)

@llm_node(
    model="gpt-4o",
    temperature=0.7,
    provider=provider,
)
def generate(self, state):
    return state
```

**Supported Models:**

- `gpt-4o`
- `gpt-4o-mini`
- `gpt-4-turbo`
- `gpt-4`
- `gpt-3.5-turbo`
### Anthropic

```python
from duragraph.llm import AnthropicProvider

provider = AnthropicProvider(
    api_key="sk-ant-...",  # Or set ANTHROPIC_API_KEY env var
)

@llm_node(
    model="claude-3-5-sonnet-20241022",
    temperature=0.7,
    provider=provider,
)
def generate(self, state):
    return state
```

**Supported Models:**

- `claude-3-5-sonnet-20241022`
- `claude-3-opus-20240229`
- `claude-3-sonnet-20240229`
- `claude-3-haiku-20240307`
## Vector Stores

### Chroma

```python
from duragraph.vectorstores import ChromaVectorStore

vector_store = ChromaVectorStore(
    collection_name="my_docs",
    persist_directory="./chroma_db",
    embedding_function=None,  # Uses default
)

# Add documents
vector_store.add_documents([
    {"id": "1", "text": "Document 1", "metadata": {"source": "file1.txt"}},
    {"id": "2", "text": "Document 2", "metadata": {"source": "file2.txt"}},
])

# Search
results = vector_store.similarity_search(
    query="search term",
    k=5,
    filter={"source": "file1.txt"},
)
```

### Pinecone
```python
from duragraph.vectorstores import PineconeVectorStore

vector_store = PineconeVectorStore(
    api_key="your-api-key",
    environment="us-east-1-aws",
    index_name="my-index",
)

# Uses the same interface as Chroma
```
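Because the interface matches Chroma's, the calls from the Chroma example carry over unchanged; a brief sketch:

```python
# Mirrors the Chroma example above -- same document shape and
# search parameters, per the shared vector store interface.
vector_store.add_documents([
    {"id": "1", "text": "Document 1", "metadata": {"source": "file1.txt"}},
])

results = vector_store.similarity_search(query="search term", k=5)
```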
### Qdrant

```python
from duragraph.vectorstores import QdrantVectorStore

vector_store = QdrantVectorStore(
    url="http://localhost:6333",
    collection_name="my_collection",
    api_key=None,  # Optional for cloud
)
```

## Embedding Providers
### OpenAI Embeddings

```python
from duragraph.embeddings import OpenAIEmbeddings

embeddings = OpenAIEmbeddings(
    model="text-embedding-3-small",
    api_key="sk-...",
)

# Generate embeddings
vectors = embeddings.embed_documents([
    "Document 1",
    "Document 2",
])

# Single query
query_vector = embeddings.embed_query("search term")
```

### Cohere Embeddings
```python
from duragraph.embeddings import CohereEmbeddings

embeddings = CohereEmbeddings(
    model="embed-english-v3.0",
    api_key="your-key",
)
```

### Ollama (Local)
```python
from duragraph.embeddings import OllamaEmbeddings

embeddings = OllamaEmbeddings(
    model="nomic-embed-text",
    base_url="http://localhost:11434",
)
```
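Any of these providers can be plugged into a vector store via its `embedding_function` parameter, following the same pattern as the RAG example later on this page; a sketch with the local Ollama provider:

```python
from duragraph.vectorstores import ChromaVectorStore
from duragraph.embeddings import OllamaEmbeddings

# Swap the default embedding function for local Ollama embeddings.
vector_store = ChromaVectorStore(
    collection_name="local_docs",
    embedding_function=OllamaEmbeddings(model="nomic-embed-text"),
)
```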
## Document Loaders

### Text Files

```python
from duragraph.document_loaders import TextLoader

loader = TextLoader("path/to/file.txt")
documents = loader.load()
```

### PDF

```python
from duragraph.document_loaders import PDFLoader

loader = PDFLoader("path/to/file.pdf")
documents = loader.load()
```

### Directory
```python
from duragraph.document_loaders import DirectoryLoader

loader = DirectoryLoader(
    path="./docs",
    glob="**/*.md",
    loader_cls=TextLoader,
)
documents = loader.load()
```

### Chunking
```python
from duragraph.text_splitters import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
    separators=["\n\n", "\n", " ", ""],
)

chunks = splitter.split_documents(documents)
```
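Combining the loaders, splitter, and a vector store gives a typical ingestion pipeline. The exact shape of the objects returned by `split_documents` is an assumption here (dicts with `text` and `metadata` keys); adapt the mapping to whatever your loader actually returns.

```python
from duragraph.document_loaders import DirectoryLoader, TextLoader
from duragraph.text_splitters import RecursiveCharacterTextSplitter
from duragraph.vectorstores import ChromaVectorStore

# Load every markdown file, split it, and index the chunks.
documents = DirectoryLoader(path="./docs", glob="**/*.md", loader_cls=TextLoader).load()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
chunks = splitter.split_documents(documents)

vector_store = ChromaVectorStore(collection_name="docs")
# Assumes each chunk is a dict with "text" and "metadata" keys;
# adjust if your version returns document objects instead.
vector_store.add_documents([
    {"id": str(i), "text": chunk["text"], "metadata": chunk["metadata"]}
    for i, chunk in enumerate(chunks)
])
```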
## Tool Registry

Register and execute tools dynamically.
```python
from duragraph import ToolRegistry, tool

# Create registry
registry = ToolRegistry()

# Register tools
@registry.register
@tool(
    name="calculator",
    description="Perform arithmetic operations",
)
def calculator(operation: str, a: float, b: float) -> float:
    """Execute a calculation."""
    if operation == "add":
        return a + b
    elif operation == "multiply":
        return a * b
    # ... more operations
```
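Assuming the `@tool` decorator leaves the underlying function directly callable (an assumption; it may wrap it in a tool object), you can exercise a registered tool outside a graph:

```python
# Quick sanity check -- assumes @tool preserves the plain
# function call signature.
assert calculator(operation="add", a=2.0, b=3.0) == 5.0
```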
```python
# Use in graph
@Graph(id="math_agent")
class MathAgent:
    @entrypoint
    @llm_node(model="gpt-4o-mini", tools=registry.get_tools())
    def solve(self, state):
        return state
```
## CLI

Local development commands (v0.2.0).

```bash
# Initialize new project
duragraph init my-agent

# Run graph locally
duragraph dev my_graph.py:MyGraph

# Deploy to control plane
duragraph deploy my_graph.py:MyGraph --url http://localhost:8081

# Visualize graph
duragraph visualize my_graph.py:MyGraph --output graph.png
```

## State Management
### TypedDict Schema

```python
from typing import TypedDict
from duragraph import Graph

class MyState(TypedDict):
    messages: list[dict]
    intent: str
    result: str

@Graph(id="typed_graph", state_schema=MyState)
class TypedGraph:
    @entrypoint
    def start(self, state: MyState) -> MyState:
        return state
```

### Pydantic Schema
```python
from pydantic import BaseModel, Field
from duragraph import Graph

class MyState(BaseModel):
    messages: list[dict] = Field(default_factory=list)
    intent: str = ""
    result: str = ""

@Graph(id="pydantic_graph", state_schema=MyState)
class PydanticGraph:
    @entrypoint
    def start(self, state: MyState) -> MyState:
        return state
```

## Error Handling
```python
from duragraph.exceptions import (
    DuraGraphError,
    NodeExecutionError,
    WorkerConnectionError,
)

try:
    result = agent.run(input_state)
except NodeExecutionError as e:
    print(f"Node {e.node_id} failed: {e.message}")
except WorkerConnectionError as e:
    print(f"Worker connection failed: {e}")
except DuraGraphError as e:
    print(f"Error: {e}")
```

## Examples
### RAG Agent

```python
from duragraph import Graph, llm_node, entrypoint
from duragraph.vectorstores import ChromaVectorStore
from duragraph.embeddings import OpenAIEmbeddings

@Graph(id="rag_agent")
class RAGAgent:
    def __init__(self):
        self.vector_store = ChromaVectorStore(
            collection_name="docs",
            embedding_function=OpenAIEmbeddings(),
        )

    @entrypoint
    def retrieve(self, state):
        """Retrieve relevant documents."""
        query = state["query"]
        results = self.vector_store.similarity_search(query, k=5)
        return {"documents": results}

    @llm_node(model="gpt-4o")
    def generate(self, state):
        """Generate answer from documents."""
        context = "\n".join([doc["text"] for doc in state["documents"]])
        return {
            "messages": [
                {"role": "system", "content": f"Context:\n{context}"},
                {"role": "user", "content": state["query"]},
            ]
        }

    retrieve >> generate
```
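Running the RAG agent follows the same pattern as the Quick Start; the input key matches the `query` field the `retrieve` node reads:

```python
# Query the indexed documents (the question text is illustrative).
agent = RAGAgent()
result = agent.run({"query": "How do I deploy a worker?"})
print(result)
```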
### Multi-Agent Collaboration

```python
from duragraph import Graph, llm_node, entrypoint

@Graph(id="researcher")
class Researcher:
    @entrypoint
    @llm_node(model="gpt-4o")
    def research(self, state):
        return {"findings": "research results"}

@Graph(id="writer")
class Writer:
    @entrypoint
    @llm_node(model="gpt-4o")
    def write(self, state):
        return {"article": "written article"}

@Graph(id="multi_agent")
class MultiAgent:
    def __init__(self):
        self.researcher = Researcher()
        self.writer = Writer()

    @entrypoint
    def orchestrate(self, state):
        # Run researcher
        research_result = self.researcher.run(state)

        # Pass to writer
        article = self.writer.run({
            "findings": research_result["findings"]
        })

        return article
```
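The composed graph runs like any other; the input key below is illustrative, since the sub-agents here return canned values:

```python
# The orchestrator runs the researcher, feeds its findings to
# the writer, and returns the writer's output.
agent = MultiAgent()
result = agent.run({"topic": "durable execution"})
print(result)
```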
## API Reference

For complete API documentation, see: