Integrations
The GenAI Observability SDK works with any Python agent framework. Use @observe to wrap your agent’s entry points and tools, and enrich() to capture model details. Auto-instrumentation handles LLM calls automatically.
Each example below shows a complete, runnable integration. All examples assume the observability stack is running locally.
Strands Agents
Strands Agents is a model-driven agent framework from AWS. The SDK’s auto-instrumentation captures Bedrock/OpenAI calls automatically — add @observe for agent-level visibility.
pip install "opensearch-genai-observability-sdk-py[bedrock]" strands-agents strands-agents-bedrockfrom opensearch_genai_observability_sdk_py import register, observe, Op, enrichfrom strands import Agentfrom strands.models.bedrock import BedrockModel
register( endpoint="http://localhost:4318/v1/traces", service_name="strands-agent",)
@observe(op=Op.EXECUTE_TOOL)def weather_tool(city: str) -> str: """Get weather for a city.""" return f"72°F and sunny in {city}"
@observe(op=Op.INVOKE_AGENT)def run_agent(query: str) -> str: model = BedrockModel(model_id="us.anthropic.claude-sonnet-4-20250514-v1:0") agent = Agent( model=model, tools=[weather_tool], system_prompt="You are a helpful travel assistant.", ) enrich(model="claude-sonnet-4-20250514", provider="bedrock") result = agent(query) return str(result)
run_agent("What's the weather in Seattle?")
Using Strands built-in telemetry
Strands Agents has native OpenTelemetry support via StrandsTelemetry. It automatically emits spans for agent invocations, tool executions, and LLM calls following GenAI semantic conventions. Point it at the OTel collector, then use @observe from the SDK to add spans for any custom logic that Strands doesn’t instrument automatically.
pip install opensearch-genai-observability-sdk-py strands-agents strands-agents-bedrockfrom opensearch_genai_observability_sdk_py import observe, Opfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporterfrom opentelemetry.sdk.trace.export import BatchSpanProcessorfrom strands import Agentfrom strands.models.bedrock import BedrockModelfrom strands.telemetry import StrandsTelemetry
# Initialize Strands' built-in telemetry - automatically creates spans for:# invoke_agent, execute_tool, chat (LLM calls)telemetry = StrandsTelemetry()
# Point the exporter at the OTel collector (gRPC on port 4317)exporter = OTLPSpanExporter(endpoint="localhost:4317", insecure=True)telemetry.tracer_provider.add_span_processor(BatchSpanProcessor(exporter))
# Use @observe for custom logic that Strands doesn't auto-instrument@observe(op=Op.EXECUTE_TOOL)def fetch_hotel_ratings(city: str) -> str: """Fetch hotel ratings from an external API.""" # Custom API call - not covered by Strands auto-instrumentation return f"4.5 stars average in {city}"
model = BedrockModel(model_id="us.anthropic.claude-sonnet-4-20250514-v1:0")agent = Agent( model=model, tools=[fetch_hotel_ratings], system_prompt="You are a helpful travel assistant.",)
agent("Find top-rated hotels in Seattle")
Tip:
StrandsTelemetry handles agent, tool, and LLM spans automatically. Use @observe only for custom functions (API calls, database queries, post-processing) where you need visibility that Strands doesn’t provide out of the box.
LangGraph
LangGraph builds stateful, multi-step agent workflows. Install the LangChain auto-instrumentor for automatic LLM tracing.
pip install "opensearch-genai-observability-sdk-py[langchain]" langgraph langchain-openaifrom opensearch_genai_observability_sdk_py import register, observe, Op, enrichfrom langgraph.graph import StateGraph, START, ENDfrom langchain_openai import ChatOpenAIfrom typing import TypedDict
register( endpoint="http://localhost:4318/v1/traces", service_name="langgraph-agent",)
class State(TypedDict): query: str result: str
@observe(op=Op.CHAT, name="llm_call")def call_model(state: State) -> dict: llm = ChatOpenAI(model="gpt-4o") response = llm.invoke(state["query"]) enrich(model="gpt-4o", provider="openai") return {"result": response.content}
@observe(op=Op.EXECUTE_TOOL)def check_facts(state: State) -> dict: # fact-checking logic return {"result": f"Verified: {state['result']}"}
graph = StateGraph(State)graph.add_node("model", call_model)graph.add_node("fact_check", check_facts)graph.add_edge(START, "model")graph.add_edge("model", "fact_check")graph.add_edge("fact_check", END)app = graph.compile()
@observe(op=Op.INVOKE_AGENT, name="fact_check_agent")def run(query: str) -> str: result = app.invoke({"query": query, "result": ""}) return result["result"]
run("What is OpenSearch?")
CrewAI
CrewAI orchestrates role-playing AI agents. Wrap crew execution with @observe.
pip install opensearch-genai-observability-sdk-py crewaifrom opensearch_genai_observability_sdk_py import register, observe, Op, enrichfrom crewai import Agent, Task, Crew
register( endpoint="http://localhost:4318/v1/traces", service_name="crewai-agent",)
researcher = Agent( role="Researcher", goal="Find accurate information", backstory="Expert research analyst", llm="gpt-4o",)
writer = Agent( role="Writer", goal="Write clear summaries", backstory="Technical writer", llm="gpt-4o",)
@observe(op=Op.INVOKE_AGENT, name="research_crew")def run_crew(topic: str) -> str: enrich(model="gpt-4o", provider="openai") research_task = Task( description=f"Research: {topic}", expected_output="Key findings", agent=researcher, ) write_task = Task( description="Summarize the research findings", expected_output="Clear summary", agent=writer, ) crew = Crew(agents=[researcher, writer], tasks=[research_task, write_task]) result = crew.kickoff() return str(result)
run_crew("OpenTelemetry observability for AI agents")
OpenAI Agents SDK
The OpenAI Agents SDK has built-in tracing. Install the OpenAI auto-instrumentor to capture all LLM calls, and use @observe for top-level orchestration.
pip install "opensearch-genai-observability-sdk-py[openai]" openai-agentsfrom opensearch_genai_observability_sdk_py import register, observe, Op, enrichfrom agents import Agent, Runner
register( endpoint="http://localhost:4318/v1/traces", service_name="openai-agents",)
agent = Agent( name="assistant", instructions="You are a helpful assistant.", model="gpt-4o",)
@observe(op=Op.INVOKE_AGENT, name="openai_agent")def run(query: str) -> str: enrich(model="gpt-4o", provider="openai") result = Runner.run_sync(agent, query) enrich( input_tokens=result.raw_responses[-1].usage.input_tokens, output_tokens=result.raw_responses[-1].usage.output_tokens, ) return result.final_output
run("Explain observability in three sentences.")
Amazon Bedrock
For direct Bedrock converse or invoke_model calls, install the Bedrock auto-instrumentor.
pip install "opensearch-genai-observability-sdk-py[bedrock]"from opensearch_genai_observability_sdk_py import register, observe, Op, enrichimport boto3
register( endpoint="http://localhost:4318/v1/traces", service_name="bedrock-agent",)
@observe(op=Op.INVOKE_AGENT, name="bedrock_agent")def run(query: str) -> str: client = boto3.client("bedrock-runtime", region_name="us-east-1") enrich(model="claude-sonnet-4-20250514", provider="bedrock") response = client.converse( modelId="us.anthropic.claude-sonnet-4-20250514-v1:0", messages=[{"role": "user", "content": [{"text": query}]}], ) result = response["output"]["message"]["content"][0]["text"] enrich( input_tokens=response["usage"]["inputTokens"], output_tokens=response["usage"]["outputTokens"], ) return result
run("What is OpenSearch?")
Related links
- Python SDK reference — full API documentation for register, observe, and enrich
- Auto-instrumentation — supported providers and extras
- Evaluation & Scoring — score and evaluate your agent outputs