mcp powered voice agents #1897


Merged
merged 7 commits on Jun 17, 2025
107 changes: 107 additions & 0 deletions examples/partners/mcp_powered_voice_agents/search_server.py
@@ -0,0 +1,107 @@
import os
from mcp.server.fastmcp import FastMCP
from openai import OpenAI
from agents import set_tracing_export_api_key

# Create server
mcp = FastMCP("Search Server")
_vector_store_id = ""

def _run_rag(query: str) -> str:
    """Do a search for answers within the knowledge base and internal documents of the user.
    Args:
        query: The user query
    """
    results = client.vector_stores.search(
        vector_store_id=_vector_store_id,
        query=query,
        rewrite_query=True,  # Query rewriting generally improves results
    )
    return results.data[0].content[0].text


def _summarize_rag_response(rag_output: str) -> str:
    """Summarize the RAG response using gpt-4.1-mini
    Args:
        rag_output: The RAG response
    """
    response = client.responses.create(
        model="gpt-4.1-mini",
        tools=[{"type": "web_search_preview"}],
        input="Summarize the following text concisely: \n\n" + rag_output,
    )
    return response.output_text


@mcp.tool()
def generate_rag_output(query: str) -> str:
    """Generate a summarized RAG output for a given query.
    Args:
        query: The user query
    """
    print("[debug-server] generate_rag_output: ", query)
    rag_output = _run_rag(query)
    return _summarize_rag_response(rag_output)


@mcp.tool()
def run_web_search(query: str) -> str:
    """Run a web search for the given query.
    Args:
        query: The user query
    """
    print("[debug-server] run_web_search:", query)
    response = client.responses.create(
        model="gpt-4.1-mini",
        tools=[{"type": "web_search_preview"}],
        input=query,
    )
    return response.output_text


def index_documents(directory: str):
    """Index the documents in the given directory to the vector store
    Args:
        directory: The directory to index the documents from
    """
    # OpenAI supported file extensions for retrieval (see docs)
    SUPPORTED_EXTENSIONS = {'.pdf', '.txt', '.md', '.docx', '.pptx', '.csv', '.rtf', '.html', '.json', '.xml'}
    # Collect all files in the specified directory
    files = [os.path.join(directory, f) for f in os.listdir(directory)]
    # Filter files for supported extensions only
    supported_files = []
    for file_path in files:
        _, ext = os.path.splitext(file_path)
        if ext.lower() in SUPPORTED_EXTENSIONS:
            supported_files.append(file_path)
        else:
            print(f"[warning] Skipping unsupported file for retrieval: {file_path}")

    vector_store = client.vector_stores.create(  # Create vector store
        name="Support FAQ",
    )
    global _vector_store_id
    _vector_store_id = vector_store.id

    for file_path in supported_files:
        # Upload each file to the vector store, ensuring the file handle is closed
        print(f"[debug-server] uploading file: {file_path}")
        with open(file_path, "rb") as fp:
            client.vector_stores.files.upload_and_poll(
                vector_store_id=vector_store.id,
                file=fp,
            )


if __name__ == "__main__":
oai_api_key = os.environ.get("OPENAI_API_KEY")
if not oai_api_key:
raise ValueError("OPENAI_API_KEY environment variable is not set")
set_tracing_export_api_key(oai_api_key)
client = OpenAI(api_key=oai_api_key)

current_dir = os.path.dirname(os.path.abspath(__file__))
samples_dir = os.path.join(current_dir, "sample_files")
index_documents(samples_dir)

mcp.run(transport="sse")
Binary file added images/System_flow_partner_mcp.png
Binary file added images/Traces-1_partner.png
Binary file added images/Traces-2_partner.png
1 change: 1 addition & 0 deletions images/partner_mcp_Cookbook.svg
Binary file added images/trace-sk1_partner.png
Binary file added images/traces_partner_granular.png
14 changes: 14 additions & 0 deletions registry.yaml
@@ -4,6 +4,20 @@
# should build pages for, and indicates metadata such as tags, creation date and
# authors for each page.

- title: MCP Powered Voice Agents
  path: examples/partners/mcp_powered_voice_agents/mcp_powered_agents_cookbook.ipynb
  date: 2025-06-12
  authors:
    - shikhar-cyber
    - Cece Z
    - Sibon li
  tags:
    - mcp
    - voice
    - agents-sdk
    - functions
    - tracing

- title: Eval Driven System Design - From Prototype to Production
  path: examples/partners/eval_driven_system_design/receipt_inspection.ipynb
  date: 2025-06-02