Commit b7b3304: Add files via upload
1 parent db80e99 commit b7b3304

1 file changed: 242 additions & 0 deletions
@@ -0,0 +1,242 @@
# https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem

import streamlit as st
import asyncio
import os
import shutil
import tempfile
import glob

from agents import Agent, Runner, OpenAIChatCompletionsModel, AsyncOpenAI
from openai.types.responses import ResponseTextDeltaEvent


# Create a sample file for demonstration if needed
def ensure_sample_files():
    current_dir = os.path.dirname(os.path.abspath(__file__))
    samples_dir = os.path.join(current_dir, "sample_files")

    # Create the directory if it doesn't exist
    os.makedirs(samples_dir, exist_ok=True)

    # Create a sample WWDC predictions file
    predictions_file = os.path.join(samples_dir, "wwdc25_predictions.md")
    if not os.path.exists(predictions_file):
        with open(predictions_file, "w") as f:
            f.write("# WWDC25 Predictions\n\n")
            f.write("1. Apple Intelligence features for iPad\n")
            f.write("2. New Apple Watch with health sensors\n")
            f.write("3. Vision Pro 2 announcement\n")
            f.write("4. iOS 18 with advanced customization\n")
            f.write("5. macOS 15 with AI features\n")

    # Create a sample WWDC activities file
    activities_file = os.path.join(samples_dir, "wwdc_activities.txt")
    if not os.path.exists(activities_file):
        with open(activities_file, "w") as f:
            f.write("My favorite WWDC activities:\n\n")
            f.write("1. Attending sessions\n")
            f.write("2. Labs with Apple engineers\n")
            f.write("3. Networking events\n")
            f.write("4. Exploring new APIs\n")
            f.write("5. Hands-on demos\n")

    return samples_dir


# Using a separate event loop to run async code in Streamlit
class AsyncRunner:
    @staticmethod
    def run_async(func, *args, **kwargs):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            return loop.run_until_complete(func(*args, **kwargs))
        finally:
            loop.close()
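

# Why a fresh event loop per call: Streamlit re-runs this script on every
# interaction and does not expose a running asyncio loop, so each query gets its
# own short-lived loop instead of relying on asyncio.get_event_loop().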


# Function to read all files in the sample directory and return their contents
def read_sample_files():
    samples_dir = ensure_sample_files()
    file_contents = {}

    # Read all files in the directory
    for file_path in glob.glob(os.path.join(samples_dir, "*")):
        if os.path.isfile(file_path):
            with open(file_path, 'r') as file:
                file_contents[os.path.basename(file_path)] = file.read()

    return file_contents


# Function to build context from file contents
def build_context_from_files():
    file_contents = read_sample_files()
    context = "Here are the contents of the files in the system:\n\n"

    for filename, content in file_contents.items():
        context += f"--- File: {filename} ---\n{content}\n\n"

    return context


# Function to run a query with error handling
def run_agent_query(query):
    try:
        # Read all files and build context
        context = build_context_from_files()

        # Combine context and query
        full_prompt = f"{context}\n\nBased on the file contents above, {query}"

        async def run_query():
            try:
                # Initialize Ollama client and local model
                local_model = OpenAIChatCompletionsModel(
                    model="deepseek-r1:8b",
                    openai_client=AsyncOpenAI(base_url="http://localhost:11434/v1")
                )

                agent = Agent(
                    name="Assistant for Content in Files",
                    instructions="You are a helpful assistant that answers questions about the file contents provided in the context.",
                    model=local_model
                )

                result = await Runner.run(starting_agent=agent, input=full_prompt)
                return result.final_output, None  # No trace_id since we're not using MCP
            except Exception as e:
                st.error(f"Error in run_query: {str(e)}")
                return f"Failed to process query: {str(e)}", None

        return AsyncRunner.run_async(run_query)
    except Exception as e:
        st.error(f"Error processing query: {str(e)}")
        return f"Failed to process query: {str(e)}", None


# Function to run a streaming query with error handling
def run_agent_query_streamed(query):
    try:
        # Read all files and build context
        context = build_context_from_files()

        # Combine context and query
        full_prompt = f"{context}\n\nBased on the file contents above, {query}"

        async def run_streamed_query():
            try:
                # Create a placeholder for the streaming output
                response_placeholder = st.empty()
                full_response = ""

                # Initialize Ollama client and local model
                local_model = OpenAIChatCompletionsModel(
                    model="deepseek-r1:8b",
                    openai_client=AsyncOpenAI(base_url="http://localhost:11434/v1")
                )

                agent = Agent(
                    name="Assistant for Content in Files",
                    instructions="You are a helpful assistant that answers questions about the file contents provided in the context.",
                    model=local_model
                )

                # Stream the response
                result = Runner.run_streamed(agent, full_prompt)
                async for event in result.stream_events():
                    if event.type == "raw_response_event" and isinstance(event.data, ResponseTextDeltaEvent):
                        # Append new text to the full response
                        full_response += event.data.delta
                        # Update the placeholder with the accumulated text
                        response_placeholder.markdown(full_response)

                return full_response
            except Exception as e:
                st.error(f"Error in run_streamed_query: {str(e)}")
                return f"Failed to process query: {str(e)}"

        return AsyncRunner.run_async(run_streamed_query)
    except Exception as e:
        st.error(f"Error processing query: {str(e)}")
        return f"Failed to process query: {str(e)}"


def main():
    st.title("File Explorer Assistant with Ollama and deepseek-r1:8b")
    st.write("This app uses Ollama with the deepseek-r1:8b model to read files and answer questions about them.")

    # Ensure sample files exist
    ensure_sample_files()

    # Display available files
    st.subheader("Available Files")
    files = read_sample_files()
    for filename in files.keys():
        st.write(f"- {filename}")

    # Input area for user queries
    query = st.text_area("Ask me about the files:", height=100)

    use_streaming = st.checkbox("Use streaming response", value=True)

    if st.button("Submit"):
        if query:
            with st.spinner("Processing your request..."):
                if use_streaming:
                    run_agent_query_streamed(query)
                else:
                    result, _ = run_agent_query(query)
                    st.write("### Response:")
                    st.write(result)

    # Sample queries
    st.sidebar.header("Sample Queries")
    if st.sidebar.button("List all files"):
        with st.spinner("Processing..."):
            if use_streaming:
                run_agent_query_streamed("List the names of all the files.")
            else:
                result, _ = run_agent_query("List the names of all the files.")
                st.write("### Files in the system:")
                st.write(result)

    if st.sidebar.button("WWDC Activities"):
        with st.spinner("Processing..."):
            if use_streaming:
                run_agent_query_streamed("What are my favorite WWDC activities?")
            else:
                result, _ = run_agent_query("What are my favorite WWDC activities?")
                st.write("### WWDC Activities:")
                st.write(result)

    if st.sidebar.button("WWDC25 Predictions"):
        with st.spinner("Processing..."):
            if use_streaming:
                run_agent_query_streamed("Look at my wwdc25 predictions. List the predictions that are most likely to be true.")
            else:
                result, _ = run_agent_query("Look at my wwdc25 predictions. List the predictions that are most likely to be true.")
                st.write("### WWDC25 Predictions Analysis:")
                st.write(result)


if __name__ == "__main__":
    # Check if the user has Ollama running with the deepseek-r1:8b model
    import requests
    try:
        response = requests.get("http://localhost:11434/api/tags")
        if response.status_code == 200:
            models = response.json()["models"]
            deepseek_available = any("deepseek-r1:8b" in model["name"] for model in models)
            if not deepseek_available:
                st.error("deepseek-r1:8b model is not available in Ollama. Please run 'ollama pull deepseek-r1:8b' to download it.")
                st.stop()
        else:
            st.error("Unable to connect to Ollama API. Make sure Ollama is running.")
            st.stop()
    except requests.exceptions.ConnectionError:
        st.error("Unable to connect to Ollama. Make sure Ollama is running at http://localhost:11434")
        st.stop()

    main()
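
Assuming Ollama is serving at http://localhost:11434 and the model has been pulled (ollama pull deepseek-r1:8b, as the startup check suggests), the app is launched with streamlit run followed by this script's filename. For a quick sanity check of the model wiring outside Streamlit, a minimal sketch using the same SDK calls as the file above could look like this (the agent name and prompt are illustrative, not part of the commit):

# Minimal sketch: query the same Ollama-hosted model directly, without Streamlit.
import asyncio

from agents import Agent, Runner, OpenAIChatCompletionsModel, AsyncOpenAI


async def smoke_test():
    local_model = OpenAIChatCompletionsModel(
        model="deepseek-r1:8b",
        openai_client=AsyncOpenAI(
            base_url="http://localhost:11434/v1",
            api_key="ollama",  # placeholder; the client requires a key, Ollama ignores it
        ),
    )
    agent = Agent(
        name="Smoke Test Agent",
        instructions="Answer briefly.",
        model=local_model,
    )
    result = await Runner.run(starting_agent=agent, input="Say hello in one sentence.")
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(smoke_test())

If the call fails, the same checks the app performs at startup apply: confirm Ollama is running and that deepseek-r1:8b appears in the response from http://localhost:11434/api/tags.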
