Skip to content

Commit 2f67126

Browse files
authored
Merge pull request #82 from stackhpc/fix-gradio-5
Fix Gradio 5, tab title, and copy buttons
2 parents 45a4d15 + 335b7d1 commit 2f67126

File tree

1 file changed

+25
-11
lines changed

1 file changed

+25
-11
lines changed

web-apps/chat/app.py

Lines changed: 25 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@ class PossibleSystemPromptException(Exception):
6464
def inference(latest_message, history):
6565
# Allow mutating global variable
6666
global BACKEND_INITIALISED
67+
log.debug("Inference request received with history: %s", history)
6768

6869
try:
6970
context = []
@@ -81,26 +82,32 @@ def inference(latest_message, history):
8182
else:
8283
if role != "assistant":
8384
log.warn(f"Message role {role} converted to 'assistant'")
84-
context.append(AIMessage(content=(content or "")))
85+
context.append(AIMessage(content=(content or "")))
8586
context.append(HumanMessage(content=latest_message))
8687

8788
log.debug("Chat context: %s", context)
8889

89-
9090
response = ""
91+
thinking = False
92+
9193
for chunk in llm.stream(context):
9294
# If this is our first successful response from the backend
9395
# then update the status variable to allow future error messages
9496
# to be more informative
9597
if not BACKEND_INITIALISED and len(response) > 0:
9698
BACKEND_INITIALISED = True
9799

98-
# NOTE(sd109): For some reason the '>' character breaks the UI
99-
# so we need to escape it here.
100-
# response += chunk.content.replace('>', '\>')
101-
# UPDATE(sd109): Above bug seems to have been fixed as of gradio 4.15.0
102-
# but keeping this note here in case we encounter it again
103-
response += chunk.content
100+
# The "think" tags mark the chatbot's reasoning. Remove the content
101+
# and replace with "Thinking..." until the closing tag is found.
102+
content = chunk.content
103+
if '<think>' in content or thinking:
104+
thinking = True
105+
response = "Thinking..."
106+
if '</think>' in content:
107+
thinking = False
108+
response = ""
109+
else:
110+
response += content
104111
yield response
105112

106113
# Handle any API errors here. See OpenAI Python client for possible error responses
@@ -163,13 +170,20 @@ def inference_wrapper(*args):
163170
fill_height=True,
164171
theme=theme,
165172
css=settings.css_overrides,
166-
js=settings.custom_javascript
173+
js=settings.custom_javascript,
174+
title=settings.page_title,
167175
) as demo:
168-
gr.ChatInterface(
176+
gr.Markdown('# ' + settings.page_title)
177+
gr.ChatInterface(
169178
inference_wrapper,
170179
type="messages",
171-
title=settings.page_title,
172180
analytics_enabled=False,
181+
chatbot=gr.Chatbot(
182+
show_copy_button=True,
183+
height="75vh",
184+
resizable=True,
185+
sanitize_html=True,
186+
),
173187
)
174188

175189

0 commit comments

Comments (0)