@@ -78,19 +78,30 @@ def handle_text_message(event):
     if text.startswith('/圖像'):
         text = text[3:].strip()
         role = 'assistant'
-        response = model_management[user_id].image_generations(text)
-        msg = ImageSendMessage(
-            original_content_url=response,
-            preview_image_url=response
-        )
+        response, error_message = model_management[user_id].image_generations(text)
+        if error_message:
+            msg = TextSendMessage(text=error_message)
+            memory.remove(user_id)
+        else:
+            msg = ImageSendMessage(
+                original_content_url=response,
+                preview_image_url=response
+            )
+            memory.append(user_id, {
+                'role': role,
+                'content': response
+            })
     else:
-        role, response = model_management[user_id].chat_completions(memory.get(user_id), os.getenv('OPENAI_MODEL_ENGINE'))
-        msg = TextSendMessage(text=response)
-        memory.append(user_id, {
-            'role': role,
-            'content': response
-        })
-
+        role, response, error_message = model_management[user_id].chat_completions(memory.get(user_id), os.getenv('OPENAI_MODEL_ENGINE'))
+        if error_message:
+            msg = TextSendMessage(text=error_message)
+            memory.remove(user_id)
+        else:
+            msg = TextSendMessage(text=response)
+            memory.append(user_id, {
+                'role': role,
+                'content': response
+            })
     line_bot_api.reply_message(event.reply_token, msg)


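Note: the handler changes in this hunk assume that the model wrapper's `image_generations` and `chat_completions` no longer raise on OpenAI API failures but instead return an error message alongside the result. The wrapper itself is not part of this diff; the following is a minimal sketch of that assumed contract, using the legacy `openai` SDK and an illustrative class name `OpenAIModel` (both assumptions, not taken from this commit):

```python
import openai


class OpenAIModel:
    """Illustrative sketch only; the real wrapper class is not shown in this diff."""

    def __init__(self, api_key: str):
        self.api_key = api_key

    def chat_completions(self, messages, model_engine):
        # Assumed contract: (role, content, error_message); error_message is None on success.
        try:
            openai.api_key = self.api_key
            response = openai.ChatCompletion.create(model=model_engine, messages=messages)
            message = response['choices'][0]['message']
            return message['role'], message['content'].strip(), None
        except Exception as e:
            return None, None, str(e)

    def image_generations(self, prompt):
        # Assumed contract: (image_url, error_message); error_message is None on success.
        try:
            openai.api_key = self.api_key
            response = openai.Image.create(prompt=prompt, n=1, size='512x512')
            return response['data'][0]['url'], None
        except Exception as e:
            return None, str(e)
```

With this shape, the handler can reply with the error text and call `memory.remove(user_id)` instead of letting the exception escape the webhook callback.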
@@ -103,12 +114,22 @@ def handle_audio_message(event):
         for chunk in audio_content.iter_content():
             fd.write(chunk)

-    transciption = model_management[user_id].audio_transcriptions(input_audio_path, 'whisper-1')
+    transciption, error_message = model_management[user_id].audio_transcriptions(input_audio_path, 'whisper-1')
+    if error_message:
+        os.remove(input_audio_path)
+        line_bot_api.reply_message(event.reply_token, TextSendMessage(text=error_message))
+        return
     memory.append(user_id, {
         'role': 'user',
         'content': transciption
     })
-    role, response = model_management[user_id].chat_completions(memory.get(user_id), 'gpt-3.5-turbo')
+
+    role, response, error_message = model_management[user_id].chat_completions(memory.get(user_id), 'gpt-3.5-turbo')
+    if error_message:
+        os.remove(input_audio_path)
+        line_bot_api.reply_message(event.reply_token, TextSendMessage(text=error_message))
+        memory.remove(user_id)
+        return
     memory.append(user_id, {
         'role': role,
         'content': response
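The audio path follows the same pattern: on a failed transcription or chat completion, the handler deletes the temporary audio file, replies with the error text, clears the user's memory where appropriate, and returns early. A matching sketch of the assumed `audio_transcriptions` wrapper (again illustrative, using the legacy `openai` SDK's `Audio.transcribe`; not part of this commit):

```python
import openai


class OpenAIModel:  # same illustrative class as in the sketch above
    def __init__(self, api_key: str):
        self.api_key = api_key

    def audio_transcriptions(self, file_path, model_engine='whisper-1'):
        # Assumed contract: (transcribed_text, error_message); error_message is None on success.
        try:
            openai.api_key = self.api_key
            with open(file_path, 'rb') as audio_file:
                result = openai.Audio.transcribe(model_engine, audio_file)
            return result['text'], None
        except Exception as e:
            return None, str(e)
```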