@@ -70,19 +70,19 @@

properties
    % TEMPERATURE Temperature of generation.
-   Temperature
+   Temperature {mustBeValidTemperature} = 1

    % TOPPROBABILITYMASS Top probability mass to consider for generation.
-   TopProbabilityMass
+   TopProbabilityMass {mustBeValidTopP} = 1

    % STOPSEQUENCES Sequences to stop the generation of tokens.
-   StopSequences
+   StopSequences {mustBeValidStop} = {}

    % PRESENCEPENALTY Penalty for using a token in the response that has already been used.
-   PresencePenalty
+   PresencePenalty {mustBeValidPenalty} = 0

    % FREQUENCYPENALTY Penalty for using a token that is frequent in the training data.
-   FrequencyPenalty
+   FrequencyPenalty {mustBeValidPenalty} = 0
end

properties (SetAccess=private)
@@ -114,11 +114,13 @@
arguments
    systemPrompt {llms.utils.mustBeTextOrEmpty} = []
    nvp.Tools (1,:) {mustBeA(nvp.Tools, "openAIFunction")} = openAIFunction.empty
-   nvp.ModelName (1,1) {mustBeMember(nvp.ModelName,["gpt-4o", ...
-       "gpt-4o-2024-05-13","gpt-4-turbo", ...
-       "gpt-4-turbo-2024-04-09","gpt-4","gpt-4-0613", ...
-       "gpt-3.5-turbo","gpt-3.5-turbo-0125", ...
-       "gpt-3.5-turbo-1106"])} = "gpt-3.5-turbo"
+   nvp.ModelName (1,1) string {mustBeMember(nvp.ModelName,[ ...
+       "gpt-4o","gpt-4o-2024-05-13", ...
+       "gpt-4-turbo","gpt-4-turbo-2024-04-09", ...
+       "gpt-4","gpt-4-0613", ...
+       "gpt-3.5-turbo","gpt-3.5-turbo-0125", ...
+       "gpt-3.5-turbo-1106", ...
+       ])} = "gpt-3.5-turbo"
    nvp.Temperature {mustBeValidTemperature} = 1
    nvp.TopProbabilityMass {mustBeValidTopP} = 1
    nvp.StopSequences {mustBeValidStop} = {}
@@ -147,7 +149,7 @@

if ~isempty(systemPrompt)
    systemPrompt = string(systemPrompt);
-   if ~(strlength(systemPrompt)==0)
+   if systemPrompt ~= ""
        this.SystemPrompt = {struct("role", "system", "content", systemPrompt)};
    end
end
@@ -158,7 +160,7 @@
this.StopSequences = nvp.StopSequences;

% ResponseFormat is only supported in the latest models
-if (nvp.ResponseFormat == "json")
+if nvp.ResponseFormat == "json"
    if ismember(this.ModelName,["gpt-4","gpt-4-0613"])
        error("llms:invalidOptionAndValueForModel", ...
            llms.utils.errorMessageCatalog.getMessage("llms:invalidOptionAndValueForModel", "ResponseFormat", "json", this.ModelName));
@@ -243,52 +245,6 @@
        end

    end
-
-   function this = set.Temperature(this,temperature)
-       arguments
-           this openAIChat
-           temperature
-       end
-       mustBeValidTemperature(temperature);
-
-       this.Temperature = temperature;
-   end
-
-   function this = set.TopProbabilityMass(this,topP)
-       arguments
-           this openAIChat
-           topP
-       end
-       mustBeValidTopP(topP);
-       this.TopProbabilityMass = topP;
-   end
-
-   function this = set.StopSequences(this,stop)
-       arguments
-           this openAIChat
-           stop
-       end
-       mustBeValidStop(stop);
-       this.StopSequences = stop;
-   end
-
-   function this = set.PresencePenalty(this,penalty)
-       arguments
-           this openAIChat
-           penalty
-       end
-       mustBeValidPenalty(penalty)
-       this.PresencePenalty = penalty;
-   end
-
-   function this = set.FrequencyPenalty(this,penalty)
-       arguments
-           this openAIChat
-           penalty
-       end
-       mustBeValidPenalty(penalty)
-       this.FrequencyPenalty = penalty;
-   end
end

methods (Hidden)
@@ -331,7 +287,7 @@ function mustBeNonzeroLengthTextScalar(content)

    for i = 1:numFunctions
        functionsStruct{i} = struct('type','function', ...
-           'function',encodeStruct(functions(i))) ;
+           'function',encodeStruct(functions(i)));
        functionNames(i) = functions(i).FunctionName;
    end
end
@@ -377,4 +333,4 @@ function mustBeIntegerOrEmpty(value)
    if ~isempty(value)
        mustBeInteger(value)
    end
-end
+end
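
Note (not part of the diff): the set.* methods removed above are replaced by validation functions attached directly to the properties, so assignments after construction are still checked. A minimal usage sketch follows, assuming an OpenAI API key is configured the way the class expects and that mustBeValidTemperature rejects out-of-range values, as its name suggests:

% Construct a chat object; ModelName and Temperature are validated by the
% arguments block shown in the diff above.
chat = openAIChat("You are a helpful assistant", ModelName="gpt-4o");

% Property-level validators now run on every assignment, just as the
% removed set.Temperature method did.
chat.Temperature = 0.5;   % accepted by mustBeValidTemperature
chat.Temperature = -1;    % assumed to error in the property validator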