@@ -181,9 +181,9 @@ type ChatCompletion struct {
181181 // utilize scale tier credits until they are exhausted.
182182 // - If set to 'auto', and the Project is not Scale tier enabled, the request will
183183 // be processed using the default service tier with a lower uptime SLA and no
184- // latency guarentee .
184+ // latency guarantee.
185185 // - If set to 'default', the request will be processed using the default service
186- // tier with a lower uptime SLA and no latency guarentee .
186+ // tier with a lower uptime SLA and no latency guarantee.
187187 // - If set to 'flex', the request will be processed with the Flex Processing
188188 // service tier.
189189 // [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -283,9 +283,9 @@ func (r *ChatCompletionChoiceLogprobs) UnmarshalJSON(data []byte) error {
283283// utilize scale tier credits until they are exhausted.
284284// - If set to 'auto', and the Project is not Scale tier enabled, the request will
285285// be processed using the default service tier with a lower uptime SLA and no
286- // latency guarentee .
286+ // latency guarantee.
287287// - If set to 'default', the request will be processed using the default service
288- // tier with a lower uptime SLA and no latency guarentee .
288+ // tier with a lower uptime SLA and no latency guarantee.
289289// - If set to 'flex', the request will be processed with the Flex Processing
290290// service tier.
291291// [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -576,9 +576,9 @@ type ChatCompletionChunk struct {
576576 // utilize scale tier credits until they are exhausted.
577577 // - If set to 'auto', and the Project is not Scale tier enabled, the request will
578578 // be processed using the default service tier with a lower uptime SLA and no
579- // latency guarentee .
579+ // latency guarantee.
580580 // - If set to 'default', the request will be processed using the default service
581- // tier with a lower uptime SLA and no latency guarentee .
581+ // tier with a lower uptime SLA and no latency guarantee.
582582 // - If set to 'flex', the request will be processed with the Flex Processing
583583 // service tier.
584584 // [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -793,9 +793,9 @@ func (r *ChatCompletionChunkChoiceLogprobs) UnmarshalJSON(data []byte) error {
793793// utilize scale tier credits until they are exhausted.
794794// - If set to 'auto', and the Project is not Scale tier enabled, the request will
795795// be processed using the default service tier with a lower uptime SLA and no
796- // latency guarentee .
796+ // latency guarantee.
797797// - If set to 'default', the request will be processed using the default service
798- // tier with a lower uptime SLA and no latency guarentee .
798+ // tier with a lower uptime SLA and no latency guarantee.
799799// - If set to 'flex', the request will be processed with the Flex Processing
800800// service tier.
801801// [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -2209,9 +2209,9 @@ type ChatCompletionNewParams struct {
22092209 // utilize scale tier credits until they are exhausted.
22102210 // - If set to 'auto', and the Project is not Scale tier enabled, the request will
22112211 // be processed using the default service tier with a lower uptime SLA and no
2212- // latency guarentee .
2212+ // latency guarantee.
22132213 // - If set to 'default', the request will be processed using the default service
2214- // tier with a lower uptime SLA and no latency guarentee .
2214+ // tier with a lower uptime SLA and no latency guarantee.
22152215 // - If set to 'flex', the request will be processed with the Flex Processing
22162216 // service tier.
22172217 // [Learn more](https://platform.openai.com/docs/guides/flex-processing).
@@ -2411,9 +2411,9 @@ func (u ChatCompletionNewParamsResponseFormatUnion) GetType() *string {
24112411// utilize scale tier credits until they are exhausted.
24122412// - If set to 'auto', and the Project is not Scale tier enabled, the request will
24132413// be processed using the default service tier with a lower uptime SLA and no
2414- // latency guarentee .
2414+ // latency guarantee.
24152415// - If set to 'default', the request will be processed using the default service
2416- // tier with a lower uptime SLA and no latency guarentee .
2416+ // tier with a lower uptime SLA and no latency guarantee.
24172417// - If set to 'flex', the request will be processed with the Flex Processing
24182418// service tier.
24192419// [Learn more](https://platform.openai.com/docs/guides/flex-processing).
0 commit comments