Commit 405e22a

Fix hfjs chatCompletionStream snippet (#1382)

There's no `await` here. ~~I don't remember if I need to run a script to check CI~~ EDIT: nvm, found it.

1 parent f37b0aa · commit 405e22a
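The fix itself: `chatCompletionStream()` returns an async iterable (an `AsyncGenerator`), not a `Promise`, so there is nothing to `await` at the call site; the `for await…of` loop is what actually consumes the stream. A minimal sketch of the corrected shape, assuming `@huggingface/inference` is installed and `HF_TOKEN` is set in the environment (the provider, model, and prompt are illustrative, modeled on the fixtures below):

```js
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient(process.env.HF_TOKEN);

let out = "";

// No `await` here: chatCompletionStream() returns an AsyncGenerator,
// not a Promise. Only the for await loop below actually suspends.
const stream = client.chatCompletionStream({
	provider: "hf-inference",
	model: "meta-llama/Llama-3.1-8B-Instruct",
	messages: [{ role: "user", content: "What is the capital of France?" }],
});

// Each chunk carries an incremental delta; concatenate them to
// rebuild the full completion while printing tokens as they arrive.
for await (const chunk of stream) {
	if (chunk.choices && chunk.choices.length > 0) {
		const newContent = chunk.choices[0].delta.content;
		out += newContent;
		console.log(newContent);
	}
}
```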

File tree: 5 files changed, +10 −10 lines


packages/inference/src/snippets/templates/js/huggingface.js/conversationalStream.jinja
2 additions, 2 deletions

@@ -4,7 +4,7 @@ const client = new InferenceClient("{{ accessToken }}");
 
 let out = "";
 
-const stream = await client.chatCompletionStream({
+const stream = client.chatCompletionStream({
     provider: "{{ provider }}",
     model: "{{ model.id }}",
     {{ inputs.asTsString }}
@@ -17,5 +17,5 @@ for await (const chunk of stream) {
         const newContent = chunk.choices[0].delta.content;
         out += newContent;
         console.log(newContent);
-    }
+    }
 }

(The closing-brace line in the second hunk changes only in whitespace; the same is true in each fixture diff below.)
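The four files that follow appear to be the generated snapshots of this template (one per provider/task combination under `packages/tasks-gen/snippets-fixtures`), regenerated so they stay in sync with the template change above.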

packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.hf-inference.js
2 additions, 2 deletions

@@ -4,7 +4,7 @@ const client = new InferenceClient("api_token");
 
 let out = "";
 
-const stream = await client.chatCompletionStream({
+const stream = client.chatCompletionStream({
     provider: "hf-inference",
     model: "meta-llama/Llama-3.1-8B-Instruct",
     messages: [
@@ -21,5 +21,5 @@ for await (const chunk of stream) {
         const newContent = chunk.choices[0].delta.content;
         out += newContent;
         console.log(newContent);
-    }
+    }
 }

packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.together.js
2 additions, 2 deletions

@@ -4,7 +4,7 @@ const client = new InferenceClient("api_token");
 
 let out = "";
 
-const stream = await client.chatCompletionStream({
+const stream = client.chatCompletionStream({
     provider: "together",
     model: "meta-llama/Llama-3.1-8B-Instruct",
     messages: [
@@ -21,5 +21,5 @@ for await (const chunk of stream) {
        const newContent = chunk.choices[0].delta.content;
        out += newContent;
        console.log(newContent);
-    }
+    }
 }

packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.fireworks-ai.js
2 additions, 2 deletions

@@ -4,7 +4,7 @@ const client = new InferenceClient("api_token");
 
 let out = "";
 
-const stream = await client.chatCompletionStream({
+const stream = client.chatCompletionStream({
     provider: "fireworks-ai",
     model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
     messages: [
@@ -32,5 +32,5 @@ for await (const chunk of stream) {
        const newContent = chunk.choices[0].delta.content;
        out += newContent;
        console.log(newContent);
-    }
+    }
 }

packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.hf-inference.js
2 additions, 2 deletions

@@ -4,7 +4,7 @@ const client = new InferenceClient("api_token");
 
 let out = "";
 
-const stream = await client.chatCompletionStream({
+const stream = client.chatCompletionStream({
     provider: "hf-inference",
     model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
     messages: [
@@ -32,5 +32,5 @@ for await (const chunk of stream) {
        const newContent = chunk.choices[0].delta.content;
        out += newContent;
        console.log(newContent);
-    }
+    }
 }

0 commit comments