Skip to content

Commit d05b840

Browse files
authored
Move inference snippets logic to inference package (#1247)
_Original slack message from @Wauplin ([private link](https://huggingface.slack.com/archives/C04PJ0H35UM/p1740673808170509)):_ > I have an annoying problem I don't know how to handle in `@huggingface.js`. I'm working on updating the inference snippets under `@tasks` . Now that we have a `makeUrl` helper for each provider (see [cohere example](https://github.com/huggingface/huggingface.js/blob/b5230f9cbe71348014f39fd3a8149979cc10c196/packages/inference/src/providers/cohere.ts#L34)), I want to use it in the CURL snippets to display the correct URL (for now it's hardcoded [here](https://github.com/huggingface/huggingface.js/blob/b5230f9cbe71348014f39fd3a8149979cc10c196/packages/tasks/src/snippets/curl.ts#L46) which is incorrect for some providers). To do that, I need to make `@tasks` depend on `@inference` . But `@inference` is already depending on `@tasks` especially to type inference inputs/outputs. Is this something npm/pnpm is able to settle (in Python it's not ^^). > > The other solution I'm thinking about is to start having a `@snippets` package depending on both `@tasks` and `@inference` but I'd like to avoid that as much as possible 🙈 Any idea? => after some discussions we went for "let's move the `snippets` code to `inference`" which this PR does. @julien-c @coyotte508 ⚠️ **This is a breaking change for @huggingface.js/tasks.** Will it require a major release? ---- In practice I had to move only parts of the `./snippets` folder: - the type definitions + utils remain in `@tasks` since they are used to generate some local app snippets - only the js/python/curl -specific parts are moved to `@inference` => i.e. only the logic to generate the snippets, not the helpers
1 parent 412cd82 commit d05b840

File tree

10 files changed

+60
-34
lines changed

10 files changed

+60
-34
lines changed

e2e/ts/tsconfig.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"compilerOptions": {
3-
"target": "ES2015",
3+
"target": "ES2022",
44
"module": "commonjs",
55
"strict": true,
66
"esModuleInterop": true,

packages/inference/src/index.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,3 +2,6 @@ export { HfInference, HfInferenceEndpoint } from "./HfInference";
22
export { InferenceOutputError } from "./lib/InferenceOutputError";
33
export * from "./types";
44
export * from "./tasks";
5+
6+
import * as snippets from "./snippets/index.js";
7+
export { snippets };
Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,13 @@
1-
import { HF_HUB_INFERENCE_PROXY_TEMPLATE, type SnippetInferenceProvider } from "../inference-providers.js";
2-
import type { PipelineType } from "../pipelines.js";
3-
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
4-
import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
5-
import { getModelInputSnippet } from "./inputs.js";
6-
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
1+
import { HF_HUB_INFERENCE_PROXY_TEMPLATE, type SnippetInferenceProvider } from "@huggingface/tasks";
2+
import type { PipelineType } from "@huggingface/tasks/src/pipelines.js";
3+
import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
4+
import {
5+
type InferenceSnippet,
6+
type ModelDataMinimal,
7+
getModelInputSnippet,
8+
stringifyGenerationConfig,
9+
stringifyMessages,
10+
} from "@huggingface/tasks";
711

812
export const snippetBasic = (
913
model: ModelDataMinimal,
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
import * as curl from "./curl.js";
2+
import * as python from "./python.js";
3+
import * as js from "./js.js";
4+
5+
export { curl, python, js };
Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,13 @@
1-
import { openAIbaseUrl, type SnippetInferenceProvider } from "../inference-providers.js";
2-
import type { PipelineType, WidgetType } from "../pipelines.js";
3-
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
4-
import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
5-
import { getModelInputSnippet } from "./inputs.js";
6-
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
1+
import { openAIbaseUrl, type SnippetInferenceProvider } from "@huggingface/tasks";
2+
import type { PipelineType, WidgetType } from "@huggingface/tasks/src/pipelines.js";
3+
import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
4+
import {
5+
type InferenceSnippet,
6+
type ModelDataMinimal,
7+
getModelInputSnippet,
8+
stringifyGenerationConfig,
9+
stringifyMessages,
10+
} from "@huggingface/tasks";
711

812
const HFJS_METHODS: Partial<Record<WidgetType, string>> = {
913
"text-classification": "textClassification",
Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,13 @@
1-
import { openAIbaseUrl, type SnippetInferenceProvider } from "../inference-providers.js";
2-
import type { PipelineType, WidgetType } from "../pipelines.js";
3-
import type { ChatCompletionInputMessage, GenerationParameters } from "../tasks/index.js";
4-
import { stringifyGenerationConfig, stringifyMessages } from "./common.js";
5-
import { getModelInputSnippet } from "./inputs.js";
6-
import type { InferenceSnippet, ModelDataMinimal } from "./types.js";
1+
import { openAIbaseUrl, type SnippetInferenceProvider } from "@huggingface/tasks";
2+
import type { PipelineType, WidgetType } from "@huggingface/tasks/src/pipelines.js";
3+
import type { ChatCompletionInputMessage, GenerationParameters } from "@huggingface/tasks/src/tasks/index.js";
4+
import {
5+
type InferenceSnippet,
6+
type ModelDataMinimal,
7+
getModelInputSnippet,
8+
stringifyGenerationConfig,
9+
stringifyMessages,
10+
} from "@huggingface/tasks";
711

812
const HFH_INFERENCE_CLIENT_METHODS: Partial<Record<WidgetType, string>> = {
913
"audio-classification": "audio_classification",

packages/inference/test/vcr.ts

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -191,9 +191,15 @@ async function vcr(
191191
statusText: response.statusText,
192192
headers: Object.fromEntries(
193193
// Remove varying headers as much as possible
194-
[...response.headers.entries()].filter(
195-
([key]) => key !== "date" && key !== "content-length" && !key.startsWith("x-") && key !== "via"
196-
)
194+
(() => {
195+
const entries: [string, string][] = [];
196+
response.headers.forEach((value, key) => {
197+
if (key !== "date" && key !== "content-length" && !key.startsWith("x-") && key !== "via") {
198+
entries.push([key, value]);
199+
}
200+
});
201+
return entries;
202+
})()
197203
),
198204
},
199205
};

packages/tasks-gen/scripts/generate-snippets-fixtures.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,14 +19,14 @@ import { existsSync as pathExists } from "node:fs";
1919
import * as fs from "node:fs/promises";
2020
import * as path from "node:path/posix";
2121

22-
import type { SnippetInferenceProvider, InferenceSnippet } from "@huggingface/tasks";
23-
import { snippets } from "@huggingface/tasks";
22+
import { snippets } from "@huggingface/inference";
23+
import type { SnippetInferenceProvider, InferenceSnippet, ModelDataMinimal } from "@huggingface/tasks";
2424

2525
type LANGUAGE = "sh" | "js" | "py";
2626

2727
const TEST_CASES: {
2828
testName: string;
29-
model: snippets.ModelDataMinimal;
29+
model: ModelDataMinimal;
3030
languages: LANGUAGE[];
3131
providers: SnippetInferenceProvider[];
3232
opts?: Record<string, unknown>;

packages/tasks/src/index.ts

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -45,11 +45,15 @@ export type {
4545
} from "./widget-example.js";
4646
export { SPECIAL_TOKENS_ATTRIBUTES } from "./tokenizer-data.js";
4747

48-
import * as snippets from "./snippets/index.js";
4948
export * from "./gguf.js";
5049

51-
export { snippets };
52-
export type { InferenceSnippet } from "./snippets/index.js";
50+
export {
51+
type InferenceSnippet,
52+
type ModelDataMinimal,
53+
stringifyGenerationConfig,
54+
stringifyMessages,
55+
getModelInputSnippet,
56+
} from "./snippets/index.js";
5357

5458
export { SKUS, DEFAULT_MEMORY_OPTIONS } from "./hardware.js";
5559
export type { HardwareSpec, SkuType } from "./hardware.js";
Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,3 @@
1-
import * as inputs from "./inputs.js";
2-
import * as curl from "./curl.js";
3-
import * as python from "./python.js";
4-
import * as js from "./js.js";
1+
export * from "./common.js";
2+
export * from "./inputs.js";
53
export * from "./types.js";
6-
7-
export { inputs, curl, python, js };

0 commit comments

Comments
 (0)