Skip to content

Commit 34a2c4e

Browse files
authored
[Components] lamini - new components (#16822)
1 parent 0d5fa2c commit 34a2c4e

File tree

11 files changed

+717
-11
lines changed

11 files changed

+717
-11
lines changed
Lines changed: 103 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,103 @@
1+
import app from "../../lamini.app.mjs";
2+
import constants from "../../common/constants.mjs";
3+
import utils from "../../common/utils.mjs";
4+
5+
export default {
  key: "lamini-create-fine-tune-job",
  name: "Create Fine-Tune Job",
  description: "Create a fine-tuning job with a dataset. [See the documentation](https://docs.lamini.ai/api/).",
  version: "0.0.1",
  type: "action",
  props: {
    app,
    modelName: {
      description: "Base model to be fine-tuned.",
      propDefinition: [
        app,
        "modelName",
        // Only base models are valid fine-tuning targets, so exclude
        // previously fine-tuned models from the options list.
        () => ({
          includeFineTunedModels: false,
        }),
      ],
    },
    datasetId: {
      type: "string",
      label: "Dataset ID",
      description: "Previously uploaded dataset to use for training. Please use the **Upload Dataset** action to upload a dataset.",
    },
    fineTuneArgs: {
      type: "object",
      label: "Finetune Arguments",
      description: "Optional hyperparameters for fine-tuning. Each property is optional:\n- `index_pq_m`: Number of subquantizers for PQ (eg. 8)\n- `index_max_size`: Maximum index size (eg. 65536)\n- `max_steps`: Maximum number of training steps (eg. 60)\n- `batch_size`: Training batch size (eg. 1)\n- `learning_rate`: Learning rate (eg. 0.0003)\n- `index_pq_nbits`: Number of bits per subquantizer (eg. 8)\n- `max_length`: Maximum sequence length (eg. 2048)\n- `index_ivf_nlist`: Number of IVF lists (eg. 2048)\n- `save_steps`: Steps between checkpoints (eg. 60)\n- `args_name`: Name for the argument set (eg. \"demo\")\n- `r_value`: R value for LoRA (eg. 32)\n- `index_hnsw_m`: Number of neighbors in HNSW (eg. 32)\n- `index_method`: Indexing method (eg. \"IndexIVFPQ\")\n- `optim`: Optimizer to use (eg. \"adafactor\")\n- `index_hnsw_efConstruction`: HNSW construction parameter (eg. 16)\n- `index_hnsw_efSearch`: HNSW search parameter (eg. 8)\n- `index_k`: Number of nearest neighbors (eg. 2)\n- `index_ivf_nprobe`: Number of IVF probes (eg. 48)\n- `eval_steps`: Steps between evaluations (eg. 30)\n[See the documentation](https://docs.lamini.ai/tuning/hyperparameters/#finetune_args).",
      optional: true,
    },
    gpuConfig: {
      type: "object",
      label: "GPU Config",
      description: "Optional GPU configuration for fine-tuning. [See the documentation](https://docs.lamini.ai/tuning/hyperparameters/#gpu_config).",
      optional: true,
    },
    isPublic: {
      type: "boolean",
      label: "Is Public",
      description: "Whether this fine-tuning job and dataset should be publicly accessible.",
      optional: true,
    },
    customModelName: {
      type: "string",
      label: "Custom Model Name",
      description: "A human-readable name for the fine-tuned model.",
      optional: true,
    },
  },
  methods: {
    /**
     * Starts a fine-tuning job by POSTing to the v1 `/train` endpoint.
     *
     * @param {object} args - Request options forwarded to `app.post`
     *   (e.g. `$` context and the `data` payload).
     * @returns {Promise<object>} The API response for the created job.
     */
    createFineTuneJob(args = {}) {
      return this.app.post({
        versionPath: constants.VERSION_PATH.V1,
        path: "/train",
        ...args,
      });
    },
  },
  async run({ $ }) {
    // The upload base path is needed twice: to validate the dataset and
    // to build the full path of the training file.
    const { upload_base_path: uploadBasePath } = await this.app.getUploadBasePath({
      $,
    });

    // Confirm the dataset was previously uploaded before creating the job;
    // this surfaces a clear error if the dataset ID is unknown.
    await this.app.getExistingDataset({
      $,
      data: {
        dataset_id: this.datasetId,
        upload_base_path: uploadBasePath,
      },
    });

    // Optional object props may arrive as JSON strings from the UI, so
    // normalize them with utils.parseJson before sending.
    const response = await this.createFineTuneJob({
      $,
      data: {
        model_name: this.modelName,
        dataset_id: this.datasetId,
        upload_file_path: `${uploadBasePath}/${this.datasetId}.jsonlines`,
        finetune_args: utils.parseJson(this.fineTuneArgs),
        gpu_config: utils.parseJson(this.gpuConfig),
        is_public: this.isPublic,
        custom_model_name: this.customModelName,
      },
    });

    $.export("$summary", `Successfully created a fine-tune job with ID \`${response.job_id}\`.`);
    return response;
  },
};
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
import app from "../../lamini.app.mjs";
2+
3+
export default {
  key: "lamini-evaluate-job",
  name: "Evaluate Job",
  description: "Evaluate a fine-tuning job by job ID. [See the documentation](https://docs.lamini.ai/api/).",
  version: "0.0.1",
  type: "action",
  props: {
    app,
    jobId: {
      propDefinition: [
        app,
        "jobId",
      ],
      description: "The ID of the fine-tuning job to evaluate.",
    },
  },
  methods: {
    /**
     * Fetches evaluation results for a fine-tuning job.
     *
     * @param {object} opts - `jobId` plus any extra request options
     *   forwarded to `app.makeRequest` (e.g. the `$` context).
     * @returns {Promise<object>} The evaluation response from the API.
     */
    evaluateJob({
      jobId, ...args
    } = {}) {
      return this.app.makeRequest({
        path: `/finetune_eval/jobs/${jobId}`,
        ...args,
      });
    },
  },
  async run({ $ }) {
    const { jobId } = this;

    const response = await this.evaluateJob({
      $,
      jobId,
    });

    $.export("$summary", `Successfully evaluated job with ID \`${jobId}\`.`);
    return response;
  },
};
Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
import app from "../../lamini.app.mjs";
2+
import utils from "../../common/utils.mjs";
3+
4+
export default {
  key: "lamini-generate-completion",
  name: "Generate Completion",
  description: "Generate completions using a Lamini model. [See the documentation](https://docs.lamini.ai/api/).",
  version: "0.0.1",
  type: "action",
  props: {
    app,
    modelName: {
      propDefinition: [
        app,
        "modelName",
      ],
      description: "The model to use for completion.",
    },
    prompt: {
      type: "string",
      label: "Prompt",
      description: "The prompt to send to the model.",
    },
    outputType: {
      propDefinition: [
        app,
        "outputType",
      ],
    },
    maxTokens: {
      propDefinition: [
        app,
        "maxTokens",
      ],
    },
    maxNewTokens: {
      propDefinition: [
        app,
        "maxNewTokens",
      ],
    },
  },
  methods: {
    /**
     * POSTs a single completion request to the `/completions` endpoint.
     *
     * @param {object} args - Request options forwarded to `app.post`
     *   (e.g. `$` context and the `data` payload).
     * @returns {Promise<object>} The completion response from the API.
     */
    generateCompletion(args = {}) {
      return this.app.post({
        path: "/completions",
        ...args,
      });
    },
  },
  async run({ $ }) {
    // outputType may arrive as a JSON string from the UI; normalize it
    // with utils.parseJson before sending.
    const response = await this.generateCompletion({
      $,
      data: {
        model_name: this.modelName,
        prompt: this.prompt,
        output_type: utils.parseJson(this.outputType),
        max_tokens: this.maxTokens,
        max_new_tokens: this.maxNewTokens,
      },
    });

    $.export("$summary", `Successfully generated completion for prompt with model ${this.modelName}.`);
    return response;
  },
};
Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
import app from "../../lamini.app.mjs";
2+
import utils from "../../common/utils.mjs";
3+
4+
export default {
  key: "lamini-get-batch-completions",
  name: "Get Batch Completions",
  description: "Retrieve the results of a batch completion request from Lamini. [See the documentation](https://docs.lamini.ai/api/).",
  version: "0.0.1",
  type: "action",
  props: {
    app,
    modelName: {
      propDefinition: [
        app,
        "modelName",
      ],
    },
    prompt: {
      type: "string[]",
      label: "Prompts",
      description: "The prompts to use for completion.",
    },
    outputType: {
      propDefinition: [
        app,
        "outputType",
      ],
    },
    maxTokens: {
      propDefinition: [
        app,
        "maxTokens",
      ],
    },
    maxNewTokens: {
      propDefinition: [
        app,
        "maxNewTokens",
      ],
    },
  },
  methods: {
    /**
     * Submits a batch of prompts to the `/batch_completions` endpoint.
     *
     * @param {object} args - Request options forwarded to `app.post`
     *   (e.g. `$` context and the `data` payload).
     * @returns {Promise<object>} The submission response, including the batch `id`.
     */
    submitBatchCompletions(args = {}) {
      return this.app.post({
        path: "/batch_completions",
        ...args,
      });
    },
    /**
     * Fetches the results of a previously submitted batch by its ID.
     *
     * @param {object} opts - `id` of the batch plus any extra request
     *   options forwarded to `app.makeRequest`.
     * @returns {Promise<object>} The batch result response from the API.
     */
    getBatchCompletions({
      id, ...args
    } = {}) {
      return this.app.makeRequest({
        path: `/batch_completions/${id}/result`,
        ...args,
      });
    },
  },
  async run({ $ }) {
    // Submit the batch first; the API responds with an ID used to look
    // up results. outputType may arrive as a JSON string from the UI,
    // so normalize it with utils.parseJson.
    const { id } = await this.submitBatchCompletions({
      $,
      data: {
        model_name: this.modelName,
        prompt: this.prompt,
        output_type: utils.parseJson(this.outputType),
        max_tokens: this.maxTokens,
        max_new_tokens: this.maxNewTokens,
      },
    });

    // NOTE(review): results are fetched immediately after submission;
    // presumably the endpoint returns whatever is available (possibly
    // in-progress) — confirm against the Lamini API behavior.
    const response = await this.getBatchCompletions({
      $,
      id,
    });

    $.export("$summary", `Successfully submitted batch completion with ID \`${id}\`.`);
    return response;
  },
};

0 commit comments

Comments
 (0)