@@ -351,6 +351,7 @@ struct ggml_backend_opencl_context {
     cl_program program_gemv_noshuffle_general;
     cl_program program_gemv_noshuffle;
     cl_program program_get_rows;
+    cl_program program_glu;
     cl_program program_im2col_f16;
     cl_program program_im2col_f32;
     cl_program program_mul_mat_Ab_Bi_8x4;
@@ -401,6 +402,8 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_relu;
     cl_kernel kernel_sigmoid_f32, kernel_sigmoid_f16;
     cl_kernel kernel_clamp;
+    cl_kernel kernel_geglu, kernel_reglu, kernel_swiglu,
+              kernel_geglu_f16, kernel_reglu_f16, kernel_swiglu_f16;
    cl_kernel kernel_norm;
    cl_kernel kernel_rms_norm;
    cl_kernel kernel_group_norm;
@@ -738,6 +741,27 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         GGML_LOG_CONT(".");
     }
 
+    // glu
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "glu.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("glu.cl");
+#endif
+        backend_ctx->program_glu =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+        CL_CHECK((backend_ctx->kernel_geglu      = clCreateKernel(backend_ctx->program_glu, "kernel_geglu",      &err), err));
+        CL_CHECK((backend_ctx->kernel_reglu      = clCreateKernel(backend_ctx->program_glu, "kernel_reglu",      &err), err));
+        CL_CHECK((backend_ctx->kernel_swiglu     = clCreateKernel(backend_ctx->program_glu, "kernel_swiglu",     &err), err));
+        CL_CHECK((backend_ctx->kernel_geglu_f16  = clCreateKernel(backend_ctx->program_glu, "kernel_geglu_f16",  &err), err));
+        CL_CHECK((backend_ctx->kernel_reglu_f16  = clCreateKernel(backend_ctx->program_glu, "kernel_reglu_f16",  &err), err));
+        CL_CHECK((backend_ctx->kernel_swiglu_f16 = clCreateKernel(backend_ctx->program_glu, "kernel_swiglu_f16", &err), err));
+        GGML_LOG_CONT(".");
+    }
+
     // get_rows
     {
 #ifdef GGML_OPENCL_EMBED_KERNELS
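`glu.cl` itself is not part of this diff. From the twelve `clSetKernelArg` calls in `ggml_cl_glu` further down, the device side presumably looks roughly like the sketch below (the swiglu f32 variant; the signature is inferred from the host code, not copied from the shipped source, and the f16 variants would use `half` with `cl_khr_fp16` enabled):

```c
// Sketch only: signature inferred from the host-side argument list below.
// One work-group handles one row; local threads stride across its columns.
kernel void kernel_swiglu(
        global char * src0, ulong offset0,   // args 0-1: input buffer + byte offset
        global char * src1, ulong offset1,   // args 2-3: gate buffer (== src0 when fused)
        global char * dst,  ulong offsetd,   // args 4-5: output buffer + byte offset
        ulong nb01, ulong nb11,              // args 6-7: row strides of src0/src1 in bytes
        int ne0, ulong nb1,                  // args 8-9: output row length, dst row stride
        int ne00_off, int ne10_off) {        // args 10-11: column offsets into the input rows
    global float * src0_row = (global float *)(src0 + offset0 + get_group_id(0)*nb01) + ne00_off;
    global float * src1_row = (global float *)(src1 + offset1 + get_group_id(0)*nb11) + ne10_off;
    global float * dst_row  = (global float *)(dst  + offsetd + get_group_id(0)*nb1);

    for (int i0 = get_local_id(0); i0 < ne0; i0 += get_local_size(0)) {
        const float x = src0_row[i0];
        dst_row[i0] = (x / (1.0f + exp(-x))) * src1_row[i0];   // silu(x) * gate
    }
}
```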
@@ -2242,6 +2266,15 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
             default:
                 return false;
         }
+    case GGML_OP_GLU:
+        switch (ggml_get_glu_op(op)) {
+            case GGML_GLU_OP_GEGLU:
+            case GGML_GLU_OP_REGLU:
+            case GGML_GLU_OP_SWIGLU:
+                return ggml_is_contiguous_1(op->src[0]) && (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16);
+            default:
+                return false;
+        }
     case GGML_OP_CLAMP:
         return op->src[0]->type == GGML_TYPE_F32;
     case GGML_OP_SOFT_MAX:
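The new `supports_op` case admits GLU nodes whose rows are contiguous (`ggml_is_contiguous_1`) and whose result type is F32 or F16. For orientation, a graph that exercises this path could be built roughly as follows (a usage sketch assuming the standard ggml GLU API; `ctx`, `n`, and `n_rows` are placeholders):

```c
// Fused form: one input whose rows hold both halves, [x | gate].
// ggml_swiglu() creates a GGML_OP_GLU node with src1 == NULL; the output
// rows are half as long as the input rows.
struct ggml_tensor * x   = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 2*n, n_rows);
struct ggml_tensor * out = ggml_swiglu(ctx, x);

// Split form: value and gate come from two same-shaped tensors, src1 != NULL.
struct ggml_tensor * a    = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n, n_rows);
struct ggml_tensor * b    = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n, n_rows);
struct ggml_tensor * out2 = ggml_swiglu_split(ctx, a, b);
```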
@@ -6143,6 +6176,91 @@ static void ggml_cl_sum_rows(ggml_backend_t backend, const ggml_tensor * src0, c
     backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
 }
 
+static void ggml_cl_glu(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_ASSERT(src0);
+    GGML_ASSERT(src0->extra);
+    GGML_ASSERT(dst);
+    GGML_ASSERT(dst->extra);
+
+    GGML_ASSERT(ggml_is_contiguous_1(src0));
+
+    if (src1) {
+        GGML_ASSERT(src1);
+        GGML_ASSERT(src1->extra);
+        GGML_ASSERT(ggml_are_same_shape(src0, src1));
+    }
+
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+    cl_kernel kernel;
+    switch (ggml_get_glu_op(dst)) {
+        case GGML_GLU_OP_GEGLU:
+            if (dst->type == GGML_TYPE_F32) {
+                kernel = backend_ctx->kernel_geglu;
+            } else {
+                kernel = backend_ctx->kernel_geglu_f16;
+            }
+            break;
+        case GGML_GLU_OP_REGLU:
+            if (dst->type == GGML_TYPE_F32) {
+                kernel = backend_ctx->kernel_reglu;
+            } else {
+                kernel = backend_ctx->kernel_reglu_f16;
+            }
+            break;
+        case GGML_GLU_OP_SWIGLU:
+            if (dst->type == GGML_TYPE_F32) {
+                kernel = backend_ctx->kernel_swiglu;
+            } else {
+                kernel = backend_ctx->kernel_swiglu_f16;
+            }
+            break;
+        default:
+            GGML_ABORT("Unsupported glu op");
+    }
+
+    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+    ggml_tensor_extra_cl * extra1 = src1 ? (ggml_tensor_extra_cl *)src1->extra : nullptr;
+
+    cl_ulong offset0 = extra0->offset + src0->view_offs;
+    cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+    cl_ulong offset1 = extra1 ? extra1->offset + src1->view_offs : offset0;
+
+    const int ne0 = dst->ne[0];
+
+    const cl_ulong nb01 = src0->nb[1];
+    const cl_ulong nb11 = src1 ? src1->nb[1] : nb01;
+
+    const cl_ulong nb1 = dst->nb[1];
+
+    const int swp = ((const int32_t *) dst->op_params)[1];
+    const int ne00_off = src1 ? 0 : (swp ? ne0 : 0);
+    const int ne10_off = src1 ? 0 : (swp ? 0 : ne0);
+
+    CL_CHECK(clSetKernelArg(kernel,  0, sizeof(cl_mem),   &extra0->data_device));
+    CL_CHECK(clSetKernelArg(kernel,  1, sizeof(cl_ulong), &offset0));
+    CL_CHECK(clSetKernelArg(kernel,  2, sizeof(cl_mem),   src1 ? &extra1->data_device : &extra0->data_device));
+    CL_CHECK(clSetKernelArg(kernel,  3, sizeof(cl_ulong), &offset1));
+    CL_CHECK(clSetKernelArg(kernel,  4, sizeof(cl_mem),   &extrad->data_device));
+    CL_CHECK(clSetKernelArg(kernel,  5, sizeof(cl_ulong), &offsetd));
+    CL_CHECK(clSetKernelArg(kernel,  6, sizeof(cl_ulong), &nb01));
+    CL_CHECK(clSetKernelArg(kernel,  7, sizeof(cl_ulong), &nb11));
+    CL_CHECK(clSetKernelArg(kernel,  8, sizeof(int),      &ne0));
+    CL_CHECK(clSetKernelArg(kernel,  9, sizeof(cl_ulong), &nb1));
+    CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int),      &ne00_off));
+    CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int),      &ne10_off));
+
+    const size_t nrows = ggml_nrows(src0);
+    size_t nth = 512;
+    size_t global_work_size[] = {nrows*nth, 1, 1};
+    size_t local_work_size[] = {nth, 1, 1};
+
+    backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+}
+
 // ------------------------------------------------------------------------------
 // Op offloading
 // ------------------------------------------------------------------------------
@@ -6244,6 +6362,12 @@ bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor
             default:
                 return false;
         } break;
+        case GGML_OP_GLU:
+            if (!any_on_device) {
+                return false;
+            }
+            func = ggml_cl_glu;
+            break;
         case GGML_OP_CLAMP:
             if (!any_on_device) {
                 return false;
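Finally, the dispatch hunk mirrors its neighbors: each supported op sets `func` and falls through to the common tail of `ggml_cl_compute_forward`, which (roughly, based on the surrounding file rather than this diff) looks like:

```c
// Common tail (context, not part of this diff): bail out if no OpenCL
// implementation was selected, otherwise run it on the node's operands.
if (!func) {
    return false;
}
func(backend, tensor->src[0], tensor->src[1], tensor);
return true;
```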
0 commit comments