27 changes: 27 additions & 0 deletions backends/cadence/aot/quantizer/quantizer.py
@@ -372,3 +372,30 @@ def __init__(self, quantizers: Optional[list[Quantizer]] = None) -> None:
# Add 16-bit quantizers for LinearPattern
quantizers.append(CadenceAtenQuantizer(LinearPattern(), qconfig_A16))
super().__init__(quantizers)


class CadenceWith16BitConvActivationsQuantizer(CadenceQuantizer):
"""
Quantizer including A16 conv
"""

def __init__(self, quantizers: Optional[list[Quantizer]] = None) -> None:
if quantizers is None:
quantizers = []
# Add 16-bit quantizers for Conv patterns
quantizers.append(CadenceAtenQuantizer(Conv1dPattern(), qconfig_A16))
quantizers.append(CadenceAtenQuantizer(Conv2dPattern(), qconfig_A16))
super().__init__(quantizers)


class CadenceWith16BitMatmulActivationsQuantizer(CadenceQuantizer):
"""
Quantizer including A16 matmul
"""

def __init__(self, quantizers: Optional[list[Quantizer]] = None) -> None:
if quantizers is None:
quantizers = []
# Add 16-bit quantizers for MatmulPattern
quantizers.append(CadenceAtenQuantizer(MatmulPattern(), qconfig_A16))
super().__init__(quantizers)
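
For orientation, here is a minimal sketch of how one of these new quantizers could be wired into a PT2E flow. The model, example inputs, and capture call are illustrative assumptions only, and the exact capture API varies across ExecuTorch versions:

# Sketch only: apply the A16-conv quantizer via prepare/convert_pt2e.
import torch
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from executorch.backends.cadence.aot.quantizer.quantizer import (
    CadenceWith16BitConvActivationsQuantizer,
)

model = torch.nn.Conv2d(3, 8, kernel_size=3).eval()
example_inputs = (torch.randn(1, 3, 32, 32),)

exported = torch.export.export(model, example_inputs).module()  # illustrative capture
quantizer = CadenceWith16BitConvActivationsQuantizer()
prepared = prepare_pt2e(exported, quantizer)
prepared(*example_inputs)  # calibration pass
quantized = convert_pt2e(prepared)

With qconfig_A16, conv activations are observed at 16 bits while weights stay 8-bit, which is the W8A16 combination the kernel paths added below consume.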
49 changes: 49 additions & 0 deletions backends/cadence/hifi/operators/op_quantized_conv2d_nchw_out.cpp
@@ -9,6 +9,7 @@
#include <executorch/backends/cadence/hifi/kernels/kernels.h>
#include <executorch/backends/cadence/hifi/operators/operators.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_conv2d.h>

#define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1)))

@@ -532,6 +533,30 @@ void quantized_conv2d_nchw_out(
__ET_UNUSED const Tensor& out_multiplier,
__ET_UNUSED const Tensor& out_shift,
Tensor& out) {
// Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
input.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_conv2d_nchw_out(
ctx,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
in_zero_point,
weight_zero_point,
bias_scale,
output_scale,
output_zero_point,
out_multiplier,
out_shift,
out);
return;
}

const float bias_scale_float = bias_scale.const_data_ptr<float>()[0];
const int32_t weight_zero_point_int =
weight_zero_point.const_data_ptr<int32_t>()[0];
@@ -596,6 +621,30 @@ void quantized_conv2d_nchw_per_tensor_out(
__ET_UNUSED int64_t out_multiplier,
__ET_UNUSED int64_t out_shift,
Tensor& out) {
// Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
input.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_conv2d_nchw_per_tensor_out(
ctx,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
in_zero_point,
weight_zero_point,
bias_scale,
output_scale,
output_zero_point,
out_multiplier,
out_shift,
out);
return;
}

bool optimized = 0;

if ((input.scalar_type() == ScalarType::Char) ||
49 changes: 48 additions & 1 deletion backends/cadence/hifi/operators/op_quantized_conv2d_nhwc_out.cpp
@@ -9,6 +9,7 @@
#include <executorch/backends/cadence/hifi/kernels/kernels.h>
#include <executorch/backends/cadence/hifi/operators/operators.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_conv2d.h>

#define ALIGN_PTR(x, bytes) ((((unsigned)(x)) + (bytes - 1)) & (~(bytes - 1)))

@@ -438,6 +439,29 @@ void quantized_conv2d_nhwc_out(
__ET_UNUSED const Tensor& out_multiplier,
__ET_UNUSED const Tensor& out_shift,
Tensor& out) {
// Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
input.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_conv2d_nhwc_out(
ctx,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
in_zero_point,
weight_zero_point,
bias_scale,
output_scale,
output_zero_point,
out_multiplier,
out_shift,
out);
return;
}
const float bias_scale_float = bias_scale.const_data_ptr<float>()[0];
const int32_t weight_zero_point_int =
weight_zero_point.const_data_ptr<int32_t>()[0];
@@ -502,8 +526,31 @@ void quantized_conv2d_nhwc_per_tensor_out(
__ET_UNUSED int64_t out_multiplier,
__ET_UNUSED int64_t out_shift,
Tensor& out) {
bool optimized = 0;
// Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
input.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_conv2d_nhwc_per_tensor_out(
ctx,
input,
weight,
bias,
stride,
padding,
dilation,
groups,
in_zero_point,
weight_zero_point,
bias_scale,
output_scale,
output_zero_point,
out_multiplier,
out_shift,
out);
return;
}

bool optimized = 0;
Copilot AI commented on Nov 28, 2025:

Variable declaration for 'optimized' appears after the early return, but it was previously at the beginning of the function. This creates inconsistent code structure and the variable is now unreachable when the W8A16 path is taken. Consider moving this declaration back before the W8A16 conditional check for consistency with the original code structure.
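
A sketch of the change this comment describes (context elided with ...): declare the flag before the early-return block and drop the later duplicate.

+  bool optimized = 0;
+
   // Handle W8A16 heterogeneous type (int16_t activations, int8_t weights)
   if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
       ...
     return;
   }
-
-  bool optimized = 0;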
if ((input.scalar_type() == ScalarType::Char) ||
(input.scalar_type() == ScalarType::Byte))
optimized = 1;
38 changes: 36 additions & 2 deletions backends/cadence/hifi/operators/op_quantized_linear_out.cpp
@@ -10,6 +10,7 @@
#include <executorch/backends/cadence/hifi/operators/operators.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <xa_nnlib_kernels_api.h>
#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_linear.h>
#include <xtensa/tie/xt_datacache.h>
#include <algorithm>
#include <cmath>
@@ -218,7 +219,24 @@ void quantized_linear_out(
int64_t out_zero_point,
__ET_UNUSED const optional<Tensor>& offset,
Tensor& out) {
if (out.scalar_type() == executorch::aten::ScalarType::Byte) {

if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
in.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_linear_out(
ctx,
in,
weight,
bias,
in_zero_point,
weight_zero_point,
out_multiplier,
out_shift,
out_zero_point,
offset,
out);
}
else if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
Copilot AI commented on lines +238 to +239, Nov 28, 2025:

Missing space after closing brace on line 238. The 'else' should be on the same line as the closing brace of the preceding if block, or there should be consistent formatting with other similar conditionals in the file.

Suggested change:
-  }
-  else if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
+  } else if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
_quantized_linear_asym8u(
in,
weight,
@@ -260,7 +278,23 @@ void quantized_linear_per_tensor_out(
int64_t out_zero_point,
__ET_UNUSED const optional<Tensor>& offset,
Tensor& out) {
if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
if (out.scalar_type() == ::executorch::aten::ScalarType::Short &&
in.scalar_type() == ::executorch::aten::ScalarType::Short &&
weight.scalar_type() == ::executorch::aten::ScalarType::Char) {
::impl::generic::native::quantized_linear_per_tensor_out(
ctx,
in,
weight,
bias,
in_zero_point,
weight_zero_point,
out_multiplier,
out_shift,
out_zero_point,
offset,
out);
}
else if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
Copilot AI commented on lines +296 to +297, Nov 28, 2025:

Missing space after closing brace on line 296. The 'else' should be on the same line as the closing brace of the preceding if block, or there should be consistent formatting with other similar conditionals in the file.

Suggested change:
-  }
-  else if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
+  } else if (out.scalar_type() == executorch::aten::ScalarType::Byte) {
_quantized_linear_per_tensor_asym8u(
in,
weight,
17 changes: 15 additions & 2 deletions backends/cadence/hifi/operators/op_quantized_matmul_out.cpp
@@ -8,6 +8,7 @@

#include <executorch/backends/cadence/hifi/kernels/kernels.h>
#include <executorch/runtime/kernel/kernel_includes.h>
#include <on_device_ai/Assistant/Jarvis/min_runtime/operators/generic/op_quantized_matmul.h>
#include <stdlib.h>

using executorch::aten::ScalarType;
@@ -192,8 +193,20 @@ void quantized_matmul_out(
size_t leading_dim = X.size(X.dim() - 2);
size_t out_dim = Y.size(Y.dim() - 1 - transposed);
size_t in_dim = X.size(X.dim() - 1);

if (out.scalar_type() == exec_aten::ScalarType::Byte) {
if (out.scalar_type() == exec_aten::ScalarType::Short) {
::impl::generic::native::quantized_matmul_out(
ctx,
X,
X_zero_point,
Y,
Y_zero_point,
bias,
out_multiplier,
out_shift,
out_zero_point,
transposed,
out);
} else if (out.scalar_type() == exec_aten::ScalarType::Byte) {
_typed_quantized_matmul<uint8_t>(
ctx,
X,
33 changes: 33 additions & 0 deletions backends/cadence/hifi/operators/op_quantized_matmul_out.h
@@ -0,0 +1,33 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/

#pragma once

#include "executorch/runtime/core/exec_aten/exec_aten.h"
#include "executorch/runtime/kernel/kernel_runtime_context.h"

namespace impl {
namespace HiFi {
namespace native {

::executorch::aten::Tensor& quantized_matmul_out(
::executorch::runtime::KernelRuntimeContext& ctx,
const ::executorch::aten::Tensor& X,
int64_t X_zero_point,
const ::executorch::aten::Tensor& Y,
int64_t Y_zero_point,
const ::executorch::aten::optional<::executorch::aten::Tensor>& bias,
int64_t out_multiplier,
int64_t out_shift,
int64_t out_zero_point,
bool transposed,
::executorch::aten::Tensor& out);

} // namespace native
} // namespace HiFi
} // namespace impl
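
Because targets.bzl (below) exports this header for the quantized_matmul_out target, other HiFi code can call the entry point directly. A hypothetical call site, sketched with placeholder quantization parameters and caller-provided tensors; all names here are illustrative:

// Hypothetical caller (sketch): ctx, X, Y, and out are assumed to be
// provided by the surrounding runtime; zero points and rescale values
// below are placeholders, not real calibration results.
#include <executorch/backends/cadence/hifi/operators/op_quantized_matmul_out.h>

void run_matmul_example(
    ::executorch::runtime::KernelRuntimeContext& ctx,
    const ::executorch::aten::Tensor& X,
    const ::executorch::aten::Tensor& Y,
    ::executorch::aten::Tensor& out) {
  ::impl::HiFi::native::quantized_matmul_out(
      ctx,
      X,
      /*X_zero_point=*/0,
      Y,
      /*Y_zero_point=*/0,
      /*bias=*/::executorch::aten::optional<::executorch::aten::Tensor>(),
      /*out_multiplier=*/1,
      /*out_shift=*/0,
      /*out_zero_point=*/0,
      /*transposed=*/false,
      out);
}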
21 changes: 15 additions & 6 deletions backends/cadence/hifi/operators/targets.bzl
@@ -2,7 +2,7 @@ load("@fbsource//tools/build_defs:platform_defs.bzl", "CXX")
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")


def define_operator(name: str, deps: list[str] | None = None) -> None:
def define_operator(name: str, deps: list[str] | None = None, exported_headers: list[str] | None = None) -> None:
op_name = "op_{}".format(name)

# Deps used by all operators.
@@ -21,6 +21,8 @@ def define_operator(name: str, deps: list[str] | None = None) -> None:
]
if deps == None:
deps = []
if exported_headers == None:
exported_headers = ["operators.h"]

runtime.cxx_library(
name = op_name,
@@ -32,7 +34,7 @@
],
compatible_with = ["ovr_config//cpu:xtensa"],
deps = deps + common_deps,
exported_headers = ["operators.h"],
exported_headers = exported_headers,
)

OPERATORS = [
@@ -65,7 +67,6 @@ OPERATORS = [
"ne",
"permute_copy",
"pow",
"quantized_conv2d_nchw_out",
"quantized_conv2d_nchw_asym8sxsym8s_asym8s_per_tensor_out",
"quantized_conv2d_nchw_asym8uxsym8u_asym8u_per_tensor_out",
"quantized_conv1d_ncl_asym8sxsym8s_asym8s_per_tensor_out",
@@ -74,7 +75,6 @@
"quantized_conv2d_nchw_depthwise_asym8uxsym8u_asym8u_per_tensor_out",
"quantized_conv2d_nchw_dilated_asym8sxsym8s_asym8s_per_tensor_out",
"quantized_conv2d_nchw_dilated_asym8uxsym8u_asym8u_per_tensor_out",
"quantized_conv2d_nhwc_out",
"quantized_conv2d_nhwc_asym8sxsym8s_asym8s_per_tensor_out",
"quantized_conv2d_nhwc_asym8uxsym8u_asym8u_per_tensor_out",
"quantized_conv1d_nlc_asym8sxsym8s_asym8s_per_tensor_out",
@@ -87,10 +87,8 @@
"quantized_fully_connected_asym8sxasym8s_asym8s_per_tensor_out",
"quantized_fully_connected_asym8uxasym8u_asym8u_per_tensor_out",
"quantized_layer_norm",
"quantized_linear_out",
"quantized_linear_asym8sxasym8s_asym8s_per_tensor_out",
"quantized_linear_asym8uxasym8u_asym8u_per_tensor_out",
"quantized_matmul_out",
"quantized_matmul_asym8sxasym8s_asym8s_out",
"quantized_matmul_asym8uxasym8u_asym8u_out",
"quantized_relu_out",
@@ -122,3 +120,14 @@ def define_common_targets():
# Define build targets for all operators registered in the tables above.
for op in OPERATORS:
define_operator(op)

# quantized_linear_out and quantized_linear_per_tensor_out need an additional dependency for int16 support
define_operator("quantized_linear_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_linear"])
define_operator("quantized_linear_per_tensor_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_linear"])

# quantized_conv2d_nchw_out and quantized_conv2d_nhwc_out need an additional dependency for int16 support
define_operator("quantized_conv2d_nchw_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_conv2d"])
define_operator("quantized_conv2d_nhwc_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_conv2d"])

# quantized_matmul_out needs an additional dependency for int16 support
define_operator("quantized_matmul_out", deps=["fbcode//on_device_ai/Assistant/Jarvis/min_runtime/operators/generic:op_quantized_matmul"], exported_headers=["op_quantized_matmul_out.h"])