Skip to content

Commit d719e8e

Browse files
cyyever and swolchok authored
Use std::string_view and std::optional (#10541)
### Summary This PR moves ``executorch::aten::{string_view,optional}`` to std counterparts. --------- Co-authored-by: Scott Wolchok <[email protected]>
1 parent 7933ac9 commit d719e8e

File tree

143 files changed

+520
-539
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

143 files changed

+520
-539
lines changed

backends/cadence/fusion_g3/operators/op_clamp.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,13 +21,13 @@
2121
#include <executorch/kernels/portable/cpu/util/math_util.h>
2222
#include <executorch/runtime/kernel/kernel_includes.h>
2323

24-
using ::executorch::aten::optional;
2524
using ::executorch::aten::Scalar;
2625
using ::executorch::aten::ScalarType;
2726
using ::executorch::aten::Tensor;
2827
using ::executorch::runtime::canCast;
2928
using ::executorch::runtime::Error;
3029
using ::executorch::runtime::KernelRuntimeContext;
30+
using std::optional;
3131

3232
namespace cadence {
3333
namespace impl {

backends/cadence/fusion_g3/operators/op_dequantize.cpp

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ using ::executorch::runtime::Error;
2424
using ::executorch::runtime::KernelRuntimeContext;
2525

2626
template <typename T>
27-
using optional = ::executorch::aten::optional<T>;
27+
using optional = std::optional<T>;
2828
/* ScalarType in Executorch do not have support for below data types.
2929
* So, creating a placeholder for these data types. Once, ScalarTypes is
3030
* updated to have support for below data types, these can be removed and
@@ -51,7 +51,7 @@ void check_dequantize_per_tensor_args(
5151
int64_t quant_min,
5252
int64_t quant_max,
5353
ScalarType dtype,
54-
::executorch::aten::optional<ScalarType>& out_dtype,
54+
std::optional<ScalarType>& out_dtype,
5555
Tensor& out) {
5656
ET_CHECK_MSG(
5757
input.scalar_type() == ScalarType::Byte ||
@@ -93,7 +93,7 @@ Tensor& dequantize_impl(
9393
float* scale_data,
9494
int* zero_point_data,
9595
int* axis,
96-
::executorch::aten::optional<ScalarType> out_dtype) {
96+
std::optional<ScalarType> out_dtype) {
9797
const ::executorch::aten::ArrayRef<Tensor::SizesType> input_size =
9898
input.sizes();
9999

@@ -260,8 +260,8 @@ Tensor& dequantize_impl(
260260
}
261261
}
262262

263-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
264-
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
263+
std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
264+
::executorch::aten::ArrayRef<int64_t>{
265265
dims, size_t(input.dim() - 1)}};
266266

267267
// Actual dequantization logic
@@ -466,8 +466,8 @@ Tensor& dequantize_impl(
466466
}
467467
}
468468

469-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
470-
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
469+
std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
470+
::executorch::aten::ArrayRef<int64_t>{
471471
dims, size_t(input.dim() - 1)}};
472472

473473
// Actual dequantization logic
@@ -600,7 +600,7 @@ Tensor& dequantize_per_tensor_tensor_args_out(
600600
int64_t quant_min,
601601
int64_t quant_max,
602602
ScalarType dtype,
603-
::executorch::aten::optional<ScalarType> out_dtype,
603+
std::optional<ScalarType> out_dtype,
604604
Tensor& out) {
605605
#ifdef OP_ARG_CHECK
606606
ET_CHECK_MSG(
@@ -639,12 +639,12 @@ Tensor& dequantize_per_channel_out(
639639
KernelRuntimeContext& context,
640640
const Tensor& input,
641641
const Tensor& scale,
642-
const ::executorch::aten::optional<Tensor>& opt_zero_points,
642+
const std::optional<Tensor>& opt_zero_points,
643643
int64_t axis,
644644
int64_t quant_min,
645645
int64_t quant_max,
646646
ScalarType dtype,
647-
::executorch::aten::optional<ScalarType> out_dtype,
647+
std::optional<ScalarType> out_dtype,
648648
Tensor& out) {
649649
if (axis < 0) {
650650
axis += executorch::runtime::nonzero_dim(input);

backends/cadence/fusion_g3/operators/op_div.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,14 +19,14 @@
1919
#include <executorch/runtime/kernel/kernel_includes.h>
2020
#include <executorch/runtime/platform/assert.h>
2121

22-
using ::executorch::aten::optional;
2322
using ::executorch::aten::Scalar;
2423
using ::executorch::aten::ScalarType;
25-
using ::executorch::aten::string_view;
2624
using ::executorch::aten::Tensor;
2725
using ::executorch::runtime::canCast;
2826
using ::executorch::runtime::Error;
2927
using ::executorch::runtime::KernelRuntimeContext;
28+
using std::optional;
29+
using std::string_view;
3030

3131
namespace cadence {
3232
namespace impl {
@@ -686,4 +686,4 @@ Tensor& div_scalar_mode_out(
686686
} // namespace native
687687
} // namespace G3
688688
} // namespace impl
689-
} // namespace cadence
689+
} // namespace cadence

backends/cadence/fusion_g3/operators/op_mean.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,11 +17,11 @@
1717
#include <executorch/runtime/platform/assert.h>
1818

1919
using ::executorch::aten::ArrayRef;
20-
using ::executorch::aten::optional;
2120
using ::executorch::aten::ScalarType;
2221
using ::executorch::aten::Tensor;
2322
using ::executorch::runtime::Error;
2423
using ::executorch::runtime::KernelRuntimeContext;
24+
using std::optional;
2525

2626
namespace cadence {
2727
namespace impl {

backends/cadence/fusion_g3/operators/op_native_layer_norm.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,11 @@
1919
#include <executorch/runtime/kernel/kernel_includes.h>
2020

2121
using ::executorch::aten::IntArrayRef;
22-
using ::executorch::aten::optional;
2322
using ::executorch::aten::ScalarType;
2423
using ::executorch::aten::Tensor;
2524
using ::executorch::runtime::Error;
2625
using ::executorch::runtime::KernelRuntimeContext;
26+
using std::optional;
2727

2828
namespace cadence {
2929
namespace impl {

backends/cadence/fusion_g3/operators/op_quantize.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -329,8 +329,8 @@ Tensor& quantize_impl(
329329
}
330330
}
331331

332-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
333-
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
332+
std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
333+
::executorch::aten::ArrayRef<int64_t>{
334334
dims, size_t(input.dim() - 1)}};
335335

336336
// Actual quantization logic
@@ -534,8 +534,8 @@ Tensor& quantize_impl(
534534
}
535535
}
536536

537-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
538-
optional_dim_list{::executorch::aten::ArrayRef<int64_t>{
537+
std::optional<::executorch::aten::ArrayRef<int64_t>> optional_dim_list{
538+
::executorch::aten::ArrayRef<int64_t>{
539539
dims, size_t(input.dim() - 1)}};
540540

541541
// Actual quantization logic

backends/cadence/fusion_g3/operators/op_slice_copy.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,8 +37,8 @@ Tensor& slice_copy_Tensor_out(
3737
KernelRuntimeContext& ctx,
3838
const Tensor& in,
3939
int64_t dim,
40-
::executorch::aten::optional<int64_t> start_val,
41-
::executorch::aten::optional<int64_t> end_val,
40+
std::optional<int64_t> start_val,
41+
std::optional<int64_t> end_val,
4242
int64_t step,
4343
Tensor& out) {
4444
(void)ctx;

backends/cadence/fusion_g3/operators/operators.h

Lines changed: 15 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -47,13 +47,12 @@ ::executorch::aten::Tensor& dequantize_per_channel_out(
4747
::executorch::runtime::KernelRuntimeContext& context,
4848
const ::executorch::aten::Tensor& input,
4949
const ::executorch::aten::Tensor& scale,
50-
const ::executorch::aten::optional<::executorch::aten::Tensor>&
51-
opt_zero_points,
50+
const std::optional<::executorch::aten::Tensor>& opt_zero_points,
5251
int64_t axis,
5352
int64_t quant_min,
5453
int64_t quant_max,
5554
::executorch::aten::ScalarType dtype,
56-
::executorch::aten::optional<::executorch::aten::ScalarType> out_dtype,
55+
std::optional<::executorch::aten::ScalarType> out_dtype,
5756
::executorch::aten::Tensor& out);
5857

5958
::executorch::aten::Tensor& dequantize_per_tensor_out(
@@ -64,7 +63,7 @@ ::executorch::aten::Tensor& dequantize_per_tensor_out(
6463
int64_t quant_min,
6564
int64_t quant_max,
6665
::executorch::aten::ScalarType dtype,
67-
::executorch::aten::optional<::executorch::aten::ScalarType> out_dtype,
66+
std::optional<::executorch::aten::ScalarType> out_dtype,
6867
::executorch::aten::Tensor& out);
6968

7069
::executorch::aten::Tensor& div_out(
@@ -77,7 +76,7 @@ ::executorch::aten::Tensor& div_out_mode(
7776
::executorch::runtime::KernelRuntimeContext& ctx,
7877
const ::executorch::aten::Tensor& a,
7978
const ::executorch::aten::Tensor& b,
80-
::executorch::aten::optional<::executorch::aten::string_view> mode,
79+
std::optional<std::string_view> mode,
8180
::executorch::aten::Tensor& out);
8281

8382
::executorch::aten::Tensor& div_scalar_out(
@@ -90,7 +89,7 @@ ::executorch::aten::Tensor& div_scalar_mode_out(
9089
::executorch::runtime::KernelRuntimeContext& ctx,
9190
const ::executorch::aten::Tensor& a,
9291
const ::executorch::aten::Scalar& b,
93-
::executorch::aten::optional<::executorch::aten::string_view> mode,
92+
std::optional<std::string_view> mode,
9493
::executorch::aten::Tensor& out);
9594

9695
::executorch::aten::Tensor& exp_out(
@@ -101,10 +100,9 @@ ::executorch::aten::Tensor& exp_out(
101100
::executorch::aten::Tensor& mean_dim_out(
102101
::executorch::runtime::KernelRuntimeContext& ctx,
103102
const ::executorch::aten::Tensor& in,
104-
::executorch::aten::optional<::executorch::aten::ArrayRef<int64_t>>
105-
dim_list,
103+
std::optional<::executorch::aten::ArrayRef<int64_t>> dim_list,
106104
bool keepdim,
107-
::executorch::aten::optional<::executorch::aten::ScalarType> dtype,
105+
std::optional<::executorch::aten::ScalarType> dtype,
108106
::executorch::aten::Tensor& out);
109107

110108
::executorch::aten::Tensor& mul_out(
@@ -127,8 +125,8 @@ native_layer_norm_out(
127125
::executorch::runtime::KernelRuntimeContext& ctx,
128126
const ::executorch::aten::Tensor& input,
129127
::executorch::aten::IntArrayRef normalized_shape,
130-
const ::executorch::aten::optional<::executorch::aten::Tensor>& weight,
131-
const ::executorch::aten::optional<::executorch::aten::Tensor>& bias,
128+
const std::optional<::executorch::aten::Tensor>& weight,
129+
const std::optional<::executorch::aten::Tensor>& bias,
132130
double eps,
133131
::executorch::aten::Tensor& out,
134132
::executorch::aten::Tensor& mean_out,
@@ -165,8 +163,8 @@ ::executorch::aten::Tensor& slice_copy_Tensor_out(
165163
::executorch::runtime::KernelRuntimeContext& ctx,
166164
const ::executorch::aten::Tensor& in,
167165
int64_t dim,
168-
::executorch::aten::optional<int64_t> start_val,
169-
::executorch::aten::optional<int64_t> end_val,
166+
std::optional<int64_t> start_val,
167+
std::optional<int64_t> end_val,
170168
int64_t step,
171169
::executorch::aten::Tensor& out);
172170

@@ -226,15 +224,15 @@ ::executorch::aten::Tensor& where_out(
226224
::executorch::aten::Tensor& clamp_out(
227225
::executorch::runtime::KernelRuntimeContext& ctx,
228226
const ::executorch::aten::Tensor& in,
229-
const ::executorch::aten::optional<::executorch::aten::Scalar>& min_opt,
230-
const ::executorch::aten::optional<::executorch::aten::Scalar>& max_opt,
227+
const std::optional<::executorch::aten::Scalar>& min_opt,
228+
const std::optional<::executorch::aten::Scalar>& max_opt,
231229
::executorch::aten::Tensor& out);
232230

233231
::executorch::aten::Tensor& clamp_tensor_out(
234232
::executorch::runtime::KernelRuntimeContext& ctx,
235233
const ::executorch::aten::Tensor& in,
236-
const ::executorch::aten::optional<::executorch::aten::Tensor>& min_opt,
237-
const ::executorch::aten::optional<::executorch::aten::Tensor>& max_opt,
234+
const std::optional<::executorch::aten::Tensor>& min_opt,
235+
const std::optional<::executorch::aten::Tensor>& max_opt,
238236
::executorch::aten::Tensor& out);
239237

240238
::executorch::aten::Tensor& transpose_copy_int_out(

backends/cadence/hifi/operators/op_clamp.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -51,8 +51,8 @@ namespace native {
5151
Tensor& clamp_Tensor_out(
5252
RuntimeContext& ctx,
5353
const Tensor& in,
54-
const executorch::aten::optional<Tensor>& min_opt,
55-
const executorch::aten::optional<Tensor>& max_opt,
54+
const std::optional<Tensor>& min_opt,
55+
const std::optional<Tensor>& max_opt,
5656
Tensor& out) {
5757
(void)ctx;
5858

@@ -325,8 +325,8 @@ Tensor& clamp_Tensor_out(
325325
Tensor& clamp_tensor_out(
326326
RuntimeContext& ctx,
327327
const Tensor& in,
328-
const executorch::aten::optional<Tensor>& min_opt,
329-
const executorch::aten::optional<Tensor>& max_opt,
328+
const std::optional<Tensor>& min_opt,
329+
const std::optional<Tensor>& max_opt,
330330
Tensor& out) {
331331
return clamp_Tensor_out(ctx, in, min_opt, max_opt, out);
332332
}

backends/cadence/hifi/operators/op_div.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ Tensor& div_out_mode(
178178
RuntimeContext& ctx,
179179
const Tensor& a,
180180
const Tensor& b,
181-
executorch::aten::optional<executorch::aten::string_view> mode,
181+
std::optional<std::string_view> mode,
182182
Tensor& out) {
183183
ET_KERNEL_CHECK(
184184
ctx,

backends/cadence/hifi/operators/op_quantized_fully_connected_out.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,12 +19,12 @@ namespace native {
1919

2020
using ::executorch::aten::ArrayRef;
2121
using ::executorch::aten::IntArrayRef;
22-
using ::executorch::aten::optional;
2322
using ::executorch::aten::Scalar;
2423
using ::executorch::aten::ScalarType;
2524
using ::executorch::aten::SizesType;
2625
using ::executorch::aten::Tensor;
2726
using ::executorch::runtime::KernelRuntimeContext;
27+
using std::optional;
2828

2929
void inline _quantized_fully_connected_asym8u(
3030
const Tensor& in,

backends/cadence/hifi/operators/op_quantized_linear_out.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,11 @@ namespace impl {
2020
namespace HiFi {
2121
namespace native {
2222

23-
using ::executorch::aten::optional;
2423
using ::executorch::aten::ScalarType;
2524
using ::executorch::aten::Tensor;
2625
using ::executorch::runtime::getLeadingDims;
2726
using ::executorch::runtime::KernelRuntimeContext;
27+
using std::optional;
2828

2929
// The nnlib kernel to compute quantized linear via matmul.
3030

backends/cadence/hifi/operators/op_softmax.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ Tensor& _softmax_out(
5050
// Adjust for negative dim
5151
dim = dim < 0 ? dim + executorch::runtime::nonzero_dim(in) : dim;
5252

53-
const executorch::aten::optional<int64_t>& dim_t = dim;
53+
const std::optional<int64_t>& dim_t = dim;
5454
const size_t d = ET_NORMALIZE_IX(dim_t.value(), in.dim());
5555
const size_t size = in.size(d);
5656

backends/cadence/hifi/operators/operators.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ ::executorch::aten::Tensor& div_out_mode(
3939
::executorch::runtime::KernelRuntimeContext& ctx,
4040
const ::executorch::aten::Tensor& a,
4141
const ::executorch::aten::Tensor& b,
42-
::executorch::aten::optional<::executorch::aten::string_view> mode,
42+
std::optional<std::string_view> mode,
4343
::executorch::aten::Tensor& out);
4444

4545
void quantized_linear_out(

backends/cadence/hifi/operators/tests/test_op_div.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,16 +25,16 @@ namespace HiFi {
2525
namespace native {
2626
namespace {
2727

28-
using ::executorch::aten::optional;
2928
using ::executorch::aten::Scalar;
3029
using ::executorch::aten::ScalarType;
31-
using ::executorch::aten::string_view;
3230
using ::executorch::aten::Tensor;
3331
using ::executorch::aten::TensorImpl;
3432
using ::executorch::runtime::Error;
3533
using ::executorch::runtime::KernelRuntimeContext;
3634
using ::executorch::runtime::runtime_init;
3735
using ::executorch::runtime::testing::TensorFactory;
36+
using std::optional;
37+
using std::string_view;
3838

3939
class HiFiDivTest : public OperatorTest {
4040
public:

backends/cadence/reference/operators/operators.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ using ::executorch::runtime::getLeadingDims;
2727
inline __attribute__((always_inline)) void linear_(
2828
const ::executorch::aten::Tensor& input,
2929
const ::executorch::aten::Tensor& weight,
30-
const ::executorch::aten::optional<::executorch::aten::Tensor>& bias,
30+
const std::optional<::executorch::aten::Tensor>& bias,
3131
::executorch::aten::Tensor& output) {
3232
const float* __restrict__ input_data = input.const_data_ptr<float>();
3333
const float* __restrict__ weight_data = weight.const_data_ptr<float>();

backends/cadence/reference/operators/quantized_fully_connected_out.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,10 @@ namespace impl {
1313
namespace reference {
1414
namespace native {
1515

16-
using ::executorch::aten::optional;
1716
using ::executorch::aten::ScalarType;
1817
using ::executorch::aten::Tensor;
1918
using ::executorch::runtime::KernelRuntimeContext;
19+
using std::optional;
2020

2121
void quantized_fully_connected_out(
2222
__ET_UNUSED KernelRuntimeContext& ctx,

0 commit comments

Comments (0)