Skip to content

Commit ff36efd

Browse files
authored
Address various warnings as errors (#8581)
Address various warnings as errors (#8581) Summary: Pull Request resolved: #8581 Some projects use more restrictive build options than those currently used in ET CI. This means we encountered a number of errors when enabling the build for a microcontroller. Reviewed By: digantdesai, swolchok Differential Revision: D69139962
1 parent 9841e54 commit ff36efd

39 files changed (+186, −143 lines)

extension/threadpool/cpuinfo_utils.cpp

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <executorch/extension/threadpool/cpuinfo_utils.h>
1011

1112
#include <fstream>
@@ -84,7 +85,7 @@ bool populate_available_cpu_mids() {
8485
cpu_midrs->resize(num_possible_cores);
8586
const std::string kMidrFilePathPrefix = "/sys/devices/system/cpu/cpu";
8687
const std::string kMidrFilePathSuffix = "/regs/identification/midr_el1";
87-
for (int32_t i = 0; i < num_possible_cores; ++i) {
88+
for (const auto i : c10::irange(num_possible_cores)) {
8889
std::string midr_file_path =
8990
kMidrFilePathPrefix + std::to_string(i) + kMidrFilePathSuffix;
9091
ET_LOG(Info, "Reading file %s", midr_file_path.c_str());
@@ -115,7 +116,7 @@ uint32_t _get_num_performant_cores() {
115116
ET_LOG(Info, "CPU info and manual query on # of cpus dont match.");
116117
return 0;
117118
}
118-
for (int32_t i = 0; i < cpu_midrs->size(); ++i) {
119+
for (const auto i : c10::irange(cpu_midrs->size())) {
119120
uint32_t masked_midr = (*cpu_midrs)[i] & RIVISION_MASK;
120121
switch (masked_midr) {
121122
case CPUINFO_ARM_MIDR_CORTEX_A520:
@@ -148,7 +149,7 @@ uint32_t get_num_performant_cores() {
148149
uint32_t num_possible_cores = cpuinfo_get_processors_count();
149150
uint32_t num_non_performant_core = 0;
150151
if (uarch_count > 1) {
151-
for (int32_t i = 0; i < uarch_count; ++i) {
152+
for (const auto i : c10::irange(uarch_count)) {
152153
const struct cpuinfo_uarch_info* uarch_info = cpuinfo_get_uarch(i);
153154
if (is_non_performant_core(uarch_info)) {
154155
num_non_performant_core += uarch_info->processor_count;

extension/threadpool/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -23,6 +23,7 @@ def define_common_targets():
2323
srcs = _THREADPOOL_SRCS,
2424
deps = [
2525
"//executorch/runtime/core:core",
26+
"//executorch/runtime/core/portable_type/c10/c10:c10",
2627
],
2728
exported_headers = _THREADPOOL_HEADERS,
2829
exported_deps = [

kernels/portable/cpu/op__to_dim_order_copy.cpp

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,8 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
10+
911
#include <executorch/kernels/portable/cpu/scalar_utils.h>
1012
#include <executorch/kernels/portable/cpu/util/copy_ops_util.h>
1113
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
@@ -41,7 +43,7 @@ int64_t coordinateToIndexWithDimOrder(
4143

4244
dim_order_to_stride_nocheck(
4345
sizes.data(), dim_order.data(), sizes.size(), strides);
44-
for (size_t i = 0; i < self.dim(); ++i) {
46+
for (const auto i : c10::irange(self.dim())) {
4547
index += cur_indices[i] * strides[i];
4648
}
4749
return index;
@@ -59,7 +61,7 @@ void _to_dim_order_copy_impl(const Tensor& self, Tensor& out) {
5961
for (ssize_t i = 0; i < self.numel(); i++) {
6062
// Update the current indices.
6163
for (ssize_t j = self.dim() - 1; j >= 0; j--) {
62-
if (coordinate[j] + 1 < self.size(j)) {
64+
if (coordinate[j] + 1 < static_cast<size_t>(self.size(j))) {
6365
coordinate[j]++;
6466
break;
6567
} else {

kernels/portable/cpu/op_amax.cpp

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <cmath>
1011

1112
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
@@ -44,7 +45,7 @@ Tensor& amax_out(
4445

4546
ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amax.out", CTYPE, [&]() {
4647
CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
47-
for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
48+
for (const auto out_ix : c10::irange(out.numel())) {
4849
out_data[out_ix] = reduce_over_dim_list<CTYPE>(
4950
[](CTYPE v, CTYPE max_v) {
5051
return std::isnan(v) || v > max_v ? v : max_v;

kernels/portable/cpu/op_amin.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -5,7 +5,7 @@
55
* This source code is licensed under the BSD-style license found in the
66
* LICENSE file in the root directory of this source tree.
77
*/
8-
8+
#include <c10/util/irange.h>
99
#include <cmath>
1010

1111
#include <executorch/kernels/portable/cpu/util/reduce_util.h>
@@ -44,7 +44,7 @@ Tensor& amin_out(
4444

4545
ET_SWITCH_REALHBBF16_TYPES(in.scalar_type(), ctx, "amin.out", CTYPE, [&]() {
4646
CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
47-
for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
47+
for (const auto out_ix : c10::irange(out.numel())) {
4848
out_data[out_ix] = reduce_over_dim_list<CTYPE>(
4949
[](CTYPE v, CTYPE min_v) {
5050
return std::isnan(v) || v < min_v ? v : min_v;

kernels/portable/cpu/op_argmax.cpp

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <cmath>
1011
#include <tuple>
1112

@@ -46,7 +47,7 @@ Tensor& argmax_out(
4647
ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmax.out", CTYPE, [&] {
4748
long* out_data = out.mutable_data_ptr<long>();
4849

49-
for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
50+
for (const auto out_ix : c10::irange(out.numel())) {
5051
std::tuple<CTYPE, long> acc = reduce_over_dim<CTYPE>(
5152
[](CTYPE v, long ix, CTYPE acc_val, long acc_ix) {
5253
if (!std::isnan(acc_val) && (std::isnan(v) || v > acc_val)) {

kernels/portable/cpu/op_argmin.cpp

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <cmath>
1011
#include <tuple>
1112

@@ -46,7 +47,7 @@ Tensor& argmin_out(
4647
ET_SWITCH_REALHBF16_TYPES(in.scalar_type(), ctx, "argmin.out", CTYPE, [&] {
4748
long* out_data = out.mutable_data_ptr<long>();
4849

49-
for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
50+
for (const auto out_ix : c10::irange(out.numel())) {
5051
std::tuple<CTYPE, long> acc = reduce_over_dim<CTYPE>(
5152
[](CTYPE v, long ix, CTYPE acc_val, long acc_ix) {
5253
if (!std::isnan(acc_val) && (std::isnan(v) || v < acc_val)) {

kernels/portable/cpu/op_expand_copy.cpp

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -96,7 +96,8 @@ Tensor& expand_copy_out(
9696

9797
ET_KERNEL_CHECK(
9898
ctx,
99-
repeat_tensor(self, {repeats, repeats_size}, out) == Error::Ok,
99+
repeat_tensor(self, makeArrayRef(repeats, repeats_size), out) ==
100+
Error::Ok,
100101
InvalidArgument,
101102
out);
102103

kernels/portable/cpu/util/activation_ops_util.cpp

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -31,7 +31,7 @@ bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) {
3131
ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(in));
3232

3333
const size_t non_negative_dim = dim < 0 ? dim + in.dim() : dim;
34-
const size_t dim_size = in.size(non_negative_dim);
34+
const ssize_t dim_size = in.size(non_negative_dim);
3535

3636
ET_CHECK_OR_RETURN_FALSE(
3737
dim_size % 2 == 0,

kernels/portable/cpu/util/broadcast_util.cpp

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <executorch/kernels/portable/cpu/util/broadcast_util.h>
910
#include <executorch/kernels/portable/cpu/util/repeat_util.h>
1011
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1112
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -274,7 +275,7 @@ void delinearize_index(
274275
size_t* out_indexes,
275276
const size_t out_indexes_len) {
276277
ET_CHECK(shape.size() <= out_indexes_len);
277-
for (auto i = 0; i < shape.size(); ++i) {
278+
for (size_t i = 0; i < shape.size(); ++i) {
278279
auto dim = shape.size() - 1 - i;
279280
auto dim_size = shape[dim];
280281
out_indexes[dim] = linear_index % dim_size;
@@ -304,7 +305,8 @@ size_t linearize_access_indexes(
304305
size_t linear_index = 0;
305306
for (size_t i = 0; i < indexes_broadcast_from.size(); ++i) {
306307
// If this dimension is broadcasted, add zero to the linear address.
307-
if (indexes_broadcast_from[i] >= broadcast_from_shape[i]) {
308+
if (indexes_broadcast_from[i] >=
309+
static_cast<size_t>(broadcast_from_shape[i])) {
308310
ET_CHECK_MSG(
309311
broadcast_from_shape[i] == 1,
310312
"Expected dim size == 1 if broadcasted, but actual dim size is %zu",

kernels/portable/cpu/util/copy_ops_util.h

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -7,6 +7,7 @@
77
*/
88

99
#pragma once
10+
#include <c10/util/irange.h>
1011

1112
#include <executorch/runtime/kernel/kernel_includes.h>
1213

@@ -26,16 +27,16 @@ void _as_strided_copy(
2627
ArrayRef<int64_t> stride,
2728
int64_t dim) {
2829
// the last dimension, copy data
29-
if (dim == size.size() - 1) {
30-
for (size_t i = 0; i < size.at(dim); ++i) {
30+
if (dim == static_cast<int64_t>(size.size()) - 1) {
31+
for (const auto i : c10::irange(size.at(dim))) {
3132
output_data[i] = *input_data;
3233
input_data += stride.at(dim);
3334
}
3435
return;
3536
}
3637
size_t trailing_dims = getTrailingDims(out, dim);
3738
// recursively set data for the next dimension
38-
for (size_t i = 0; i < size.at(dim); ++i) {
39+
for ([[maybe_unused]] const auto i : c10::irange(size.at(dim))) {
3940
_as_strided_copy<CTYPE>(
4041
input_data, output_data, out, size, stride, dim + 1);
4142
input_data += stride.at(dim);

kernels/portable/cpu/util/functional_util.h

Lines changed: 5 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -8,6 +8,8 @@
88

99
#pragma once
1010

11+
#include <c10/util/irange.h>
12+
1113
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1214
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
1315

@@ -30,7 +32,7 @@ inline CTYPE apply_unary_reduce_fn(
3032
const int64_t size,
3133
const int64_t stride = 1) {
3234
CTYPE acc_val = data_in[0];
33-
for (size_t i = 1; i < size; i++) {
35+
for (const auto i : c10::irange(1, size)) {
3436
acc_val = reduce_fun(data_in[i * stride], acc_val);
3537
}
3638
return acc_val;
@@ -51,7 +53,7 @@ inline void apply_unary_map_fn(
5153
CTYPE_OUT* const data_out,
5254
const int64_t size,
5355
const int64_t stride = 1) {
54-
for (size_t i = 0; i < size; i++) {
56+
for (const auto i : c10::irange(size)) {
5557
data_out[i * stride] = map_fun(data_in[i * stride]);
5658
}
5759
}
@@ -77,7 +79,7 @@ inline CTYPE_OUT apply_unary_map_reduce_fn(
7779
const int64_t size,
7880
const int64_t stride = 1) {
7981
CTYPE_OUT acc_val = map_fun(data_in[0]);
80-
for (size_t i = 1; i < size; ++i) {
82+
for (const auto i : c10::irange(1, size)) {
8183
acc_val = reduce_fun(map_fun(data_in[i * stride]), acc_val);
8284
}
8385
return acc_val;

kernels/portable/cpu/util/reduce_util.cpp

Lines changed: 10 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -48,8 +48,7 @@ ET_NODISCARD bool check_dim_list_is_valid(
4848
}
4949

5050
const size_t non_neg_d = _normalize_non_neg_d(d, in.dim());
51-
ET_LOG_AND_RETURN_IF_FALSE(
52-
non_neg_d < kTensorDimensionLimit && non_neg_d >= 0);
51+
ET_LOG_AND_RETURN_IF_FALSE(non_neg_d < kTensorDimensionLimit);
5352

5453
ET_CHECK_OR_RETURN_FALSE(
5554
dim_exist[non_neg_d] == false,
@@ -86,7 +85,7 @@ size_t get_reduced_dim_product(
8685
}
8786
size_t dim_product = 1;
8887
if (!dim.has_value()) {
89-
for (size_t i = 0; i < in.dim(); ++i) {
88+
for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
9089
dim_product *= in.size(i);
9190
}
9291
return dim_product;
@@ -108,7 +107,7 @@ size_t get_reduced_dim_product(
108107
size_t dim_product = 1;
109108
const size_t in_dim = in.dim();
110109
if (!dim_list.has_value() || dim_list.value().size() == 0) {
111-
for (size_t i = 0; i < in.dim(); ++i) {
110+
for (size_t i = 0; i < static_cast<size_t>(in.dim()); ++i) {
112111
dim_product *= in.size(i);
113112
}
114113
return dim_product;
@@ -136,7 +135,7 @@ size_t get_out_numel(
136135
ET_CHECK_VALID_DIM(dim_val, in.dim());
137136
}
138137
const size_t non_neg_dim = _normalize_non_neg_d(dim_val, in.dim());
139-
for (size_t d = 0; d < in.dim(); ++d) {
138+
for (size_t d = 0; d < static_cast<size_t>(in.dim()); ++d) {
140139
if (d != non_neg_dim) {
141140
out_numel *= in.size(d);
142141
}
@@ -155,7 +154,7 @@ size_t get_out_numel(
155154
dim_list) {
156155
size_t out_numel = 1;
157156
if (dim_list.has_value() && dim_list.value().size() != 0) {
158-
for (size_t d = 0; d < in.dim(); ++d) {
157+
for (size_t d = 0; d < static_cast<size_t>(in.dim()); ++d) {
159158
if (!check_dim_in_dim_list(d, in.dim(), dim_list.value())) {
160159
out_numel *= in.size(d);
161160
}
@@ -234,7 +233,7 @@ size_t compute_reduced_out_size(
234233
if (dim.has_value()) {
235234
const auto dim_val = dim.value();
236235
const size_t non_neg_dim = _normalize_non_neg_d(dim_val, in_dim);
237-
for (ssize_t i = 0; i < non_neg_dim; ++i) {
236+
for (size_t i = 0; i < non_neg_dim; ++i) {
238237
sizes_arr[i] = in.size(i);
239238
}
240239
if (keepdim) {
@@ -250,7 +249,7 @@ size_t compute_reduced_out_size(
250249
}
251250
} else {
252251
if (keepdim) {
253-
for (size_t i = 0; i < in_dim; ++i) {
252+
for (size_t i = 0; i < static_cast<size_t>(in_dim); ++i) {
254253
sizes_arr[i] = 1;
255254
}
256255
} else {
@@ -266,7 +265,9 @@ size_t compute_reduced_out_size(
266265
dim_list,
267266
bool keepdim,
268267
executorch::aten::SizesType* sizes_arr) {
269-
const auto in_dim = in.dim();
268+
// check_dim_in_dim_list and later comparisons
269+
// expect in_dim to be size_t, so cast it here
270+
const size_t in_dim = static_cast<size_t>(in.dim());
270271
size_t out_dim = in_dim;
271272

272273
if (dim_list.has_value() && dim_list.value().size() != 0) {

kernels/portable/cpu/util/reduce_util.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -50,15 +50,15 @@ void apply_on_flat_ix_with_dim_mask_and_base(
5050
const size_t start,
5151
const size_t end) {
5252
// Compute innermost dim from dim list
53-
size_t inner_dim = in.dim() - 1;
53+
int64_t inner_dim = in.dim() - 1;
5454
while (!dim_mask[inner_dim]) {
5555
inner_dim--;
5656
}
5757

5858
// Initialize array of indices per dimension. This array is used to maintain
5959
// the per-dimension index of the element in `in` that is being reduced over
6060
// Only the dims that are in the dim list are relevant.
61-
size_t dim_index[kTensorDimensionLimit];
61+
int64_t dim_index[kTensorDimensionLimit];
6262
for (int64_t d = 0; d < in.dim(); d++) {
6363
dim_index[d] = 0;
6464
}

0 commit comments

Comments (0)