Skip to content

Commit b5a6362

Browse files
authored
irangeify most of runtime/core/exec_aten (#8612)
Had to skip scalar_type_util.h because it needs the constexpr patch to land in PyTorch (and for us to update our pin to pick it up).
1 parent 401016d commit b5a6362

14 files changed

+72
-55
lines changed

runtime/core/exec_aten/testing_util/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@ def define_common_targets():
4444
"//executorch/runtime/core/exec_aten/util:scalar_type_util" + aten_suffix,
4545
"//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
4646
"//executorch/runtime/core/exec_aten/util:tensor_dimension_limit",
47+
"//executorch/runtime/core/portable_type/c10/c10:c10",
4748
],
4849
exported_external_deps = [
4950
"gmock" + aten_suffix,

runtime/core/exec_aten/testing_util/tensor_factory.h

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include <algorithm>
66
#include <cstdint>
77

8+
#include <c10/util/irange.h>
89
#include <executorch/runtime/core/exec_aten/exec_aten.h>
910
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
1011
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
@@ -78,7 +79,7 @@ inline bool check_strides(
7879
// a.strides == (1, 1, 2). We want to create a sorted mapping to make the
7980
// sorted_stride as (2, 1, 1) while sorted_size == (3, 2, 1)
8081
std::vector<std::int32_t> sorted_idx(sizes.size());
81-
for (size_t i = 0; i < sizes.size(); i++) {
82+
for (const auto i : c10::irange(sizes.size())) {
8283
sorted_idx[i] = i;
8384
}
8485
std::sort(
@@ -98,7 +99,7 @@ inline bool check_strides(
9899
// Use the mapping to rearrange the sizes and strides
99100
std::vector<std::int32_t> sorted_sizes(sizes.size());
100101
std::vector<std::int32_t> sorted_strides(sizes.size());
101-
for (size_t i = 0; i < sizes.size(); i++) {
102+
for (const auto i : c10::irange(sizes.size())) {
102103
sorted_sizes[i] = sizes[sorted_idx[i]] == 0 ? 1 : sizes[sorted_idx[i]];
103104
sorted_strides[i] = strides[sorted_idx[i]];
104105
}
@@ -132,7 +133,7 @@ inline bool check_dim_order(
132133
}
133134
size_t gauss_sum = 0;
134135
std::vector<int> count(dim_order.size(), 0);
135-
for (int i = 0; i < dim_order.size(); i++) {
136+
for (const auto i : c10::irange(dim_order.size())) {
136137
if (dim_order[i] >= sizes.size()) {
137138
return false;
138139
}
@@ -378,7 +379,7 @@ class TensorFactory {
378379
std::vector<executorch::aten::StridesType> contiguous_strides =
379380
internal::strides_from_dim_order(sizes, contiguous_dim_order);
380381

381-
for (int32_t i = 0; i < input.dim(); i++) {
382+
for (const auto i : c10::irange(input.dim())) {
382383
ET_CHECK_MSG(
383384
input.strides()[i] == contiguous_strides[i],
384385
"Input tensor is not contiguous");
@@ -394,10 +395,10 @@ class TensorFactory {
394395
std::vector<ctype> channels_last_data(
395396
N * C * H * W); // Create a new blob with the same total size to contain
396397
// channels_last data
397-
for (int32_t n = 0; n < N; ++n) {
398-
for (int32_t c = 0; c < C; ++c) {
399-
for (int32_t h = 0; h < H; ++h) {
400-
for (int32_t w = 0; w < W; ++w) {
398+
for (const auto n : c10::irange(N)) {
399+
for (const auto c : c10::irange(C)) {
400+
for (const auto h : c10::irange(H)) {
401+
for (const auto w : c10::irange(W)) {
401402
// Calculate the index in the original blob
402403
int32_t old_index = ((n * C + c) * H + h) * W + w;
403404
// Calculate the index in the new blob
@@ -614,7 +615,7 @@ inline void validate_strides(
614615
}
615616
}
616617
// No two dimensions can have same stride value
617-
for (int32_t i = 0; i < strides.size(); ++i) {
618+
for (const auto i : c10::irange(strides.size())) {
618619
for (int32_t j = i + 1; j < strides.size(); ++j) {
619620
if ((sizes[i] == 0) || (sizes[j] == 0) ||
620621
((sizes[i] == 1) || (sizes[j] == 1))) {
@@ -830,7 +831,7 @@ class TensorFactory {
830831
// given strides is empty.
831832
if (!sizes.empty() && dim_order.empty()) {
832833
default_dim_order.resize(sizes.size(), 1);
833-
for (size_t i = 0; i < sizes.size(); ++i) {
834+
for (const auto i : c10::irange(sizes.size())) {
834835
default_dim_order[i] = i;
835836
}
836837
}
@@ -904,10 +905,10 @@ class TensorFactory {
904905
std::vector<ctype> channels_last_data(
905906
N * C * H * W); // Create a new blob with the same total size to contain
906907
// channels_last data
907-
for (int32_t n = 0; n < N; ++n) {
908-
for (int32_t c = 0; c < C; ++c) {
909-
for (int32_t h = 0; h < H; ++h) {
910-
for (int32_t w = 0; w < W; ++w) {
908+
for (const auto n : c10::irange(N)) {
909+
for (const auto c : c10::irange(C)) {
910+
for (const auto h : c10::irange(H)) {
911+
for (const auto w : c10::irange(W)) {
911912
// Calculate the index in the original blob
912913
int32_t old_index = ((n * C + c) * H + h) * W + w;
913914
// Calculate the index in the new blob

runtime/core/exec_aten/testing_util/tensor_util.cpp

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
#include <cstring>
1111
#include <ostream>
1212

13+
#include <c10/util/irange.h>
1314
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1415
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
1516
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
@@ -50,7 +51,7 @@ bool data_is_close(
5051
if (a == b) {
5152
return true;
5253
}
53-
for (size_t i = 0; i < numel; i++) {
54+
for (const auto i : c10::irange(numel)) {
5455
const auto ai = a[i];
5556
const auto bi = b[i];
5657

@@ -201,7 +202,7 @@ bool tensor_lists_are_close(
201202
if (num_tensors_a != num_tensors_b) {
202203
return false;
203204
}
204-
for (size_t i = 0; i < num_tensors_a; i++) {
205+
for (const auto i : c10::irange(num_tensors_a)) {
205206
if (!tensors_are_close(tensors_a[i], tensors_b[i], rtol, opt_atol)) {
206207
return false;
207208
}
@@ -245,7 +246,7 @@ template <typename T>
245246
std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {
246247
// TODO(dbort): Make this smarter: show dimensions, listen to strides,
247248
// break up or truncate data when it's huge
248-
for (auto i = 0; i < numel; i++) {
249+
for (const auto i : c10::irange(numel)) {
249250
os << data[i];
250251
if (i < numel - 1) {
251252
os << ", ";
@@ -257,7 +258,7 @@ std::ostream& print_data(std::ostream& os, const T* data, size_t numel) {
257258
template <typename T>
258259
std::ostream&
259260
print_data(std::ostream& os, const etensor::complex<T>* data, size_t numel) {
260-
for (auto i = 0; i < numel; i++) {
261+
for (const auto i : c10::irange(numel)) {
261262
os << data[i].real_ << " + " << data[i].imag_ << "j";
262263
if (i < numel - 1) {
263264
os << ", ";
@@ -276,7 +277,7 @@ template <>
276277
std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {
277278
// TODO(dbort): Make this smarter: show dimensions, listen to strides,
278279
// break up or truncate data when it's huge
279-
for (auto i = 0; i < numel; i++) {
280+
for (const auto i : c10::irange(numel)) {
280281
os << (uint64_t)data[i];
281282
if (i < numel - 1) {
282283
os << ", ";
@@ -292,7 +293,7 @@ std::ostream& print_data(std::ostream& os, const uint8_t* data, size_t numel) {
292293
*/
293294
std::ostream& operator<<(std::ostream& os, const Tensor& t) {
294295
os << "ETensor(sizes={";
295-
for (auto dim = 0; dim < t.dim(); dim++) {
296+
for (const auto dim : c10::irange(t.dim())) {
296297
os << t.size(dim);
297298
if (dim < t.dim() - 1) {
298299
os << ", ";

runtime/core/exec_aten/testing_util/test/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,5 +25,6 @@ def define_common_targets():
2525
preprocessor_flags = preprocessor_flags,
2626
deps = [
2727
"//executorch/runtime/core/exec_aten/testing_util:tensor_util" + aten_suffix,
28+
"//executorch/runtime/core/portable_type/c10/c10:c10",
2829
],
2930
)

runtime/core/exec_aten/testing_util/test/tensor_factory_test.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
* LICENSE file in the root directory of this source tree.
77
*/
88

9+
#include <c10/util/irange.h>
910
#include <executorch/runtime/core/error.h>
1011
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
1112
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
@@ -86,7 +87,7 @@ using torch::executor::TensorImpl;
8687
"Arrays are not equal size." #a1 " size:%zu," #a2 " size:%zu", \
8788
a1.size(), \
8889
a2.size()); \
89-
for (size_t i = 0; i < a1.size(); ++i) { \
90+
for (const auto i : c10::irange(a1.size())) { \
9091
ET_CHECK_MSG( \
9192
a1[i] == a2[i], \
9293
"Value mismatch at index %zu, " #a1 \
@@ -784,7 +785,7 @@ void run_zeros_like_test(Tensor input) {
784785

785786
// A Tensor created manually, that should be identical to `actual`.
786787
std::vector<int32_t> expected_data;
787-
for (int i = 0; i < input.numel(); i++) {
788+
for (const auto i : c10::irange(input.numel())) {
788789
expected_data.push_back(0);
789790
}
790791
#ifdef USE_ATEN_LIB
@@ -842,7 +843,7 @@ void run_ones_like_test(Tensor input) {
842843

843844
// A Tensor created manually, that should be identical to `actual`.
844845
std::vector<int32_t> expected_data;
845-
for (int i = 0; i < input.numel(); i++) {
846+
for (const auto i : c10::irange(input.numel())) {
846847
expected_data.push_back(1);
847848
}
848849
#ifdef USE_ATEN_LIB

runtime/core/exec_aten/util/dim_order_util.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
#include <cstdio>
1414
#include <cstring>
1515

16+
#include <c10/util/irange.h>
1617
#include <executorch/runtime/core/error.h>
1718
#include <executorch/runtime/platform/assert.h>
1819
#include <executorch/runtime/platform/compiler.h>
@@ -23,7 +24,7 @@ namespace runtime {
2324
namespace {
2425
template <typename DimOrderType>
2526
bool validate_dim_order(const DimOrderType* dim_order, const size_t dims) {
26-
for (size_t i = 0; i < dims; ++i) {
27+
for (const auto i : c10::irange(dims)) {
2728
if (dim_order[i] >= static_cast<DimOrderType>(dims)) {
2829
return false;
2930
}
@@ -43,7 +44,7 @@ template <typename DimOrderType>
4344
inline bool is_contiguous_dim_order(
4445
const DimOrderType* dim_order,
4546
const size_t dims) {
46-
for (size_t i = 0; i < dims; ++i) {
47+
for (const auto i : c10::irange(dims)) {
4748
if (dim_order[i] != static_cast<DimOrderType>(i)) {
4849
return false;
4950
}

runtime/core/exec_aten/util/targets.bzl

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,7 @@ def define_common_targets():
4040
],
4141
exported_deps = [
4242
"//executorch/runtime/core:core",
43+
"//executorch/runtime/core/portable_type/c10/c10:c10",
4344
],
4445
visibility = [
4546
"//executorch/...",
@@ -62,6 +63,7 @@ def define_common_targets():
6263
exported_deps = [
6364
":tensor_dimension_limit",
6465
"//executorch/runtime/core:core",
66+
"//executorch/runtime/core/portable_type/c10/c10:c10",
6567
] + [
6668
"//executorch/runtime/core/exec_aten:lib" + aten_suffix,
6769
":scalar_type_util" + aten_suffix,

runtime/core/exec_aten/util/tensor_util.h

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717

1818
#include <limits>
1919

20+
#include <c10/util/irange.h>
2021
#include <executorch/runtime/core/array_ref.h>
2122
#include <executorch/runtime/core/error.h>
2223
#include <executorch/runtime/core/exec_aten/exec_aten.h>
@@ -277,7 +278,7 @@
277278
a_strides = a__.strides(); \
278279
const ::executorch::aten::ArrayRef<executorch::aten::StridesType> \
279280
b_strides = b__.strides(); \
280-
for (size_t i = 0; i < a__.dim(); i++) { \
281+
for (const auto i : c10::irange(a__.dim())) { \
281282
ET_CHECK_MSG( \
282283
a_strides[i] == b_strides[i], \
283284
"a.strides()[%zu] shall equal to b.strides()[%zu], " \
@@ -309,7 +310,7 @@
309310
b_strides = b__.strides(); \
310311
const ::executorch::aten::ArrayRef<executorch::aten::StridesType> \
311312
c_strides = c__.strides(); \
312-
for (size_t i = 0; i < a__.dim(); i++) { \
313+
for (const auto i : c10::irange(a__.dim())) { \
313314
ET_CHECK_MSG( \
314315
a_strides[i] == b_strides[i] && b_strides[i] == c_strides[i], \
315316
"a_strides[%zu], b_strides[%zu] and c_strides[%zu] " \
@@ -967,7 +968,7 @@ inline size_t coordinateToIndex(
967968
const executorch::aten::Tensor& tensor,
968969
const size_t* const coordinate) {
969970
size_t index = 0;
970-
for (int d = 0; d < tensor.dim(); ++d) {
971+
for (const auto d : c10::irange(tensor.dim())) {
971972
index += coordinate[d] * getTrailingDims(tensor, d);
972973
}
973974
return index;
@@ -999,7 +1000,7 @@ inline size_t coordinateToIndexWithTrailingDimsMemo(
9991000
const size_t* const coordinate,
10001001
const size_t trailing_dims_memo[kTensorDimensionLimit]) {
10011002
size_t index = 0;
1002-
for (int d = 0; d < tensor.dim(); ++d) {
1003+
for (const auto d : c10::irange(tensor.dim())) {
10031004
index += coordinate[d] * trailing_dims_memo[d];
10041005
}
10051006
return index;
@@ -1021,7 +1022,7 @@ inline void indexToCoordinate(
10211022
size_t index,
10221023
size_t* coordinate) {
10231024
ET_CHECK(index < static_cast<size_t>(tensor.numel()));
1024-
for (auto i = 0; i < tensor.dim(); ++i) {
1025+
for (const auto i : c10::irange(tensor.dim())) {
10251026
auto dim = tensor.dim() - 1 - i;
10261027
size_t dim_size = tensor.size(dim);
10271028
coordinate[dim] = index % dim_size;
@@ -1211,7 +1212,7 @@ ET_NODISCARD inline Error resize_tensor(
12111212
std::array<executorch::aten::SizesType, kTensorDimensionLimit>
12121213
new_sizes_casted{};
12131214
size_t new_sizes_ndim = new_sizes.size();
1214-
for (size_t i = 0; i < new_sizes_ndim; ++i) {
1215+
for (const auto i : c10::irange(new_sizes_ndim)) {
12151216
new_sizes_casted[i] =
12161217
static_cast<executorch::aten::SizesType>(new_sizes[i]);
12171218
}
@@ -1342,7 +1343,7 @@ inline size_t calculate_linear_index(
13421343
const executorch::aten::StridesType* strides,
13431344
const size_t ndim) {
13441345
size_t index = 0;
1345-
for (size_t i = 0; i < ndim; i++) {
1346+
for (const auto i : c10::irange(ndim)) {
13461347
index += coordinate[i] * strides[i];
13471348
}
13481349
return index;

runtime/core/exec_aten/util/tensor_util_aten.cpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>
1010

1111
#include <ATen/Tensor.h> // @manual
12+
#include <c10/util/irange.h>
1213
#include <executorch/runtime/platform/assert.h>
1314

1415
namespace executorch {
@@ -41,7 +42,7 @@ bool tensor_has_valid_dim_order(at::Tensor t) {
4142

4243
if (!validate_dim_order(dim_order, t.dim())) {
4344
ET_LOG(Error, "Tensor dim order is not valid:");
44-
for (size_t d = 0; d < t.dim(); ++d) {
45+
for (const auto d : c10::irange(t.dim())) {
4546
ET_LOG(
4647
Error,
4748
" dim_order(%zu): %zu",
@@ -66,7 +67,7 @@ inline bool tensor_is_default_or_channels_last_dim_order(at::Tensor t) {
6667
ET_LOG(
6768
Error,
6869
"Expected tensor to have default or channels last dim order, but got");
69-
for (size_t d = 0; d < t.dim(); ++d) {
70+
for (const auto d : c10::irange(t.dim())) {
7071
ET_LOG(
7172
Error,
7273
" dim_order(%zu): %zu",
@@ -96,7 +97,7 @@ bool tensors_have_same_dim_order(
9697
bool all_channels_last =
9798
is_channels_last_dim_order(first_dim_order, tensor_list[0].dim());
9899

99-
for (size_t i = 1; i < tensor_list.size(); ++i) {
100+
for (const auto i : c10::irange(1, tensor_list.size())) {
100101
ET_CHECK_OR_RETURN_FALSE(
101102
get_dim_order(tensor_list[i], other_dim_order, tensor_list[i].dim()) ==
102103
Error::Ok,

0 commit comments

Comments
 (0)