
Commit f7cc72f

Revert D73564127
Differential Revision: D76493425
Pull Request resolved: #11606
1 parent: 30582ab · commit: f7cc72f

File tree

5 files changed (+122 lines, −96 lines)


CMakeLists.txt

Lines changed: 0 additions & 8 deletions
@@ -583,14 +583,6 @@ if(EXECUTORCH_BUILD_PYBIND)
     torch
   )
 
-  if(EXECUTORCH_BUILD_EXTENSION_MODULE)
-    if(CMAKE_TOOLCHAIN_IOS OR CMAKE_TOOLCHAIN_ANDROID OR APPLE)
-      list(APPEND _dep_libs extension_module_static)
-    else()
-      list(APPEND _dep_libs extension_module)
-    endif()
-  endif()
-
   if(EXECUTORCH_BUILD_TESTS)
     list(APPEND _dep_libs test_backend_compiler_lib)
   endif()

devtools/bundled_program/test/test_end2end.py

Lines changed: 30 additions & 1 deletion
@@ -5,7 +5,21 @@
 # LICENSE file in the root directory of this source tree.
 
 # flake8: noqa: F401
+import functools
+import inspect
+import os
+import random
 import unittest
+from typing import Callable, Dict, Optional, Tuple, Type
+
+import executorch.exir as exir
+
+import executorch.exir.control_flow as control_flow
+
+# @manual=//executorch/extension/pytree:pybindings
+import executorch.extension.pytree as pytree
+
+import torch
 
 from executorch.devtools.bundled_program.core import BundledProgram
 from executorch.devtools.bundled_program.serialize import (
@@ -21,6 +35,8 @@
 try:
     from executorch.extension.pybindings.portable_lib import (
         _load_bundled_program_from_buffer,
+        _load_for_executorch_from_buffer,
+        _load_for_executorch_from_bundled_program,
     )
 
     kernel_mode = "lean"
@@ -31,6 +47,8 @@
 try:
     from executorch.extension.pybindings.aten_lib import (  # @manual=//executorch/extension/pybindings:aten_lib
         _load_bundled_program_from_buffer,
+        _load_for_executorch_from_buffer,
+        _load_for_executorch_from_bundled_program,
     )
 
     assert kernel_mode is None
@@ -57,8 +75,19 @@ def test_sample_model_e2e(self):
             bundled_program_buffer
         )
 
+        executorch_module = _load_for_executorch_from_bundled_program(
+            executorch_bundled_program
+        )
+
         for method_name in eager_model.method_names:
-            executorch_bundled_program.verify_result_with_bundled_expected_output(
+            executorch_module.load_bundled_input(
+                executorch_bundled_program,
+                method_name,
+                0,
+            )
+            executorch_module.plan_execute(method_name)
+            executorch_module.verify_result_with_bundled_expected_output(
+                executorch_bundled_program,
                 method_name,
                 0,
             )
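
For reference, the flow this test exercises after the revert, as a minimal standalone sketch (names are taken from the diff above; how `bundled_program_buffer` is produced comes from the surrounding test setup, and "forward" stands in for the model's method names):

    from executorch.extension.pybindings.portable_lib import (
        _load_bundled_program_from_buffer,
        _load_for_executorch_from_bundled_program,
    )

    # `bundled_program_buffer` is assumed to be the serialized bundled program
    # (bytes) built earlier in the test.
    executorch_bundled_program = _load_bundled_program_from_buffer(
        bundled_program_buffer
    )
    executorch_module = _load_for_executorch_from_bundled_program(
        executorch_bundled_program
    )

    for method_name in ("forward",):  # hypothetical method name
        # Stage the bundled test inputs for this method and test set.
        executorch_module.load_bundled_input(
            executorch_bundled_program, method_name, 0
        )
        # Run the method, then compare its outputs against the bundled
        # expected outputs.
        executorch_module.plan_execute(method_name)
        executorch_module.verify_result_with_bundled_expected_output(
            executorch_bundled_program, method_name, 0
        )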

extension/pybindings/README.md

Lines changed: 2 additions & 1 deletion
@@ -27,6 +27,8 @@ CMAKE_ARGS="-DEXECUTORCH_BUILD_MPS=ON" ./install_executorch.sh
 - `_reset_profile_results()`: Reset profile results.
 ## Classes
 ### ExecuTorchModule
+- `load_bundled_input()`: Load bundled input.
+- `verify_result_with_bundled_expected_output(bundle: str, method_name: str, testset_idx: int, rtol: float = 1e-5, atol: float = 1e-8)`: Verify result with bundled expected output.
 - `plan_execute()`: Plan and execute.
 - `run_method()`: Run method.
 - `forward()`: Forward. This takes a pytree-flattend PyTorch-tensor-based input.
@@ -35,6 +37,5 @@ CMAKE_ARGS="-DEXECUTORCH_BUILD_MPS=ON" ./install_executorch.sh
 - `__call__()`: Call method.
 ### BundledModule
 This class is currently empty and serves as a placeholder for future methods and attributes.
-- `verify_result_with_bundled_expected_output(method_name: str, testset_idx: int, rtol: float = 1e-5, atol: float = 1e-8)`: Verify result with bundled expected output.
 ## Note
 All functions and methods are guarded by a call guard that redirects `cout` and `cerr` to the Python environment.
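
As context for the method list above, a minimal usage sketch of the documented `ExecuTorchModule` entry points (the `model.pte` path and the single-tensor input are hypothetical; the exact input structure depends on the exported model):

    import torch
    from executorch.extension.pybindings.portable_lib import (
        _load_for_executorch_from_buffer,
    )

    # Load a serialized ExecuTorch program into an ExecuTorchModule.
    with open("model.pte", "rb") as f:  # hypothetical path
        module = _load_for_executorch_from_buffer(f.read())

    # forward() takes a pytree-flattened sequence of PyTorch tensors and
    # returns a list of outputs; __call__() behaves the same way.
    outputs = module.forward((torch.ones(2, 2),))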

extension/pybindings/pybindings.cpp

Lines changed: 90 additions & 82 deletions
@@ -23,7 +23,6 @@
 #include <executorch/extension/data_loader/buffer_data_loader.h>
 #include <executorch/extension/data_loader/mmap_data_loader.h>
 #include <executorch/extension/memory_allocator/malloc_memory_allocator.h>
-#include <executorch/extension/module/bundled_module.h>
 #include <executorch/extension/threadpool/threadpool.h>
 #include <executorch/runtime/backend/interface.h>
 #include <executorch/runtime/core/data_loader.h>
@@ -97,7 +96,6 @@ using ::executorch::ET_RUNTIME_NAMESPACE::Program;
 using ::executorch::extension::BufferDataLoader;
 using ::executorch::extension::MallocMemoryAllocator;
 using ::executorch::extension::MmapDataLoader;
-using ::executorch::extension::ET_BUNDLED_MODULE_NAMESPACE::BundledModule;
 using ::executorch::runtime::ArrayRef;
 using ::executorch::runtime::DataLoader;
 using ::executorch::runtime::Error;
@@ -442,54 +440,13 @@ inline std::unique_ptr<Module> load_module_from_file(
       program_verification);
 }
 
-inline py::list get_outputs_as_py_list(
-    const std::vector<EValue>& outputs,
-    bool clone_outputs = true) {
-  const auto outputs_size = outputs.size();
-  py::list list(outputs_size);
-  for (size_t i = 0; i < outputs_size; ++i) {
-    auto& v = outputs[i];
-    if (Tag::None == v.tag) {
-      list[i] = py::none();
-    } else if (Tag::Int == v.tag) {
-      list[i] = py::cast(v.toInt());
-    } else if (Tag::Double == v.tag) {
-      list[i] = py::cast(v.toDouble());
-    } else if (Tag::Bool == v.tag) {
-      list[i] = py::cast(v.toBool());
-    } else if (Tag::String == v.tag) {
-      list[i] = py::cast(std::string(v.toString().data()));
-    } else if (Tag::Tensor == v.tag) {
-#ifdef USE_ATEN_LIB
-      // Clone so the outputs in python do not share a lifetime with the
-      // module object
-      if (clone_outputs) {
-        list[i] = py::cast(v.toTensor().clone());
-      } else {
-        list[i] = py::cast(v.toTensor());
-      }
-#else
-      if (clone_outputs) {
-        list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()).clone());
-      } else {
-        list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()));
-      }
-#endif
-    } else {
-      ET_ASSERT_UNREACHABLE_MSG("Invalid model output type");
-    }
-  }
-  return list;
-}
-
 static constexpr size_t kDEFAULT_BUNDLED_INPUT_POOL_SIZE = 16 * 1024U;
 
-struct PyBundledModule : public BundledModule {
+struct PyBundledModule final {
   explicit PyBundledModule(
       const py::bytes& buffer,
       uint32_t bundled_input_pool_size)
-      : BundledModule(buffer.cast<std::string_view>().data()),
-        bundled_program_ptr_(buffer),
+      : bundled_program_ptr_(buffer),
         program_ptr_(static_cast<const void*>(
             bundled_program_flatbuffer::GetBundledProgram(
                 get_bundled_program_ptr())
@@ -518,33 +475,6 @@ struct PyBundledModule : public BundledModule {
     return program_len_;
   }
 
-  py::list verify_result_with_bundled_expected_output(
-      const std::string& method_name,
-      size_t testset_idx,
-      double rtol = 1e-5,
-      double atol = 1e-8) {
-    // Execute the method
-    auto result = BundledModule::execute(method_name, testset_idx);
-    if (!result.ok()) {
-      THROW_IF_ERROR(
-          result.error(),
-          "Method execution failed with status 0x%" PRIx32,
-          static_cast<uint32_t>(result.error()));
-    }
-
-    // Convert outputs to py::list
-    const auto& outputs = result.get();
-    py::list py_outputs = get_outputs_as_py_list(outputs);
-
-    Error status = BundledModule::verify_method_outputs(
-        method_name, testset_idx, rtol, atol);
-    THROW_IF_ERROR(
-        status,
-        "Result verification failed with status %" PRIu32,
-        static_cast<uint32_t>(status));
-    return py_outputs;
-  }
-
  private:
   // Store the bytes object instead of a raw pointer so that this module will
   // keep the bytes alive.
@@ -901,6 +831,43 @@ struct PyModule final {
     }
   }
 
+  void load_bundled_input(
+      PyBundledModule& m,
+      const std::string method_name,
+      size_t testset_idx) {
+    const void* bundled_program_ptr = m.get_bundled_program_ptr();
+    Error status = executorch::BUNDLED_PROGRAM_NAMESPACE::load_bundled_input(
+        module_->get_method(method_name), bundled_program_ptr, testset_idx);
+    THROW_IF_ERROR(
+        status,
+        "load_bundled_input failed with status 0x%" PRIx32,
+        static_cast<uint32_t>(status));
+  }
+
+  py::list verify_result_with_bundled_expected_output(
+      PyBundledModule& m,
+      const std::string method_name,
+      size_t testset_idx,
+      double rtol = 1e-5,
+      double atol = 1e-8) {
+    const void* bundled_program_ptr = m.get_bundled_program_ptr();
+    auto& method = module_->get_method(method_name);
+    Error status = executorch::BUNDLED_PROGRAM_NAMESPACE::load_bundled_input(
+        method, bundled_program_ptr, testset_idx);
+    THROW_IF_ERROR(
+        status,
+        "load_bundled_input failed with status 0x%" PRIx32,
+        static_cast<uint32_t>(status));
+    py::list outputs = plan_execute(method_name);
+    status = executorch::BUNDLED_PROGRAM_NAMESPACE::verify_method_outputs(
+        method, bundled_program_ptr, testset_idx, rtol, atol);
+    THROW_IF_ERROR(
+        status,
+        "Result verification failed with status %" PRIu32,
+        static_cast<uint32_t>(status));
+    return outputs;
+  }
+
   py::list plan_execute(
       const std::string method_name,
       bool clone_outputs = true) {
@@ -923,6 +890,46 @@ struct PyModule final {
     return get_outputs_as_py_list(outputs, clone_outputs);
   }
 
+  py::list get_outputs_as_py_list(
+      const std::vector<EValue>& outputs,
+      bool clone_outputs = true) {
+    const auto outputs_size = outputs.size();
+    py::list list(outputs_size);
+    for (size_t i = 0; i < outputs_size; ++i) {
+      auto& v = outputs[i];
+      if (Tag::None == v.tag) {
+        list[i] = py::none();
+      } else if (Tag::Int == v.tag) {
+        list[i] = py::cast(v.toInt());
+      } else if (Tag::Double == v.tag) {
+        list[i] = py::cast(v.toDouble());
+      } else if (Tag::Bool == v.tag) {
+        list[i] = py::cast(v.toBool());
+      } else if (Tag::String == v.tag) {
+        list[i] = py::cast(std::string(v.toString().data()));
+      } else if (Tag::Tensor == v.tag) {
+#ifdef USE_ATEN_LIB
+        // Clone so the outputs in python do not share a lifetime with the
+        // module object
+        if (clone_outputs) {
+          list[i] = py::cast(v.toTensor().clone());
+        } else {
+          list[i] = py::cast(v.toTensor());
+        }
+#else
+        if (clone_outputs) {
+          list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()).clone());
+        } else {
+          list[i] = py::cast(alias_attensor_to_etensor(v.toTensor()));
+        }
+#endif
+      } else {
+        ET_ASSERT_UNREACHABLE_MSG("Invalid model output type");
+      }
+    }
+    return list;
+  }
+
   std::unique_ptr<PyMethodMeta> method_meta(const std::string method_name) {
     auto& method = module_->get_method(method_name);
     return std::make_unique<PyMethodMeta>(module_, method.method_meta());
@@ -1082,6 +1089,16 @@ PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
       call_guard);
 
   py::class_<PyModule>(m, "ExecuTorchModule")
+      .def("load_bundled_input", &PyModule::load_bundled_input, call_guard)
+      .def(
+          "verify_result_with_bundled_expected_output",
+          &PyModule::verify_result_with_bundled_expected_output,
+          py::arg("bundle"),
+          py::arg("method_name"),
+          py::arg("testset_idx"),
+          py::arg("rtol") = 1e-5,
+          py::arg("atol") = 1e-8,
+          call_guard)
       .def(
          "plan_execute",
          &PyModule::plan_execute,
@@ -1127,16 +1144,7 @@ PYBIND11_MODULE(EXECUTORCH_PYTHON_MODULE_NAME, m) {
      py::arg("clone_outputs") = true,
      call_guard);
 
-  py::class_<PyBundledModule>(m, "BundledModule")
-      .def(
-          "verify_result_with_bundled_expected_output",
-          &PyBundledModule::verify_result_with_bundled_expected_output,
-          py::arg("method_name"),
-          py::arg("testset_idx"),
-          py::arg("rtol") = 1e-5,
-          py::arg("atol") = 1e-8,
-          call_guard);
-
+  py::class_<PyBundledModule>(m, "BundledModule");
   py::class_<PyTensorInfo>(m, "TensorInfo")
      .def("sizes", &PyTensorInfo::sizes, call_guard)
      .def("dtype", &PyTensorInfo::dtype, call_guard)

shim_et/xplat/executorch/extension/pybindings/pybindings.bzl

Lines changed: 0 additions & 4 deletions
@@ -16,8 +16,6 @@ PORTABLE_MODULE_DEPS = [
     "//executorch/extension/data_loader:buffer_data_loader",
     "//executorch/extension/data_loader:mmap_data_loader",
     "//executorch/extension/memory_allocator:malloc_memory_allocator",
-    "//executorch/extension/module:module",
-    "//executorch/extension/module:bundled_module",
     "//executorch/runtime/executor/test:test_backend_compiler_lib",
     "//executorch/devtools/etdump:etdump_flatcc",
 ] + get_all_cpu_backend_targets()
@@ -30,8 +28,6 @@ ATEN_MODULE_DEPS = [
     "//executorch/extension/data_loader:buffer_data_loader",
     "//executorch/extension/data_loader:mmap_data_loader",
     "//executorch/extension/memory_allocator:malloc_memory_allocator",
-    "//executorch/extension/module:module_aten",
-    "//executorch/extension/module:bundled_module_aten",
     "//executorch/devtools/bundled_program:runtime_aten",
     "//executorch/runtime/executor/test:test_backend_compiler_lib_aten",
     "//executorch/devtools/etdump:etdump_flatcc",
