class OperatorsSupportedForCoreMLBackend(OperatorSupportBase):
    def __init__(
-        self, skip_ops_for_coreml_delegation: Optional[List[str]] = None
+        self,
+        skip_ops_for_coreml_delegation: Optional[List[str]] = None,
+        lower_full_graph: bool = False,
    ) -> None:
        if skip_ops_for_coreml_delegation is None:
            skip_ops_for_coreml_delegation = []
        super().__init__()
        self.skip_ops_for_coreml_delegation = skip_ops_for_coreml_delegation
+        self.lower_full_graph = lower_full_graph
+        self._logged_msgs = set()
+
+    def log_once(self, msg: str) -> None:
+        if msg not in self._logged_msgs:
+            logging.info(msg)
+            self._logged_msgs.add(msg)

    def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
        # get_attr node can always be supported on any backend
@@ -44,14 +53,63 @@ def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
            # skip ops if specified by user
            node_target_name = getattr(node.target, "__name__", "").lower()
            if node_target_name in (self.skip_ops_for_coreml_delegation or []):
+                self.log_once(
+                    "Skipping op for CoreML delegation because it is in skip_ops_for_coreml_delegation: "
+                    + node_target_name
+                )
+                assert (
+                    not self.lower_full_graph
+                ), "Cannot have skip_ops_for_coreml_delegation when lower_full_graph is True"
                return False
+
+            # TODO: enable this after bugs in ExecuTorch's partitioner are fixed
+            # # If lower_full_graph=False, do not partition nodes with symbolic args because it can result in symbolic args
+            # # in the placeholders due to partitioning, which CoreML does not support
+            # if not self.lower_full_graph and any(
+            #     isinstance(arg, torch.fx.Node)
+            #     and isinstance(
+            #         arg.meta.get("val", None),
+            #         (torch.SymInt, torch.SymBool, torch.SymFloat),
+            #     )
+            #     for arg in node.args
+            # ):
+            #     self.log_once(
+            #         "Skipping op for CoreML delegation because it contains symbolic args: "
+            #         + node_target_name
+            #     )
+            #     assert not self.lower_full_graph
+            #     return False
+
            # query coremltools to see if node is supported
-            return ct.converters.mil.frontend.torch.is_torch_fx_node_supported(node)
+            is_supported = ct.converters.mil.frontend.torch.is_torch_fx_node_supported(
+                node
+            )
+            if not is_supported:
+                if self.lower_full_graph:
+                    raise NotImplementedError(
+                        f"""CoreML does not support the op {node_target_name}, but you have set lower_full_graph=True in the CoreMLPartitioner.
+
+Please set lower_full_graph=False in the CoreMLPartitioner to allow running unsupported ops outside of CoreML. Note that setting lower_full_graph=False may affect performance of CoreML and the available features.
+As an alternative to setting lower_full_graph=False, you can try rewriting your model to avoid using this op.
+
+Also consider filing an issue with Apple's coremltools repo to request support for the op: https://github.com/apple/coremltools/issues
+Do not file an issue with ExecuTorch for op support.
+"""
+                    )
+                self.log_once(
+                    "Skipping op for CoreML delegation because it is not supported by CoreML: "
+                    + node_target_name
+                )
+            return is_supported
        # cowardly refuse to support all other types of node:
        # 1. placeholder / output nodes should not be tagged
        #    reference: https://github.com/pytorch/executorch/pull/1398
        # 2. call_module / call_method should have been replaced with call_function?
        else:
+            self.log_once(
+                "Skipping op for CoreML delegation because it is not get_attr or call_function: "
+                + node.op
+            )
            return False
@@ -62,6 +120,8 @@ def __init__(
        skip_ops_for_coreml_delegation: Optional[List[str]] = None,
        compile_specs: Optional[List[CompileSpec]] = None,
        take_over_mutable_buffer: Optional[bool] = True,
+        lower_full_graph: bool = False,
+        take_over_constant_data: bool = True,
    ) -> None:
        if skip_ops_for_coreml_delegation is None:
            skip_ops_for_coreml_delegation = []
@@ -71,6 +131,20 @@ def __init__(
            compile_specs=compile_specs if compile_specs is not None else [],
        )
        self.take_over_mutable_buffer = take_over_mutable_buffer
+        self.lower_full_graph = lower_full_graph
+        self.take_over_constant_data = take_over_constant_data
+        self._logged_msgs = set()
+
+        if self.lower_full_graph:
+            assert (
+                len(self.skip_ops_for_coreml_delegation) == 0
+            ), "When lower_full_graph=True, you cannot set skip_ops_for_coreml_delegation"
+            assert (
+                self.take_over_constant_data
+            ), "When lower_full_graph=True, you must set take_over_constant_data=True"
+            assert (
+                self.take_over_mutable_buffer
+            ), "When lower_full_graph=True, you must set take_over_mutable_buffer=True"

    def partition(self, exported_program: ExportedProgram) -> PartitionResult:
        # Run the CapabilityBasedPartitioner to return the largest possible
@@ -80,7 +154,9 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:

        capability_partitioner = CapabilityBasedPartitioner(
            exported_program.graph_module,
-            OperatorsSupportedForCoreMLBackend(self.skip_ops_for_coreml_delegation),
+            OperatorsSupportedForCoreMLBackend(
+                self.skip_ops_for_coreml_delegation, self.lower_full_graph
+            ),
            allows_single_node_partition=True,
        )
        partition_list = capability_partitioner.propose_partitions()
@@ -90,7 +166,8 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
                node.meta["delegation_tag"] = tag
                partition_tags[tag] = self.delegation_spec

-        tag_constant_data(exported_program)
+        if self.take_over_constant_data:
+            tag_constant_data(exported_program)
        if self.take_over_mutable_buffer:
            logger.info(
                "Core ML partitioner will take over torch mutable buffer as Core ML state, "
@@ -105,12 +182,18 @@ def partition(self, exported_program: ExportedProgram) -> PartitionResult:
            tagged_exported_program=exported_program, partition_tags=partition_tags
        )

+    def log_once(self, msg: str) -> None:
+        if msg not in self._logged_msgs:
+            logging.info(msg)
+            self._logged_msgs.add(msg)
+
    def ops_to_not_decompose(
        self, ep: ExportedProgram
    ) -> Tuple[List[torch._ops.OpOverload], Optional[Callable[[torch.fx.Node], bool]]]:
        do_not_decompose = []
-        op_support = OperatorsSupportedForCoreMLBackend()
-        _logged_warnings = set()
+        op_support = OperatorsSupportedForCoreMLBackend(
+            self.skip_ops_for_coreml_delegation, self.lower_full_graph
+        )

        # CoreML prevents certain ops (like triu) from lowering to CoreML when put in the ExecuTorch op namespace
        # TODO: upstream fixes, but pending ET consuming a new published version of coremltools with the
@@ -134,9 +217,7 @@ def ops_to_not_decompose(
                except Exception as e:
                    # CoreML's op_support.is_node_supported will sometimes throw
                    # for unsupported ops, rather than returning False
-                    warn_str = f"Encountered exception when checking node support: {e}"
-                    if warn_str not in _logged_warnings:
-                        logger.warning(warn_str)
-                        _logged_warnings.add(warn_str)
-
+                    self.log_once(
+                        f"Encountered exception when checking node support, treating node as unsupported: {e}"
+                    )
        return do_not_decompose, None
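
For orientation, here is a short usage sketch (not part of this diff) showing how the new partitioner flags might be passed. The export flow (torch.export.export plus to_edge_transform_and_lower) and the toy module are assumptions about typical ExecuTorch usage, not code from this PR.

# Usage sketch only; everything except the CoreMLPartitioner flags that appear in the
# diff (lower_full_graph, take_over_constant_data, take_over_mutable_buffer,
# skip_ops_for_coreml_delegation) is an assumption about the surrounding flow.
import torch
from executorch.backends.apple.coreml.partition import CoreMLPartitioner
from executorch.exir import to_edge_transform_and_lower


class TinyModel(torch.nn.Module):  # hypothetical stand-in model
    def forward(self, x):
        return torch.relu(x) + 1.0


ep = torch.export.export(TinyModel().eval(), (torch.randn(4),))

# Default: best-effort partitioning. Ops CoreML cannot handle stay in ExecuTorch,
# and the new log_once helper reports each skipped op name only once.
best_effort = CoreMLPartitioner()

# New strict mode from this PR: require the entire graph to lower to CoreML.
# If any op is unsupported, is_node_supported raises NotImplementedError instead of
# silently falling back. skip_ops_for_coreml_delegation must stay empty, and constant
# data / mutable buffers must be taken over by the delegate.
strict = CoreMLPartitioner(
    lower_full_graph=True,
    take_over_constant_data=True,
    take_over_mutable_buffer=True,
)

edge = to_edge_transform_and_lower(ep, partitioner=[strict])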