Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 4 additions & 6 deletions backends/arm/test/ops/test_mean_dim.py
Original file line number Diff line number Diff line change
Expand Up @@ -356,14 +356,12 @@ def forward(self, tensor: torch.Tensor, keepdim: bool):
return tensor.mean()

test_data_suite: dict[str, Callable[[], mean_input_t]] = {
"rank1": lambda: (
torch.rand(
1,
),
"rank_2": lambda: (
torch.rand(1, 2),
False,
),
"rank2": lambda: (torch.rand(5, 5), True),
"rank4": lambda: (torch.rand(5, 1, 10, 1), False),
"rank_2_keepdim": lambda: (torch.rand(5, 5), True),
"rank_4": lambda: (torch.rand(5, 1, 10, 1), False),
}


Expand Down
6 changes: 3 additions & 3 deletions backends/arm/test/ops/test_sum.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ def test_sum_dim_intlist_tosa_INT(test_data: input_t1):

@common.parametrize("test_data", Sum.test_parameters)
@common.XfailIfNoCorstone300
def test_view_u55_INT_1_0(test_data: Tuple):
def test_sum_u55_INT_1_0(test_data: Tuple):
pipeline = EthosU55PipelineINT[input_t1](
Sum(),
test_data(),
Expand All @@ -78,7 +78,7 @@ def test_view_u55_INT_1_0(test_data: Tuple):

@common.parametrize("test_data", Sum.test_parameters)
@common.XfailIfNoCorstone320
def test_view_u85_INT_1_0(test_data: Tuple):
def test_sum_u85_INT_1_0(test_data: Tuple):
pipeline = EthosU85PipelineINT[input_t1](
Sum(),
test_data(),
Expand Down Expand Up @@ -122,7 +122,7 @@ def test_sum_dim_intlist_vgf_INT(test_data: input_t1):


@common.parametrize("test_data", reject_inputs)
def test_view_u55_INT_failure_set(test_data: Tuple):
def test_sum_u55_INT_failure_set(test_data: Tuple):
pipeline = EthosU55PipelineINT[input_t1](
Sum(),
test_data(),
Expand Down
5 changes: 4 additions & 1 deletion backends/arm/test/ops/test_unflatten.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,10 @@ def __init__(self, dim: int, sizes: Tuple[int, ...]):
self.sizes = sizes

def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch.unflatten(x, self.dim, self.sizes)
unflatten_op = torch.unflatten(x, self.dim, self.sizes)
# Because we treat a single view as a no compute operation and therefore do not partition it,
# we want to provide a mul op to verify that it does indeed get partitioned when bundled with another op.
return unflatten_op * unflatten_op

test_data: dict[str, test_data_t] = {
"rand_3d_batch3": (lambda: (Unflatten(1, (-1, 2)), (torch.rand(3, 4, 4),))),
Expand Down
7 changes: 5 additions & 2 deletions backends/arm/test/ops/test_view.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,10 @@ def __init__(self, new_shape):
self.new_shape = new_shape

def forward(self, x: torch.Tensor):
return x.view(self.new_shape)
view_op = x.view(self.new_shape)
# Because we treat a single view as a no compute operation and therefore do not partition it,
# we want to provide a mul op to verify that it does indeed get partitioned when bundled with another op.
return view_op * view_op


@common.parametrize("test_data", View.needs_transpose_tests)
Expand Down Expand Up @@ -139,7 +142,7 @@ def test_view_u55_INT_not_delegated(test_data: Tuple):
View(new_shape),
(test_tensor,),
{"executorch_exir_dialects_edge__ops_aten_view_copy": 1},
n_expected_delegates=0,
n_expected_delegates=1,
quantize=True,
u55_subset=True,
)
Expand Down
22 changes: 20 additions & 2 deletions backends/arm/tosa/partitioner.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,22 @@ def is_noop_expand(node: torch.fx.node.Node) -> bool:
return all(m == 1 for m in multiples) and not changes_rank


def is_view_copy(node: torch.fx.node.Node) -> bool:
    """Check whether *node* is an ``aten.view_copy.default`` op.

    ``view_copy`` can be regarded as a no-compute op, so the partitioner
    treats it the same way as the other no-op checks.

    Args:
        node (torch.fx.Node): FX node to inspect.

    Returns:
        bool: True when the node's target is ``aten.view_copy.default``;
        otherwise, False.

    """
    view_copy_target = exir_ops.edge.aten.view_copy.default
    return node.target == view_copy_target


def is_partitioned(
node: torch.fx.Node,
tag: str,
Expand Down Expand Up @@ -267,16 +283,18 @@ def _tag_module( # noqa
del node.meta["delegation_tag"]
break

is_noop_partition = all(
        # Check whether the partition contains only no-op or non-computational ops. Such partitions don't make sense to delegate, and in the worst case may be optimized away during lowering, which can break compilation.
is_nocompute_partition = all(
is_noop_clone(node)
or is_noop_alias_copy(node)
or is_noop_expand(node)
or is_noop_to_dim_order_copy(node)
or is_view_copy(node)
or node.target in Q_OPS
or node.target in DQ_OPS
for node in partition.nodes
)
if is_noop_partition:
if is_nocompute_partition:
reject_partition(
"Partition contained only ops which are removed in the TOSA lowering, leading to an empty partition.",
partition,
Expand Down