AMDGPU: Handle vectors in copysign sign type combine #142157
Merged
arsenm merged 1 commit into main from users/arsenm/amdgpu/handle-vectors-fcopysign-combine-sign on May 30, 2025
Conversation
@llvm/pr-subscribers-backend-amdgpu

Author: Matt Arsenault (arsenm)

Changes: This avoids some ugly codegen on pre-16-bit instruction targets now from annoying f16 legalization effects. This also avoids regressions on newer targets in a future patch.

Full diff: https://github.com/llvm/llvm-project/pull/142157.diff (3 files affected, shown below).
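For readers less familiar with the trick being generalized here: the sign of an IEEE-754 double is bit 63, i.e. bit 31 of its high 32-bit word, so a copysign that only consumes the sign can take it from the high f32 half instead of the full f64. A minimal host-side sketch of that fact (my illustration, not part of the patch; the little-endian word order is an assumption of the demo):

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Return the high 32-bit word of a double (the word that holds the sign bit
// on little-endian hosts).
static uint32_t highWord(double D) {
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof(Bits));
  return static_cast<uint32_t>(Bits >> 32);
}

int main() {
  const double Signs[] = {-0.0, 1.5, -3.25, -1e300};
  for (double S : Signs) {
    // Reinterpret only the high word as an f32 ...
    uint32_t Hi = highWord(S);
    float HiAsF32;
    std::memcpy(&HiAsF32, &Hi, sizeof(HiAsF32));
    // ... and its sign matches the sign of the original f64, so
    // copysign(x, S) == copysign(x, HiAsF32) for any magnitude x.
    assert(std::signbit(HiAsF32) == std::signbit(S));
    assert(std::copysign(2.0f, HiAsF32) ==
           std::copysign(2.0f, static_cast<float>(S)));
  }
  return 0;
}
```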
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index af85c6bef273d..c61c52ec5843e 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -11737,9 +11737,10 @@ SDValue SITargetLowering::performFCopySignCombine(SDNode *N,
// lower half with a copy.
// fcopysign f64:x, _:y -> x.lo32, (fcopysign (f32 x.hi32), _:y)
EVT MagVT = MagnitudeOp.getValueType();
- if (MagVT.getScalarType() == MVT::f64) {
- unsigned NumElts = MagVT.isVector() ? MagVT.getVectorNumElements() : 1;
+ unsigned NumElts = MagVT.isVector() ? MagVT.getVectorNumElements() : 1;
+
+ if (MagVT.getScalarType() == MVT::f64) {
EVT F32VT = MagVT.isVector()
? EVT::getVectorVT(*DAG.getContext(), MVT::f32, 2 * NumElts)
: MVT::v2f32;
@@ -11777,7 +11778,7 @@ SDValue SITargetLowering::performFCopySignCombine(SDNode *N,
return DAG.getNode(ISD::BUILD_VECTOR, DL, MagVT, NewElts);
}
- if (SignVT != MVT::f64)
+ if (SignVT.getScalarType() != MVT::f64)
return SDValue();
// Reduce width of sign operand, we only need the highest bit.
@@ -11785,13 +11786,31 @@ SDValue SITargetLowering::performFCopySignCombine(SDNode *N,
// fcopysign f64:x, f64:y ->
// fcopysign f64:x, (extract_vector_elt (bitcast f64:y to v2f32), 1)
// TODO: In some cases it might make sense to go all the way to f16.
- SDValue SignAsVector = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32, SignOp);
- SDValue SignAsF32 =
- DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, SignAsVector,
- DAG.getConstant(1, DL, MVT::i32));
+
+ EVT F32VT = MagVT.isVector()
+ ? EVT::getVectorVT(*DAG.getContext(), MVT::f32, 2 * NumElts)
+ : MVT::v2f32;
+
+ SDValue SignAsVector = DAG.getNode(ISD::BITCAST, DL, F32VT, SignOp);
+
+ SmallVector<SDValue, 8> F32Signs;
+ for (unsigned I = 0; I != NumElts; ++I) {
+ // Take sign from odd elements of cast vector
+ SDValue SignAsF32 =
+ DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, SignAsVector,
+ DAG.getConstant(2 * I + 1, DL, MVT::i32));
+ F32Signs.push_back(SignAsF32);
+ }
+
+ SDValue NewSign =
+ NumElts == 1
+ ? F32Signs.back()
+ : DAG.getNode(ISD::BUILD_VECTOR, DL,
+ EVT::getVectorVT(*DAG.getContext(), MVT::f32, NumElts),
+ F32Signs);
return DAG.getNode(ISD::FCOPYSIGN, DL, N->getValueType(0), N->getOperand(0),
- SignAsF32);
+ NewSign);
}
// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
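For reviewers following the indexing: after bitcasting an <N x f64> sign operand to <2N x f32>, the sign bit of element I sits in lane 2*I + 1, which is what the EXTRACT_VECTOR_ELT indices above select (and the N == 1 case simply reuses the single extracted element instead of building a vector). A small host-side model of that lane mapping (illustrative only, not part of the patch; little-endian layout assumed):

```cpp
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstring>
#include <vector>

int main() {
  // A <4 x double> "sign operand", viewed the way the combine views it:
  // bitcast to <8 x float>, where each element's sign bit lives in the
  // odd (high) 32-bit lane.
  const std::vector<double> SignVec = {-2.0, 4.0, -0.0, 8.0};
  const std::size_t NumElts = SignVec.size();

  std::vector<float> F32Lanes(2 * NumElts);
  std::memcpy(F32Lanes.data(), SignVec.data(), NumElts * sizeof(double));

  for (std::size_t I = 0; I != NumElts; ++I) {
    // Take the sign from odd lanes of the cast vector (lane 2*I + 1),
    // mirroring the EXTRACT_VECTOR_ELT indices built in the combine.
    float SignAsF32 = F32Lanes[2 * I + 1];
    assert(std::signbit(SignAsF32) == std::signbit(SignVec[I]));
  }
  return 0;
}
```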
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
index 90a368885bfdc..45bf0770ad924 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.bf16.ll
@@ -4677,37 +4677,33 @@ define <2 x bfloat> @v_copysign_out_v2bf16_mag_v2bf16_sign_v2f64(<2 x bfloat> %m
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
-; GCN-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
-; GCN-NEXT: v_cvt_f32_f64_e32 v3, v[4:5]
+; GCN-NEXT: v_and_b32_e32 v2, 0x80000000, v5
+; GCN-NEXT: v_and_b32_e32 v3, 0x80000000, v3
; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; GCN-NEXT: v_bfe_u32 v1, v1, 16, 15
+; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
-; GCN-NEXT: v_and_b32_e32 v3, 0x8000, v3
-; GCN-NEXT: v_and_b32_e32 v2, 0x8000, v2
-; GCN-NEXT: v_or_b32_e32 v1, v1, v3
-; GCN-NEXT: v_or_b32_e32 v0, v0, v2
-; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GCN-NEXT: v_or_b32_e32 v1, v1, v2
+; GCN-NEXT: v_or_b32_e32 v0, v0, v3
; GCN-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_copysign_out_v2bf16_mag_v2bf16_sign_v2f64:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f64_e32 v2, v[2:3]
-; GFX7-NEXT: v_cvt_f32_f64_e32 v3, v[4:5]
-; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0x80000000, v5
; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; GFX7-NEXT: v_and_b32_e32 v3, 0x8000, v3
; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 15
-; GFX7-NEXT: v_and_b32_e32 v2, 0x8000, v2
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0x80000000, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
-; GFX7-NEXT: v_or_b32_e32 v1, v1, v3
; GFX7-NEXT: v_or_b32_e32 v0, v0, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_copysign_out_v2bf16_mag_v2bf16_sign_v2f64:
@@ -5585,16 +5581,14 @@ define amdgpu_ps i32 @s_copysign_out_v2bf16_mag_v2bf16_sign_v2f64(<2 x bfloat> i
; GCN: ; %bb.0:
; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s1
; GCN-NEXT: v_mul_f32_e64 v1, 1.0, s0
-; GCN-NEXT: v_cvt_f32_f64_e32 v2, s[4:5]
-; GCN-NEXT: v_cvt_f32_f64_e32 v3, s[2:3]
-; GCN-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GCN-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GCN-NEXT: s_and_b32 s0, s3, 0x80000000
+; GCN-NEXT: s_and_b32 s1, s5, 0x80000000
+; GCN-NEXT: s_lshr_b32 s0, s0, 16
; GCN-NEXT: v_bfe_u32 v1, v1, 16, 15
+; GCN-NEXT: s_lshr_b32 s1, s1, 16
; GCN-NEXT: v_bfe_u32 v0, v0, 16, 15
-; GCN-NEXT: v_and_b32_e32 v3, 0x8000, v3
-; GCN-NEXT: v_and_b32_e32 v2, 0x8000, v2
-; GCN-NEXT: v_or_b32_e32 v1, v1, v3
-; GCN-NEXT: v_or_b32_e32 v0, v0, v2
+; GCN-NEXT: v_or_b32_e32 v1, s0, v1
+; GCN-NEXT: v_or_b32_e32 v0, s1, v0
; GCN-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GCN-NEXT: v_or_b32_e32 v0, v1, v0
; GCN-NEXT: v_readfirstlane_b32 s0, v0
@@ -5602,18 +5596,16 @@ define amdgpu_ps i32 @s_copysign_out_v2bf16_mag_v2bf16_sign_v2f64(<2 x bfloat> i
;
; GFX7-LABEL: s_copysign_out_v2bf16_mag_v2bf16_sign_v2f64:
; GFX7: ; %bb.0:
-; GFX7-NEXT: v_cvt_f32_f64_e32 v0, s[4:5]
-; GFX7-NEXT: v_cvt_f32_f64_e32 v1, s[2:3]
-; GFX7-NEXT: v_mul_f32_e64 v2, 1.0, s1
-; GFX7-NEXT: v_mul_f32_e64 v3, 1.0, s0
-; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX7-NEXT: v_and_b32_e32 v0, 0x8000, v0
-; GFX7-NEXT: v_bfe_u32 v2, v2, 16, 15
-; GFX7-NEXT: v_and_b32_e32 v1, 0x8000, v1
-; GFX7-NEXT: v_bfe_u32 v3, v3, 16, 15
-; GFX7-NEXT: v_or_b32_e32 v0, v2, v0
-; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: v_mul_f32_e64 v1, 1.0, s0
+; GFX7-NEXT: s_and_b32 s0, s3, 0x80000000
+; GFX7-NEXT: s_lshr_b32 s0, s0, 16
+; GFX7-NEXT: v_bfe_u32 v1, v1, 16, 15
+; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s1
+; GFX7-NEXT: v_or_b32_e32 v1, s0, v1
+; GFX7-NEXT: s_and_b32 s0, s5, 0x80000000
+; GFX7-NEXT: s_lshr_b32 s0, s0, 16
+; GFX7-NEXT: v_bfe_u32 v0, v0, 16, 15
+; GFX7-NEXT: v_or_b32_e32 v0, s0, v0
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX7-NEXT: v_or_b32_e32 v0, v1, v0
; GFX7-NEXT: v_readfirstlane_b32 s0, v0
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
index 5534e3140c646..7fba1f9db7397 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -4013,96 +4013,13 @@ define <2 x half> @v_copysign_out_v2f16_mag_v2f16_sign_v2f64(<2 x half> %mag, <2
; SI-LABEL: v_copysign_out_v2f16_mag_v2f16_sign_v2f64:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_and_b32_e32 v7, 0x1ff, v5
-; SI-NEXT: v_or_b32_e32 v4, v7, v4
-; SI-NEXT: v_lshrrev_b32_e32 v6, 8, v5
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; SI-NEXT: v_and_b32_e32 v6, 0xffe, v6
-; SI-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
-; SI-NEXT: v_bfe_u32 v7, v5, 20, 11
-; SI-NEXT: s_movk_i32 s4, 0x3f1
-; SI-NEXT: v_or_b32_e32 v4, v6, v4
-; SI-NEXT: v_sub_i32_e32 v8, vcc, s4, v7
-; SI-NEXT: v_or_b32_e32 v6, 0x1000, v4
-; SI-NEXT: v_med3_i32 v8, v8, 0, 13
-; SI-NEXT: v_lshrrev_b32_e32 v9, v8, v6
-; SI-NEXT: v_lshlrev_b32_e32 v8, v8, v9
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, v8, v6
-; SI-NEXT: s_movk_i32 s5, 0xfc10
-; SI-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
-; SI-NEXT: v_add_i32_e32 v7, vcc, s5, v7
-; SI-NEXT: v_lshlrev_b32_e32 v8, 12, v7
-; SI-NEXT: v_or_b32_e32 v6, v9, v6
-; SI-NEXT: v_or_b32_e32 v8, v4, v8
-; SI-NEXT: v_cmp_gt_i32_e32 vcc, 1, v7
-; SI-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc
-; SI-NEXT: v_and_b32_e32 v8, 7, v6
-; SI-NEXT: v_cmp_lt_i32_e32 vcc, 5, v8
-; SI-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 3, v8
-; SI-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
-; SI-NEXT: v_or_b32_e32 v8, v8, v9
-; SI-NEXT: v_lshrrev_b32_e32 v6, 2, v6
-; SI-NEXT: v_add_i32_e32 v6, vcc, v6, v8
-; SI-NEXT: v_mov_b32_e32 v8, 0x7c00
-; SI-NEXT: v_cmp_gt_i32_e32 vcc, 31, v7
-; SI-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc
-; SI-NEXT: v_mov_b32_e32 v9, 0x7e00
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; SI-NEXT: s_movk_i32 s6, 0x40f
-; SI-NEXT: v_cndmask_b32_e32 v4, v8, v9, vcc
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, s6, v7
-; SI-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; SI-NEXT: v_and_b32_e32 v6, 0x1ff, v3
-; SI-NEXT: v_and_b32_e32 v5, 0x8000, v5
-; SI-NEXT: v_or_b32_e32 v2, v6, v2
-; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: v_lshrrev_b32_e32 v5, 8, v3
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_and_b32_e32 v5, 0xffe, v5
-; SI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; SI-NEXT: v_bfe_u32 v6, v3, 20, 11
-; SI-NEXT: v_or_b32_e32 v2, v5, v2
-; SI-NEXT: v_sub_i32_e32 v7, vcc, s4, v6
-; SI-NEXT: v_or_b32_e32 v5, 0x1000, v2
-; SI-NEXT: v_med3_i32 v7, v7, 0, 13
-; SI-NEXT: v_lshrrev_b32_e32 v10, v7, v5
-; SI-NEXT: v_lshlrev_b32_e32 v7, v7, v10
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, v7, v5
-; SI-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
-; SI-NEXT: v_add_i32_e32 v6, vcc, s5, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 12, v6
-; SI-NEXT: v_or_b32_e32 v5, v10, v5
-; SI-NEXT: v_or_b32_e32 v7, v2, v7
-; SI-NEXT: v_cmp_gt_i32_e32 vcc, 1, v6
-; SI-NEXT: v_cndmask_b32_e32 v5, v7, v5, vcc
-; SI-NEXT: v_and_b32_e32 v7, 7, v5
-; SI-NEXT: v_cmp_lt_i32_e32 vcc, 5, v7
-; SI-NEXT: v_cndmask_b32_e64 v10, 0, 1, vcc
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, 3, v7
-; SI-NEXT: v_cndmask_b32_e64 v7, 0, 1, vcc
-; SI-NEXT: v_or_b32_e32 v7, v7, v10
-; SI-NEXT: v_lshrrev_b32_e32 v5, 2, v5
-; SI-NEXT: v_add_i32_e32 v5, vcc, v5, v7
-; SI-NEXT: v_cmp_gt_i32_e32 vcc, 31, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_cndmask_b32_e32 v2, v8, v9, vcc
-; SI-NEXT: v_cmp_eq_u32_e32 vcc, s6, v6
-; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
-; SI-NEXT: v_and_b32_e32 v3, 0x8000, v3
-; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: s_brev_b32 s4, -2
-; SI-NEXT: v_bfi_b32 v0, s4, v0, v2
-; SI-NEXT: v_bfi_b32 v1, s4, v1, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_bfi_b32 v0, s4, v0, v3
+; SI-NEXT: v_bfi_b32 v1, s4, v1, v5
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_copysign_out_v2f16_mag_v2f16_sign_v2f64:
@@ -4900,99 +4817,16 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f16_sign_v2f32(<2 x half> inreg
define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f16_sign_v2f64(<2 x half> inreg %mag, <2 x double> inreg %sign) {
; SI-LABEL: s_copysign_out_v2f16_mag_v2f16_sign_v2f64:
; SI: ; %bb.0:
-; SI-NEXT: v_cvt_f16_f32_e32 v1, s0
-; SI-NEXT: s_lshr_b32 s0, s3, 8
-; SI-NEXT: s_and_b32 s6, s0, 0xffe
-; SI-NEXT: s_and_b32 s0, s3, 0x1ff
-; SI-NEXT: s_or_b32 s0, s0, s2
-; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: v_cvt_f16_f32_e32 v0, s1
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; SI-NEXT: v_readfirstlane_b32 s0, v2
-; SI-NEXT: s_bfe_u32 s2, s3, 0xb0014
-; SI-NEXT: s_or_b32 s0, s6, s0
-; SI-NEXT: s_sub_i32 s6, 0x3f1, s2
-; SI-NEXT: v_med3_i32 v2, s6, 0, 13
-; SI-NEXT: s_or_b32 s1, s0, 0x1000
-; SI-NEXT: v_readfirstlane_b32 s6, v2
-; SI-NEXT: s_lshr_b32 s7, s1, s6
-; SI-NEXT: s_lshl_b32 s6, s7, s6
-; SI-NEXT: s_cmp_lg_u32 s6, s1
-; SI-NEXT: s_cselect_b32 s1, 1, 0
-; SI-NEXT: s_addk_i32 s2, 0xfc10
-; SI-NEXT: s_lshl_b32 s6, s2, 12
-; SI-NEXT: s_or_b32 s1, s7, s1
-; SI-NEXT: s_or_b32 s6, s0, s6
-; SI-NEXT: s_cmp_lt_i32 s2, 1
-; SI-NEXT: s_cselect_b32 s1, s1, s6
-; SI-NEXT: s_and_b32 s6, s1, 7
-; SI-NEXT: s_cmp_gt_i32 s6, 5
-; SI-NEXT: s_cselect_b32 s7, 1, 0
-; SI-NEXT: s_cmp_eq_u32 s6, 3
-; SI-NEXT: s_cselect_b32 s6, 1, 0
-; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: s_lshr_b32 s1, s1, 2
-; SI-NEXT: s_add_i32 s1, s1, s6
-; SI-NEXT: s_cmp_lt_i32 s2, 31
-; SI-NEXT: s_cselect_b32 s1, s1, 0x7c00
-; SI-NEXT: s_cmp_lg_u32 s0, 0
-; SI-NEXT: s_movk_i32 s6, 0x7e00
-; SI-NEXT: s_cselect_b32 s0, s6, 0x7c00
-; SI-NEXT: s_cmpk_eq_i32 s2, 0x40f
-; SI-NEXT: s_cselect_b32 s0, s0, s1
-; SI-NEXT: s_lshr_b32 s1, s3, 16
-; SI-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-NEXT: s_or_b32 s2, s1, s0
-; SI-NEXT: s_lshr_b32 s0, s5, 8
-; SI-NEXT: s_and_b32 s3, s0, 0xffe
-; SI-NEXT: s_and_b32 s0, s5, 0x1ff
-; SI-NEXT: s_or_b32 s0, s0, s4
-; SI-NEXT: s_cmp_lg_u32 s0, 0
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
-; SI-NEXT: v_readfirstlane_b32 s0, v2
-; SI-NEXT: s_or_b32 s0, s3, s0
-; SI-NEXT: s_bfe_u32 s3, s5, 0xb0014
-; SI-NEXT: s_sub_i32 s4, 0x3f1, s3
-; SI-NEXT: v_med3_i32 v2, s4, 0, 13
-; SI-NEXT: s_or_b32 s1, s0, 0x1000
-; SI-NEXT: v_readfirstlane_b32 s4, v2
-; SI-NEXT: s_lshr_b32 s7, s1, s4
-; SI-NEXT: s_lshl_b32 s4, s7, s4
-; SI-NEXT: s_cmp_lg_u32 s4, s1
-; SI-NEXT: s_cselect_b32 s1, 1, 0
-; SI-NEXT: s_addk_i32 s3, 0xfc10
-; SI-NEXT: s_lshl_b32 s4, s3, 12
-; SI-NEXT: s_or_b32 s1, s7, s1
-; SI-NEXT: s_or_b32 s4, s0, s4
-; SI-NEXT: s_cmp_lt_i32 s3, 1
-; SI-NEXT: s_cselect_b32 s1, s1, s4
-; SI-NEXT: s_and_b32 s4, s1, 7
-; SI-NEXT: s_cmp_gt_i32 s4, 5
-; SI-NEXT: s_cselect_b32 s7, 1, 0
-; SI-NEXT: s_cmp_eq_u32 s4, 3
-; SI-NEXT: s_cselect_b32 s4, 1, 0
-; SI-NEXT: s_or_b32 s4, s4, s7
-; SI-NEXT: s_lshr_b32 s1, s1, 2
-; SI-NEXT: s_add_i32 s1, s1, s4
-; SI-NEXT: s_cmp_lt_i32 s3, 31
-; SI-NEXT: s_cselect_b32 s1, s1, 0x7c00
-; SI-NEXT: s_cmp_lg_u32 s0, 0
-; SI-NEXT: s_cselect_b32 s0, s6, 0x7c00
-; SI-NEXT: s_cmpk_eq_i32 s3, 0x40f
-; SI-NEXT: s_cselect_b32 s0, s0, s1
-; SI-NEXT: s_lshr_b32 s1, s5, 16
-; SI-NEXT: s_and_b32 s1, s1, 0x8000
-; SI-NEXT: s_or_b32 s0, s1, s0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, s0
+; SI-NEXT: s_brev_b32 s0, -2
+; SI-NEXT: v_mov_b32_e32 v2, s5
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s2
-; SI-NEXT: s_brev_b32 s0, -2
; SI-NEXT: v_bfi_b32 v0, s0, v0, v2
+; SI-NEXT: v_mov_b32_e32 v2, s3
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: v_bfi_b32 v1, s0, v1, v3
+; SI-NEXT: v_bfi_b32 v1, s0, v1, v2
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
rampitec approved these changes on May 30, 2025
Base automatically changed from users/arsenm/amdgpu/handle-vectors-fcopysign-combine-magnitude to main on May 30, 2025 17:58
This avoids some ugly codegen on pre-16-bit instruction targets now from annoying f16 legalization effects. This also avoids regressions on newer targets in a future patch.