Skip to content

[RISCV] Support cR Inline Asm Constraint #124174

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jan 24, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion clang/lib/Basic/Targets/RISCV.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ bool RISCVTargetInfo::validateAsmConstraint(
return true;
case 'c':
// A RVC register - GPR or FPR
if (Name[1] == 'r' || Name[1] == 'f') {
if (Name[1] == 'r' || Name[1] == 'R' || Name[1] == 'f') {
Info.setAllowsRegister();
Name += 1;
return true;
Expand Down
8 changes: 8 additions & 0 deletions clang/test/CodeGen/RISCV/riscv-inline-asm.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,14 @@ double_xlen_t test_R_wide_scalar(double_xlen_t p) {
return ret;
}

// Verify that the "cR" inline-asm constraint (an RVC GPR register pair)
// is accepted on a double-xlen scalar and is lowered to LLVM IR as the
// "^cR" constraint code for both the output and the input operand.
double_xlen_t test_cR_wide_scalar(double_xlen_t p) {
// CHECK-LABEL: define{{.*}} {{i128|i64}} @test_cR_wide_scalar(
// CHECK: call {{i128|i64}} asm sideeffect "", "=^cR,^cR"({{i128|i64}} %{{.*}})
double_xlen_t ret;
asm volatile("" : "=cR"(ret) : "cR"(p));
return ret;
}

void test_I(void) {
// CHECK-LABEL: define{{.*}} void @test_I()
// CHECK: call void asm sideeffect "", "I"(i32 2047)
Expand Down
4 changes: 3 additions & 1 deletion llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20903,7 +20903,7 @@ RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
} else {
if (Constraint == "vr" || Constraint == "vd" || Constraint == "vm")
return C_RegisterClass;
if (Constraint == "cr" || Constraint == "cf")
if (Constraint == "cr" || Constraint == "cR" || Constraint == "cf")
return C_RegisterClass;
}
return TargetLowering::getConstraintType(Constraint);
Expand Down Expand Up @@ -20992,6 +20992,8 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
return std::make_pair(0U, &RISCV::GPRPairCRegClass);
if (!VT.isVector())
return std::make_pair(0U, &RISCV::GPRCRegClass);
} else if (Constraint == "cR") {
return std::make_pair(0U, &RISCV::GPRPairCRegClass);
} else if (Constraint == "cf") {
if (VT == MVT::f16) {
if (Subtarget.hasStdExtZfhmin())
Expand Down
70 changes: 70 additions & 0 deletions llvm/test/CodeGen/RISCV/rv32-inline-asm-pairs.ll
Original file line number Diff line number Diff line change
Expand Up @@ -71,3 +71,73 @@ entry:
%9 = load i64, ptr %3, align 8
ret i64 %9
}

; Simple "cR" round-trip on rv32: an i64 is constrained to an RVC GPR
; register pair. The early-clobber "&" on the "=&^cR" output forces a
; pair distinct from the input's, so the result lands in a2/a3 and is
; moved back to the a0/a1 return registers afterwards.
define i64 @test_cR_wide_scalar_simple(i64 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_simple:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: #APP
; CHECK-NEXT: # a2 <- a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: ret
entry:
%1 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i64 %0)
ret i64 %1
}

; "cR" surrounded by real arithmetic on rv32: widen an i32 into both
; halves of an i64, pass it through the pair constraint, then recombine
; the two halves with an or. The CHECK lines verify the asm result comes
; back in the a2/a3 pair and is consumed directly.
define i32 @test_cR_wide_scalar_with_ops(i32 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_with_ops:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: #APP
; CHECK-NEXT: # a2 <- a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: or a0, a2, a3
; CHECK-NEXT: ret
entry:
%1 = zext i32 %0 to i64
%2 = shl i64 %1, 32
%3 = or i64 %1, %2
%4 = call i64 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i64 %3)
%5 = trunc i64 %4 to i32
%6 = lshr i64 %4, 32
%7 = trunc i64 %6 to i32
%8 = or i32 %5, %7
ret i32 %8
}

; In/out use of "cR" on rv32: the trailing "0,1" constraints tie the two
; inputs to the "=r" and "=^cR" outputs, so the i64 travels through the
; asm in the same register pair, with stack traffic around the call.
; Fixed: the extractvalue on the first field spelled the aggregate type
; as "{ ptr, i64}" (missing space) while the second used "{ ptr, i64 }";
; both now use the canonical "{ ptr, i64 }" spelling.
define i64 @test_cR_wide_scalar_inout(ptr %0, i64 noundef %1) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_inout:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: sw a1, 0(sp)
; CHECK-NEXT: sw a3, 4(sp)
; CHECK-NEXT: #APP
; CHECK-NEXT: # a0; a2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: sw a0, 12(sp)
; CHECK-NEXT: sw a2, 0(sp)
; CHECK-NEXT: sw a3, 4(sp)
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
entry:
%2 = alloca ptr, align 4
%3 = alloca i64, align 8
store ptr %0, ptr %2, align 4
store i64 %1, ptr %3, align 8
%4 = load ptr, ptr %2, align 4
%5 = load i64, ptr %3, align 8
%6 = call { ptr, i64 } asm sideeffect "/* $0; $1 */", "=r,=^cR,0,1"(ptr %4, i64 %5)
%7 = extractvalue { ptr, i64 } %6, 0
%8 = extractvalue { ptr, i64 } %6, 1
store ptr %7, ptr %2, align 4
store i64 %8, ptr %3, align 8
%9 = load i64, ptr %3, align 8
ret i64 %9
}
70 changes: 70 additions & 0 deletions llvm/test/CodeGen/RISCV/rv64-inline-asm-pairs.ll
Original file line number Diff line number Diff line change
Expand Up @@ -71,3 +71,73 @@ entry:
%9 = load i128, ptr %3, align 16
ret i128 %9
}

; rv64 counterpart of the simple "cR" round-trip: an i128 occupies an
; RVC GPR register pair. The early-clobber "=&^cR" output takes a pair
; distinct from the input's (a2/a3 vs a0/a1), and the result is moved
; back into the a0/a1 return pair.
define i128 @test_cR_wide_scalar_simple(i128 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_simple:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: #APP
; CHECK-NEXT: # a2 <- a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: ret
entry:
%1 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i128 %0)
ret i128 %1
}

; rv64 counterpart of "cR" with surrounding arithmetic: widen an i64
; into both halves of an i128, pass it through the pair constraint, then
; or the two halves back together. The CHECK lines verify the asm result
; arrives in the a2/a3 pair and feeds the or directly.
define i64 @test_cR_wide_scalar_with_ops(i64 noundef %0) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_with_ops:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: #APP
; CHECK-NEXT: # a2 <- a0
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: or a0, a2, a3
; CHECK-NEXT: ret
entry:
%1 = zext i64 %0 to i128
%2 = shl i128 %1, 64
%3 = or i128 %1, %2
%4 = call i128 asm sideeffect "/* $0 <- $1 */", "=&^cR,^cR"(i128 %3)
%5 = trunc i128 %4 to i64
%6 = lshr i128 %4, 64
%7 = trunc i128 %6 to i64
%8 = or i64 %5, %7
ret i64 %8
}

; rv64 in/out use of "cR": the trailing "0,1" constraints tie the two
; inputs to the "=r" and "=^cR" outputs, so the i128 travels through the
; asm in the same register pair, with stack traffic around the call.
; Fixed: the extractvalue on the first field spelled the aggregate type
; as "{ ptr, i128}" (missing space) while the second used
; "{ ptr, i128 }"; both now use the canonical "{ ptr, i128 }" spelling.
define i128 @test_cR_wide_scalar_inout(ptr %0, i128 noundef %1) nounwind {
; CHECK-LABEL: test_cR_wide_scalar_inout:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -32
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: sd a0, 24(sp)
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: sd a1, 0(sp)
; CHECK-NEXT: sd a3, 8(sp)
; CHECK-NEXT: #APP
; CHECK-NEXT: # a0; a2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: sd a0, 24(sp)
; CHECK-NEXT: sd a2, 0(sp)
; CHECK-NEXT: sd a3, 8(sp)
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: ret
entry:
%2 = alloca ptr, align 8
%3 = alloca i128, align 16
store ptr %0, ptr %2, align 8
store i128 %1, ptr %3, align 16
%4 = load ptr, ptr %2, align 8
%5 = load i128, ptr %3, align 16
%6 = call { ptr, i128 } asm sideeffect "/* $0; $1 */", "=r,=^cR,0,1"(ptr %4, i128 %5)
%7 = extractvalue { ptr, i128 } %6, 0
%8 = extractvalue { ptr, i128 } %6, 1
store ptr %7, ptr %2, align 8
store i128 %8, ptr %3, align 16
%9 = load i128, ptr %3, align 16
ret i128 %9
}
44 changes: 44 additions & 0 deletions llvm/test/CodeGen/RISCV/zdinx-asm-constraint.ll
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,24 @@ entry:
ret void
}

; Zdinx: a double lives in a GPR pair. The "=^cR" output is tied to the
; input via "0", so the value must sit in an even/odd RVC pair for both;
; the incoming a1/a2 halves are first shuffled into a2/a3. The template
; fsgnjx.d $0, $1, $1 with identical source operands is printed by the
; assembler as its fabs.d alias, per the CHECK line.
define dso_local void @zdinx_asm_cR_inout(ptr nocapture noundef writeonly %a, double noundef %b) nounwind {
; CHECK-LABEL: zdinx_asm_cR_inout:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: #APP
; CHECK-NEXT: fabs.d a2, a2
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: sw a2, 8(a0)
; CHECK-NEXT: sw a3, 12(a0)
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds double, ptr %a, i32 1
%0 = tail call double asm "fsgnjx.d $0, $1, $1", "=^cR,0"(double %b)
store double %0, ptr %arrayidx, align 8
ret void
}

define dso_local void @zfinx_asm(ptr nocapture noundef writeonly %a, float noundef %b, float noundef %c) nounwind {
; CHECK-LABEL: zfinx_asm:
; CHECK: # %bb.0: # %entry
Expand Down Expand Up @@ -167,3 +185,29 @@ entry:
store half %0, ptr %arrayidx, align 8
ret void
}

; Zdinx with three distinct "cR" operands: the output pair is a2/a3 and
; the two double inputs are relocated into the even/odd pairs s0/s1 and
; a4/a5, which requires spilling the callee-saved s0/s1 around the asm.
; The result halves are then stored to a[1].
define dso_local void @zdinx_asm_cR(ptr nocapture noundef writeonly %a, double noundef %b, double noundef %c) nounwind {
; CHECK-LABEL: zdinx_asm_cR:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s1, 8(sp) # 4-byte Folded Spill
; CHECK-NEXT: mv a5, a4
; CHECK-NEXT: mv s1, a2
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: mv s0, a1
; CHECK-NEXT: #APP
; CHECK-NEXT: fsgnjx.d a2, s0, a4
; CHECK-NEXT: #NO_APP
; CHECK-NEXT: sw a2, 8(a0)
; CHECK-NEXT: sw a3, 12(a0)
; CHECK-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s1, 8(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
entry:
%arrayidx = getelementptr inbounds double, ptr %a, i32 1
%0 = tail call double asm "fsgnjx.d $0, $1, $2", "=^cR,^cR,^cR"(double %b, double %c)
store double %0, ptr %arrayidx, align 8
ret void
}
Loading