gcc/fix-AArch64-128-bit-immediate-ICEs.patch
eastb233 6be565f050 [Bugfix] Fix ICEs when compiling simde
- fix-PR83666-punt-BLKmode-when-expand_debug_expr.patch: New patch for
  bugfix
- fix-AArch64-128-bit-immediate-ICEs.patch: Likewise
- gcc.spec: Add new patch
2021-06-28 21:36:37 +08:00

215 lines
7.7 KiB
Diff

This backport contains 3 patches from the GCC mainstream tree.
The commit IDs of these patches are listed below in chronological order.
0001-Remove-from-movsi-di-ti-patterns.patch
ff76f0b5f6e6a4144fabb9ae984a9ee9dcaa2d08
0001-Improve-aarch64_legitimate_constant_p.patch
26895c21eb10cfd6c00285e13e6f13a75cccc1d9
0001-AArch64-PR82964-Fix-128-bit-immediate-ICEs.patch
c0bb5bc54feab4bac0df04f358ec9e839a32b2a2
diff -Nurp a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
--- a/gcc/config/aarch64/aarch64.c 2021-06-20 21:43:53.688000000 -0400
+++ b/gcc/config/aarch64/aarch64.c 2021-06-20 22:25:42.428000000 -0400
@@ -2090,6 +2090,23 @@ aarch64_internal_mov_immediate (rtx dest
return num_insns;
}
+/* Return whether imm is a 128-bit immediate which is simple enough to
+ expand inline. */
+bool
+aarch64_mov128_immediate (rtx imm)
+{
+ if (GET_CODE (imm) == CONST_INT)
+ return true;
+
+ gcc_assert (CONST_WIDE_INT_NUNITS (imm) == 2);
+
+ rtx lo = GEN_INT (CONST_WIDE_INT_ELT (imm, 0));
+ rtx hi = GEN_INT (CONST_WIDE_INT_ELT (imm, 1));
+
+ return aarch64_internal_mov_immediate (NULL_RTX, lo, false, DImode)
+ + aarch64_internal_mov_immediate (NULL_RTX, hi, false, DImode) <= 4;
+}
+
void
aarch64_expand_mov_immediate (rtx dest, rtx imm)
@@ -10180,44 +10197,43 @@ aarch64_legitimate_pic_operand_p (rtx x)
return true;
}
-/* Return true if X holds either a quarter-precision or
- floating-point +0.0 constant. */
-static bool
-aarch64_valid_floating_const (machine_mode mode, rtx x)
-{
- if (!CONST_DOUBLE_P (x))
- return false;
-
- if (aarch64_float_const_zero_rtx_p (x))
- return true;
-
- /* We only handle moving 0.0 to a TFmode register. */
- if (!(mode == SFmode || mode == DFmode))
- return false;
-
- return aarch64_float_const_representable_p (x);
-}
+/* Implement TARGET_LEGITIMATE_CONSTANT_P hook. Return true for constants
+ that should be rematerialized rather than spilled. */
static bool
aarch64_legitimate_constant_p (machine_mode mode, rtx x)
{
+ /* Support CSE and rematerialization of common constants. */
+ if (CONST_INT_P (x)
+ || (CONST_DOUBLE_P (x)
+ && (mode == SFmode || mode == DFmode || mode == TFmode))
+ || GET_CODE (x) == CONST_VECTOR)
+ return true;
+
/* Do not allow vector struct mode constants. We could support
0 and -1 easily, but they need support in aarch64-simd.md. */
- if (TARGET_SIMD && aarch64_vect_struct_mode_p (mode))
+ if (aarch64_vect_struct_mode_p (mode))
return false;
- /* This could probably go away because
- we now decompose CONST_INTs according to expand_mov_immediate. */
- if ((GET_CODE (x) == CONST_VECTOR
- && aarch64_simd_valid_immediate (x, mode, false, NULL))
- || CONST_INT_P (x) || aarch64_valid_floating_const (mode, x))
- return !targetm.cannot_force_const_mem (mode, x);
+ /* Do not allow const (plus (anchor_symbol, const_int)). */
+ if (GET_CODE (x) == CONST)
+ {
+ rtx offset;
+
+ split_const (x, &x, &offset);
- if (GET_CODE (x) == HIGH
- && aarch64_valid_symref (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
+ if (SYMBOL_REF_P (x) && SYMBOL_REF_ANCHOR_P (x))
+ return false;
+ }
+
+ if (GET_CODE (x) == HIGH)
+ x = XEXP (x, 0);
+
+ /* Label references are always constant. */
+ if (GET_CODE (x) == LABEL_REF)
return true;
- return aarch64_constant_address_p (x);
+ return false;
}
rtx
diff -Nurp a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
--- a/gcc/config/aarch64/aarch64.md 2021-06-20 21:43:53.712000000 -0400
+++ b/gcc/config/aarch64/aarch64.md 2021-06-20 22:22:37.848000000 -0400
@@ -1131,9 +1131,9 @@
(define_insn "*movti_aarch64"
[(set (match_operand:TI 0
- "nonimmediate_operand" "=r, *w,r ,*w,r,m,m,*w,m")
+ "nonimmediate_operand" "= r,w, r,w,r,m,m,w,m")
(match_operand:TI 1
- "aarch64_movti_operand" " rn,r ,*w,*w,m,r,Z, m,*w"))]
+ "aarch64_movti_operand" " rUti,r, w,w,m,r,Z,m,w"))]
"(register_operand (operands[0], TImode)
|| aarch64_reg_or_zero (operands[1], TImode))"
"@
diff -Nurp a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
--- a/gcc/config/aarch64/aarch64-protos.h 2021-06-20 21:43:53.584000000 -0400
+++ b/gcc/config/aarch64/aarch64-protos.h 2021-06-20 22:22:37.844000000 -0400
@@ -414,6 +414,8 @@ void aarch64_split_128bit_move (rtx, rtx
bool aarch64_split_128bit_move_p (rtx, rtx);
+bool aarch64_mov128_immediate (rtx);
+
void aarch64_split_simd_combine (rtx, rtx, rtx);
void aarch64_split_simd_move (rtx, rtx);
diff -Nurp a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
--- a/gcc/config/aarch64/constraints.md 2017-01-01 07:07:43.905435000 -0500
+++ b/gcc/config/aarch64/constraints.md 2021-06-20 22:22:37.852000000 -0400
@@ -69,6 +69,12 @@
(and (match_code "const_int")
(match_test "aarch64_move_imm (ival, DImode)")))
+(define_constraint "Uti"
+ "A constant that can be used with a 128-bit MOV immediate operation."
+ (and (ior (match_code "const_int")
+ (match_code "const_wide_int"))
+ (match_test "aarch64_mov128_immediate (op)")))
+
(define_constraint "UsO"
"A constant that can be used with a 32-bit and operation."
(and (match_code "const_int")
diff -Nurp a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
--- a/gcc/config/aarch64/predicates.md 2021-06-20 21:43:53.588000000 -0400
+++ b/gcc/config/aarch64/predicates.md 2021-06-20 22:22:37.852000000 -0400
@@ -224,15 +224,14 @@
(match_test "aarch64_mov_operand_p (op, mode)")))))
(define_predicate "aarch64_movti_operand"
- (and (match_code "reg,subreg,mem,const_int")
- (ior (match_operand 0 "register_operand")
- (ior (match_operand 0 "memory_operand")
- (match_operand 0 "const_int_operand")))))
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "memory_operand")
+ (and (match_operand 0 "const_scalar_int_operand")
+ (match_test "aarch64_mov128_immediate (op)"))))
(define_predicate "aarch64_reg_or_imm"
- (and (match_code "reg,subreg,const_int")
- (ior (match_operand 0 "register_operand")
- (match_operand 0 "const_int_operand"))))
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_scalar_int_operand")))
;; True for integer comparisons and for FP comparisons other than LTGT or UNEQ.
(define_special_predicate "aarch64_comparison_operator"
diff -Nurp a/gcc/testsuite/gcc.target/aarch64/pr78733.c b/gcc/testsuite/gcc.target/aarch64/pr78733.c
--- a/gcc/testsuite/gcc.target/aarch64/pr78733.c 2016-12-09 09:26:07.297066000 -0500
+++ b/gcc/testsuite/gcc.target/aarch64/pr78733.c 2021-06-20 22:22:37.852000000 -0400
@@ -1,10 +1,13 @@
/* { dg-do compile } */
-/* { dg-options "-O2 -mpc-relative-literal-loads" } */
+/* { dg-options "-O2 -mcmodel=large -mpc-relative-literal-loads" } */
+/* { dg-require-effective-target lp64 } */
+/* { dg-skip-if "-mcmodel=large, no support for -fpic" { aarch64-*-* } { "-fpic" } { "" } } */
__int128
t (void)
{
- return (__int128)1 << 80;
+ return ((__int128)0x123456789abcdef << 64) | 0xfedcba987654321;
}
/* { dg-final { scan-assembler "adr" } } */
+/* { dg-final { scan-assembler-not "adrp" } } */
diff -Nurp a/gcc/testsuite/gcc.target/aarch64/pr79041-2.c b/gcc/testsuite/gcc.target/aarch64/pr79041-2.c
--- a/gcc/testsuite/gcc.target/aarch64/pr79041-2.c 2017-07-26 07:57:57.970160000 -0400
+++ b/gcc/testsuite/gcc.target/aarch64/pr79041-2.c 2021-06-20 22:22:37.852000000 -0400
@@ -1,11 +1,12 @@
/* { dg-do compile } */
/* { dg-options "-O2 -mcmodel=large -mpc-relative-literal-loads" } */
/* { dg-require-effective-target lp64 } */
+/* { dg-skip-if "-mcmodel=large, no support for -fpic" { aarch64-*-* } { "-fpic" } { "" } } */
__int128
t (void)
{
- return (__int128)1 << 80;
+ return ((__int128)0x123456789abcdef << 64) | 0xfedcba987654321;
}
/* { dg-final { scan-assembler "adr" } } */