mirror of https://github.com/dashpay/dash.git (synced 2024-12-25 12:02:48 +01:00)

merge bitcoin#24736: fix vmov alignment issues with gcc 10.3.0 & mingw-w64

This commit is contained in:
parent 17103b90f9
commit edf48e4c38
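
This merge vendors Debian's "vmov-alignment" patch into the mingw-w64 cross GCC that Guix builds for Windows release binaries. Background (see the Bug-Debian link in the patch header below): gcc 10.3.0 can emit aligned AVX moves (vmovaps/vmovapd/vmovdqa) for operands that the Windows x64 ABI only guarantees to be 16-byte aligned, and an aligned 32-byte move on a 16-byte-aligned address faults at runtime. The patched compiler emits the unaligned forms (vmovups/vmovupd/vmovdqu) unconditionally, which are also valid on aligned data. Below is a minimal C sketch, not taken from the commit, of the kind of Windows-targeted code that can hit the problem; the build command is an assumed, typical cross invocation.

/* crash_sketch.c - illustrative only; not part of this commit.
 * Assumed build: x86_64-w64-mingw32-gcc -O2 -mavx crash_sketch.c -o crash_sketch.exe
 * An unpatched gcc 10.3.0 may spill the 256-bit accumulator below to the stack
 * with an aligned vmovaps even though the Windows x64 ABI only guarantees
 * 16-byte stack alignment, which can fault at runtime. */
#include <immintrin.h>
#include <stdio.h>

__attribute__((noinline))
static double sum8(const double *p)
{
    /* The loads/stores written here are explicitly unaligned; any aligned
     * vmov in the output comes from the compiler's own spills and copies. */
    __m256d acc = _mm256_add_pd(_mm256_loadu_pd(p), _mm256_loadu_pd(p + 4));
    double lanes[4];
    _mm256_storeu_pd(lanes, acc);
    return lanes[0] + lanes[1] + lanes[2] + lanes[3];
}

int main(void)
{
    double v[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    printf("%.1f\n", sum8(v));   /* expected output: 36.0 */
    return 0;
}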
contrib/guix/manifest.scm

@@ -167,13 +167,17 @@ desirable for building Dash Core release binaries."
 (define (make-gcc-with-pthreads gcc)
   (package-with-extra-configure-variable gcc "--enable-threads" "posix"))
 
+(define (make-mingw-w64-cross-gcc-vmov-alignment cross-gcc)
+  (package-with-extra-patches cross-gcc
+    (search-our-patches "vmov-alignment.patch")))
+
 (define (make-mingw-pthreads-cross-toolchain target)
   "Create a cross-compilation toolchain package for TARGET"
   (let* ((xbinutils (cross-binutils target))
          (pthreads-xlibc mingw-w64-x86_64-winpthreads)
          (pthreads-xgcc (make-gcc-with-pthreads
                          (cross-gcc target
-                                     #:xgcc (make-ssp-fixed-gcc base-gcc)
+                                     #:xgcc (make-ssp-fixed-gcc (make-mingw-w64-cross-gcc-vmov-alignment base-gcc))
                                      #:xbinutils xbinutils
                                      #:libc pthreads-xlibc))))
     ;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and
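
The manifest change above wraps base-gcc in make-mingw-w64-cross-gcc-vmov-alignment, so the Windows cross compiler is built with vmov-alignment.patch (added below) applied. One way to spot-check a rebuilt toolchain, sketched here with an assumed target triplet and flags rather than anything taken from the commit, is to compile a small AVX kernel to assembly and confirm that the vector moves come out in their unaligned form.

/* vmov_check.c - sketch for inspecting the cross compiler's code generation.
 * Assumed invocation: x86_64-w64-mingw32-gcc -O2 -mavx -S vmov_check.c -o vmov_check.s
 * With the patched gcc the moves below should be emitted as vmovups; an
 * unpatched gcc is entitled to use the aligned vmovaps form instead. */
#include <immintrin.h>

/* 32-byte aligned globals, so the compiler is allowed to pick aligned moves;
 * this makes the aligned-vs-unaligned choice visible in the assembly. */
static float a[8] __attribute__((aligned(32)));
static float b[8] __attribute__((aligned(32)));

void add8(void)
{
    __m256 va = _mm256_load_ps(a);                 /* aligned-load intrinsic */
    __m256 vb = _mm256_load_ps(b);
    _mm256_store_ps(a, _mm256_add_ps(va, vb));     /* aligned-store intrinsic */
}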
contrib/guix/patches/vmov-alignment.patch (new file, 267 lines)

@@ -0,0 +1,267 @@
Description: Use unaligned VMOV instructions
Author: Stephen Kitt <skitt@debian.org>
Bug-Debian: https://bugs.debian.org/939559

Based on a patch originally by Claude Heiland-Allen <claude@mathr.co.uk>

--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -1058,17 +1058,11 @@
 {
   if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
     {
-      if (misaligned_operand (operands[1], <MODE>mode))
-        return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
-      else
-        return "vmova<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
+      return "vmovu<ssemodesuffix>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
     }
   else
     {
-      if (misaligned_operand (operands[1], <MODE>mode))
-        return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
-      else
-        return "vmovdqa<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
+      return "vmovdqu<ssescalarsize>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}";
     }
 }
 [(set_attr "type" "ssemov")
@@ -1184,17 +1178,11 @@
 {
   if (FLOAT_MODE_P (GET_MODE_INNER (<MODE>mode)))
     {
-      if (misaligned_operand (operands[0], <MODE>mode))
-        return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
-      else
-        return "vmova<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
+      return "vmovu<ssemodesuffix>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
     }
   else
     {
-      if (misaligned_operand (operands[0], <MODE>mode))
-        return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
-      else
-        return "vmovdqa<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
+      return "vmovdqu<ssescalarsize>\t{%1, %0%{%2%}|%0%{%2%}, %1}";
     }
 }
 [(set_attr "type" "ssemov")
@@ -7806,7 +7794,7 @@
   "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
   "@
    %vmovlps\t{%1, %0|%q0, %1}
-   %vmovaps\t{%1, %0|%0, %1}
+   %vmovups\t{%1, %0|%0, %1}
    %vmovlps\t{%1, %d0|%d0, %q1}"
   [(set_attr "type" "ssemov")
    (set_attr "prefix" "maybe_vex")
@@ -13997,29 +13985,15 @@
   switch (<MODE>mode)
     {
     case E_V8DFmode:
-      if (misaligned_operand (operands[2], <ssequartermode>mode))
-        return "vmovupd\t{%2, %x0|%x0, %2}";
-      else
-        return "vmovapd\t{%2, %x0|%x0, %2}";
+      return "vmovupd\t{%2, %x0|%x0, %2}";
     case E_V16SFmode:
-      if (misaligned_operand (operands[2], <ssequartermode>mode))
-        return "vmovups\t{%2, %x0|%x0, %2}";
-      else
-        return "vmovaps\t{%2, %x0|%x0, %2}";
+      return "vmovups\t{%2, %x0|%x0, %2}";
     case E_V8DImode:
-      if (misaligned_operand (operands[2], <ssequartermode>mode))
-        return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
-                                      : "vmovdqu\t{%2, %x0|%x0, %2}";
-      else
-        return which_alternative == 2 ? "vmovdqa64\t{%2, %x0|%x0, %2}"
-                                      : "vmovdqa\t{%2, %x0|%x0, %2}";
+      return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}"
+                                    : "vmovdqu\t{%2, %x0|%x0, %2}";
     case E_V16SImode:
-      if (misaligned_operand (operands[2], <ssequartermode>mode))
-        return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
-                                      : "vmovdqu\t{%2, %x0|%x0, %2}";
-      else
-        return which_alternative == 2 ? "vmovdqa32\t{%2, %x0|%x0, %2}"
-                                      : "vmovdqa\t{%2, %x0|%x0, %2}";
+      return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}"
+                                    : "vmovdqu\t{%2, %x0|%x0, %2}";
     default:
       gcc_unreachable ();
     }
@@ -21225,63 +21199,27 @@
   switch (get_attr_mode (insn))
     {
     case MODE_V16SF:
-      if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-        return "vmovups\t{%1, %t0|%t0, %1}";
-      else
-        return "vmovaps\t{%1, %t0|%t0, %1}";
+      return "vmovups\t{%1, %t0|%t0, %1}";
     case MODE_V8DF:
-      if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-        return "vmovupd\t{%1, %t0|%t0, %1}";
-      else
-        return "vmovapd\t{%1, %t0|%t0, %1}";
+      return "vmovupd\t{%1, %t0|%t0, %1}";
     case MODE_V8SF:
-      if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-        return "vmovups\t{%1, %x0|%x0, %1}";
-      else
-        return "vmovaps\t{%1, %x0|%x0, %1}";
+      return "vmovups\t{%1, %x0|%x0, %1}";
     case MODE_V4DF:
-      if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-        return "vmovupd\t{%1, %x0|%x0, %1}";
-      else
-        return "vmovapd\t{%1, %x0|%x0, %1}";
+      return "vmovupd\t{%1, %x0|%x0, %1}";
     case MODE_XI:
-      if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-        {
-          if (which_alternative == 2)
-            return "vmovdqu\t{%1, %t0|%t0, %1}";
-          else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-            return "vmovdqu64\t{%1, %t0|%t0, %1}";
-          else
-            return "vmovdqu32\t{%1, %t0|%t0, %1}";
-        }
+      if (which_alternative == 2)
+        return "vmovdqu\t{%1, %t0|%t0, %1}";
+      else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
+        return "vmovdqu64\t{%1, %t0|%t0, %1}";
       else
-        {
-          if (which_alternative == 2)
-            return "vmovdqa\t{%1, %t0|%t0, %1}";
-          else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-            return "vmovdqa64\t{%1, %t0|%t0, %1}";
-          else
-            return "vmovdqa32\t{%1, %t0|%t0, %1}";
-        }
+        return "vmovdqu32\t{%1, %t0|%t0, %1}";
     case MODE_OI:
-      if (misaligned_operand (operands[1], <ssehalfvecmode>mode))
-        {
-          if (which_alternative == 2)
-            return "vmovdqu\t{%1, %x0|%x0, %1}";
-          else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-            return "vmovdqu64\t{%1, %x0|%x0, %1}";
-          else
-            return "vmovdqu32\t{%1, %x0|%x0, %1}";
-        }
+      if (which_alternative == 2)
+        return "vmovdqu\t{%1, %x0|%x0, %1}";
+      else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
+        return "vmovdqu64\t{%1, %x0|%x0, %1}";
       else
-        {
-          if (which_alternative == 2)
-            return "vmovdqa\t{%1, %x0|%x0, %1}";
-          else if (GET_MODE_SIZE (<ssescalarmode>mode) == 8)
-            return "vmovdqa64\t{%1, %x0|%x0, %1}";
-          else
-            return "vmovdqa32\t{%1, %x0|%x0, %1}";
-        }
+        return "vmovdqu32\t{%1, %x0|%x0, %1}";
     default:
       gcc_unreachable ();
     }
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -4981,13 +4981,13 @@
   switch (type)
     {
     case opcode_int:
-      opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
+      opcode = "vmovdqu32";
       break;
     case opcode_float:
-      opcode = misaligned_p ? "vmovups" : "vmovaps";
+      opcode = "vmovups";
       break;
     case opcode_double:
-      opcode = misaligned_p ? "vmovupd" : "vmovapd";
+      opcode = "vmovupd";
       break;
     }
 }
@@ -4996,16 +4996,16 @@
   switch (scalar_mode)
     {
     case E_SFmode:
-      opcode = misaligned_p ? "%vmovups" : "%vmovaps";
+      opcode = "%vmovups";
       break;
     case E_DFmode:
-      opcode = misaligned_p ? "%vmovupd" : "%vmovapd";
+      opcode = "%vmovupd";
       break;
     case E_TFmode:
       if (evex_reg_p)
-        opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
+        opcode = "vmovdqu64";
       else
-        opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
+        opcode = "%vmovdqu";
       break;
     default:
       gcc_unreachable ();
@@ -5017,48 +5017,32 @@
     {
     case E_QImode:
       if (evex_reg_p)
-        opcode = (misaligned_p
-                  ? (TARGET_AVX512BW
-                     ? "vmovdqu8"
-                     : "vmovdqu64")
-                  : "vmovdqa64");
+        opcode = TARGET_AVX512BW ? "vmovdqu8" : "vmovdqu64";
       else
-        opcode = (misaligned_p
-                  ? (TARGET_AVX512BW
-                     ? "vmovdqu8"
-                     : "%vmovdqu")
-                  : "%vmovdqa");
+        opcode = TARGET_AVX512BW ? "vmovdqu8" : "%vmovdqu";
       break;
     case E_HImode:
       if (evex_reg_p)
-        opcode = (misaligned_p
-                  ? (TARGET_AVX512BW
-                     ? "vmovdqu16"
-                     : "vmovdqu64")
-                  : "vmovdqa64");
+        opcode = TARGET_AVX512BW ? "vmovdqu16" : "vmovdqu64";
       else
-        opcode = (misaligned_p
-                  ? (TARGET_AVX512BW
-                     ? "vmovdqu16"
-                     : "%vmovdqu")
-                  : "%vmovdqa");
+        opcode = TARGET_AVX512BW ? "vmovdqu16" : "%vmovdqu";
       break;
     case E_SImode:
       if (evex_reg_p)
-        opcode = misaligned_p ? "vmovdqu32" : "vmovdqa32";
+        opcode = "vmovdqu32";
       else
-        opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
+        opcode = "%vmovdqu";
       break;
     case E_DImode:
     case E_TImode:
     case E_OImode:
       if (evex_reg_p)
-        opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
+        opcode = "vmovdqu64";
       else
-        opcode = misaligned_p ? "%vmovdqu" : "%vmovdqa";
+        opcode = "%vmovdqu";
       break;
     case E_XImode:
-      opcode = misaligned_p ? "vmovdqu64" : "vmovdqa64";
+      opcode = "vmovdqu64";
       break;
     default:
       gcc_unreachable ();
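
For readers skimming the patch: the sse.md pattern changes and the i386.c changes make the same move, dropping the misaligned_operand / misaligned_p branch and returning the unaligned mnemonic unconditionally. A condensed, simplified C sketch of that before/after shape (not the real GCC code; the helper it is modeled on lives in gcc/config/i386/i386.c):

#include <stdbool.h>

/* Before: the aligned form was chosen whenever the operand looked aligned. */
static const char *float_move_opcode_before(bool misaligned_p)
{
    return misaligned_p ? "vmovups" : "vmovaps";
}

/* After: the unaligned form is used unconditionally; it is also correct for
 * aligned data, and on AVX-capable CPUs an unaligned move on an aligned
 * address carries no practical penalty. */
static const char *float_move_opcode_after(bool misaligned_p)
{
    (void)misaligned_p;   /* alignment no longer influences the mnemonic */
    return "vmovups";
}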