Added patch to implement 64-bit atomic instructions in OpenMP.
This commit is contained in: parent 27ea029694, commit 7b0d10f1f4
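The patches below implement each 64-bit OpenMP atomic in Wine's vcomp as a compare-and-swap retry loop around interlocked_cmpxchg64: read the current value, compute the result, and retry if another thread changed the destination in the meantime. A minimal stand-alone sketch of that pattern, written with C11 atomics in place of Wine's interlocked_cmpxchg64 helper (illustrative only, not the code that ships in the patch):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Same retry loop as the new _vcomp_atomic_add_i8 and friends, but using
 * C11 atomics instead of Wine's interlocked_cmpxchg64. */
static void atomic_add_i8(_Atomic int64_t *dest, int64_t val)
{
    int64_t old = atomic_load(dest);
    /* On failure the compare-exchange refreshes 'old' with the value another
     * thread stored, and the loop recomputes old + val and tries again. */
    while (!atomic_compare_exchange_weak(dest, &old, old + val))
        ;
}

int main(void)
{
    _Atomic int64_t value = 0x1122334455667788;
    atomic_add_i8(&value, 0x7766554433221100);
    /* prints 8888888888888888, the unsigned view of the -0x7777777777777778
     * expected by the new tests in patch 0003 */
    printf("%llx\n", (unsigned long long)atomic_load(&value));
    return 0;
}

The same loop body, with the arithmetic swapped out, is repeated for add, and, div, mul, or, shl, shr, sub and xor in dlls/vcomp/main.c below.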
@@ -51,7 +51,7 @@ usage()
# Get the upstream commit sha
upstream_commit()
{
echo "f2ceadc6e6e6b7906400b6df84ac385d263cf394"
echo "26c9bd9f15c364215be9731bb050454c14d90767"
}

# Show version information
@@ -299,6 +299,7 @@ patch_enable_all ()
enable_user32_WM_MDICALCCHILDSCROLL="$1"
enable_user32_WndProc="$1"
enable_uxtheme_GTK_Theming="$1"
enable_vcomp_Atomic_I8="$1"
enable_version_VerQueryValue="$1"
enable_vmm_vxd_PageReserve="$1"
enable_wbemdisp_ISWbemSecurity="$1"
@@ -1020,6 +1021,9 @@ patch_enable ()
uxtheme-GTK_Theming)
enable_uxtheme_GTK_Theming="$2"
;;
vcomp-Atomic_I8)
enable_vcomp_Atomic_I8="$2"
;;
version-VerQueryValue)
enable_version_VerQueryValue="$2"
;;
@@ -5839,6 +5843,23 @@ if test "$enable_uxtheme_GTK_Theming" -eq 1; then
) >> "$patchlist"
fi

# Patchset vcomp-Atomic_I8
# |
# | Modified files:
# | * dlls/vcomp/main.c, dlls/vcomp/tests/vcomp.c, dlls/vcomp/vcomp.spec, dlls/vcomp100/vcomp100.spec,
# | dlls/vcomp110/vcomp110.spec, dlls/vcomp120/vcomp120.spec, dlls/vcomp90/vcomp90.spec
# |
if test "$enable_vcomp_Atomic_I8" -eq 1; then
patch_apply vcomp-Atomic_I8/0001-vcomp-tests-Reenable-architecture-dependent-tests.patch
patch_apply vcomp-Atomic_I8/0002-vcomp-Implement-64-bit-atomic-instructions.patch
patch_apply vcomp-Atomic_I8/0003-vcomp-tests-Add-tests-for-64-bit-atomic-instructions.patch
(
echo '+ { "Sebastian Lackner", "vcomp/tests: Reenable architecture dependent tests.", 1 },';
echo '+ { "Sebastian Lackner", "vcomp: Implement 64-bit atomic instructions.", 1 },';
echo '+ { "Sebastian Lackner", "vcomp/tests: Add tests for 64-bit atomic instructions.", 1 },';
) >> "$patchlist"
fi

# Patchset version-VerQueryValue
# |
# | Modified files:

patches/vcomp-Atomic_I8/0001-vcomp-tests-Reenable-architecture-dependent-tests.patch (new file)
@@ -0,0 +1,46 @@
From 94973c7be88a18b4b4f79cea79f82f450cb659f0 Mon Sep 17 00:00:00 2001
From: Sebastian Lackner <sebastian@fds-team.de>
Date: Thu, 14 Jan 2016 07:18:08 +0100
Subject: vcomp/tests: Reenable architecture dependent tests.

---
dlls/vcomp/tests/vcomp.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/dlls/vcomp/tests/vcomp.c b/dlls/vcomp/tests/vcomp.c
index 4f030d3..5c177df 100644
--- a/dlls/vcomp/tests/vcomp.c
+++ b/dlls/vcomp/tests/vcomp.c
@@ -1406,13 +1406,15 @@ static void test_atomic_integer32(void)
{ p_vcomp_atomic_mul_i4, 0x11223344, -0x77665544, 0xecccdf0 },
{ p_vcomp_atomic_or_i4, 0x11223344, 0x77665544, 0x77667744 },
{ p_vcomp_atomic_shl_i4, 0x11223344, 3, -0x76ee65e0 },
- /* { p_vcomp_atomic_shl_i4, 0x11223344, 35, -0x76ee65e0 }, */ /* depends on Architecture */
{ p_vcomp_atomic_shl_i4, -0x11223344, 3, 0x76ee65e0 },
{ p_vcomp_atomic_shr_i4, 0x11223344, 3, 0x2244668 },
- /* { p_vcomp_atomic_shr_i4, 0x11223344, 35, 0x2244668 }, */ /* depends on Architecture */
{ p_vcomp_atomic_shr_i4, -0x11223344, 3, -0x2244669 },
{ p_vcomp_atomic_sub_i4, 0x11223344, 0x77665544, -0x66442200 },
{ p_vcomp_atomic_xor_i4, 0x11223344, 0x77665544, 0x66446600 },
+ #if defined(__i386__) || defined(__x86_64__)
+ { p_vcomp_atomic_shl_i4, 0x11223344, 35, -0x76ee65e0 },
+ { p_vcomp_atomic_shr_i4, 0x11223344, 35, 0x2244668 },
+ #endif
};
struct
{
@@ -1424,8 +1426,10 @@ static void test_atomic_integer32(void)
{ p_vcomp_atomic_div_ui4, 0x77665544, 0x11223344, 6 },
{ p_vcomp_atomic_div_ui4, 0x77665544, 0xeeddccbc, 0 },
{ p_vcomp_atomic_shr_ui4, 0x11223344, 3, 0x2244668 },
- /* { p_vcomp_atomic_shr_ui4, 0x11223344, 35, 0x2244668 }, */ /* depends on Architecture */
{ p_vcomp_atomic_shr_ui4, 0xeeddccbc, 3, 0x1ddbb997 },
+ #if defined(__i386__) || defined(__x86_64__)
+ { p_vcomp_atomic_shr_ui4, 0x11223344, 35, 0x2244668 },
+ #endif
};
int i;

--
2.6.4
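The two re-enabled cases above use shift counts larger than the 32-bit operand width. On x86 and x86-64 the native 32-bit shift instructions only look at the low five bits of the count, so shifting by 35 gives the same result as shifting by 3, which is why the expectations (-0x76ee65e0 and 0x2244668) are now guarded by __i386__/__x86_64__ rather than left commented out; other architectures can produce different results. A small stand-alone illustration of that masking, not part of the patch (the mask is written out explicitly because a shift count of 35 on a 32-bit value is undefined behaviour in plain C):

#include <stdio.h>

int main(void)
{
    unsigned int value = 0x11223344;
    unsigned int count = 35;
    unsigned int masked = count & 31;  /* x86 32-bit shifts see only count mod 32, i.e. 3 */

    /* 0x89119a20 is the unsigned view of the test's expected -0x76ee65e0 */
    printf("0x%08x << 35 (x86 behaviour) = 0x%08x\n", value, value << masked);
    printf("0x%08x >> 35 (x86 behaviour) = 0x%08x\n", value, value >> masked);
    return 0;
}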

patches/vcomp-Atomic_I8/0002-vcomp-Implement-64-bit-atomic-instructions.patch (new file)
@@ -0,0 +1,458 @@
From bf16d653d692fdb4b3f5f595f1fa9cf58d9b2947 Mon Sep 17 00:00:00 2001
From: Sebastian Lackner <sebastian@fds-team.de>
Date: Thu, 14 Jan 2016 07:20:03 +0100
Subject: vcomp: Implement 64-bit atomic instructions.

---
dlls/vcomp/main.c | 68 ++++++++++++++++++++++++++++++++++++++++++++-
dlls/vcomp/vcomp.spec | 22 +++++++--------
dlls/vcomp100/vcomp100.spec | 22 +++++++--------
dlls/vcomp110/vcomp110.spec | 22 +++++++--------
dlls/vcomp120/vcomp120.spec | 22 +++++++--------
dlls/vcomp90/vcomp90.spec | 22 +++++++--------
6 files changed, 122 insertions(+), 56 deletions(-)

diff --git a/dlls/vcomp/main.c b/dlls/vcomp/main.c
index 12fd2a5..446b83d 100644
--- a/dlls/vcomp/main.c
+++ b/dlls/vcomp/main.c
@@ -4,7 +4,7 @@
*
* Copyright 2011 Austin English
* Copyright 2012 Dan Kegel
- * Copyright 2015 Sebastian Lackner
+ * Copyright 2015-2016 Sebastian Lackner
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -356,6 +356,72 @@ void CDECL _vcomp_atomic_xor_i4(int *dest, int val)
do old = *dest; while (interlocked_cmpxchg(dest, old ^ val, old) != old);
}

+void CDECL _vcomp_atomic_add_i8(LONG64 *dest, LONG64 val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old + val, old) != old);
+}
+
+void CDECL _vcomp_atomic_and_i8(LONG64 *dest, LONG64 val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old & val, old) != old);
+}
+
+void CDECL _vcomp_atomic_div_i8(LONG64 *dest, LONG64 val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old / val, old) != old);
+}
+
+void CDECL _vcomp_atomic_div_ui8(ULONG64 *dest, ULONG64 val)
+{
+ ULONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64((LONG64 *)dest, old / val, old) != old);
+}
+
+void CDECL _vcomp_atomic_mul_i8(LONG64 *dest, LONG64 val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old * val, old) != old);
+}
+
+void CDECL _vcomp_atomic_or_i8(LONG64 *dest, LONG64 val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old | val, old) != old);
+}
+
+void CDECL _vcomp_atomic_shl_i8(LONG64 *dest, unsigned int val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old << val, old) != old);
+}
+
+void CDECL _vcomp_atomic_shr_i8(LONG64 *dest, unsigned int val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old >> val, old) != old);
+}
+
+void CDECL _vcomp_atomic_shr_ui8(ULONG64 *dest, unsigned int val)
+{
+ ULONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64((LONG64 *)dest, old >> val, old) != old);
+}
+
+void CDECL _vcomp_atomic_sub_i8(LONG64 *dest, LONG64 val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old - val, old) != old);
+}
+
+void CDECL _vcomp_atomic_xor_i8(LONG64 *dest, LONG64 val)
+{
+ LONG64 old;
+ do old = *dest; while (interlocked_cmpxchg64(dest, old ^ val, old) != old);
+}
+
void CDECL _vcomp_atomic_add_r4(float *dest, float val)
{
int old, new;
diff --git a/dlls/vcomp/vcomp.spec b/dlls/vcomp/vcomp.spec
index 7703e2e..eff411e 100644
--- a/dlls/vcomp/vcomp.spec
+++ b/dlls/vcomp/vcomp.spec
@@ -1,55 +1,55 @@
@ stub _vcomp_atomic_add_i1
@ stub _vcomp_atomic_add_i2
@ cdecl _vcomp_atomic_add_i4(ptr long)
-@ stub _vcomp_atomic_add_i8
+@ cdecl _vcomp_atomic_add_i8(ptr int64)
@ cdecl _vcomp_atomic_add_r4(ptr float)
@ cdecl _vcomp_atomic_add_r8(ptr double)
@ stub _vcomp_atomic_and_i1
@ stub _vcomp_atomic_and_i2
@ cdecl _vcomp_atomic_and_i4(ptr long)
-@ stub _vcomp_atomic_and_i8
+@ cdecl _vcomp_atomic_and_i8(ptr int64)
@ stub _vcomp_atomic_div_i1
@ stub _vcomp_atomic_div_i2
@ cdecl _vcomp_atomic_div_i4(ptr long)
-@ stub _vcomp_atomic_div_i8
+@ cdecl _vcomp_atomic_div_i8(ptr int64)
@ cdecl _vcomp_atomic_div_r4(ptr float)
@ cdecl _vcomp_atomic_div_r8(ptr double)
@ stub _vcomp_atomic_div_ui1
@ stub _vcomp_atomic_div_ui2
@ cdecl _vcomp_atomic_div_ui4(ptr long)
-@ stub _vcomp_atomic_div_ui8
+@ cdecl _vcomp_atomic_div_ui8(ptr int64)
@ stub _vcomp_atomic_mul_i1
@ stub _vcomp_atomic_mul_i2
@ cdecl _vcomp_atomic_mul_i4(ptr long)
-@ stub _vcomp_atomic_mul_i8
+@ cdecl _vcomp_atomic_mul_i8(ptr int64)
@ cdecl _vcomp_atomic_mul_r4(ptr float)
@ cdecl _vcomp_atomic_mul_r8(ptr double)
@ stub _vcomp_atomic_or_i1
@ stub _vcomp_atomic_or_i2
@ cdecl _vcomp_atomic_or_i4(ptr long)
-@ stub _vcomp_atomic_or_i8
+@ cdecl _vcomp_atomic_or_i8(ptr int64)
@ stub _vcomp_atomic_shl_i1
@ stub _vcomp_atomic_shl_i2
@ cdecl _vcomp_atomic_shl_i4(ptr long)
-@ stub _vcomp_atomic_shl_i8
+@ cdecl _vcomp_atomic_shl_i8(ptr long)
@ stub _vcomp_atomic_shr_i1
@ stub _vcomp_atomic_shr_i2
@ cdecl _vcomp_atomic_shr_i4(ptr long)
-@ stub _vcomp_atomic_shr_i8
+@ cdecl _vcomp_atomic_shr_i8(ptr long)
@ stub _vcomp_atomic_shr_ui1
@ stub _vcomp_atomic_shr_ui2
@ cdecl _vcomp_atomic_shr_ui4(ptr long)
-@ stub _vcomp_atomic_shr_ui8
+@ cdecl _vcomp_atomic_shr_ui8(ptr long)
@ stub _vcomp_atomic_sub_i1
@ stub _vcomp_atomic_sub_i2
@ cdecl _vcomp_atomic_sub_i4(ptr long)
-@ stub _vcomp_atomic_sub_i8
+@ cdecl _vcomp_atomic_sub_i8(ptr int64)
@ cdecl _vcomp_atomic_sub_r4(ptr float)
@ cdecl _vcomp_atomic_sub_r8(ptr double)
@ stub _vcomp_atomic_xor_i1
@ stub _vcomp_atomic_xor_i2
@ cdecl _vcomp_atomic_xor_i4(ptr long)
-@ stub _vcomp_atomic_xor_i8
+@ cdecl _vcomp_atomic_xor_i8(ptr int64)
@ cdecl _vcomp_barrier()
@ stub _vcomp_copyprivate_broadcast
@ stub _vcomp_copyprivate_receive
diff --git a/dlls/vcomp100/vcomp100.spec b/dlls/vcomp100/vcomp100.spec
index 849125f..ba1f414 100644
--- a/dlls/vcomp100/vcomp100.spec
+++ b/dlls/vcomp100/vcomp100.spec
@@ -1,55 +1,55 @@
@ stub _vcomp_atomic_add_i1
@ stub _vcomp_atomic_add_i2
@ cdecl _vcomp_atomic_add_i4(ptr long) vcomp._vcomp_atomic_add_i4
-@ stub _vcomp_atomic_add_i8
+@ cdecl _vcomp_atomic_add_i8(ptr int64) vcomp._vcomp_atomic_add_i8
@ cdecl _vcomp_atomic_add_r4(ptr float) vcomp._vcomp_atomic_add_r4
@ cdecl _vcomp_atomic_add_r8(ptr double) vcomp._vcomp_atomic_add_r8
@ stub _vcomp_atomic_and_i1
@ stub _vcomp_atomic_and_i2
@ cdecl _vcomp_atomic_and_i4(ptr long) vcomp._vcomp_atomic_and_i4
-@ stub _vcomp_atomic_and_i8
+@ cdecl _vcomp_atomic_and_i8(ptr int64) vcomp._vcomp_atomic_and_i8
@ stub _vcomp_atomic_div_i1
@ stub _vcomp_atomic_div_i2
@ cdecl _vcomp_atomic_div_i4(ptr long) vcomp._vcomp_atomic_div_i4
-@ stub _vcomp_atomic_div_i8
+@ cdecl _vcomp_atomic_div_i8(ptr int64) vcomp._vcomp_atomic_div_i8
@ cdecl _vcomp_atomic_div_r4(ptr float) vcomp._vcomp_atomic_div_r4
@ cdecl _vcomp_atomic_div_r8(ptr double) vcomp._vcomp_atomic_div_r8
@ stub _vcomp_atomic_div_ui1
@ stub _vcomp_atomic_div_ui2
@ cdecl _vcomp_atomic_div_ui4(ptr long) vcomp._vcomp_atomic_div_ui4
-@ stub _vcomp_atomic_div_ui8
+@ cdecl _vcomp_atomic_div_ui8(ptr int64) vcomp._vcomp_atomic_div_ui8
@ stub _vcomp_atomic_mul_i1
@ stub _vcomp_atomic_mul_i2
@ cdecl _vcomp_atomic_mul_i4(ptr long) vcomp._vcomp_atomic_mul_i4
-@ stub _vcomp_atomic_mul_i8
+@ cdecl _vcomp_atomic_mul_i8(ptr int64) vcomp._vcomp_atomic_mul_i8
@ cdecl _vcomp_atomic_mul_r4(ptr float) vcomp._vcomp_atomic_mul_r4
@ cdecl _vcomp_atomic_mul_r8(ptr double) vcomp._vcomp_atomic_mul_r8
@ stub _vcomp_atomic_or_i1
@ stub _vcomp_atomic_or_i2
@ cdecl _vcomp_atomic_or_i4(ptr long) vcomp._vcomp_atomic_or_i4
-@ stub _vcomp_atomic_or_i8
+@ cdecl _vcomp_atomic_or_i8(ptr int64) vcomp._vcomp_atomic_or_i8
@ stub _vcomp_atomic_shl_i1
@ stub _vcomp_atomic_shl_i2
@ cdecl _vcomp_atomic_shl_i4(ptr long) vcomp._vcomp_atomic_shl_i4
-@ stub _vcomp_atomic_shl_i8
+@ cdecl _vcomp_atomic_shl_i8(ptr long) vcomp._vcomp_atomic_shl_i8
@ stub _vcomp_atomic_shr_i1
@ stub _vcomp_atomic_shr_i2
@ cdecl _vcomp_atomic_shr_i4(ptr long) vcomp._vcomp_atomic_shr_i4
-@ stub _vcomp_atomic_shr_i8
+@ cdecl _vcomp_atomic_shr_i8(ptr long) vcomp._vcomp_atomic_shr_i8
@ stub _vcomp_atomic_shr_ui1
@ stub _vcomp_atomic_shr_ui2
@ cdecl _vcomp_atomic_shr_ui4(ptr long) vcomp._vcomp_atomic_shr_ui4
-@ stub _vcomp_atomic_shr_ui8
+@ cdecl _vcomp_atomic_shr_ui8(ptr long) vcomp._vcomp_atomic_shr_ui8
@ stub _vcomp_atomic_sub_i1
@ stub _vcomp_atomic_sub_i2
@ cdecl _vcomp_atomic_sub_i4(ptr long) vcomp._vcomp_atomic_sub_i4
-@ stub _vcomp_atomic_sub_i8
+@ cdecl _vcomp_atomic_sub_i8(ptr int64) vcomp._vcomp_atomic_sub_i8
@ cdecl _vcomp_atomic_sub_r4(ptr float) vcomp._vcomp_atomic_sub_r4
@ cdecl _vcomp_atomic_sub_r8(ptr double) vcomp._vcomp_atomic_sub_r8
@ stub _vcomp_atomic_xor_i1
@ stub _vcomp_atomic_xor_i2
@ cdecl _vcomp_atomic_xor_i4(ptr long) vcomp._vcomp_atomic_xor_i4
-@ stub _vcomp_atomic_xor_i8
+@ cdecl _vcomp_atomic_xor_i8(ptr int64) vcomp._vcomp_atomic_xor_i8
@ cdecl _vcomp_barrier() vcomp._vcomp_barrier
@ stub _vcomp_copyprivate_broadcast
@ stub _vcomp_copyprivate_receive
diff --git a/dlls/vcomp110/vcomp110.spec b/dlls/vcomp110/vcomp110.spec
index 87a7205..8389d27 100644
--- a/dlls/vcomp110/vcomp110.spec
+++ b/dlls/vcomp110/vcomp110.spec
@@ -2,55 +2,55 @@
@ stub _vcomp_atomic_add_i1
@ stub _vcomp_atomic_add_i2
@ cdecl _vcomp_atomic_add_i4(ptr long) vcomp._vcomp_atomic_add_i4
-@ stub _vcomp_atomic_add_i8
+@ cdecl _vcomp_atomic_add_i8(ptr int64) vcomp._vcomp_atomic_add_i8
@ cdecl _vcomp_atomic_add_r4(ptr float) vcomp._vcomp_atomic_add_r4
@ cdecl _vcomp_atomic_add_r8(ptr double) vcomp._vcomp_atomic_add_r8
@ stub _vcomp_atomic_and_i1
@ stub _vcomp_atomic_and_i2
@ cdecl _vcomp_atomic_and_i4(ptr long) vcomp._vcomp_atomic_and_i4
-@ stub _vcomp_atomic_and_i8
+@ cdecl _vcomp_atomic_and_i8(ptr int64) vcomp._vcomp_atomic_and_i8
@ stub _vcomp_atomic_div_i1
@ stub _vcomp_atomic_div_i2
@ cdecl _vcomp_atomic_div_i4(ptr long) vcomp._vcomp_atomic_div_i4
-@ stub _vcomp_atomic_div_i8
+@ cdecl _vcomp_atomic_div_i8(ptr int64) vcomp._vcomp_atomic_div_i8
@ cdecl _vcomp_atomic_div_r4(ptr float) vcomp._vcomp_atomic_div_r4
@ cdecl _vcomp_atomic_div_r8(ptr double) vcomp._vcomp_atomic_div_r8
@ stub _vcomp_atomic_div_ui1
@ stub _vcomp_atomic_div_ui2
@ cdecl _vcomp_atomic_div_ui4(ptr long) vcomp._vcomp_atomic_div_ui4
-@ stub _vcomp_atomic_div_ui8
+@ cdecl _vcomp_atomic_div_ui8(ptr int64) vcomp._vcomp_atomic_div_ui8
@ stub _vcomp_atomic_mul_i1
@ stub _vcomp_atomic_mul_i2
@ cdecl _vcomp_atomic_mul_i4(ptr long) vcomp._vcomp_atomic_mul_i4
-@ stub _vcomp_atomic_mul_i8
+@ cdecl _vcomp_atomic_mul_i8(ptr int64) vcomp._vcomp_atomic_mul_i8
@ cdecl _vcomp_atomic_mul_r4(ptr float) vcomp._vcomp_atomic_mul_r4
@ cdecl _vcomp_atomic_mul_r8(ptr double) vcomp._vcomp_atomic_mul_r8
@ stub _vcomp_atomic_or_i1
@ stub _vcomp_atomic_or_i2
@ cdecl _vcomp_atomic_or_i4(ptr long) vcomp._vcomp_atomic_or_i4
-@ stub _vcomp_atomic_or_i8
+@ cdecl _vcomp_atomic_or_i8(ptr int64) vcomp._vcomp_atomic_or_i8
@ stub _vcomp_atomic_shl_i1
@ stub _vcomp_atomic_shl_i2
@ cdecl _vcomp_atomic_shl_i4(ptr long) vcomp._vcomp_atomic_shl_i4
-@ stub _vcomp_atomic_shl_i8
+@ cdecl _vcomp_atomic_shl_i8(ptr long) vcomp._vcomp_atomic_shl_i8
@ stub _vcomp_atomic_shr_i1
@ stub _vcomp_atomic_shr_i2
@ cdecl _vcomp_atomic_shr_i4(ptr long) vcomp._vcomp_atomic_shr_i4
-@ stub _vcomp_atomic_shr_i8
+@ cdecl _vcomp_atomic_shr_i8(ptr long) vcomp._vcomp_atomic_shr_i8
@ stub _vcomp_atomic_shr_ui1
@ stub _vcomp_atomic_shr_ui2
@ cdecl _vcomp_atomic_shr_ui4(ptr long) vcomp._vcomp_atomic_shr_ui4
-@ stub _vcomp_atomic_shr_ui8
+@ cdecl _vcomp_atomic_shr_ui8(ptr long) vcomp._vcomp_atomic_shr_ui8
@ stub _vcomp_atomic_sub_i1
@ stub _vcomp_atomic_sub_i2
@ cdecl _vcomp_atomic_sub_i4(ptr long) vcomp._vcomp_atomic_sub_i4
-@ stub _vcomp_atomic_sub_i8
+@ cdecl _vcomp_atomic_sub_i8(ptr int64) vcomp._vcomp_atomic_sub_i8
@ cdecl _vcomp_atomic_sub_r4(ptr float) vcomp._vcomp_atomic_sub_r4
@ cdecl _vcomp_atomic_sub_r8(ptr double) vcomp._vcomp_atomic_sub_r8
@ stub _vcomp_atomic_xor_i1
@ stub _vcomp_atomic_xor_i2
@ cdecl _vcomp_atomic_xor_i4(ptr long) vcomp._vcomp_atomic_xor_i4
-@ stub _vcomp_atomic_xor_i8
+@ cdecl _vcomp_atomic_xor_i8(ptr int64) vcomp._vcomp_atomic_xor_i8
@ cdecl _vcomp_barrier() vcomp._vcomp_barrier
@ stub _vcomp_copyprivate_broadcast
@ stub _vcomp_copyprivate_receive
diff --git a/dlls/vcomp120/vcomp120.spec b/dlls/vcomp120/vcomp120.spec
index 87a7205..8389d27 100644
--- a/dlls/vcomp120/vcomp120.spec
+++ b/dlls/vcomp120/vcomp120.spec
@@ -2,55 +2,55 @@
@ stub _vcomp_atomic_add_i1
@ stub _vcomp_atomic_add_i2
@ cdecl _vcomp_atomic_add_i4(ptr long) vcomp._vcomp_atomic_add_i4
-@ stub _vcomp_atomic_add_i8
+@ cdecl _vcomp_atomic_add_i8(ptr int64) vcomp._vcomp_atomic_add_i8
@ cdecl _vcomp_atomic_add_r4(ptr float) vcomp._vcomp_atomic_add_r4
@ cdecl _vcomp_atomic_add_r8(ptr double) vcomp._vcomp_atomic_add_r8
@ stub _vcomp_atomic_and_i1
@ stub _vcomp_atomic_and_i2
@ cdecl _vcomp_atomic_and_i4(ptr long) vcomp._vcomp_atomic_and_i4
-@ stub _vcomp_atomic_and_i8
+@ cdecl _vcomp_atomic_and_i8(ptr int64) vcomp._vcomp_atomic_and_i8
@ stub _vcomp_atomic_div_i1
@ stub _vcomp_atomic_div_i2
@ cdecl _vcomp_atomic_div_i4(ptr long) vcomp._vcomp_atomic_div_i4
-@ stub _vcomp_atomic_div_i8
+@ cdecl _vcomp_atomic_div_i8(ptr int64) vcomp._vcomp_atomic_div_i8
@ cdecl _vcomp_atomic_div_r4(ptr float) vcomp._vcomp_atomic_div_r4
@ cdecl _vcomp_atomic_div_r8(ptr double) vcomp._vcomp_atomic_div_r8
@ stub _vcomp_atomic_div_ui1
@ stub _vcomp_atomic_div_ui2
@ cdecl _vcomp_atomic_div_ui4(ptr long) vcomp._vcomp_atomic_div_ui4
-@ stub _vcomp_atomic_div_ui8
+@ cdecl _vcomp_atomic_div_ui8(ptr int64) vcomp._vcomp_atomic_div_ui8
@ stub _vcomp_atomic_mul_i1
@ stub _vcomp_atomic_mul_i2
@ cdecl _vcomp_atomic_mul_i4(ptr long) vcomp._vcomp_atomic_mul_i4
-@ stub _vcomp_atomic_mul_i8
+@ cdecl _vcomp_atomic_mul_i8(ptr int64) vcomp._vcomp_atomic_mul_i8
@ cdecl _vcomp_atomic_mul_r4(ptr float) vcomp._vcomp_atomic_mul_r4
@ cdecl _vcomp_atomic_mul_r8(ptr double) vcomp._vcomp_atomic_mul_r8
@ stub _vcomp_atomic_or_i1
@ stub _vcomp_atomic_or_i2
@ cdecl _vcomp_atomic_or_i4(ptr long) vcomp._vcomp_atomic_or_i4
-@ stub _vcomp_atomic_or_i8
+@ cdecl _vcomp_atomic_or_i8(ptr int64) vcomp._vcomp_atomic_or_i8
@ stub _vcomp_atomic_shl_i1
@ stub _vcomp_atomic_shl_i2
@ cdecl _vcomp_atomic_shl_i4(ptr long) vcomp._vcomp_atomic_shl_i4
-@ stub _vcomp_atomic_shl_i8
+@ cdecl _vcomp_atomic_shl_i8(ptr long) vcomp._vcomp_atomic_shl_i8
@ stub _vcomp_atomic_shr_i1
@ stub _vcomp_atomic_shr_i2
@ cdecl _vcomp_atomic_shr_i4(ptr long) vcomp._vcomp_atomic_shr_i4
-@ stub _vcomp_atomic_shr_i8
+@ cdecl _vcomp_atomic_shr_i8(ptr long) vcomp._vcomp_atomic_shr_i8
@ stub _vcomp_atomic_shr_ui1
@ stub _vcomp_atomic_shr_ui2
@ cdecl _vcomp_atomic_shr_ui4(ptr long) vcomp._vcomp_atomic_shr_ui4
-@ stub _vcomp_atomic_shr_ui8
+@ cdecl _vcomp_atomic_shr_ui8(ptr long) vcomp._vcomp_atomic_shr_ui8
@ stub _vcomp_atomic_sub_i1
@ stub _vcomp_atomic_sub_i2
@ cdecl _vcomp_atomic_sub_i4(ptr long) vcomp._vcomp_atomic_sub_i4
-@ stub _vcomp_atomic_sub_i8
+@ cdecl _vcomp_atomic_sub_i8(ptr int64) vcomp._vcomp_atomic_sub_i8
@ cdecl _vcomp_atomic_sub_r4(ptr float) vcomp._vcomp_atomic_sub_r4
@ cdecl _vcomp_atomic_sub_r8(ptr double) vcomp._vcomp_atomic_sub_r8
@ stub _vcomp_atomic_xor_i1
@ stub _vcomp_atomic_xor_i2
@ cdecl _vcomp_atomic_xor_i4(ptr long) vcomp._vcomp_atomic_xor_i4
-@ stub _vcomp_atomic_xor_i8
+@ cdecl _vcomp_atomic_xor_i8(ptr int64) vcomp._vcomp_atomic_xor_i8
@ cdecl _vcomp_barrier() vcomp._vcomp_barrier
@ stub _vcomp_copyprivate_broadcast
@ stub _vcomp_copyprivate_receive
diff --git a/dlls/vcomp90/vcomp90.spec b/dlls/vcomp90/vcomp90.spec
index 849125f..ba1f414 100644
--- a/dlls/vcomp90/vcomp90.spec
+++ b/dlls/vcomp90/vcomp90.spec
@@ -1,55 +1,55 @@
@ stub _vcomp_atomic_add_i1
@ stub _vcomp_atomic_add_i2
@ cdecl _vcomp_atomic_add_i4(ptr long) vcomp._vcomp_atomic_add_i4
-@ stub _vcomp_atomic_add_i8
+@ cdecl _vcomp_atomic_add_i8(ptr int64) vcomp._vcomp_atomic_add_i8
@ cdecl _vcomp_atomic_add_r4(ptr float) vcomp._vcomp_atomic_add_r4
@ cdecl _vcomp_atomic_add_r8(ptr double) vcomp._vcomp_atomic_add_r8
@ stub _vcomp_atomic_and_i1
@ stub _vcomp_atomic_and_i2
@ cdecl _vcomp_atomic_and_i4(ptr long) vcomp._vcomp_atomic_and_i4
-@ stub _vcomp_atomic_and_i8
+@ cdecl _vcomp_atomic_and_i8(ptr int64) vcomp._vcomp_atomic_and_i8
@ stub _vcomp_atomic_div_i1
@ stub _vcomp_atomic_div_i2
@ cdecl _vcomp_atomic_div_i4(ptr long) vcomp._vcomp_atomic_div_i4
-@ stub _vcomp_atomic_div_i8
+@ cdecl _vcomp_atomic_div_i8(ptr int64) vcomp._vcomp_atomic_div_i8
@ cdecl _vcomp_atomic_div_r4(ptr float) vcomp._vcomp_atomic_div_r4
@ cdecl _vcomp_atomic_div_r8(ptr double) vcomp._vcomp_atomic_div_r8
@ stub _vcomp_atomic_div_ui1
@ stub _vcomp_atomic_div_ui2
@ cdecl _vcomp_atomic_div_ui4(ptr long) vcomp._vcomp_atomic_div_ui4
-@ stub _vcomp_atomic_div_ui8
+@ cdecl _vcomp_atomic_div_ui8(ptr int64) vcomp._vcomp_atomic_div_ui8
@ stub _vcomp_atomic_mul_i1
@ stub _vcomp_atomic_mul_i2
@ cdecl _vcomp_atomic_mul_i4(ptr long) vcomp._vcomp_atomic_mul_i4
-@ stub _vcomp_atomic_mul_i8
+@ cdecl _vcomp_atomic_mul_i8(ptr int64) vcomp._vcomp_atomic_mul_i8
@ cdecl _vcomp_atomic_mul_r4(ptr float) vcomp._vcomp_atomic_mul_r4
@ cdecl _vcomp_atomic_mul_r8(ptr double) vcomp._vcomp_atomic_mul_r8
@ stub _vcomp_atomic_or_i1
@ stub _vcomp_atomic_or_i2
@ cdecl _vcomp_atomic_or_i4(ptr long) vcomp._vcomp_atomic_or_i4
-@ stub _vcomp_atomic_or_i8
+@ cdecl _vcomp_atomic_or_i8(ptr int64) vcomp._vcomp_atomic_or_i8
@ stub _vcomp_atomic_shl_i1
@ stub _vcomp_atomic_shl_i2
@ cdecl _vcomp_atomic_shl_i4(ptr long) vcomp._vcomp_atomic_shl_i4
-@ stub _vcomp_atomic_shl_i8
+@ cdecl _vcomp_atomic_shl_i8(ptr long) vcomp._vcomp_atomic_shl_i8
@ stub _vcomp_atomic_shr_i1
@ stub _vcomp_atomic_shr_i2
@ cdecl _vcomp_atomic_shr_i4(ptr long) vcomp._vcomp_atomic_shr_i4
-@ stub _vcomp_atomic_shr_i8
+@ cdecl _vcomp_atomic_shr_i8(ptr long) vcomp._vcomp_atomic_shr_i8
@ stub _vcomp_atomic_shr_ui1
@ stub _vcomp_atomic_shr_ui2
@ cdecl _vcomp_atomic_shr_ui4(ptr long) vcomp._vcomp_atomic_shr_ui4
-@ stub _vcomp_atomic_shr_ui8
+@ cdecl _vcomp_atomic_shr_ui8(ptr long) vcomp._vcomp_atomic_shr_ui8
@ stub _vcomp_atomic_sub_i1
@ stub _vcomp_atomic_sub_i2
@ cdecl _vcomp_atomic_sub_i4(ptr long) vcomp._vcomp_atomic_sub_i4
-@ stub _vcomp_atomic_sub_i8
+@ cdecl _vcomp_atomic_sub_i8(ptr int64) vcomp._vcomp_atomic_sub_i8
@ cdecl _vcomp_atomic_sub_r4(ptr float) vcomp._vcomp_atomic_sub_r4
@ cdecl _vcomp_atomic_sub_r8(ptr double) vcomp._vcomp_atomic_sub_r8
@ stub _vcomp_atomic_xor_i1
@ stub _vcomp_atomic_xor_i2
@ cdecl _vcomp_atomic_xor_i4(ptr long) vcomp._vcomp_atomic_xor_i4
-@ stub _vcomp_atomic_xor_i8
+@ cdecl _vcomp_atomic_xor_i8(ptr int64) vcomp._vcomp_atomic_xor_i8
@ cdecl _vcomp_barrier() vcomp._vcomp_barrier
@ stub _vcomp_copyprivate_broadcast
@ stub _vcomp_copyprivate_receive
--
2.6.4

patches/vcomp-Atomic_I8/0003-vcomp-tests-Add-tests-for-64-bit-atomic-instructions.patch (new file)
@@ -0,0 +1,249 @@
From 351810ed0612e63620ec612eee12b15e962de6bc Mon Sep 17 00:00:00 2001
From: Sebastian Lackner <sebastian@fds-team.de>
Date: Thu, 14 Jan 2016 07:22:32 +0100
Subject: vcomp/tests: Add tests for 64-bit atomic instructions.

---
dlls/vcomp/tests/vcomp.c | 148 ++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 147 insertions(+), 1 deletion(-)

diff --git a/dlls/vcomp/tests/vcomp.c b/dlls/vcomp/tests/vcomp.c
index 5c177df..d1c242e 100644
--- a/dlls/vcomp/tests/vcomp.c
+++ b/dlls/vcomp/tests/vcomp.c
@@ -2,7 +2,7 @@
* Unit test suite for vcomp
*
* Copyright 2012 Dan Kegel
- * Copyright 2015 Sebastian Lackner
+ * Copyright 2015-2016 Sebastian Lackner
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -19,6 +19,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*/

+#include <stdio.h>
#include "wine/test.h"

static char vcomp_manifest_file[MAX_PATH];
@@ -35,24 +36,35 @@ typedef CRITICAL_SECTION *omp_lock_t;
typedef CRITICAL_SECTION *omp_nest_lock_t;

static void (CDECL *p_vcomp_atomic_add_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_add_i8)(LONG64 *dest, LONG64 val);
static void (CDECL *p_vcomp_atomic_add_r4)(float *dest, float val);
static void (CDECL *p_vcomp_atomic_add_r8)(double *dest, double val);
static void (CDECL *p_vcomp_atomic_and_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_and_i8)(LONG64 *dest, LONG64 val);
static void (CDECL *p_vcomp_atomic_div_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_div_i8)(LONG64 *dest, LONG64 val);
static void (CDECL *p_vcomp_atomic_div_r4)(float *dest, float val);
static void (CDECL *p_vcomp_atomic_div_r8)(double *dest, double val);
static void (CDECL *p_vcomp_atomic_div_ui4)(unsigned int *dest, unsigned int val);
+static void (CDECL *p_vcomp_atomic_div_ui8)(ULONG64 *dest, ULONG64 val);
static void (CDECL *p_vcomp_atomic_mul_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_mul_i8)(LONG64 *dest, LONG64 val);
static void (CDECL *p_vcomp_atomic_mul_r4)(float *dest, float val);
static void (CDECL *p_vcomp_atomic_mul_r8)(double *dest, double val);
static void (CDECL *p_vcomp_atomic_or_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_or_i8)(LONG64 *dest, LONG64 val);
static void (CDECL *p_vcomp_atomic_shl_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_shl_i8)(LONG64 *dest, unsigned int val);
static void (CDECL *p_vcomp_atomic_shr_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_shr_i8)(LONG64 *dest, unsigned int val);
static void (CDECL *p_vcomp_atomic_shr_ui4)(unsigned int *dest, unsigned int val);
+static void (CDECL *p_vcomp_atomic_shr_ui8)(ULONG64 *dest, unsigned int val);
static void (CDECL *p_vcomp_atomic_sub_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_sub_i8)(LONG64 *dest, LONG64 val);
static void (CDECL *p_vcomp_atomic_sub_r4)(float *dest, float val);
static void (CDECL *p_vcomp_atomic_sub_r8)(double *dest, double val);
static void (CDECL *p_vcomp_atomic_xor_i4)(int *dest, int val);
+static void (CDECL *p_vcomp_atomic_xor_i8)(LONG64 *dest, LONG64 val);
static void (CDECL *p_vcomp_barrier)(void);
static void (CDECL *p_vcomp_enter_critsect)(CRITICAL_SECTION **critsect);
static void (CDECL *p_vcomp_flush)(void);
@@ -134,6 +146,16 @@ static const char vcomp_manifest[] =

#undef ARCH

+static const char *debugstr_longlong(ULONGLONG ll)
+{
+ static char str[17];
+ if (sizeof(ll) > sizeof(unsigned long) && ll >> 32)
+ sprintf(str, "%lx%08lx", (unsigned long)(ll >> 32), (unsigned long)ll);
+ else
+ sprintf(str, "%lx", (unsigned long)ll);
+ return str;
+}
+
static void create_vcomp_manifest(void)
{
char temp_path[MAX_PATH];
@@ -228,24 +250,35 @@ static BOOL init_vcomp(void)
}

VCOMP_GET_PROC(_vcomp_atomic_add_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_add_i8);
VCOMP_GET_PROC(_vcomp_atomic_add_r4);
VCOMP_GET_PROC(_vcomp_atomic_add_r8);
VCOMP_GET_PROC(_vcomp_atomic_and_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_and_i8);
VCOMP_GET_PROC(_vcomp_atomic_div_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_div_i8);
VCOMP_GET_PROC(_vcomp_atomic_div_r4);
VCOMP_GET_PROC(_vcomp_atomic_div_r8);
VCOMP_GET_PROC(_vcomp_atomic_div_ui4);
+ VCOMP_GET_PROC(_vcomp_atomic_div_ui8);
VCOMP_GET_PROC(_vcomp_atomic_mul_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_mul_i8);
VCOMP_GET_PROC(_vcomp_atomic_mul_r4);
VCOMP_GET_PROC(_vcomp_atomic_mul_r8);
VCOMP_GET_PROC(_vcomp_atomic_or_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_or_i8);
VCOMP_GET_PROC(_vcomp_atomic_shl_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_shl_i8);
VCOMP_GET_PROC(_vcomp_atomic_shr_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_shr_i8);
VCOMP_GET_PROC(_vcomp_atomic_shr_ui4);
+ VCOMP_GET_PROC(_vcomp_atomic_shr_ui8);
VCOMP_GET_PROC(_vcomp_atomic_sub_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_sub_i8);
VCOMP_GET_PROC(_vcomp_atomic_sub_r4);
VCOMP_GET_PROC(_vcomp_atomic_sub_r8);
VCOMP_GET_PROC(_vcomp_atomic_xor_i4);
+ VCOMP_GET_PROC(_vcomp_atomic_xor_i8);
VCOMP_GET_PROC(_vcomp_barrier);
VCOMP_GET_PROC(_vcomp_enter_critsect);
VCOMP_GET_PROC(_vcomp_flush);
@@ -1447,6 +1480,118 @@ static void test_atomic_integer32(void)
}
}

+static void test_atomic_integer64(void)
+{
+ struct
+ {
+ void (CDECL *func)(LONG64 *, LONG64);
+ LONG64 v1, v2, expected;
+ }
+ tests1[] =
+ {
+ { p_vcomp_atomic_add_i8, 0x1122334455667788, 0x7766554433221100, -0x7777777777777778 },
+ { p_vcomp_atomic_and_i8, 0x1122334455667788, 0x7766554433221100, 0x1122114411221100 },
+ { p_vcomp_atomic_div_i8, 0x7766554433221100, 0x1122334455667788, 6 },
+ { p_vcomp_atomic_div_i8, 0x7766554433221100, -0x1122334455667788, -6 },
+ { p_vcomp_atomic_mul_i8, 0x1122334455667788, 0x7766554433221100, 0x3e963337c6000800 },
+ { p_vcomp_atomic_mul_i8, 0x1122334455667788, -0x7766554433221100, 0xc169ccc839fff800 },
+ { p_vcomp_atomic_or_i8, 0x1122334455667788, 0x7766554433221100, 0x7766774477667788 },
+ { p_vcomp_atomic_sub_i8, 0x1122334455667788, 0x7766554433221100, -0x664421ffddbb9978 },
+ { p_vcomp_atomic_xor_i8, 0x1122334455667788, 0x7766554433221100, 0x6644660066446688 },
+ };
+ struct
+ {
+ void (CDECL *func)(LONG64 *, unsigned int);
+ LONG64 v1;
+ unsigned int v2;
+ LONG64 expected;
+ BOOL todo;
+ }
+ tests2[] =
+ {
+ { p_vcomp_atomic_shl_i8, 0x1122334455667788, 3, -0x76ee65dd54cc43c0 },
+ { p_vcomp_atomic_shl_i8, 0x1122334455667788, 60, 0x8000000000000000 },
+ { p_vcomp_atomic_shl_i8, -0x1122334455667788, 3, 0x76ee65dd54cc43c0 },
+ { p_vcomp_atomic_shr_i8, 0x1122334455667788, 3, 0x22446688aaccef1 },
+ { p_vcomp_atomic_shr_i8, 0x1122334455667788, 60, 1 },
+ { p_vcomp_atomic_shr_i8, -0x1122334455667788, 3, -0x22446688aaccef1 },
+ #if defined(__i386__)
+ { p_vcomp_atomic_shl_i8, 0x1122334455667788, 64, 0, TRUE },
+ { p_vcomp_atomic_shl_i8, 0x1122334455667788, 67, 0, TRUE },
+ { p_vcomp_atomic_shr_i8, 0x1122334455667788, 64, 0, TRUE },
+ { p_vcomp_atomic_shr_i8, 0x1122334455667788, 67, 0, TRUE },
+ #elif defined(__x86_64__)
+ { p_vcomp_atomic_shl_i8, 0x1122334455667788, 64, 0x1122334455667788 },
+ { p_vcomp_atomic_shl_i8, 0x1122334455667788, 67, -0x76ee65dd54cc43c0 },
+ { p_vcomp_atomic_shr_i8, 0x1122334455667788, 64, 0x1122334455667788 },
+ { p_vcomp_atomic_shr_i8, 0x1122334455667788, 67, 0x22446688aaccef1 },
+ #endif
+ };
+ struct
+ {
+ void (CDECL *func)(ULONG64 *, ULONG64);
+ ULONG64 v1, v2, expected;
+ }
+ tests3[] =
+ {
+ { p_vcomp_atomic_div_ui8, 0x7766554455667788, 0x1122334433221100, 6 },
+ { p_vcomp_atomic_div_ui8, 0x7766554455667788, 0xeeddccbbaa998878, 0 },
+ };
+ struct
+ {
+ void (CDECL *func)(ULONG64 *, unsigned int);
+ ULONG64 v1;
+ unsigned int v2;
+ ULONG64 expected;
+ BOOL todo;
+ }
+ tests4[] =
+ {
+ { p_vcomp_atomic_shr_ui8, 0x1122334455667788, 3, 0x22446688aaccef1 },
+ { p_vcomp_atomic_shr_ui8, 0x1122334455667788, 60, 1 },
+ { p_vcomp_atomic_shr_ui8, 0xeeddccbbaa998878, 3, 0x1ddbb9977553310f },
+ #if defined(__i386__)
+ { p_vcomp_atomic_shr_ui8, 0x1122334455667788, 64, 0, TRUE },
+ { p_vcomp_atomic_shr_ui8, 0x1122334455667788, 67, 0, TRUE },
+ #elif defined(__x86_64__)
+ { p_vcomp_atomic_shr_ui8, 0x1122334455667788, 64, 0x1122334455667788 },
+ { p_vcomp_atomic_shr_ui8, 0x1122334455667788, 67, 0x22446688aaccef1 },
+ #endif
+ };
+ int i;
+
+ for (i = 0; i < sizeof(tests1)/sizeof(tests1[0]); i++)
+ {
+ LONG64 val = tests1[i].v1;
+ tests1[i].func(&val, tests1[i].v2);
+ ok(val == tests1[i].expected, "test %d: unexpectedly got %s\n", i, debugstr_longlong(val));
+ }
+ for (i = 0; i < sizeof(tests2)/sizeof(tests2[0]); i++)
+ {
+ LONG64 val = tests2[i].v1;
+ tests2[i].func(&val, tests2[i].v2);
+ if (!tests2[i].todo)
+ ok(val == tests2[i].expected, "test %d: unexpectedly got %s\n", i, debugstr_longlong(val));
+ else todo_wine
+ ok(val == tests2[i].expected, "test %d: unexpectedly got %s\n", i, debugstr_longlong(val));
+ }
+ for (i = 0; i < sizeof(tests3)/sizeof(tests3[0]); i++)
+ {
+ ULONG64 val = tests3[i].v1;
+ tests3[i].func(&val, tests3[i].v2);
+ ok(val == tests3[i].expected, "test %d: unexpectedly got %s\n", i, debugstr_longlong(val));
+ }
+ for (i = 0; i < sizeof(tests4)/sizeof(tests4[0]); i++)
+ {
+ ULONG64 val = tests4[i].v1;
+ tests4[i].func(&val, tests4[i].v2);
+ if (!tests4[i].todo)
+ ok(val == tests4[i].expected, "test %d: unexpectedly got %s\n", i, debugstr_longlong(val));
+ else todo_wine
+ ok(val == tests4[i].expected, "test %d: unexpectedly got %s\n", i, debugstr_longlong(val));
+ }
+}
+
static void test_atomic_float(void)
{
struct
@@ -1516,6 +1661,7 @@ START_TEST(vcomp)
test_omp_init_lock();
test_omp_init_nest_lock();
test_atomic_integer32();
+ test_atomic_integer64();
test_atomic_float();
test_atomic_double();

--
2.6.4

patches/vcomp-Atomic_I8/definition (new file)
@@ -0,0 +1 @@
Fixes: Implement 64-bit atomic instructions in OpenMP