Diffstat (limited to 'www-client/firefox/files/arm64-2-import-crbug-for-aarch64.patch')
-rw-r--r--  www-client/firefox/files/arm64-2-import-crbug-for-aarch64.patch  407
1 file changed, 407 insertions, 0 deletions
diff --git a/www-client/firefox/files/arm64-2-import-crbug-for-aarch64.patch b/www-client/firefox/files/arm64-2-import-crbug-for-aarch64.patch
new file mode 100644
index 0000000..1e3d7f3
--- /dev/null
+++ b/www-client/firefox/files/arm64-2-import-crbug-for-aarch64.patch
@@ -0,0 +1,407 @@
+From: Makoto Kato <m_kato@ga2.so-net.ne.jp>
+Date: Sun, 28 Feb 2016 19:11:09 +0900
+Subject: Bug 1250403 - Part 2. Import crbug #354405 for aarch64. r=billm
+
+MozReview-Commit-ID: A3sArb6IE6m
+---
+ ipc/chromium/moz.build | 2 +-
+ ipc/chromium/src/base/atomicops.h | 4 +-
+ .../src/base/atomicops_internals_arm64_gcc.h | 360 +++++++++++++++++++++
+ 3 files changed, 364 insertions(+), 2 deletions(-)
+ create mode 100644 ipc/chromium/src/base/atomicops_internals_arm64_gcc.h
+
+diff --git a/ipc/chromium/moz.build b/ipc/chromium/moz.build
+index 8b8e4cc..88aaafe 100644
+--- a/ipc/chromium/moz.build
++++ b/ipc/chromium/moz.build
+@@ -162,7 +162,7 @@ if os_bsd or os_linux:
+ ]
+
+ ost = CONFIG['OS_TEST']
+-if '86' not in ost and 'arm' not in ost and 'mips' not in ost:
++if '86' not in ost and 'arm' not in ost and 'aarch64' != ost and 'mips' not in ost:
+ SOURCES += [
+ 'src/base/atomicops_internals_mutex.cc',
+ ]
+diff --git a/ipc/chromium/src/base/atomicops.h b/ipc/chromium/src/base/atomicops.h
+index a167541..f9ad55b 100644
+--- a/ipc/chromium/src/base/atomicops.h
++++ b/ipc/chromium/src/base/atomicops.h
+@@ -138,8 +138,10 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
+ #include "base/atomicops_internals_x86_macosx.h"
+ #elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
+ #include "base/atomicops_internals_x86_gcc.h"
+-#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
++#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARMEL)
+ #include "base/atomicops_internals_arm_gcc.h"
++#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
++#include "base/atomicops_internals_arm64_gcc.h"
+ #elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS)
+ #include "base/atomicops_internals_mips_gcc.h"
+ #else
+diff --git a/ipc/chromium/src/base/atomicops_internals_arm64_gcc.h b/ipc/chromium/src/base/atomicops_internals_arm64_gcc.h
+new file mode 100644
+index 0000000..a2b0abc
+--- /dev/null
++++ b/ipc/chromium/src/base/atomicops_internals_arm64_gcc.h
+@@ -0,0 +1,360 @@
++// Copyright 2014 The Chromium Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++// This file is an internal atomic implementation, use base/atomicops.h instead.
++
++// TODO(rmcilroy): Investigate whether we can use __sync__ intrinsics instead of
++// the hand-coded assembly without introducing perf regressions.
++// TODO(rmcilroy): Investigate whether we can use acquire / release versions of
++// exclusive load / store assembly instructions and do away with
++// the barriers.
++
++#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
++#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
++
++#if defined(OS_QNX)
++#include <sys/cpuinline.h>
++#endif
++
++namespace base {
++namespace subtle {
++
++inline void MemoryBarrier() {
++ __asm__ __volatile__ ( // NOLINT
++ "dmb ish \n\t" // Data memory barrier.
++ ::: "memory"
++ ); // NOLINT
++}
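++// A sketch of what the first TODO above is asking about: assuming a
++// GCC-compatible toolchain, a sequentially consistent fence builtin
++// should emit the same "dmb ish" on AArch64:
++//   __atomic_thread_fence(__ATOMIC_SEQ_CST);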
++
++
++inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
++ Atomic32 old_value,
++ Atomic32 new_value) {
++ Atomic32 prev;
++ int32_t temp;
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
++ "cmp %w[prev], %w[old_value] \n\t"
++ "bne 1f \n\t"
++ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
++ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
++ "1: \n\t"
++ "clrex \n\t" // In case we didn't swap.
++ : [prev]"=&r" (prev),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [old_value]"r" (old_value),
++ [new_value]"r" (new_value)
++ : "memory", "cc"
++ ); // NOLINT
++
++ return prev;
++}
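++// For reference, a hedged sketch of the builtin form the TODO above
++// contemplates (untested; relaxed ordering to match the NoBarrier
++// contract):
++//   Atomic32 prev = old_value;
++//   __atomic_compare_exchange_n(ptr, &prev, new_value,
++//                               false,  // strong CAS, like the retry loop
++//                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
++//   return prev;  // observed value, whether or not the swap succeeded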
++
++inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
++ Atomic32 new_value) {
++ Atomic32 result;
++ int32_t temp;
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
++ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
++ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
++ : [result]"=&r" (result),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [new_value]"r" (new_value)
++ : "memory"
++ ); // NOLINT
++
++ return result;
++}
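++// Possible builtin form of the exchange above (same caveats as the
++// CompareAndSwap sketch):
++//   return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);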
++
++inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
++ Atomic32 increment) {
++ Atomic32 result;
++ int32_t temp;
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
++ "add %w[result], %w[result], %w[increment]\n\t"
++ "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
++ "cbnz %w[temp], 0b \n\t" // Retry on failure.
++ : [result]"=&r" (result),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [increment]"r" (increment)
++ : "memory"
++ ); // NOLINT
++
++ return result;
++}
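++// Possible builtin form of the increment above (returns the new value,
++// matching this function's contract):
++//   return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);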
++
++inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
++ Atomic32 increment) {
++ MemoryBarrier();
++ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
++ MemoryBarrier();
++
++ return result;
++}
++
++inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
++ Atomic32 old_value,
++ Atomic32 new_value) {
++ Atomic32 prev;
++ int32_t temp;
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
++ "cmp %w[prev], %w[old_value] \n\t"
++ "bne 1f \n\t"
++ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
++ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
++ "dmb ish \n\t" // Data memory barrier.
++ "1: \n\t"
++ // If the compare failed the 'dmb' is unnecessary, but we still need a
++ // 'clrex'.
++ "clrex \n\t"
++ : [prev]"=&r" (prev),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [old_value]"r" (old_value),
++ [new_value]"r" (new_value)
++ : "memory", "cc"
++ ); // NOLINT
++
++ return prev;
++}
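++// The second TODO above suggests acquire/release exclusives: replacing
++// "ldxr" with "ldaxr" (load-acquire exclusive) would make the trailing
++// "dmb ish" unnecessary. A builtin sketch that roughly matches the
++// acquire-on-success, relaxed-on-failure behaviour of this function:
++//   Atomic32 prev = old_value;
++//   __atomic_compare_exchange_n(ptr, &prev, new_value, false,
++//                               __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
++//   return prev;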
++
++inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
++ Atomic32 old_value,
++ Atomic32 new_value) {
++ Atomic32 prev;
++ int32_t temp;
++
++ MemoryBarrier();
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
++ "cmp %w[prev], %w[old_value] \n\t"
++ "bne 1f \n\t"
++ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
++ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
++ "1: \n\t"
++ // If the compare failed we still need a 'clrex'.
++ "clrex \n\t"
++ : [prev]"=&r" (prev),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [old_value]"r" (old_value),
++ [new_value]"r" (new_value)
++ : "memory", "cc"
++ ); // NOLINT
++
++ return prev;
++}
++
++inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
++ *ptr = value;
++}
++
++inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
++ *ptr = value;
++ MemoryBarrier();
++}
++
++inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
++ MemoryBarrier();
++ *ptr = value;
++}
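++// Release_Store's barrier-then-plain-store pattern roughly corresponds
++// to a builtin release store (or an "stlr" instruction, per the second
++// TODO above):
++//   __atomic_store_n(ptr, value, __ATOMIC_RELEASE);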
++
++inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
++ return *ptr;
++}
++
++inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
++ Atomic32 value = *ptr;
++ MemoryBarrier();
++ return value;
++}
++
++inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
++ MemoryBarrier();
++ return *ptr;
++}
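++// Likewise, Acquire_Load's load-then-barrier pattern roughly corresponds
++// to a builtin acquire load ("ldar" on ARMv8):
++//   return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);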
++
++// 64-bit versions of the operations.
++// See the 32-bit versions for comments.
++
++inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
++ Atomic64 old_value,
++ Atomic64 new_value) {
++ Atomic64 prev;
++ int32_t temp;
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %[prev], %[ptr] \n\t"
++ "cmp %[prev], %[old_value] \n\t"
++ "bne 1f \n\t"
++ "stxr %w[temp], %[new_value], %[ptr] \n\t"
++ "cbnz %w[temp], 0b \n\t"
++ "1: \n\t"
++ "clrex \n\t"
++ : [prev]"=&r" (prev),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [old_value]"r" (old_value),
++ [new_value]"r" (new_value)
++ : "memory", "cc"
++ ); // NOLINT
++
++ return prev;
++}
++
++inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
++ Atomic64 new_value) {
++ Atomic64 result;
++ int32_t temp;
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %[result], %[ptr] \n\t"
++ "stxr %w[temp], %[new_value], %[ptr] \n\t"
++ "cbnz %w[temp], 0b \n\t"
++ : [result]"=&r" (result),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [new_value]"r" (new_value)
++ : "memory"
++ ); // NOLINT
++
++ return result;
++}
++
++inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
++ Atomic64 increment) {
++ Atomic64 result;
++ int32_t temp;
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %[result], %[ptr] \n\t"
++ "add %[result], %[result], %[increment] \n\t"
++ "stxr %w[temp], %[result], %[ptr] \n\t"
++ "cbnz %w[temp], 0b \n\t"
++ : [result]"=&r" (result),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [increment]"r" (increment)
++ : "memory"
++ ); // NOLINT
++
++ return result;
++}
++
++inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
++ Atomic64 increment) {
++ MemoryBarrier();
++ Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
++ MemoryBarrier();
++
++ return result;
++}
++
++inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
++ Atomic64 old_value,
++ Atomic64 new_value) {
++ Atomic64 prev;
++ int32_t temp;
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %[prev], %[ptr] \n\t"
++ "cmp %[prev], %[old_value] \n\t"
++ "bne 1f \n\t"
++ "stxr %w[temp], %[new_value], %[ptr] \n\t"
++ "cbnz %w[temp], 0b \n\t"
++ "dmb ish \n\t"
++ "1: \n\t"
++ "clrex \n\t"
++ : [prev]"=&r" (prev),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [old_value]"r" (old_value),
++ [new_value]"r" (new_value)
++ : "memory", "cc"
++ ); // NOLINT
++
++ return prev;
++}
++
++inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
++ Atomic64 old_value,
++ Atomic64 new_value) {
++ Atomic64 prev;
++ int32_t temp;
++
++ MemoryBarrier();
++
++ __asm__ __volatile__ ( // NOLINT
++ "0: \n\t"
++ "ldxr %[prev], %[ptr] \n\t"
++ "cmp %[prev], %[old_value] \n\t"
++ "bne 1f \n\t"
++ "stxr %w[temp], %[new_value], %[ptr] \n\t"
++ "cbnz %w[temp], 0b \n\t"
++ "1: \n\t"
++ "clrex \n\t"
++ : [prev]"=&r" (prev),
++ [temp]"=&r" (temp),
++ [ptr]"+Q" (*ptr)
++ : [old_value]"r" (old_value),
++ [new_value]"r" (new_value)
++ : "memory", "cc"
++ ); // NOLINT
++
++ return prev;
++}
++
++inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
++ *ptr = value;
++}
++
++inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
++ *ptr = value;
++ MemoryBarrier();
++}
++
++inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
++ MemoryBarrier();
++ *ptr = value;
++}
++
++inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
++ return *ptr;
++}
++
++inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
++ Atomic64 value = *ptr;
++ MemoryBarrier();
++ return value;
++}
++
++inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
++ MemoryBarrier();
++ return *ptr;
++}
++
++} // namespace base::subtle
++} // namespace base
++
++#endif // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_