// ops_gcc_ppc_common.hpp
  1. /*
  2. * Distributed under the Boost Software License, Version 1.0.
  3. * (See accompanying file LICENSE_1_0.txt or copy at
  4. * http://www.boost.org/LICENSE_1_0.txt)
  5. *
  6. * Copyright (c) 2009 Helge Bahmann
  7. * Copyright (c) 2013 Tim Blechmann
  8. * Copyright (c) 2014 Andrey Semashev
  9. */
  10. /*!
  11. * \file atomic/detail/ops_gcc_ppc_common.hpp
  12. *
  13. * This header contains basic utilities for gcc PowerPC backend.
  14. */
  15. #ifndef BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
  16. #define BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_
  17. #include <boost/memory_order.hpp>
  18. #include <boost/atomic/detail/config.hpp>
  19. #include <boost/atomic/detail/header.hpp>
  20. #ifdef BOOST_HAS_PRAGMA_ONCE
  21. #pragma once
  22. #endif
  23. namespace boost {
  24. namespace atomics {
  25. namespace detail {
  26. // The implementation below uses information from this document:
  27. // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2010.02.19a.html
  28. // A note about memory_order_consume. Technically, this architecture allows to avoid
  29. // unnecessary memory barrier after consume load since it supports data dependency ordering.
  30. // However, some compiler optimizations may break a seemingly valid code relying on data
  31. // dependency tracking by injecting bogus branches to aid out of order execution.
  32. // This may happen not only in Boost.Atomic code but also in user's code, which we have no
  33. // control of. See this thread: http://lists.boost.org/Archives/boost/2014/06/213890.php.
  34. // For this reason we promote memory_order_consume to memory_order_acquire.
  35. struct core_arch_operations_gcc_ppc_base
  36. {
  37. static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false;
  38. static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true;
  39. static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT
  40. {
  41. #if defined(__powerpc64__) || defined(__PPC64__)
  42. if (order == memory_order_seq_cst)
  43. __asm__ __volatile__ ("sync" ::: "memory");
  44. else if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
  45. __asm__ __volatile__ ("lwsync" ::: "memory");
  46. #else
  47. if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u)
  48. __asm__ __volatile__ ("sync" ::: "memory");
  49. #endif
  50. }
  51. static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT
  52. {
  53. if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u)
  54. __asm__ __volatile__ ("isync" ::: "memory");
  55. }
  56. };
  57. } // namespace detail
  58. } // namespace atomics
  59. } // namespace boost
  60. #include <boost/atomic/detail/footer.hpp>
  61. #endif // BOOST_ATOMIC_DETAIL_OPS_GCC_PPC_COMMON_HPP_INCLUDED_