| author | CoprDistGit <infra@openeuler.org> | 2023-04-24 08:46:38 +0000 |
|---|---|---|
| committer | CoprDistGit <infra@openeuler.org> | 2023-04-24 08:46:38 +0000 |
| commit | b4f1d771777090b4e09abd34c701c0cbb47cb0c9 (patch) | |
| tree | 7d2e99b4b8a7f5ee49fcc0318a572d93d30d74ac /boost_1_78_0-sw.patch | |
| parent | 13a5c32b80e77cfc952fa466af74509e593873d7 (diff) | |
automatic import of boost
Diffstat (limited to 'boost_1_78_0-sw.patch')

| -rw-r--r-- | boost_1_78_0-sw.patch | 1940 |

1 file changed, 1940 insertions, 0 deletions
diff --git a/boost_1_78_0-sw.patch b/boost_1_78_0-sw.patch new file mode 100644 index 0000000..6d1a820 --- /dev/null +++ b/boost_1_78_0-sw.patch @@ -0,0 +1,1940 @@ +diff -Naur boost_1_78_0.org/boost/atomic/detail/caps_arch_gcc_sw_64.hpp boost_1_78_0.sw/boost/atomic/detail/caps_arch_gcc_sw_64.hpp +--- boost_1_78_0.org/boost/atomic/detail/caps_arch_gcc_sw_64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/boost/atomic/detail/caps_arch_gcc_sw_64.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -0,0 +1,34 @@ ++/* ++ * Distributed under the Boost Software License, Version 1.0. ++ * (See accompanying file LICENSE_1_0.txt or copy at ++ * http://www.boost.org/LICENSE_1_0.txt) ++ * ++ * Copyright (c) 2009 Helge Bahmann ++ * Copyright (c) 2013 Tim Blechmann ++ * Copyright (c) 2014 Andrey Semashev ++ */ ++/*! ++ * \file atomic/detail/caps_arch_gcc_sw_64.hpp ++ * ++ * This header defines feature capabilities macros ++ */ ++ ++#ifndef BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SW_64_HPP_INCLUDED_ ++#define BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SW_64_HPP_INCLUDED_ ++ ++#include <boost/atomic/detail/config.hpp> ++ ++#ifdef BOOST_HAS_PRAGMA_ONCE ++#pragma once ++#endif ++ ++#define BOOST_ATOMIC_INT8_LOCK_FREE 2 ++#define BOOST_ATOMIC_INT16_LOCK_FREE 2 ++#define BOOST_ATOMIC_INT32_LOCK_FREE 2 ++#define BOOST_ATOMIC_INT64_LOCK_FREE 2 ++#define BOOST_ATOMIC_POINTER_LOCK_FREE 2 ++ ++#define BOOST_ATOMIC_THREAD_FENCE 2 ++#define BOOST_ATOMIC_SIGNAL_FENCE 2 ++ ++#endif // BOOST_ATOMIC_DETAIL_CAPS_ARCH_GCC_SW_64_HPP_INCLUDED_ +diff -Naur boost_1_78_0.org/boost/atomic/detail/core_arch_ops_gcc_sw_64.hpp boost_1_78_0.sw/boost/atomic/detail/core_arch_ops_gcc_sw_64.hpp +--- boost_1_78_0.org/boost/atomic/detail/core_arch_ops_gcc_sw_64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/boost/atomic/detail/core_arch_ops_gcc_sw_64.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -0,0 +1,1030 @@ ++/* ++ * Distributed under the Boost Software License, Version 1.0. ++ * (See accompanying file LICENSE_1_0.txt or copy at ++ * http://www.boost.org/LICENSE_1_0.txt) ++ * ++ * Copyright (c) 2009 Helge Bahmann ++ * Copyright (c) 2013 Tim Blechmann ++ * Copyright (c) 2014 Andrey Semashev ++ */ ++/*! ++ * \file atomic/detail/core_arch_ops_gcc_sw_64.hpp ++ * ++ * This header contains implementation of the \c core_arch_operations template. ++ */ ++ ++#ifndef BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_ ++#define BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_ ++ ++#include <cstddef> ++#include <boost/memory_order.hpp> ++#include <boost/atomic/detail/config.hpp> ++#include <boost/atomic/detail/storage_traits.hpp> ++#include <boost/atomic/detail/core_arch_operations_fwd.hpp> ++#include <boost/atomic/detail/header.hpp> ++ ++#ifdef BOOST_HAS_PRAGMA_ONCE ++#pragma once ++#endif ++ ++namespace boost { ++namespace atomics { ++namespace detail { ++ ++/* ++ Refer to http://h71000.www7.hp.com/doc/82final/5601/5601pro_004.html ++ (HP OpenVMS systems documentation) and the Sw_64 Architecture Reference Manual. 
++ */ ++ ++/* ++ NB: The most natural thing would be to write the increment/decrement ++ operators along the following lines: ++ ++ __asm__ __volatile__ ++ ( ++ "1: ldl_l %0,%1 \n" ++ "addl %0,1,%0 \n" ++ "stl_c %0,%1 \n" ++ "beq %0,1b\n" ++ : "=&b" (tmp) ++ : "m" (value) ++ : "cc" ++ ); ++ ++ However according to the comments on the HP website and matching ++ comments in the Linux kernel sources this defies branch prediction, ++ as the cpu assumes that backward branches are always taken; so ++ instead copy the trick from the Linux kernel, introduce a forward ++ branch and back again. ++ ++ I have, however, had a hard time measuring the difference between ++ the two versions in microbenchmarks -- I am leaving it in nevertheless ++ as it apparently does not hurt either. ++*/ ++ ++struct core_arch_operations_gcc_sw_64_base ++{ ++ static BOOST_CONSTEXPR_OR_CONST bool full_cas_based = false; ++ static BOOST_CONSTEXPR_OR_CONST bool is_always_lock_free = true; ++ ++ static BOOST_FORCEINLINE void fence_before(memory_order order) BOOST_NOEXCEPT ++ { ++ if ((static_cast< unsigned int >(order) & static_cast< unsigned int >(memory_order_release)) != 0u) ++ __asm__ __volatile__ ("memb" ::: "memory"); ++ } ++ ++ static BOOST_FORCEINLINE void fence_after(memory_order order) BOOST_NOEXCEPT ++ { ++ if ((static_cast< unsigned int >(order) & (static_cast< unsigned int >(memory_order_consume) | static_cast< unsigned int >(memory_order_acquire))) != 0u) ++ __asm__ __volatile__ ("memb" ::: "memory"); ++ } ++ ++ static BOOST_FORCEINLINE void fence_after_store(memory_order order) BOOST_NOEXCEPT ++ { ++ if (order == memory_order_seq_cst) ++ __asm__ __volatile__ ("memb" ::: "memory"); ++ } ++}; ++ ++ ++template< bool Signed, bool Interprocess > ++struct core_arch_operations< 4u, Signed, Interprocess > : ++ public core_arch_operations_gcc_sw_64_base ++{ ++ typedef typename storage_traits< 4u >::type storage_type; ++ ++ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 4u; ++ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 4u; ++ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed; ++ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess; ++ ++ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ fence_before(order); ++ storage = v; ++ fence_after_store(order); ++ } ++ ++ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type v = storage; ++ fence_after(order); ++ return v; ++ } ++ ++ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, tmp; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %2,%4\n\t" ++ "ldi %3,1\n\t" ++ "mov %5, %1\n\t" ++ "lldw %0, 0(%2)\n\t" ++ "wr_f %3\n\t" ++ "lstw %1, 0(%2)\n\t" ++ "rd_f %1\n\t" ++ "beq %1, 2f\n\t" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (tmp), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE bool compare_exchange_weak( ++ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT ++ { ++ 
fence_before(success_order); ++ int success = 0; ++ storage_type current; ++ storage_type tmp1,tmp2; ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %4,%6\n\t" ++ "lldw %2, 0(%4)\n\t" // current = *(&storage) ++ "cmpeq %2, %0, %5\n\t" // success = current == expected ++ "wr_f %5\n\t" // success = current == expected ++ "mov %2, %0\n\t" // expected = current ++ "lstw %1, 0(%4)\n\t" // storage = desired; desired = store succeeded ++ "rd_f %1\n\t" // storage = desired; desired = store succeeded ++ "beq %5, 2f\n\t" // if (success == 0) goto end ++ "mov %1, %3\n\t" // success = desired ++ "2:\n\t" ++ : "+r" (expected), // %0 ++ "+r" (desired), // %1 ++ "=&r" (current), // %2 ++ "+&r" (success), // %3 (in-out: stays 0 when the comparison fails) ++ "=&r" (tmp1), // %4 ++ "=&r" (tmp2) // %5 ++ : "m" (storage) // %6 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ if (success) ++ fence_after(success_order); ++ else ++ fence_after(failure_order); ++ return !!success; ++ } ++ ++ static BOOST_FORCEINLINE bool compare_exchange_strong( ++ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT ++ { ++ int success = 0; ++ storage_type current, tmp; ++ storage_type tmp1,tmp2; ++ fence_before(success_order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %4,%6\n\t" ++ "mov %7, %1\n\t" // tmp = desired ++ "lldw %2, 0(%4)\n\t" // current = *(&storage) ++ "cmpeq %2, %0, %5\n\t" // success = current == expected ++ "wr_f %5\n\t" // success = current == expected ++ "mov %2, %0\n\t" // expected = current ++ "lstw %1, 0(%4)\n\t" // storage = tmp; tmp = store succeeded ++ "rd_f %1\n\t" // storage = tmp; tmp = store succeeded ++ "beq %5, 2f\n\t" // if (success == 0) goto end ++ "beq %1, 3f\n\t" // if (tmp == 0) goto retry ++ "mov %1, %3\n\t" // success = tmp ++ "2:\n\t" ++ ++ ".subsection 2\n\t" ++ "3: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "+r" (expected), // %0 ++ "=&r" (tmp), // %1 ++ "=&r" (current), // %2 ++ "+&r" (success), // %3 (in-out: stays 0 when the comparison fails) ++ "=&r" (tmp1), // %4 ++ "=&r" (tmp2) // %5 ++ : "m" (storage), // %6 ++ "r" (desired) // %7 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ if (success) ++ fence_after(success_order); ++ else ++ fence_after(failure_order); ++ return !!success; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %2,%4\n\t" ++ "ldi %3,1\n\t" ++ "lldw %0, 0(%2)\n\t" ++ "wr_f %3\n\t" ++ "addw %0, %5, %1\n\t" ++ "lstw %1, 0(%2)\n\t" ++ "rd_f %1\n\t" ++ "beq %1, 2f\n\t" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %2,%4\n\t" ++ "ldi %3,1\n\t" ++ "lldw %0, 0(%2)\n\t" ++ "wr_f %3\n\t" ++ "subw %0, %5, %1\n\t" ++ "lstw %1, 0(%2)\n\t" ++ "rd_f %1\n\t" ++ "beq %1, 2f\n\t" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // 
%2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n\t" ++ "ldi %2,%4\n\t" ++ "ldi %3,1\n\t" ++ "lldw %0, 0(%2)\n\t" ++ "wr_f %3\n\t" ++ "and %0, %5, %1\n\t" ++ "lstw %1, 0(%2)\n\t" ++ "rd_f %1\n\t" ++ "beq %1, 2f\n\t" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "bis %0, %5, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "xor %0, %5, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ return !!exchange(storage, (storage_type)1, order); ++ } ++ ++ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ store(storage, 0, order); ++ } ++}; ++ ++ ++template< bool Interprocess > ++struct core_arch_operations< 1u, false, Interprocess > : ++ public core_arch_operations< 4u, false, Interprocess > ++{ ++ typedef core_arch_operations< 4u, false, Interprocess > base_type; ++ typedef typename base_type::storage_type storage_type; ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ base_type::fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addw %0, %5, %1\n" ++ "zapnot %1, #1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" 
(tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ base_type::fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ base_type::fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subw %0, %5, %1\n" ++ "zapnot %1, #1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ base_type::fence_after(order); ++ return original; ++ } ++}; ++ ++template< bool Interprocess > ++struct core_arch_operations< 1u, true, Interprocess > : ++ public core_arch_operations< 4u, true, Interprocess > ++{ ++ typedef core_arch_operations< 4u, true, Interprocess > base_type; ++ typedef typename base_type::storage_type storage_type; ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ base_type::fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addw %0, %5, %1\n" ++ "sextb %1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ base_type::fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ base_type::fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subw %0, %5, %1\n" ++ "sextb %1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ base_type::fence_after(order); ++ return original; ++ } ++}; ++ ++ ++template< bool Interprocess > ++struct core_arch_operations< 2u, false, Interprocess > : ++ public core_arch_operations< 4u, false, Interprocess > ++{ ++ typedef core_arch_operations< 4u, false, Interprocess > base_type; ++ typedef typename base_type::storage_type storage_type; ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ base_type::fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addw %0, %5, %1\n" ++ "zapnot %1, #3, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 
1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ base_type::fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ base_type::fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subw %0, %5, %1\n" ++ "zapnot %1, #3, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ base_type::fence_after(order); ++ return original; ++ } ++}; ++ ++template< bool Interprocess > ++struct core_arch_operations< 2u, true, Interprocess > : ++ public core_arch_operations< 4u, true, Interprocess > ++{ ++ typedef core_arch_operations< 4u, true, Interprocess > base_type; ++ typedef typename base_type::storage_type storage_type; ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ base_type::fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addw %0, %5, %1\n" ++ "sexth %1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ base_type::fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ base_type::fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldw %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subw %0, %5, %1\n" ++ "sexth %1, %1\n" ++ "lstw %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ base_type::fence_after(order); ++ return original; ++ } ++}; ++ ++ ++template< bool Signed, bool Interprocess > ++struct core_arch_operations< 8u, Signed, Interprocess > : ++ public core_arch_operations_gcc_sw_64_base ++{ ++ typedef typename storage_traits< 8u >::type storage_type; ++ ++ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_size = 8u; ++ static BOOST_CONSTEXPR_OR_CONST std::size_t storage_alignment = 8u; ++ static BOOST_CONSTEXPR_OR_CONST bool is_signed = Signed; ++ static BOOST_CONSTEXPR_OR_CONST bool is_interprocess = Interprocess; ++ ++ static BOOST_FORCEINLINE void store(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ fence_before(order); ++ storage = v; ++ fence_after_store(order); ++ } ++ 
++ static BOOST_FORCEINLINE storage_type load(storage_type const volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type v = storage; ++ fence_after(order); ++ return v; ++ } ++ ++ static BOOST_FORCEINLINE storage_type exchange(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, tmp; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "mov %5, %1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (tmp), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE bool compare_exchange_weak( ++ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT ++ { ++ fence_before(success_order); ++ int success = 0; ++ storage_type current; ++ storage_type tmp1,tmp2; ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %4,%6\n" ++ "lldl %2, 0(%4)\n" // current = *(&storage) ++ "cmpeq %2, %0, %5\n" // success = current == expected ++ "wr_f %5 \n" ++ "mov %2, %0\n" // expected = current ++ "lstl %1, 0(%4)\n" // storage = desired; desired = store succeeded ++ "rd_f %1 \n" ++ "beq %5, 2f\n" // if (success == 0) goto end ++ "mov %1, %3\n" // success = desired ++ "2:\n\t" ++ : "+r" (expected), // %0 ++ "+r" (desired), // %1 ++ "=&r" (current), // %2 ++ "+&r" (success), // %3 (in-out: stays 0 when the comparison fails) ++ "=&r" (tmp1), // %4 ++ "=&r" (tmp2) // %5 ++ : "m" (storage) // %6 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ if (success) ++ fence_after(success_order); ++ else ++ fence_after(failure_order); ++ return !!success; ++ } ++ ++ static BOOST_FORCEINLINE bool compare_exchange_strong( ++ storage_type volatile& storage, storage_type& expected, storage_type desired, memory_order success_order, memory_order failure_order) BOOST_NOEXCEPT ++ { ++ int success = 0; ++ storage_type current, tmp; ++ storage_type tmp1,tmp2; ++ fence_before(success_order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %4,%6\n" ++ "mov %7, %1\n" // tmp = desired ++ "lldl %2, 0(%4)\n" // current = *(&storage) ++ "cmpeq %2, %0, %5\n" // success = current == expected ++ "wr_f %5 \n" ++ "mov %2, %0\n" // expected = current ++ "lstl %1, 0(%4)\n" // storage = tmp; tmp = store succeeded ++ "rd_f %1 \n" ++ "beq %5, 2f\n" // if (success == 0) goto end ++ "beq %1, 3f\n" // if (tmp == 0) goto retry ++ "mov %1, %3\n" // success = tmp ++ "2:\n\t" ++ ++ ".subsection 2\n\t" ++ "3: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "+r" (expected), // %0 ++ "=&r" (tmp), // %1 ++ "=&r" (current), // %2 ++ "+&r" (success), // %3 (in-out: stays 0 when the comparison fails) ++ "=&r" (tmp1), // %4 ++ "=&r" (tmp2) // %5 ++ : "m" (storage), // %6 ++ "r" (desired) // %7 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ if (success) ++ fence_after(success_order); ++ else ++ fence_after(failure_order); ++ return !!success; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_add(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1, tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "addl %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ 
"rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_sub(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "subl %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_and(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "and %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_or(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "bis %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE storage_type fetch_xor(storage_type volatile& storage, storage_type v, memory_order order) BOOST_NOEXCEPT ++ { ++ storage_type original, modified; ++ storage_type tmp1,tmp2; ++ fence_before(order); ++ __asm__ __volatile__ ++ ( ++ "1:\n" ++ "ldi %2,%4\n" ++ "ldi %3,1\n" ++ "lldl %0, 0(%2)\n" ++ "wr_f %3 \n" ++ "xor %0, %5, %1\n" ++ "lstl %1, 0(%2)\n" ++ "rd_f %1 \n" ++ "beq %1, 2f\n" ++ ++ ".subsection 2\n\t" ++ "2: br 1b\n\t" ++ ".previous\n\t" ++ ++ : "=&r" (original), // %0 ++ "=&r" (modified), // %1 ++ "=&r" (tmp1), // %2 ++ "=&r" (tmp2) // %3 ++ : "m" (storage), // %4 ++ "r" (v) // %5 ++ : BOOST_ATOMIC_DETAIL_ASM_CLOBBER_CC ++ ); ++ fence_after(order); ++ return original; ++ } ++ ++ static BOOST_FORCEINLINE bool test_and_set(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ return !!exchange(storage, (storage_type)1, order); ++ } ++ ++ static BOOST_FORCEINLINE void clear(storage_type volatile& storage, memory_order order) BOOST_NOEXCEPT ++ { ++ store(storage, (storage_type)0, 
order); ++ } ++}; ++ ++} // namespace detail ++} // namespace atomics ++} // namespace boost ++ ++#include <boost/atomic/detail/footer.hpp> ++ ++#endif // BOOST_ATOMIC_DETAIL_CORE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_ +diff -Naur boost_1_78_0.org/boost/atomic/detail/fence_arch_ops_gcc_sw_64.hpp boost_1_78_0.sw/boost/atomic/detail/fence_arch_ops_gcc_sw_64.hpp +--- boost_1_78_0.org/boost/atomic/detail/fence_arch_ops_gcc_sw_64.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/boost/atomic/detail/fence_arch_ops_gcc_sw_64.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -0,0 +1,53 @@ ++/* ++ * Distributed under the Boost Software License, Version 1.0. ++ * (See accompanying file LICENSE_1_0.txt or copy at ++ * http://www.boost.org/LICENSE_1_0.txt) ++ * ++ * Copyright (c) 2020 Andrey Semashev ++ */ ++/*! ++ * \file atomic/detail/fence_arch_ops_gcc_sw_64.hpp ++ * ++ * This header contains implementation of the \c fence_arch_operations struct. ++ */ ++ ++#ifndef BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_ ++#define BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_ ++ ++#include <boost/memory_order.hpp> ++#include <boost/atomic/detail/config.hpp> ++#include <boost/atomic/detail/header.hpp> ++ ++#ifdef BOOST_HAS_PRAGMA_ONCE ++#pragma once ++#endif ++ ++namespace boost { ++namespace atomics { ++namespace detail { ++ ++//! Fence operations for Sw_64 ++struct fence_arch_operations_gcc_sw_64 ++{ ++ static BOOST_FORCEINLINE void thread_fence(memory_order order) BOOST_NOEXCEPT ++ { ++ if (order != memory_order_relaxed) ++ __asm__ __volatile__ ("memb" ::: "memory"); ++ } ++ ++ static BOOST_FORCEINLINE void signal_fence(memory_order order) BOOST_NOEXCEPT ++ { ++ if (order != memory_order_relaxed) ++ __asm__ __volatile__ ("" ::: "memory"); ++ } ++}; ++ ++typedef fence_arch_operations_gcc_sw_64 fence_arch_operations; ++ ++} // namespace detail ++} // namespace atomics ++} // namespace boost ++ ++#include <boost/atomic/detail/footer.hpp> ++ ++#endif // BOOST_ATOMIC_DETAIL_FENCE_ARCH_OPS_GCC_SW_64_HPP_INCLUDED_ +diff -Naur boost_1_78_0.org/boost/atomic/detail/platform.hpp boost_1_78_0.sw/boost/atomic/detail/platform.hpp +--- boost_1_78_0.org/boost/atomic/detail/platform.hpp 2021-12-02 14:47:30.000000000 +0800 ++++ boost_1_78_0.sw/boost/atomic/detail/platform.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -78,6 +78,10 @@ + + #define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_sparc + ++#elif defined(__GNUC__) && defined(__sw_64__) ++ ++#define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_sw_64 ++ + #elif defined(__GNUC__) && defined(__alpha__) + + #define BOOST_ATOMIC_DETAIL_CORE_ARCH_BACKEND gcc_alpha +diff -Naur boost_1_78_0.org/boost/numeric/interval/detail/sw_64_rounding_control.hpp boost_1_78_0.sw/boost/numeric/interval/detail/sw_64_rounding_control.hpp +--- boost_1_78_0.org/boost/numeric/interval/detail/sw_64_rounding_control.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/boost/numeric/interval/detail/sw_64_rounding_control.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -0,0 +1,113 @@ ++/* Boost interval/detail/sw_64_rounding_control.hpp file ++ * ++ * Copyright 2005 Felix Höfling, Guillaume Melquiond ++ * ++ * Distributed under the Boost Software License, Version 1.0. 
++ * (See accompanying file LICENSE_1_0.txt or ++ * copy at http://www.boost.org/LICENSE_1_0.txt) ++ */ ++ ++#ifndef BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP ++#define BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP ++ ++#if !defined(sw_64) && !defined(__sw_64__) ++#error This header only works on Sw_64 CPUs. ++#endif ++ ++#if defined(__GNUC__) || defined(__digital__) || defined(__DECCXX) ++ ++#include <float.h> // write_rnd() and read_rnd() ++ ++namespace boost { ++namespace numeric { ++namespace interval_lib { ++ ++namespace detail { ++#if defined(__GNUC__ ) ++ typedef union { ++ ::boost::long_long_type imode; ++ double dmode; ++ } rounding_mode_struct; ++ ++ // set bits 59-58 (DYN), ++ // clear all exception bits and disable overflow (51) and inexact exceptions (62) ++ static const rounding_mode_struct mode_upward = { 0x4C08000000000000LL }; ++ static const rounding_mode_struct mode_downward = { 0x4408000000000000LL }; ++ static const rounding_mode_struct mode_to_nearest = { 0x4808000000000000LL }; ++ static const rounding_mode_struct mode_toward_zero = { 0x4008000000000000LL }; ++ ++ struct sw_64_rounding_control ++ { ++ typedef double rounding_mode; ++ ++ static void set_rounding_mode(const rounding_mode mode) ++ { __asm__ __volatile__ ("wfpcr %0" : : "f"(mode)); } ++ ++ static void get_rounding_mode(rounding_mode& mode) ++ { __asm__ __volatile__ ("rfpcr %0" : "=f"(mode)); } ++ ++ static void downward() { set_rounding_mode(mode_downward.dmode); } ++ static void upward() { set_rounding_mode(mode_upward.dmode); } ++ static void to_nearest() { set_rounding_mode(mode_to_nearest.dmode); } ++ static void toward_zero() { set_rounding_mode(mode_toward_zero.dmode); } ++ }; ++#elif defined(__digital__) || defined(__DECCXX) ++ ++#if defined(__DECCXX) && !(defined(__FLT_ROUNDS) && __FLT_ROUNDS == -1) ++#error Dynamic rounding mode not enabled. See cxx man page for details. 
++#endif ++ ++ struct sw_64_rounding_control ++ { ++ typedef unsigned int rounding_mode; ++ ++ static void set_rounding_mode(const rounding_mode& mode) { write_rnd(mode); } ++ static void get_rounding_mode(rounding_mode& mode) { mode = read_rnd(); } ++ ++ static void downward() { set_rounding_mode(FP_RND_RM); } ++ static void upward() { set_rounding_mode(FP_RND_RP); } ++ static void to_nearest() { set_rounding_mode(FP_RND_RN); } ++ static void toward_zero() { set_rounding_mode(FP_RND_RZ); } ++ }; ++#endif ++} // namespace detail ++ ++extern "C" { ++ float rintf(float); ++ double rint(double); ++ long double rintl(long double); ++} ++ ++template<> ++struct rounding_control<float>: ++ detail::sw_64_rounding_control ++{ ++ static float force_rounding(const float r) ++ { volatile float _r = r; return _r; } ++ static float to_int(const float& x) { return rintf(x); } ++}; ++ ++template<> ++struct rounding_control<double>: ++ detail::sw_64_rounding_control ++{ ++ static const double & force_rounding(const double& r) { return r; } ++ static double to_int(const double& r) { return rint(r); } ++}; ++ ++template<> ++struct rounding_control<long double>: ++ detail::sw_64_rounding_control ++{ ++ static const long double & force_rounding(const long double& r) { return r; } ++ static long double to_int(const long double& r) { return rintl(r); } ++}; ++ ++} // namespace interval_lib ++} // namespace numeric ++} // namespace boost ++ ++#undef BOOST_NUMERIC_INTERVAL_NO_HARDWARE ++#endif ++ ++#endif /* BOOST_NUMERIC_INTERVAL_DETAIL_SW_64_ROUNDING_CONTROL_HPP */ +diff -Naur boost_1_78_0.org/boost/numeric/interval/hw_rounding.hpp boost_1_78_0.sw/boost/numeric/interval/hw_rounding.hpp +--- boost_1_78_0.org/boost/numeric/interval/hw_rounding.hpp 2021-12-02 14:47:35.000000000 +0800 ++++ boost_1_78_0.sw/boost/numeric/interval/hw_rounding.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -28,6 +28,8 @@ + # include <boost/numeric/interval/detail/ppc_rounding_control.hpp> + #elif defined(sparc) || defined(__sparc__) + # include <boost/numeric/interval/detail/sparc_rounding_control.hpp> ++#elif defined(sw_64) || defined(__sw_64__) ++# include <boost/numeric/interval/detail/sw_64_rounding_control.hpp> + #elif defined(alpha) || defined(__alpha__) + # include <boost/numeric/interval/detail/alpha_rounding_control.hpp> + #elif defined(ia64) || defined(__ia64) || defined(__ia64__) +diff -Naur boost_1_78_0.org/boost/predef/architecture/sw_64.h boost_1_78_0.sw/boost/predef/architecture/sw_64.h +--- boost_1_78_0.org/boost/predef/architecture/sw_64.h 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/boost/predef/architecture/sw_64.h 2022-05-09 13:54:56.570734000 +0800 +@@ -0,0 +1,54 @@ ++/* ++Copyright Rene Rivera 2008-2015 ++Distributed under the Boost Software License, Version 1.0. ++(See accompanying file LICENSE_1_0.txt or copy at ++http://www.boost.org/LICENSE_1_0.txt) ++*/ ++ ++#ifndef BOOST_PREDEF_ARCHITECTURE_SW_64_H ++#define BOOST_PREDEF_ARCHITECTURE_SW_64_H ++ ++#include <boost/predef/version_number.h> ++#include <boost/predef/make.h> ++ ++/* tag::reference[] ++= `BOOST_ARCH_SW_64` ++ ++http://en.wikipedia.org/wiki/DEC_Sw_64[DEC Sw_64] architecture. 
++ ++[options="header"] ++|=== ++| {predef_symbol} | {predef_version} ++| `+__sw_64__+` | {predef_detection} ++| `+__sw_64+` | {predef_detection} ++| `+_M_SW_64+` | {predef_detection} ++ ++| `+__sw_64_sw6b__+` | 6.0.0 ++|=== ++*/ // end::reference[] ++ ++#define BOOST_ARCH_SW_64 BOOST_VERSION_NUMBER_NOT_AVAILABLE ++ ++#if defined(__sw_64__) || defined(__sw_64) || \ ++ defined(_M_SW_64) ++# undef BOOST_ARCH_SW_64 ++# if !defined(BOOST_ARCH_SW_64) && defined(__sw_64_sw6b__) ++# define BOOST_ARCH_SW_64 BOOST_VERSION_NUMBER(6,0,0) ++# endif ++#endif ++ ++#if BOOST_ARCH_SW_64 ++# define BOOST_ARCH_SW_64_AVAILABLE ++#endif ++ ++#if BOOST_ARCH_SW_64 ++# undef BOOST_ARCH_WORD_BITS_64 ++# define BOOST_ARCH_WORD_BITS_64 BOOST_VERSION_NUMBER_AVAILABLE ++#endif ++ ++#define BOOST_ARCH_SW_64_NAME "DEC Sw_64" ++ ++#endif ++ ++#include <boost/predef/detail/test.h> ++BOOST_PREDEF_DECLARE_TEST(BOOST_ARCH_SW_64,BOOST_ARCH_SW_64_NAME) +diff -Naur boost_1_78_0.org/boost/predef/architecture.h boost_1_78_0.sw/boost/predef/architecture.h +--- boost_1_78_0.org/boost/predef/architecture.h 2021-12-02 14:47:36.000000000 +0800 ++++ boost_1_78_0.sw/boost/predef/architecture.h 2022-05-09 13:54:56.570734000 +0800 +@@ -11,6 +11,7 @@ + #endif + + #include <boost/predef/architecture/alpha.h> ++#include <boost/predef/architecture/sw_64.h> + #include <boost/predef/architecture/arm.h> + #include <boost/predef/architecture/blackfin.h> + #include <boost/predef/architecture/convex.h> +diff -Naur boost_1_78_0.org/boost/units/systems/si/codata/atomic-nuclear_constants.hpp boost_1_78_0.sw/boost/units/systems/si/codata/atomic-nuclear_constants.hpp +--- boost_1_78_0.org/boost/units/systems/si/codata/atomic-nuclear_constants.hpp 2021-12-02 14:47:38.000000000 +0800 ++++ boost_1_78_0.sw/boost/units/systems/si/codata/atomic-nuclear_constants.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -12,6 +12,7 @@ + #define BOOST_UNITS_CODATA_ATOMIC_AND_NUCLEAR_CONSTANTS_HPP + + #include <boost/units/systems/si/codata/alpha_constants.hpp> ++#include <boost/units/systems/si/codata/sw_64_constants.hpp> + #include <boost/units/systems/si/codata/deuteron_constants.hpp> + #include <boost/units/systems/si/codata/electron_constants.hpp> + #include <boost/units/systems/si/codata/helion_constants.hpp> +diff -Naur boost_1_78_0.org/boost/units/systems/si/codata/sw_64_constants.hpp boost_1_78_0.sw/boost/units/systems/si/codata/sw_64_constants.hpp +--- boost_1_78_0.org/boost/units/systems/si/codata/sw_64_constants.hpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/boost/units/systems/si/codata/sw_64_constants.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -0,0 +1,66 @@ ++// Boost.Units - A C++ library for zero-overhead dimensional analysis and ++// unit/quantity manipulation and conversion ++// ++// Copyright (C) 2003-2008 Matthias Christian Schabel ++// Copyright (C) 2008 Steven Watanabe ++// ++// Distributed under the Boost Software License, Version 1.0. 
(See ++// accompanying file LICENSE_1_0.txt or copy at ++// http://www.boost.org/LICENSE_1_0.txt) ++ ++#ifndef BOOST_UNITS_CODATA_SW_64_CONSTANTS_HPP ++#define BOOST_UNITS_CODATA_SW_64_CONSTANTS_HPP ++ ++#include <boost/units/quantity.hpp> ++#include <boost/units/static_constant.hpp> ++ ++#include <boost/units/systems/detail/constants.hpp> ++#include <boost/units/systems/si/amount.hpp> ++#include <boost/units/systems/si/area.hpp> ++#include <boost/units/systems/si/electric_charge.hpp> ++#include <boost/units/systems/si/energy.hpp> ++#include <boost/units/systems/si/frequency.hpp> ++#include <boost/units/systems/si/length.hpp> ++#include <boost/units/systems/si/mass.hpp> ++#include <boost/units/systems/si/magnetic_flux_density.hpp> ++#include <boost/units/systems/si/time.hpp> ++#include <boost/units/systems/si/wavenumber.hpp> ++ ++#include <boost/units/systems/si/codata/typedefs.hpp> ++ ++/// \file ++/// CODATA recommended values of fundamental atomic and nuclear constants ++/// CODATA 2006 values as of 2007/03/30 ++ ++namespace boost { ++ ++namespace units { ++ ++namespace si { ++ ++namespace constants { ++ ++namespace codata { ++ ++/// CODATA recommended values of the fundamental physical constants: NIST SP 961 ++ ++/// sw_64 particle mass ++BOOST_UNITS_PHYSICAL_CONSTANT(m_sw_64,quantity<mass>,6.64465620e-27*kilograms,3.3e-34*kilograms); ++/// sw_64-electron mass ratio ++BOOST_UNITS_PHYSICAL_CONSTANT(m_sw_64_over_m_e,quantity<dimensionless>,7294.2995365*dimensionless(),3.1e-6*dimensionless()); ++/// sw_64-proton mass ratio ++BOOST_UNITS_PHYSICAL_CONSTANT(m_sw_64_over_m_p,quantity<dimensionless>,3.97259968951*dimensionless(),4.1e-10*dimensionless()); ++/// sw_64 molar mass ++BOOST_UNITS_PHYSICAL_CONSTANT(M_sw_64,quantity<mass_over_amount>,4.001506179127e-3*kilograms/mole,6.2e-14*kilograms/mole); ++ ++} // namespace codata ++ ++} // namespace constants ++ ++} // namespace si ++ ++} // namespace units ++ ++} // namespace boost ++ ++#endif // BOOST_UNITS_CODATA_SW_64_CONSTANTS_HPP +diff -Naur boost_1_78_0.org/boost/wave/wave_config.hpp boost_1_78_0.sw/boost/wave/wave_config.hpp +--- boost_1_78_0.org/boost/wave/wave_config.hpp 2021-12-02 14:47:38.000000000 +0800 ++++ boost_1_78_0.sw/boost/wave/wave_config.hpp 2022-05-09 13:54:56.570734000 +0800 +@@ -253,7 +253,7 @@ + // CW up to 8.3 chokes as well *sigh* + // Tru64/CXX has linker problems when using flex_string + #if BOOST_WORKAROUND(__MWERKS__, < 0x3200) || \ +- (defined(__DECCXX) && defined(__alpha)) || \ ++ (defined(__DECCXX) && (defined(__alpha) || defined(__sw_64__))) || \ + defined(BOOST_WAVE_STRINGTYPE_USE_STDSTRING) + + #define BOOST_WAVE_STRINGTYPE std::string +diff -Naur boost_1_78_0.org/boostcpp.jam boost_1_78_0.sw/boostcpp.jam +--- boost_1_78_0.org/boostcpp.jam 2021-12-02 14:45:35.000000000 +0800 ++++ boost_1_78_0.sw/boostcpp.jam 2022-05-09 13:54:56.580734000 +0800 +@@ -634,7 +634,7 @@ + return <conditional>@boostcpp.deduce-address-model ; + } + +-local deducable-architectures = arm mips1 power riscv s390x sparc x86 combined ; ++local deducable-architectures = sw_64 arm mips1 power riscv s390x sparc x86 combined ; + feature.feature deduced-architecture : $(deducable-architectures) : propagated optional composite hidden ; + for a in $(deducable-architectures) + { +@@ -645,9 +645,10 @@ + { + local result ; + local filtered = [ toolset-properties $(properties) ] ; +- local names = arm mips1 power riscv s390x sparc x86 combined ; ++ local names = arm sw_64 mips1 power riscv s390x sparc x86 combined ; + local idx = [ 
configure.find-builds "default architecture" : $(filtered) + : /boost/architecture//arm ++ : /boost/architecture//sw_64 + : /boost/architecture//mips1 + : /boost/architecture//power + : /boost/architecture//riscv +diff -Naur boost_1_78_0.org/libs/atomic/test/lockfree.cpp boost_1_78_0.sw/libs/atomic/test/lockfree.cpp +--- boost_1_78_0.org/libs/atomic/test/lockfree.cpp 2021-12-02 14:47:30.000000000 +0800 ++++ boost_1_78_0.sw/libs/atomic/test/lockfree.cpp 2022-05-09 13:54:56.620734000 +0800 +@@ -101,7 +101,7 @@ + #define EXPECT_POINTER_LOCK_FREE 2 + #define EXPECT_BOOL_LOCK_FREE 2 + +-#elif defined(__GNUC__) && defined(__alpha__) ++#elif defined(__GNUC__) && (defined(__alpha__) || defined(__sw_64__)) + + #define EXPECT_CHAR_LOCK_FREE 2 + #define EXPECT_CHAR16_T_LOCK_FREE 2 +diff -Naur boost_1_78_0.org/libs/config/checks/architecture/Jamfile.jam boost_1_78_0.sw/libs/config/checks/architecture/Jamfile.jam +--- boost_1_78_0.org/libs/config/checks/architecture/Jamfile.jam 2021-12-02 14:47:31.000000000 +0800 ++++ boost_1_78_0.sw/libs/config/checks/architecture/Jamfile.jam 2022-05-09 13:54:56.620734000 +0800 +@@ -17,6 +17,7 @@ + obj 64 : 64.cpp ; + + obj arm : arm.cpp ; ++obj sw_64 : sw_64.cpp ; + obj combined : combined.cpp ; + obj mips1 : mips1.cpp ; + obj power : power.cpp ; +diff -Naur boost_1_78_0.org/libs/config/checks/architecture/sw_64.cpp boost_1_78_0.sw/libs/config/checks/architecture/sw_64.cpp +--- boost_1_78_0.org/libs/config/checks/architecture/sw_64.cpp 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/libs/config/checks/architecture/sw_64.cpp 2022-05-09 13:54:56.620734000 +0800 +@@ -0,0 +1,15 @@ ++// sw_64.cpp ++// ++// Copyright (c) 2012 Steven Watanabe ++// ++// Distributed under the Boost Software License Version 1.0. (See ++// accompanying file LICENSE_1_0.txt or copy at ++// http://www.boost.org/LICENSE_1_0.txt) ++ ++#if !defined(__sw_64__) && !defined(__sw_64) && \ ++ !defined(__TARGET_ARCH_SW_64) && \ ++ !defined(_SW_64) && \ ++ !defined(_M_SW_64) ++#error "Not SW_64" ++#endif ++ +diff -Naur boost_1_78_0.org/libs/config/test/config_info.cpp boost_1_78_0.sw/libs/config/test/config_info.cpp +--- boost_1_78_0.org/libs/config/test/config_info.cpp 2021-12-02 14:47:31.000000000 +0800 ++++ boost_1_78_0.sw/libs/config/test/config_info.cpp 2022-05-09 13:54:56.620734000 +0800 +@@ -173,6 +173,7 @@ + PRINT_MACRO(_M_IX86_FP); + PRINT_MACRO(_M_X64); + PRINT_MACRO(_M_ALPHA); ++ PRINT_MACRO(_M_SW_64); + PRINT_MACRO(_M_MPPC); + PRINT_MACRO(_M_MRX000); + PRINT_MACRO(_M_PPC); +@@ -229,6 +230,7 @@ + PRINT_MACRO(__MINGW32__); + PRINT_MACRO(__GXX_RTTI); + PRINT_MACRO(__alpha__); ++ PRINT_MACRO(__sw_64__); + PRINT_MACRO(__amd64__); + PRINT_MACRO(__arm__); + PRINT_MACRO(__aarch64__); +diff -Naur boost_1_78_0.org/libs/context/CMakeLists.txt boost_1_78_0.sw/libs/context/CMakeLists.txt +--- boost_1_78_0.org/libs/context/CMakeLists.txt 2021-12-02 14:47:31.000000000 +0800 ++++ boost_1_78_0.sw/libs/context/CMakeLists.txt 2022-05-09 13:54:56.620734000 +0800 +@@ -29,7 +29,7 @@ + + if(WIN32) + set(_default_abi ms) +-elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64") ++elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "^sw_64") + set(_default_abi aapcs) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^mips") + if(_bits EQUAL 32) +@@ -48,7 +48,7 @@ + + ## Arch-and-model + +-set(_all_archs arm arm64 mips32 mips64 ppc32 ppc64 riscv64 s390x 
i386 x86_64 combined) ++set(_all_archs arm arm64 sw_64 mips32 mips64 ppc32 ppc64 riscv64 s390x i386 x86_64 combined) + + # Try at start to auto determine arch from CMake. + if(CMAKE_SYSTEM_PROCESSOR IN_LIST _all_archs) +@@ -65,6 +65,8 @@ + if(CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" OR + CMAKE_SYSTEM_PROCESSOR MATCHES "^arm") # armv8 + set(_default_arch arm64) ++ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^sw_64") ++ set(_default_arch sw_64) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^mips") + set(_default_arch mips64) + else() +@@ -72,7 +74,7 @@ + endif() + endif() + +-set(BOOST_CONTEXT_ARCHITECTURE "${_default_arch}" CACHE STRING "Boost.Context architecture (arm, arm64, mips32, mips64, ppc32, ppc64, riscv64, s390x, i386, x86_64, combined)") ++set(BOOST_CONTEXT_ARCHITECTURE "${_default_arch}" CACHE STRING "Boost.Context architecture (arm, arm64, sw_64, mips32, mips64, ppc32, ppc64, riscv64, s390x, i386, x86_64, combined)") + set_property(CACHE BOOST_CONTEXT_ARCHITECTURE PROPERTY STRINGS ${_all_archs}) + + unset(_all_archs) +diff -Naur boost_1_78_0.org/libs/context/build/Jamfile.v2 boost_1_78_0.sw/libs/context/build/Jamfile.v2 +--- boost_1_78_0.org/libs/context/build/Jamfile.v2 2021-12-02 14:47:31.000000000 +0800 ++++ boost_1_78_0.sw/libs/context/build/Jamfile.v2 2022-05-09 13:54:56.620734000 +0800 +@@ -80,6 +80,7 @@ + if [ os.name ] = "NT" { tmp = ms ; } + else if [ os.name ] = "CYGWIN" { tmp = ms ; } + else if [ os.platform ] = "ARM" { tmp = aapcs ; } ++ else if [ os.platform ] = "SW_64" { tmp = aapcs ; } + else if [ os.platform ] = "MIPS32" { tmp = o32 ; } + else if [ os.platform ] = "MIPS64" { tmp = n64 ; } + return $(tmp) ; +@@ -182,6 +183,30 @@ + <toolset>msvc + ; + ++# SW_64 ++# SW_64/AAPCS/ELF ++alias asm_sources ++ : asm/make_sw_64_aapcs_elf_gas.S ++ asm/jump_sw_64_aapcs_elf_gas.S ++ asm/ontop_sw_64_aapcs_elf_gas.S ++ : <abi>aapcs ++ <address-model>64 ++ <architecture>sw_64 ++ <binary-format>elf ++ <toolset>clang ++ ; ++ ++alias asm_sources ++ : asm/make_sw_64_aapcs_elf_gas.S ++ asm/jump_sw_64_aapcs_elf_gas.S ++ asm/ontop_sw_64_aapcs_elf_gas.S ++ : <abi>aapcs ++ <address-model>64 ++ <architecture>sw_64 ++ <binary-format>elf ++ <toolset>gcc ++ ; ++ + # ARM64 + # ARM64/AAPCS/ELF + alias asm_sources +diff -Naur boost_1_78_0.org/libs/context/build/architecture.jam boost_1_78_0.sw/libs/context/build/architecture.jam +--- boost_1_78_0.org/libs/context/build/architecture.jam 2021-12-02 14:47:31.000000000 +0800 ++++ boost_1_78_0.sw/libs/context/build/architecture.jam 2022-05-09 13:54:56.620734000 +0800 +@@ -55,6 +55,10 @@ + { + return <architecture>arm ; + } ++ else if [ configure.builds /boost/architecture//sw_64 : $(properties) : sw_64 ] ++ { ++ return <architecture>sw_64 ; ++ } + else if [ configure.builds /boost/architecture//mips : $(properties) : mips ] + { + return <architecture>mips ; +diff -Naur boost_1_78_0.org/libs/context/doc/architectures.qbk boost_1_78_0.sw/libs/context/doc/architectures.qbk +--- boost_1_78_0.org/libs/context/doc/architectures.qbk 2021-12-02 14:47:31.000000000 +0800 ++++ boost_1_78_0.sw/libs/context/doc/architectures.qbk 2022-05-09 13:54:56.620734000 +0800 +@@ -22,6 +22,7 @@ + [[s390x] [SYSV|ELF] [-] [-] [-]] + [[sparc] [-] [-] [-] [-]] + [[x86_64] [SYSV,X32|ELF] [MS|PE] [SYSV|MACH-O] [-]] ++ [[sw_64] [SYSV|ELF] [-] [-] [-]] + ] + + [note If the architecture is not supported but the platform provides +diff -Naur boost_1_78_0.org/libs/context/src/asm/jump_sw_64_aapcs_elf_gas.S boost_1_78_0.sw/libs/context/src/asm/jump_sw_64_aapcs_elf_gas.S +--- 
boost_1_78_0.org/libs/context/src/asm/jump_sw_64_aapcs_elf_gas.S 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/libs/context/src/asm/jump_sw_64_aapcs_elf_gas.S 2022-05-09 13:54:56.620734000 +0800 +@@ -0,0 +1,86 @@ ++.text ++.align 2 ++.global jump_fcontext ++.type jump_fcontext, %function ++jump_fcontext: ++ # prepare stack for GP + FPU ++ #ldih $29,0($27) ++ #ldi $29,0($29) ++ subl $sp, 0x98, $sp ++ ++ # save $f2-$f9 ++ fstd $f2, 0x00($sp) ++ fstd $f3, 0x08($sp) ++ fstd $f4, 0x10($sp) ++ fstd $f5, 0x18($sp) ++ fstd $f6, 0x20($sp) ++ fstd $f7, 0x28($sp) ++ fstd $f8, 0x30($sp) ++ fstd $f9, 0x38($sp) ++ ++ # save $9-$15, fp,$26 ++ stl $9, 0x40($sp) ++ stl $10, 0x48($sp) ++ stl $11, 0x50($sp) ++ stl $12, 0x58($sp) ++ stl $13, 0x60($sp) ++ stl $14, 0x68($sp) ++ stl $15, 0x70($sp) ++ stl $fp, 0x78($sp) ++ stl $16, 0x80($sp) #save jump_fcontext return address ++ stl $26, 0x88($sp) ++ ++ # save LR as PC ++ stl $26, 0x90($sp) ++ ++ # store RSP (pointing to context-data) in $16 ++ mov $sp, $20 ++ ++ ++ # restore RSP (pointing to context-data) from $17 ++ mov $17, $sp ++ ++ # load $f2-$f9 ++ fldd $f2, 0x00($sp) ++ fldd $f3, 0x08($sp) ++ fldd $f4, 0x10($sp) ++ fldd $f5, 0x18($sp) ++ fldd $f6, 0x20($sp) ++ fldd $f7, 0x28($sp) ++ fldd $f8, 0x30($sp) ++ fldd $f9, 0x38($sp) ++ ++ # load $9-$15, fp,$26 ++ ldl $9, 0x40($sp) ++ ldl $10, 0x48($sp) ++ ldl $11, 0x50($sp) ++ ldl $12, 0x58($sp) ++ ldl $13, 0x60($sp) ++ ldl $14, 0x68($sp) ++ ldl $15, 0x70($sp) ++ ldl $fp, 0x78($sp) ++ ldl $26, 0x88($sp) ++ ++ # pass transfer_t as first arg in context function ++ # to store $1,$2 to $16 address ++ ldl $16, 0x80($sp) #load $16, store return struct do return address ++ stl $20,0($16) ++ stl $18,8($16) ++ ++ # pass transfer_t as first arg in context function,such as f1,f2,f3 ++ # $16 == FCTX, $17 == DATA ++ mov $20,$16 #$16 $17 as first and second arg ++ mov $18,$17 ++ ++ ++ # load pc ++ ldl $27, 0x90($sp) ++ ++ ++ # restore stack from GP + FPU ++ addl $sp, 0x98, $sp ++ ++ ret $31,($27),0x1 //jmp $31, ($27) //ret ($27) ++.size jump_fcontext,.-jump_fcontext ++# Mark that we don't need executable stack. ++.section .note.GNU-stack,"",%progbits +diff -Naur boost_1_78_0.org/libs/context/src/asm/make_sw_64_aapcs_elf_gas.S boost_1_78_0.sw/libs/context/src/asm/make_sw_64_aapcs_elf_gas.S +--- boost_1_78_0.org/libs/context/src/asm/make_sw_64_aapcs_elf_gas.S 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/libs/context/src/asm/make_sw_64_aapcs_elf_gas.S 2022-05-09 13:54:56.620734000 +0800 +@@ -0,0 +1,37 @@ ++.text ++.align 2 ++.global make_fcontext ++.type make_fcontext, %function ++make_fcontext: ++ #ldih $29,0($27) ++ #ldi $29,0($29) ++ # shift address in $16 (allocated stack) to lower 16 byte boundary ++ bic $16, 0xf,$16 ++ ++ # reserve space for context-data on context-stack ++ subl $16, 0x98,$16 ++ ++ # third arg of make_fcontext() == address of context-function ++ # store address as a PC to jump in ++ stl $18, 0x90($16) ++ ++ # save address of finish as return-address for context-function ++ # will be entered after context-function returns (LR register) ++ ldi $17, finish ++ stl $17, 0x88($16) ++ ++ stl $16, 0x80($16) ++ ++ mov $16, $0 ++ ++ ret $31,($26),1 //jump ($26) // return pointer to context-data ($16) ++ ++finish: ++ # exit code is zero ++ mov 0, $0 ++ # exit application ++ call _exit #ldi $27,_exit #jmp ($27) ++ ++.size make_fcontext,.-make_fcontext ++# Mark that we don't need executable stack. 
++.section .note.GNU-stack,"",%progbits +diff -Naur boost_1_78_0.org/libs/context/src/asm/ontop_sw_64_aapcs_elf_gas.S boost_1_78_0.sw/libs/context/src/asm/ontop_sw_64_aapcs_elf_gas.S +--- boost_1_78_0.org/libs/context/src/asm/ontop_sw_64_aapcs_elf_gas.S 1970-01-01 08:00:00.000000000 +0800 ++++ boost_1_78_0.sw/libs/context/src/asm/ontop_sw_64_aapcs_elf_gas.S 2022-05-09 13:56:40.360734000 +0800 +@@ -0,0 +1,85 @@ ++.text ++.align 2 ++.global ontop_fcontext ++.type ontop_fcontext, %function ++ontop_fcontext: ++ # prepare stack for GP + FPU ++ #ldih $29,0($27) ++ #ldi $29,0($29) ++ subl $sp, 0x98, $sp ++ ++ # save $f2-$f9 ++ fstd $f2, 0x00($sp) ++ fstd $f3, 0x08($sp) ++ fstd $f4, 0x10($sp) ++ fstd $f5, 0x18($sp) ++ fstd $f6, 0x20($sp) ++ fstd $f7, 0x28($sp) ++ fstd $f8, 0x30($sp) ++ fstd $f9, 0x38($sp) ++ ++ # save $9-$15, fp,$26 ++ stl $9, 0x40($sp) ++ stl $10, 0x48($sp) ++ stl $11, 0x50($sp) ++ stl $12, 0x58($sp) ++ stl $13, 0x60($sp) ++ stl $14, 0x68($sp) ++ stl $15, 0x70($sp) ++ stl $fp, 0x78($sp) ++ stl $16, 0x80($sp) #save ontop_fcontext return address ++ stl $26, 0x88($sp) ++ ++ # save LR as PC ++ stl $26, 0x90($sp) ++ ++ # store RSP (pointing to context-data) in $16 ++ mov $sp, $20 ++ ++ ++ # restore RSP (pointing to context-data) from $17 ++ mov $17, $sp ++ ++ # load $f2-$f9 ++ fldd $f2, 0x00($sp) ++ fldd $f3, 0x08($sp) ++ fldd $f4, 0x10($sp) ++ fldd $f5, 0x18($sp) ++ fldd $f6, 0x20($sp) ++ fldd $f7, 0x28($sp) ++ fldd $f8, 0x30($sp) ++ fldd $f9, 0x38($sp) ++ ++ # load $9-$15, fp,$26 ++ ldl $9, 0x40($sp) ++ ldl $10, 0x48($sp) ++ ldl $11, 0x50($sp) ++ ldl $12, 0x58($sp) ++ ldl $13, 0x60($sp) ++ ldl $14, 0x68($sp) ++ ldl $15, 0x70($sp) ++ ldl $fp, 0x78($sp) ++ ldl $26, 0x88($sp) ++ ++ # pass transfer_t as first arg in context function ++ # to store $1,$2 to $16 address ++ ldl $16, 0x80($sp) #load $16, store return struct do return address ++ stl $20,0($16) ++ stl $18,8($16) ++ ++ # pass transfer_t as first arg in context function,such as f1,f2,f3 ++ # $16 == FCTX, $17 == DATA ++ mov $20,$17 #$16 $17 $18 as first and second arg ++ ++ ++ # skip pc ++ mov $19, $27 ++ ++ ++ # restore stack from GP + FPU ++ addl $sp, 0x98, $sp ++ ++ ret $31,($27),0x1 //jmp $31, ($27) //ret ($27) ++.size ontop_fcontext,.-ontop_fcontext ++# Mark that we don't need executable stack. 
++.section .note.GNU-stack,"",%progbits +diff -Naur boost_1_78_0.org/libs/log/build/log-arch-config.jam boost_1_78_0.sw/libs/log/build/log-arch-config.jam +--- boost_1_78_0.org/libs/log/build/log-arch-config.jam 2021-12-02 14:47:33.000000000 +0800 ++++ boost_1_78_0.sw/libs/log/build/log-arch-config.jam 2022-05-09 13:54:56.620734000 +0800 +@@ -56,6 +56,10 @@ + { + return arm ; + } ++ else if [ configure.builds /boost/architecture//sw_64 : $(properties) : sw_64 ] ++ { ++ return sw_64 ; ++ } + else if [ configure.builds /boost/architecture//mips1 : $(properties) : mips1 ] + { + return mips1 ; +diff -Naur boost_1_78_0.org/tools/build/src/engine/jam.h boost_1_78_0.sw/tools/build/src/engine/jam.h +--- boost_1_78_0.org/tools/build/src/engine/jam.h 2021-12-02 14:47:38.000000000 +0800 ++++ boost_1_78_0.sw/tools/build/src/engine/jam.h 2022-05-09 13:54:56.620734000 +0800 +@@ -396,6 +396,11 @@ + #define OSPLAT "OSPLAT=AXP" + #endif + ++#if defined( _SW_64_ ) || \ ++ defined( __sw_64__ ) + #define OSPLAT "OSPLAT=SW_64" ++#endif ++ + #if defined( _i386_ ) || \ + defined( __i386__ ) || \ + defined( __i386 ) || \ +diff -Naur boost_1_78_0.org/tools/build/src/tools/builtin.py boost_1_78_0.sw/tools/build/src/tools/builtin.py +--- boost_1_78_0.org/tools/build/src/tools/builtin.py 2021-12-02 14:47:38.000000000 +0800 ++++ boost_1_78_0.sw/tools/build/src/tools/builtin.py 2022-05-09 13:54:56.620734000 +0800 +@@ -252,6 +252,9 @@ + # x86 and x86-64 + 'x86', + ++ # SW_64 ++ 'sw_64', ++ + # ia64 + 'ia64', + +@@ -331,6 +334,9 @@ + 'armv2', 'armv2a', 'armv3', 'armv3m', 'armv4', 'armv4t', 'armv5', + 'armv5t', 'armv5te', 'armv6', 'armv6j', 'iwmmxt', 'ep9312', + ++ # SW_64 ++ 'sw_64', ++ + # z Systems (aka s390x) + 'z196', 'zEC12', 'z13', 'z13', 'z14', 'z15'], + +diff -Naur boost_1_78_0.org/tools/build/src/tools/doxproc.py boost_1_78_0.sw/tools/build/src/tools/doxproc.py +--- boost_1_78_0.org/tools/build/src/tools/doxproc.py 2021-12-02 14:47:38.000000000 +0800 ++++ boost_1_78_0.sw/tools/build/src/tools/doxproc.py 2022-05-09 13:54:56.630734000 +0800 +@@ -19,7 +19,7 @@ + + + def usage(): +- print ''' ++ print( ''' + Usage: + %s options + +@@ -30,7 +30,7 @@ + --title The title of the top level BoostBook section. + --enable-index Generate additional index sections for classes and + types. +-''' % ( sys.argv[0] ) ++''' % ( sys.argv[0] )) + + + def get_args( argv = sys.argv[1:] ): +diff -Naur boost_1_78_0.org/tools/build/src/tools/features/architecture-feature.jam boost_1_78_0.sw/tools/build/src/tools/features/architecture-feature.jam +--- boost_1_78_0.org/tools/build/src/tools/features/architecture-feature.jam 2021-12-02 14:47:38.000000000 +0800 ++++ boost_1_78_0.sw/tools/build/src/tools/features/architecture-feature.jam 2022-05-09 13:54:56.630734000 +0800 +@@ -9,7 +9,7 @@ + + [[bbv2.builtin.features.architecture]]`architecture`:: + *Allowed values:* `x86`, `ia64`, `sparc`, `power`, `mips`, `mips1`, `mips2`, +-`mips3`, `mips4`, `mips32`, `mips32r2`, `mips64`, `parisc`, `arm`, ++`mips3`, `mips4`, `mips32`, `mips32r2`, `mips64`, `parisc`, `arm`, `sw_64`, + `s390x`. + + + Specifies the general processor family to generate code for. +@@ -39,6 +39,9 @@ + # Advanced RISC Machines + arm + ++ # SW_64 ++ sw_64 ++ + # RISC-V + riscv
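Every read-modify-write in core_arch_ops_gcc_sw_64.hpp above is a load-locked/store-conditional loop: lldw/lldl takes the lock, wr_f arms the conditional store, lstw/lstl attempts it, and rd_f reports whether it stuck. A failed store-conditional is exactly the "spurious failure" that compare_exchange_weak is allowed to report. A minimal sketch of the same contract using GCC's __atomic builtins instead of SW_64 assembly (illustrative only, not part of the patch):

```cpp
#include <cstdint>
#include <cstdio>

// fetch_add expressed the way the lldw/lstw loops work: a weak CAS may fail
// spuriously (like a failed lstw/rd_f pair), so it has to sit in a retry loop.
std::uint32_t fetch_add_sketch(std::uint32_t* p, std::uint32_t v)
{
    std::uint32_t expected = __atomic_load_n(p, __ATOMIC_RELAXED);
    while (!__atomic_compare_exchange_n(p, &expected, expected + v,
                                        /*weak=*/true,
                                        __ATOMIC_ACQ_REL, __ATOMIC_RELAXED))
    {
        // on failure 'expected' has been reloaded with the current value; retry
    }
    return expected; // the value before the addition, like fetch_add above
}

int main()
{
    std::uint32_t x = 40;
    std::uint32_t old = fetch_add_sketch(&x, 2);
    std::printf("old=%u new=%u\n", old, x); // old=40 new=42
    return 0;
}
```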
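fence_before and fence_after in core_arch_operations_gcc_sw_64_base test bits of the order argument rather than comparing enumerators. That works because Boost.Atomic encodes boost::memory_order with acquire and release as independent bit flags; std::memory_order makes no such promise, so this sketch re-creates the assumed encoding (values taken to mirror boost/memory_order.hpp) and the resulting "memb" placement rule:

```cpp
// Assumed Boost-style encoding: acquire and release are bit flags, acq_rel is
// their union, and seq_cst adds one more bit on top of both.
enum mo : unsigned int
{
    relaxed = 0, consume = 1, acquire = 2, release = 4,
    acq_rel = acquire | release,     // 6
    seq_cst = 8 | acquire | release  // 14
};

// "memb" goes before the access when the order has a release component...
constexpr bool fence_before(mo o) { return (o & release) != 0u; }
// ...and after it when the order has a consume or acquire component.
constexpr bool fence_after(mo o) { return (o & (consume | acquire)) != 0u; }

static_assert(fence_before(seq_cst) && fence_after(seq_cst), "seq_cst fences on both sides");
static_assert(!fence_before(acquire) && fence_after(acquire), "acquire fences only after");
static_assert(!fence_before(relaxed) && !fence_after(relaxed), "relaxed never fences");
```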
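Like Alpha, SW_64 has no 8- or 16-bit store-conditional, so the 1- and 2-byte specializations run the loop on a full 32-bit word and renormalize the result before lstw: zapnot keeps the low byte(s) for the unsigned case, sextb/sexth sign-extends for the signed case. The same normalization in plain C++ (helper names are hypothetical; two's-complement narrowing assumed):

```cpp
#include <cstdint>

constexpr std::uint32_t norm_u8(std::uint32_t x)  { return x & 0xffu;   } // zapnot %1, 1, %1
constexpr std::uint32_t norm_u16(std::uint32_t x) { return x & 0xffffu; } // zapnot %1, 3, %1

constexpr std::uint32_t norm_s8(std::uint32_t x)  // sextb %1, %1
{
    return static_cast<std::uint32_t>(static_cast<std::int32_t>(static_cast<std::int8_t>(x & 0xffu)));
}
constexpr std::uint32_t norm_s16(std::uint32_t x) // sexth %1, %1
{
    return static_cast<std::uint32_t>(static_cast<std::int32_t>(static_cast<std::int16_t>(x & 0xffffu)));
}

static_assert(norm_u8(0x1ffu) == 0xffu, "wraps to 8 bits");
static_assert(norm_s8(0x80u) == 0xffffff80u, "sign bit propagates");
```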
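sw_64_rounding_control switches the FPU's dynamic rounding direction by writing the floating-point control register with wfpcr (the mode constants select the DYN field, following the Alpha pattern); Boost.Interval uses it to compute lower bounds rounded down and upper bounds rounded up. A portable sketch of the same dance with <cfenv>:

```cpp
#include <cfenv>
#include <cstdio>

int main()
{
    volatile double divisor = 3.0; // volatile keeps the divisions at run time
                                   // (strictly, FENV access also wants
                                   //  #pragma STDC FENV_ACCESS ON)

    std::fesetround(FE_DOWNWARD);
    double lo = 1.0 / divisor;     // lower bound for 1/3
    std::fesetround(FE_UPWARD);
    double hi = 1.0 / divisor;     // upper bound for 1/3
    std::fesetround(FE_TONEAREST); // restore the default

    std::printf("1/3 lies in [%.17g, %.17g]\n", lo, hi);
    return lo < hi ? 0 : 1;        // a genuine enclosure: lo < 1/3 < hi
}
```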
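The new boost/predef/architecture/sw_64.h follows the usual Boost.Predef shape: BOOST_ARCH_SW_64 is BOOST_VERSION_NUMBER_NOT_AVAILABLE (zero) unless one of the detection macros is defined, with BOOST_ARCH_SW_64_AVAILABLE and BOOST_ARCH_SW_64_NAME alongside. A sketch of a consumer, assuming the patched tree is on the include path:

```cpp
#include <boost/predef/architecture/sw_64.h> // header added by this patch
#include <cstdio>

int main()
{
#if BOOST_ARCH_SW_64
    // Non-zero means the architecture was detected; the value is the
    // encoded version number (6.0.0 when __sw_64_sw6b__ is defined).
    std::printf("compiling for %s\n", BOOST_ARCH_SW_64_NAME);
#else
    std::printf("not an SW_64 target\n");
#endif
    return 0;
}
```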
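Finally, the three .S files give Boost.Context its fcontext backend for SW_64: make_fcontext carves a 0x98-byte register record out of the supplied stack and plants the entry point where jump_fcontext's "load pc" expects it, while jump_fcontext and ontop_fcontext save the callee-saved integer and FP registers, swap stacks, and pass a transfer_t across the switch. The contract is the same detail-level API as on every other architecture; a sketch of its use (private API, links against a Boost.Context built from these sources):

```cpp
#include <boost/context/detail/fcontext.hpp>
#include <cstdio>

namespace ctx = boost::context::detail;

static void coro(ctx::transfer_t t)
{
    std::printf("inside the coroutine\n");
    ctx::jump_fcontext(t.fctx, nullptr); // switch back to the caller
}

int main()
{
    static char stack[64 * 1024];
    // make_fcontext takes the TOP of the stack (it grows downwards), the
    // stack size, and the entry function; it returns the context record.
    ctx::fcontext_t c = ctx::make_fcontext(stack + sizeof(stack), sizeof(stack), coro);
    ctx::jump_fcontext(c, nullptr);      // run coro until it jumps back
    std::printf("back in main\n");
    return 0;
}
```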