From: ronag
Date: Sun, 27 Nov 2011 13:02:12 +0000 (+0000)
Subject: dependencies: Cleanup
X-Git-Tag: dependencies_subtree~4
X-Git-Url: https://git.sesse.net/?p=casparcg;a=commitdiff_plain;h=a17d35e0d39050de29d2c5407b4a5857a0e0de9a

dependencies: Cleanup
---

diff --git a/BlueFish444Installer_5_8_2.7z b/BlueFish444Installer_5_8_2.7z
deleted file mode 100644
index 56cadc94c..000000000
Binary files a/BlueFish444Installer_5_8_2.7z and /dev/null differ
diff --git a/boost_1_44_0.7z b/boost_1_44_0.7z
deleted file mode 100644
index 13a72faf3..000000000
Binary files a/boost_1_44_0.7z and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/irml/irml.dll b/tbb30_20100406oss/bin/ia32/vc10/irml/irml.dll
deleted file mode 100644
index 3c348efba..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/irml/irml.dll and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/irml/irml.pdb b/tbb30_20100406oss/bin/ia32/vc10/irml/irml.pdb
deleted file mode 100644
index f9a216366..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/irml/irml.pdb and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/irml/irml_debug.dll b/tbb30_20100406oss/bin/ia32/vc10/irml/irml_debug.dll
deleted file mode 100644
index d3c4d8d95..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/irml/irml_debug.dll and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/irml/irml_debug.pdb b/tbb30_20100406oss/bin/ia32/vc10/irml/irml_debug.pdb
deleted file mode 100644
index 4c0f170d0..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/irml/irml_debug.pdb and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml.dll b/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml.dll
deleted file mode 100644
index a63f20511..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml.dll and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml.pdb b/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml.pdb
deleted file mode 100644
index e46981a87..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml.pdb and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml_debug.dll b/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml_debug.dll
deleted file mode 100644
index 64329e845..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml_debug.dll and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml_debug.pdb b/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml_debug.pdb
deleted file mode 100644
index 174a67034..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/irml_c/irml_debug.pdb and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbb.dll b/tbb30_20100406oss/bin/ia32/vc10/tbb.dll
deleted file mode 100644
index f99cb7c46..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbb.dll and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbb.pdb b/tbb30_20100406oss/bin/ia32/vc10/tbb.pdb
deleted file mode 100644
index 0ff4a8bb4..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbb.pdb and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbb_debug.dll b/tbb30_20100406oss/bin/ia32/vc10/tbb_debug.dll
deleted file mode 100644
index 9831addff..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbb_debug.dll and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbb_debug.pdb b/tbb30_20100406oss/bin/ia32/vc10/tbb_debug.pdb
deleted file mode 100644
index dec1511bf..000000000
Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbb_debug.pdb and /dev/null differ
diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc.dll b/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc.dll deleted file mode 100644 index 703736be7..000000000 Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc.dll and /dev/null differ diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc.pdb b/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc.pdb deleted file mode 100644 index f7b5b988c..000000000 Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc.pdb and /dev/null differ diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_debug.dll b/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_debug.dll deleted file mode 100644 index 5dfe95846..000000000 Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_debug.dll and /dev/null differ diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_debug.pdb b/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_debug.pdb deleted file mode 100644 index 8820bded0..000000000 Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_debug.pdb and /dev/null differ diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy.dll b/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy.dll deleted file mode 100644 index 6cd458e4b..000000000 Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy.dll and /dev/null differ diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy.pdb b/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy.pdb deleted file mode 100644 index 1acef9be7..000000000 Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy.pdb and /dev/null differ diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy_debug.dll b/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy_debug.dll deleted file mode 100644 index b88eecf8b..000000000 Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy_debug.dll and /dev/null differ diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy_debug.pdb b/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy_debug.pdb deleted file mode 100644 index ad13b144d..000000000 Binary files a/tbb30_20100406oss/bin/ia32/vc10/tbbmalloc_proxy_debug.pdb and /dev/null differ diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbvars.bat b/tbb30_20100406oss/bin/ia32/vc10/tbbvars.bat deleted file mode 100644 index e19b40730..000000000 --- a/tbb30_20100406oss/bin/ia32/vc10/tbbvars.bat +++ /dev/null @@ -1,32 +0,0 @@ -@echo off -REM -REM Copyright 2005-2010 Intel Corporation. All Rights Reserved. -REM -REM This file is part of Threading Building Blocks. -REM -REM Threading Building Blocks is free software; you can redistribute it -REM and/or modify it under the terms of the GNU General Public License -REM version 2 as published by the Free Software Foundation. -REM -REM Threading Building Blocks is distributed in the hope that it will be -REM useful, but WITHOUT ANY WARRANTY; without even the implied warranty -REM of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -REM GNU General Public License for more details. -REM -REM You should have received a copy of the GNU General Public License -REM along with Threading Building Blocks; if not, write to the Free Software -REM Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -REM -REM As a special exception, you may use this file as part of a free software -REM library without restriction. 
Specifically, if other files instantiate -REM templates or use macros or inline functions from this file, or you compile -REM this file and link it with other files to produce an executable, this -REM file does not by itself cause the resulting executable to be covered by -REM the GNU General Public License. This exception does not however -REM invalidate any other reasons why the executable file might be covered by -REM the GNU General Public License. -REM -set TBB_TARGET_ARCH=ia32 -set TBB_TARGET_VS=vc10 -if exist "%~d0%~p0..\..\tbbvars.bat" call "%~d0%~p0..\..\tbbvars.bat" arch_placeholder vs_placeholder -if exist "%~d0%~p0..\..\..\..\tbb\bin\tbbvars.bat" call "%~d0%~p0..\..\..\..\tbb\bin\tbbvars.bat" arch_placeholder vs_placeholder diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbvars.csh b/tbb30_20100406oss/bin/ia32/vc10/tbbvars.csh deleted file mode 100644 index 398fd8371..000000000 --- a/tbb30_20100406oss/bin/ia32/vc10/tbbvars.csh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/csh -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -setenv TBB30_INSTALL_DIR "SUBSTITUTE_INSTALL_DIR_HERE" -setenv TBB_ARCH_PLATFORM "ia32\vc10" -if (! $?PATH) then - setenv PATH "${TBB30_INSTALL_DIR}\bin\${TBB_ARCH_PLATFORM}" -else - setenv PATH "${TBB30_INSTALL_DIR}\bin\${TBB_ARCH_PLATFORM};$PATH" -endif -if (! $?LIB) then - setenv LIB "${TBB30_INSTALL_DIR}\lib\${TBB_ARCH_PLATFORM}" -else - setenv LIB "${TBB30_INSTALL_DIR}\lib\${TBB_ARCH_PLATFORM};$LIB" -endif -if (! $?INCLUDE) then - setenv INCLUDE "${TBB30_INSTALL_DIR}\include" -else - setenv INCLUDE "${TBB30_INSTALL_DIR}\include;$INCLUDE" -endif diff --git a/tbb30_20100406oss/bin/ia32/vc10/tbbvars.sh b/tbb30_20100406oss/bin/ia32/vc10/tbbvars.sh deleted file mode 100644 index 0bae236fd..000000000 --- a/tbb30_20100406oss/bin/ia32/vc10/tbbvars.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. 
-# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -TBB30_INSTALL_DIR="SUBSTITUTE_SH_INSTALL_DIR_HERE"; export TBB30_INSTALL_DIR -TBB_ARCH_PLATFORM="ia32/vc10" -if [ -z "${PATH}" ]; then - PATH="${TBB30_INSTALL_DIR}/bin/${TBB_ARCH_PLATFORM}"; export PATH -else - PATH="${TBB30_INSTALL_DIR}/bin/${TBB_ARCH_PLATFORM};$PATH"; export PATH -fi -if [ -z "${LIB}" ]; then - LIB="${TBB30_INSTALL_DIR}/lib/${TBB_ARCH_PLATFORM}"; export LIB -else - LIB="${TBB30_INSTALL_DIR}/lib/${TBB_ARCH_PLATFORM};$LIB"; export LIB -fi -if [ -z "${INCLUDE}" ]; then - INCLUDE="${TBB30_INSTALL_DIR}/include"; export INCLUDE -else - INCLUDE="${TBB30_INSTALL_DIR}/include;$INCLUDE"; export INCLUDE -fi diff --git a/tbb30_20100406oss/bin/tbbvars.bat b/tbb30_20100406oss/bin/tbbvars.bat deleted file mode 100644 index 4f15a1da3..000000000 --- a/tbb30_20100406oss/bin/tbbvars.bat +++ /dev/null @@ -1,75 +0,0 @@ -@echo off -REM -REM Copyright 2005-2010 Intel Corporation. All Rights Reserved. -REM -REM This file is part of Threading Building Blocks. -REM -REM Threading Building Blocks is free software; you can redistribute it -REM and/or modify it under the terms of the GNU General Public License -REM version 2 as published by the Free Software Foundation. -REM -REM Threading Building Blocks is distributed in the hope that it will be -REM useful, but WITHOUT ANY WARRANTY; without even the implied warranty -REM of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -REM GNU General Public License for more details. -REM -REM You should have received a copy of the GNU General Public License -REM along with Threading Building Blocks; if not, write to the Free Software -REM Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -REM -REM As a special exception, you may use this file as part of a free software -REM library without restriction. Specifically, if other files instantiate -REM templates or use macros or inline functions from this file, or you compile -REM this file and link it with other files to produce an executable, this -REM file does not by itself cause the resulting executable to be covered by -REM the GNU General Public License. This exception does not however -REM invalidate any other reasons why the executable file might be covered by -REM the GNU General Public License. -REM - -set SCRIPT_NAME=%~nx0 -if (%1) == () goto Syntax - -SET TBB30_BIN_DIR=%~d0%~p0 - -SET TBB30_INSTALL_DIR=%TBB30_BIN_DIR%.. 
- -:ParseArgs -:: Parse the incoming arguments -if /i "%1"=="" goto SetEnv -if /i "%1"=="ia32" (set TBB_TARGET_ARCH=ia32) & shift & goto ParseArgs -if /i "%1"=="intel64" (set TBB_TARGET_ARCH=intel64) & shift & goto ParseArgs -if /i "%1"=="vs2005" (set TBB_TARGET_VS=vc8) & shift & goto ParseArgs -if /i "%1"=="vs2008" (set TBB_TARGET_VS=vc9) & shift & goto ParseArgs -if /i "%1"=="vs2010" (set TBB_TARGET_VS=vc10) & shift & goto ParseArgs -if /i "%1"=="all" (set TBB_TARGET_VS=vc_mt) & shift & goto ParseArgs - -:SetEnv - -if ("%TBB_TARGET_VS%") == ("") set TBB_TARGET_VS=vc_mt - -SET TBB_ARCH_PLATFORM=%TBB_TARGET_ARCH%\%TBB_TARGET_VS% -if exist "%TBB30_BIN_DIR%\%TBB_ARCH_PLATFORM%\tbb.dll" SET PATH=%TBB30_BIN_DIR%\%TBB_ARCH_PLATFORM%;%PATH% -if exist "%TBB30_INSTALL_DIR%\..\redist\%TBB_TARGET_ARCH%\tbb\%TBB_TARGET_VS%\tbb.dll" SET PATH=%TBB30_INSTALL_DIR%\..\redist\%TBB_TARGET_ARCH%\tbb\%TBB_TARGET_VS%;%PATH% -SET LIB=%TBB30_INSTALL_DIR%\lib\%TBB_ARCH_PLATFORM%;%LIB% -SET INCLUDE=%TBB30_INSTALL_DIR%\include;%INCLUDE% -IF ("%ICPP_COMPILER11%") NEQ ("") SET TBB_CXX=icl.exe -IF ("%ICPP_COMPILER12%") NEQ ("") SET TBB_CXX=icl.exe -goto End - -:Syntax -echo Syntax: -echo %SCRIPT_NAME% ^ ^ -echo ^ must be is one of the following -echo ia32 : Set up for IA-32 architecture -echo intel64 : Set up for Intel(R) 64 architecture -echo ^ should be one of the following -echo vs2005 : Set to use with Microsoft Visual Studio 2005 runtime DLLs -echo vs2008 : Set to use with Microsoft Visual Studio 2008 runtime DLLs -echo vs2010 : Set to use with Microsoft Visual Studio 2010 runtime DLLs -echo all : Set to use TBB statically linked with Microsoft Visual C++ runtime -echo if ^ is not set TBB statically linked with Microsoft Visual C++ runtime will be used. -exit /B 1 - -:End -exit /B 0 \ No newline at end of file diff --git a/tbb30_20100406oss/include/index.html b/tbb30_20100406oss/include/index.html deleted file mode 100644 index dddfb9dd2..000000000 --- a/tbb30_20100406oss/include/index.html +++ /dev/null @@ -1,24 +0,0 @@ - - - -

-Overview
-
-Include files for Threading Building Blocks.
-
-Directories
-
-tbb
-Include files for Threading Building Blocks classes and functions.
-
-Up to parent directory
-
-Copyright © 2005-2010 Intel Corporation. All Rights Reserved.
-
-Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
-registered trademarks or trademarks of Intel Corporation or its
-subsidiaries in the United States and other countries.

-* Other names and brands may be claimed as the property of others. - - diff --git a/tbb30_20100406oss/include/tbb/_concurrent_queue_internal.h b/tbb30_20100406oss/include/tbb/_concurrent_queue_internal.h deleted file mode 100644 index a7bbbb870..000000000 --- a/tbb30_20100406oss/include/tbb/_concurrent_queue_internal.h +++ /dev/null @@ -1,1016 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_queue_internal_H -#define __TBB_concurrent_queue_internal_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "atomic.h" -#include "spin_mutex.h" -#include "cache_aligned_allocator.h" -#include "tbb_exception.h" -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - - -namespace tbb { - -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - -// forward declaration -namespace strict_ppl { -template class concurrent_queue; -} - -template class concurrent_bounded_queue; - -namespace deprecated { -template class concurrent_queue; -} -#endif - -//! For internal use only. -namespace strict_ppl { - -//! @cond INTERNAL -namespace internal { - -using namespace tbb::internal; - -typedef size_t ticket; - -template class micro_queue ; -template class micro_queue_pop_finalizer ; -template class concurrent_queue_base_v3; - -//! parts of concurrent_queue_rep that do not have references to micro_queue -/** - * For internal use only. - */ -struct concurrent_queue_rep_base : no_copy { - template friend class micro_queue; - template friend class concurrent_queue_base_v3; - -protected: - //! Approximately n_queue/golden ratio - static const size_t phi = 3; - -public: - // must be power of 2 - static const size_t n_queue = 8; - - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - atomic head_counter; - char pad1[NFS_MaxLineSize-sizeof(atomic)]; - atomic tail_counter; - char pad2[NFS_MaxLineSize-sizeof(atomic)]; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; - - //! 
number of invalid entries in the queue - atomic n_invalid_entries; - - char pad3[NFS_MaxLineSize-sizeof(size_t)-sizeof(size_t)-sizeof(atomic)]; -} ; - -inline bool is_valid_page(const concurrent_queue_rep_base::page* p) { - return uintptr_t(p)>1; -} - -//! Abstract class to define interface for page allocation/deallocation -/** - * For internal use only. - */ -class concurrent_queue_page_allocator -{ - template friend class micro_queue ; - template friend class micro_queue_pop_finalizer ; -protected: - virtual ~concurrent_queue_page_allocator() {} -private: - virtual concurrent_queue_rep_base::page* allocate_page() = 0; - virtual void deallocate_page( concurrent_queue_rep_base::page* p ) = 0; -} ; - -#if _MSC_VER && !defined(__INTEL_COMPILER) -// unary minus operator applied to unsigned type, result still unsigned -#pragma warning( push ) -#pragma warning( disable: 4146 ) -#endif - -//! A queue using simple locking. -/** For efficient, this class has no constructor. - The caller is expected to zero-initialize it. */ -template -class micro_queue : no_copy { - typedef concurrent_queue_rep_base::page page; - - //! Class used to ensure exception-safety of method "pop" - class destroyer: no_copy { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - void copy_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T(*static_cast(src)); - } - - void copy_item( page& dst, size_t dindex, const page& src, size_t sindex ) { - new( &get_ref(dst,dindex) ) T( get_ref(const_cast(src),sindex) ); - } - - void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = from; - } - - void spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const ; - -public: - friend class micro_queue_pop_finalizer; - - struct padded_page: page { - //! Not defined anywhere - exists to quiet warnings. - padded_page(); - //! Not defined anywhere - exists to quiet warnings. - void operator=( const padded_page& ); - //! Must be last field. - T last; - }; - - static T& get_ref( page& p, size_t index ) { - return (&static_cast(static_cast(&p))->last)[index]; - } - - atomic head_page; - atomic head_counter; - - atomic tail_page; - atomic tail_counter; - - spin_mutex page_mutex; - - void push( const void* item, ticket k, concurrent_queue_base_v3& base ) ; - - bool pop( void* dst, ticket k, concurrent_queue_base_v3& base ) ; - - micro_queue& assign( const micro_queue& src, concurrent_queue_base_v3& base ) ; - - page* make_copy( concurrent_queue_base_v3& base, const page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) ; - - void invalidate_page_and_rethrow( ticket k ) ; -}; - -template -void micro_queue::spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const { - atomic_backoff backoff; - do { - backoff.pause(); - if( counter&1 ) { - ++rb.n_invalid_entries; - throw_exception( eid_bad_last_alloc ); - } - } while( counter!=k ) ; -} - -template -void micro_queue::push( const void* item, ticket k, concurrent_queue_base_v3& base ) { - k &= -concurrent_queue_rep_base::n_queue; - page* p = NULL; - size_t index = k/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1); - if( !index ) { - __TBB_TRY { - concurrent_queue_page_allocator& pa = base; - p = pa.allocate_page(); - } __TBB_CATCH (...) 
{ - ++base.my_rep->n_invalid_entries; - invalidate_page_and_rethrow( k ); - } - p->mask = 0; - p->next = NULL; - } - - if( tail_counter!=k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep ); - - if( p ) { - spin_mutex::scoped_lock lock( page_mutex ); - page* q = tail_page; - if( is_valid_page(q) ) - q->next = p; - else - head_page = p; - tail_page = p; - } else { - p = tail_page; - } - - __TBB_TRY { - copy_item( *p, index, item ); - // If no exception was thrown, mark item as present. - p->mask |= uintptr_t(1)<n_invalid_entries; - tail_counter += concurrent_queue_rep_base::n_queue; - __TBB_RETHROW(); - } -} - -template -bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base_v3& base ) { - k &= -concurrent_queue_rep_base::n_queue; - if( head_counter!=k ) spin_wait_until_eq( head_counter, k ); - if( tail_counter==k ) spin_wait_while_eq( tail_counter, k ); - page& p = *head_page; - __TBB_ASSERT( &p, NULL ); - size_t index = k/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1); - bool success = false; - { - micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? &p : NULL ); - if( p.mask & uintptr_t(1)<n_invalid_entries; - } - } - return success; -} - -template -micro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base_v3& base ) { - head_counter = src.head_counter; - tail_counter = src.tail_counter; - page_mutex = src.page_mutex; - - const page* srcp = src.head_page; - if( is_valid_page(srcp) ) { - ticket g_index = head_counter; - __TBB_TRY { - size_t n_items = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue; - size_t index = head_counter/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1); - size_t end_in_first_page = (index+n_itemsitems_per_page)?(index+n_items):base.my_rep->items_per_page; - - head_page = make_copy( base, srcp, index, end_in_first_page, g_index ); - page* cur_page = head_page; - - if( srcp != src.tail_page ) { - for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) { - cur_page->next = make_copy( base, srcp, 0, base.my_rep->items_per_page, g_index ); - cur_page = cur_page->next; - } - - __TBB_ASSERT( srcp==src.tail_page, NULL ); - size_t last_index = tail_counter/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1); - if( last_index==0 ) last_index = base.my_rep->items_per_page; - - cur_page->next = make_copy( base, srcp, 0, last_index, g_index ); - cur_page = cur_page->next; - } - tail_page = cur_page; - } __TBB_CATCH (...) { - invalidate_page_and_rethrow( g_index ); - } - } else { - head_page = tail_page = NULL; - } - return *this; -} - -template -void micro_queue::invalidate_page_and_rethrow( ticket k ) { - // Append an invalid page at address 1 so that no more pushes are allowed. 
- page* invalid_page = (page*)uintptr_t(1); - { - spin_mutex::scoped_lock lock( page_mutex ); - tail_counter = k+concurrent_queue_rep_base::n_queue+1; - page* q = tail_page; - if( is_valid_page(q) ) - q->next = invalid_page; - else - head_page = invalid_page; - tail_page = invalid_page; - } - __TBB_RETHROW(); -} - -template -concurrent_queue_rep_base::page* micro_queue::make_copy( concurrent_queue_base_v3& base, const concurrent_queue_rep_base::page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) { - concurrent_queue_page_allocator& pa = base; - page* new_page = pa.allocate_page(); - new_page->next = NULL; - new_page->mask = src_page->mask; - for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index ) - if( new_page->mask & uintptr_t(1)< -class micro_queue_pop_finalizer: no_copy { - typedef concurrent_queue_rep_base::page page; - ticket my_ticket; - micro_queue& my_queue; - page* my_page; - concurrent_queue_page_allocator& allocator; -public: - micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base_v3& b, ticket k, page* p ) : - my_ticket(k), my_queue(queue), my_page(p), allocator(b) - {} - ~micro_queue_pop_finalizer() ; -}; - -template -micro_queue_pop_finalizer::~micro_queue_pop_finalizer() { - page* p = my_page; - if( is_valid_page(p) ) { - spin_mutex::scoped_lock lock( my_queue.page_mutex ); - page* q = p->next; - my_queue.head_page = q; - if( !is_valid_page(q) ) { - my_queue.tail_page = NULL; - } - } - my_queue.head_counter = my_ticket; - if( is_valid_page(p) ) { - allocator.deallocate_page( p ); - } -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif // warning 4146 is back - -template class concurrent_queue_iterator_rep ; -template class concurrent_queue_iterator_base_v3; - -//! representation of concurrent_queue_base -/** - * the class inherits from concurrent_queue_rep_base and defines an array of micro_queue's - */ -template -struct concurrent_queue_rep : public concurrent_queue_rep_base { - micro_queue array[n_queue]; - - //! Map ticket to an array index - static size_t index( ticket k ) { - return k*phi%n_queue; - } - - micro_queue& choose( ticket k ) { - // The formula here approximates LRU in a cache-oblivious way. - return array[index(k)]; - } -}; - -//! base class of concurrent_queue -/** - * The class implements the interface defined by concurrent_queue_page_allocator - * and has a pointer to an instance of concurrent_queue_rep. - */ -template -class concurrent_queue_base_v3: public concurrent_queue_page_allocator { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend struct concurrent_queue_rep; - friend class micro_queue; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base_v3; - -protected: - typedef typename concurrent_queue_rep::page page; - -private: - typedef typename micro_queue::padded_page padded_page; - - /* override */ virtual page *allocate_page() { - concurrent_queue_rep& r = *my_rep; - size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); - return reinterpret_cast(allocate_block ( n )); - } - - /* override */ virtual void deallocate_page( concurrent_queue_rep_base::page *p ) { - concurrent_queue_rep& r = *my_rep; - size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); - deallocate_block( reinterpret_cast(p), n ); - } - - //! custom allocator - virtual void *allocate_block( size_t n ) = 0; - - //! 
custom de-allocator - virtual void deallocate_block( void *p, size_t n ) = 0; - -protected: - concurrent_queue_base_v3(); - - /* override */ virtual ~concurrent_queue_base_v3() { -#if __TBB_USE_ASSERT - size_t nq = my_rep->n_queue; - for( size_t i=0; iarray[i].tail_page==NULL, "pages were not freed properly" ); -#endif /* __TBB_USE_ASSERT */ - cache_aligned_allocator >().deallocate(my_rep,1); - } - - //! Enqueue item at tail of queue - void internal_push( const void* src ) { - concurrent_queue_rep& r = *my_rep; - ticket k = r.tail_counter++; - r.choose(k).push( src, k, *this ); - } - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool internal_try_pop( void* dst ) ; - - //! Get size of queue; result may be invalid if queue is modified concurrently - size_t internal_size() const ; - - //! check if the queue is empty; thread safe - bool internal_empty() const ; - - //! free any remaining pages - /* note that the name may be misleading, but it remains so due to a historical accident. */ - void internal_finish_clear() ; - - //! Obsolete - void internal_throw_exception() const { - throw_exception( eid_bad_alloc ); - } - - //! copy internal representation - void assign( const concurrent_queue_base_v3& src ) ; -}; - -template -concurrent_queue_base_v3::concurrent_queue_base_v3() { - const size_t item_size = sizeof(T); - my_rep = cache_aligned_allocator >().allocate(1); - __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); - my_rep->item_size = item_size; - my_rep->items_per_page = item_size<=8 ? 32 : - item_size<=16 ? 16 : - item_size<=32 ? 8 : - item_size<=64 ? 4 : - item_size<=128 ? 2 : - 1; -} - -template -bool concurrent_queue_base_v3::internal_try_pop( void* dst ) { - concurrent_queue_rep& r = *my_rep; - ticket k; - do { - k = r.head_counter; - for(;;) { - if( r.tail_counter<=k ) { - // Queue is empty - return false; - } - // Queue had item with ticket k when we looked. Attempt to get that item. - ticket tk=k; -#if defined(_MSC_VER) && defined(_Wp64) - #pragma warning (push) - #pragma warning (disable: 4267) -#endif - k = r.head_counter.compare_and_swap( tk+1, tk ); -#if defined(_MSC_VER) && defined(_Wp64) - #pragma warning (pop) -#endif - if( k==tk ) - break; - // Another thread snatched the item, retry. - } - } while( !r.choose( k ).pop( dst, k, *this ) ); - return true; -} - -template -size_t concurrent_queue_base_v3::internal_size() const { - concurrent_queue_rep& r = *my_rep; - __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); - ticket hc = r.head_counter; - size_t nie = r.n_invalid_entries; - ticket tc = r.tail_counter; - __TBB_ASSERT( hc!=tc || !nie, NULL ); - ptrdiff_t sz = tc-hc-nie; - return sz<0 ? 0 : size_t(sz); -} - -template -bool concurrent_queue_base_v3::internal_empty() const { - concurrent_queue_rep& r = *my_rep; - ticket tc = r.tail_counter; - ticket hc = r.head_counter; - // if tc!=r.tail_counter, the queue was not empty at some point between the two reads. 
- return tc==r.tail_counter && tc==hc+r.n_invalid_entries ; -} - -template -void concurrent_queue_base_v3::internal_finish_clear() { - concurrent_queue_rep& r = *my_rep; - size_t nq = r.n_queue; - for( size_t i=0; i -void concurrent_queue_base_v3::assign( const concurrent_queue_base_v3& src ) { - concurrent_queue_rep& r = *my_rep; - r.items_per_page = src.my_rep->items_per_page; - - // copy concurrent_queue_rep. - r.head_counter = src.my_rep->head_counter; - r.tail_counter = src.my_rep->tail_counter; - r.n_invalid_entries = src.my_rep->n_invalid_entries; - - // copy micro_queues - for( size_t i = 0; iarray[i], *this); - - __TBB_ASSERT( r.head_counter==src.my_rep->head_counter && r.tail_counter==src.my_rep->tail_counter, - "the source concurrent queue should not be concurrently modified." ); -} - -template class concurrent_queue_iterator; - -template -class concurrent_queue_iterator_rep: no_assign { - typedef typename micro_queue::padded_page padded_page; -public: - ticket head_counter; - const concurrent_queue_base_v3& my_queue; - typename concurrent_queue_base_v3::page* array[concurrent_queue_rep::n_queue]; - concurrent_queue_iterator_rep( const concurrent_queue_base_v3& queue ) : - head_counter(queue.my_rep->head_counter), - my_queue(queue) - { - for( size_t k=0; k::n_queue; ++k ) - array[k] = queue.my_rep->array[k].head_page; - } - - //! Set item to point to kth element. Return true if at end of queue or item is marked valid; false otherwise. - bool get_item( T*& item, size_t k ) ; -}; - -template -bool concurrent_queue_iterator_rep::get_item( T*& item, size_t k ) { - if( k==my_queue.my_rep->tail_counter ) { - item = NULL; - return true; - } else { - typename concurrent_queue_base_v3::page* p = array[concurrent_queue_rep::index(k)]; - __TBB_ASSERT(p,NULL); - size_t i = k/concurrent_queue_rep::n_queue & (my_queue.my_rep->items_per_page-1); - item = µ_queue::get_ref(*p,i); - return (p->mask & uintptr_t(1)< -class concurrent_queue_iterator_base_v3 : no_assign { - //! Represents concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); -protected: - //! Pointer to current item - Value* my_item; - - //! Default constructor - concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) { -#if __GNUC__==4&&__GNUC_MINOR__==3 - // to get around a possible gcc 4.3 bug - __asm__ __volatile__("": : :"memory"); -#endif - } - - //! Copy constructor - concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) - : no_assign(), my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) ; - - //! Assignment - void assign( const concurrent_queue_iterator_base_v3& other ) ; - - //! Advance iterator one step towards tail of queue. - void advance() ; - - //! 
Destructor - ~concurrent_queue_iterator_base_v3() { - cache_aligned_allocator >().deallocate(my_rep, 1); - my_rep = NULL; - } -}; - -template -concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) { - my_rep = cache_aligned_allocator >().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep(queue); - size_t k = my_rep->head_counter; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -template -void concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base_v3& other ) { - if( my_rep!=other.my_rep ) { - if( my_rep ) { - cache_aligned_allocator >().deallocate(my_rep, 1); - my_rep = NULL; - } - if( other.my_rep ) { - my_rep = cache_aligned_allocator >().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep( *other.my_rep ); - } - } - my_item = other.my_item; -} - -template -void concurrent_queue_iterator_base_v3::advance() { - __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); - size_t k = my_rep->head_counter; - const concurrent_queue_base_v3& queue = my_rep->my_queue; -#if TBB_USE_ASSERT - Value* tmp; - my_rep->get_item(tmp,k); - __TBB_ASSERT( my_item==tmp, NULL ); -#endif /* TBB_USE_ASSERT */ - size_t i = k/concurrent_queue_rep::n_queue & (queue.my_rep->items_per_page-1); - if( i==queue.my_rep->items_per_page-1 ) { - typename concurrent_queue_base_v3::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; - root = root->next; - } - // advance k - my_rep->head_counter = ++k; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -//! Similar to C++0x std::remove_cv -/** "tbb_" prefix added to avoid overload confusion with C++0x implementations. */ -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. - @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base_v3::type>, - public std::iterator { -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - template - friend class ::tbb::strict_ppl::concurrent_queue; -#else -public: // workaround for MSVC -#endif - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : - concurrent_queue_iterator_base_v3::type>(queue) - { - } - -public: - concurrent_queue_iterator() {} - - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base_v3::type>(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(this->my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - this->advance(); - return *this; - } - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal - -//! 
@endcond - -} // namespace strict_ppl - -//! @cond INTERNAL -namespace internal { - -class concurrent_queue_rep; -class concurrent_queue_iterator_rep; -class concurrent_queue_iterator_base_v3; -template class concurrent_queue_iterator; - -//! For internal use only. -/** Type-independent portion of concurrent_queue. - @ingroup containers */ -class concurrent_queue_base_v3: no_copy { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend class concurrent_queue_rep; - friend struct micro_queue; - friend class micro_queue_pop_finalizer; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base_v3; -protected: - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - //! Capacity of the queue - ptrdiff_t my_capacity; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; - -#if __TBB_GCC_3_3_PROTECTED_BROKEN -public: -#endif - template - struct padded_page: page { - //! Not defined anywhere - exists to quiet warnings. - padded_page(); - //! Not defined anywhere - exists to quiet warnings. - void operator=( const padded_page& ); - //! Must be last field. - T last; - }; - -private: - virtual void copy_item( page& dst, size_t index, const void* src ) = 0; - virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0; -protected: - __TBB_EXPORTED_METHOD concurrent_queue_base_v3( size_t item_size ); - virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3(); - - //! Enqueue item at tail of queue - void __TBB_EXPORTED_METHOD internal_push( const void* src ); - - //! Dequeue item from head of queue - void __TBB_EXPORTED_METHOD internal_pop( void* dst ); - - //! Attempt to enqueue item onto queue. - bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src ); - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst ); - - //! Get size of queue - ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; - - //! Check if the queue is emtpy - bool __TBB_EXPORTED_METHOD internal_empty() const; - - //! Set the queue capacity - void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size ); - - //! custom allocator - virtual page *allocate_page() = 0; - - //! custom de-allocator - virtual void deallocate_page( page *p ) = 0; - - //! free any remaining pages - /* note that the name may be misleading, but it remains so due to a historical accident. */ - void __TBB_EXPORTED_METHOD internal_finish_clear() ; - - //! throw an exception - void __TBB_EXPORTED_METHOD internal_throw_exception() const; - - //! copy internal representation - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_base_v3& src ) ; - -private: - virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0; -}; - -//! Type-independent portion of concurrent_queue_iterator. -/** @ingroup containers */ -class concurrent_queue_iterator_base_v3 { - //! concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - void initialize( const concurrent_queue_base_v3& queue, size_t offset_of_data ); -protected: - //! 
Pointer to current item - void* my_item; - - //! Default constructor - concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {} - - //! Copy constructor - concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) : my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Obsolete entry point for constructing iterator pointing to head of queue. - /** Does not work correctly for SSE types. */ - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ); - - //! Construct iterator pointing to head of queue. - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue, size_t offset_of_data ); - - //! Assignment - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base_v3& i ); - - //! Advance iterator one step towards tail of queue. - void __TBB_EXPORTED_METHOD advance(); - - //! Destructor - __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base_v3(); -}; - -typedef concurrent_queue_iterator_base_v3 concurrent_queue_iterator_base; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. - @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base, - public std::iterator { - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class ::tbb::concurrent_bounded_queue; - - template - friend class ::tbb::deprecated::concurrent_queue; -#else -public: // workaround for MSVC -#endif - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : - concurrent_queue_iterator_base_v3(queue,__TBB_offsetof(concurrent_queue_base_v3::padded_page,last)) - { - } - -public: - concurrent_queue_iterator() {} - - /** If Value==Container::value_type, then this routine is the copy constructor. - If Value==const Container::value_type, then this routine is a conversion constructor. */ - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base_v3(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - advance(); - return *this; - } - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal; - -//! @endcond - -} // namespace tbb - -#endif /* __TBB_concurrent_queue_internal_H */ diff --git a/tbb30_20100406oss/include/tbb/_concurrent_unordered_internal.h b/tbb30_20100406oss/include/tbb/_concurrent_unordered_internal.h deleted file mode 100644 index 66b0b97cd..000000000 --- a/tbb30_20100406oss/include/tbb/_concurrent_unordered_internal.h +++ /dev/null @@ -1,1440 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. */ - -#ifndef __TBB_concurrent_unordered_internal_H -#define __TBB_concurrent_unordered_internal_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // Need std::pair -#include -#include // For tbb_hasher -#include // Need std::memset - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "tbb_machine.h" -#include "tbb_exception.h" -#include "tbb_allocator.h" - -namespace tbb { -namespace interface5 { -//! 
@cond INTERNAL -namespace internal { - -template -class split_ordered_list; -template -class concurrent_unordered_base; - -// Forward list iterators (without skipping dummy elements) -template -class flist_iterator : public std::iterator -{ - template - friend class split_ordered_list; - template - friend class concurrent_unordered_base; - template - friend class flist_iterator; - - typedef typename Solist::nodeptr_t nodeptr_t; -public: - typedef typename Solist::value_type value_type; - typedef typename Solist::difference_type difference_type; - typedef typename Solist::pointer pointer; - typedef typename Solist::reference reference; - - flist_iterator() : my_node_ptr(0) {} - flist_iterator( const flist_iterator &other ) - : my_node_ptr(other.my_node_ptr) {} - - reference operator*() const { return my_node_ptr->my_element; } - pointer operator->() const { return &**this; } - - flist_iterator& operator++() { - my_node_ptr = my_node_ptr->my_next; - return *this; - } - - flist_iterator operator++(int) { - flist_iterator tmp = *this; - ++*this; - return tmp; - } - -protected: - flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {} - nodeptr_t get_node_ptr() const { return my_node_ptr; } - - nodeptr_t my_node_ptr; - - template - friend bool operator==( const flist_iterator &i, const flist_iterator &j ); - template - friend bool operator!=( const flist_iterator& i, const flist_iterator& j ); -}; - -template -bool operator==( const flist_iterator &i, const flist_iterator &j ) { - return i.my_node_ptr == j.my_node_ptr; -} -template -bool operator!=( const flist_iterator& i, const flist_iterator& j ) { - return i.my_node_ptr != j.my_node_ptr; -} - -// Split-order list iterators, needed to skip dummy elements -template -class solist_iterator : public flist_iterator -{ - typedef flist_iterator base_type; - typedef typename Solist::nodeptr_t nodeptr_t; - using base_type::get_node_ptr; - template - friend class split_ordered_list; - template - friend class solist_iterator; - template - friend bool operator==( const solist_iterator &i, const solist_iterator &j ); - template - friend bool operator!=( const solist_iterator& i, const solist_iterator& j ); - - const Solist *my_list_ptr; - solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {} - -public: - typedef typename Solist::value_type value_type; - typedef typename Solist::difference_type difference_type; - typedef typename Solist::pointer pointer; - typedef typename Solist::reference reference; - - solist_iterator() {} - solist_iterator(const solist_iterator &other ) - : base_type(other), my_list_ptr(other.my_list_ptr) {} - - reference operator*() const { - return this->base_type::operator*(); - } - - pointer operator->() const { - return (&**this); - } - - solist_iterator& operator++() { - do ++(*(base_type *)this); - while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); - - return (*this); - } - - solist_iterator operator++(int) { - solist_iterator tmp = *this; - do ++*this; - while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); - - return (tmp); - } -}; - -template -bool operator==( const solist_iterator &i, const solist_iterator &j ) { - return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr; -} -template -bool operator!=( const solist_iterator& i, const solist_iterator& j ) { - return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr; -} - -// Forward type and class definitions -typedef size_t sokey_t; - -// Forward list in which elements are sorted in a 
split-order -template -class split_ordered_list -{ -public: - typedef split_ordered_list self_type; - typedef typename Allocator::template rebind::other allocator_type; - struct node; - typedef node *nodeptr_t; - - typedef typename allocator_type::size_type size_type; - typedef typename allocator_type::difference_type difference_type; - typedef typename allocator_type::pointer pointer; - typedef typename allocator_type::const_pointer const_pointer; - typedef typename allocator_type::reference reference; - typedef typename allocator_type::const_reference const_reference; - typedef typename allocator_type::value_type value_type; - - typedef solist_iterator const_iterator; - typedef solist_iterator iterator; - typedef flist_iterator raw_const_iterator; - typedef flist_iterator raw_iterator; - - // Node that holds the element in a split-ordered list - struct node : tbb::internal::no_assign - { - // Initialize the node with the given order key - void init(sokey_t order_key) { - my_order_key = order_key; - my_next = NULL; - } - - // Return the order key (needed for hashing) - sokey_t get_order_key() const { // TODO: remove - return my_order_key; - } - - // Inserts the new element in the list in an atomic fashion - nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node) - { - // Try to change the next pointer on the current element to a new element, only if it still points to the cached next - nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node); - - if (exchange_node == current_node) // TODO: why this branch? - { - // Operation succeeded, return the new node - return new_node; - } - else - { - // Operation failed, return the "interfering" node - return exchange_node; - } - } - - // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets - // in the hash table to quickly index into the right subsection of the split-ordered list. - bool is_dummy() const { - return (my_order_key & 0x1) == 0; - } - - - nodeptr_t my_next; // Next element in the list - value_type my_element; // Element storage - sokey_t my_order_key; // Order key for this element - }; - - // Allocate a new node with the given order key and value - nodeptr_t create_node(sokey_t order_key, const T &value) { - nodeptr_t pnode = my_node_allocator.allocate(1); - - __TBB_TRY { - new(static_cast(&pnode->my_element)) T(value); - pnode->init(order_key); - } __TBB_CATCH(...) { - my_node_allocator.deallocate(pnode, 1); - __TBB_RETHROW(); - } - - return (pnode); - } - - // Allocate a new node with the given order key; used to allocate dummy nodes - nodeptr_t create_node(sokey_t order_key) { - nodeptr_t pnode = my_node_allocator.allocate(1); - - __TBB_TRY { - new(static_cast(&pnode->my_element)) T(); - pnode->init(order_key); - } __TBB_CATCH(...) { - my_node_allocator.deallocate(pnode, 1); - __TBB_RETHROW(); - } - - return (pnode); - } - - split_ordered_list(allocator_type a = allocator_type()) - : my_node_allocator(a), my_element_count(0) - { - // Immediately allocate a dummy node with order key of 0. This node - // will always be the head of the list. 
- my_head = create_node(0); - } - - ~split_ordered_list() - { - // Clear the list - clear(); - - // Remove the head element which is not cleared by clear() - nodeptr_t pnode = my_head; - my_head = NULL; - - __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node"); - - destroy_node(pnode); - } - - // Common forward list functions - - allocator_type get_allocator() const { - return (my_node_allocator); - } - - void clear() { - nodeptr_t pnext; - nodeptr_t pnode = my_head; - - __TBB_ASSERT(my_head != NULL, "Invalid head list node"); - pnext = pnode->my_next; - pnode->my_next = NULL; - pnode = pnext; - - while (pnode != NULL) - { - pnext = pnode->my_next; - destroy_node(pnode); - pnode = pnext; - } - - my_element_count = 0; - } - - // Returns a first non-dummy element in the SOL - iterator begin() { - return first_real_iterator(raw_begin()); - } - - // Returns a first non-dummy element in the SOL - const_iterator begin() const { - return first_real_iterator(raw_begin()); - } - - iterator end() { - return (iterator(0, this)); - } - - const_iterator end() const { - return (const_iterator(0, this)); - } - - const_iterator cbegin() const { - return (((const self_type *)this)->begin()); - } - - const_iterator cend() const { - return (((const self_type *)this)->end()); - } - - // Checks if the number of elements (non-dummy) is 0 - bool empty() const { - return (my_element_count == 0); - } - - // Returns the number of non-dummy elements in the list - size_type size() const { - return my_element_count; - } - - // Returns the maximum size of the list, determined by the allocator - size_type max_size() const { - return my_node_allocator.max_size(); - } - - // Swaps 'this' list with the passed in one - void swap(self_type& other) - { - if (this == &other) - { - // Nothing to do - return; - } - - std::swap(my_element_count, other.my_element_count); - std::swap(my_head, other.my_head); - } - - // Split-order list functions - - // Returns a first element in the SOL, which is always a dummy - raw_iterator raw_begin() { - return raw_iterator(my_head); - } - - // Returns a first element in the SOL, which is always a dummy - raw_const_iterator raw_begin() const { - return raw_const_iterator(my_head); - } - - raw_iterator raw_end() { - return raw_iterator(0); - } - - raw_const_iterator raw_end() const { - return raw_const_iterator(0); - } - - static sokey_t get_order_key(const raw_const_iterator& it) { - return it.get_node_ptr()->get_order_key(); - } - - static sokey_t get_safe_order_key(const raw_const_iterator& it) { - if( !it.get_node_ptr() ) return sokey_t(~0U); - return it.get_node_ptr()->get_order_key(); - } - - // Returns a public iterator version of the internal iterator. Public iterator must not - // be a dummy private iterator. - iterator get_iterator(raw_iterator it) { - __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); - return iterator(it.get_node_ptr(), this); - } - - // Returns a public iterator version of the internal iterator. Public iterator must not - // be a dummy private iterator. 
- const_iterator get_iterator(raw_const_iterator it) const { - __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); - return const_iterator(it.get_node_ptr(), this); - } - - // Returns a non-const version of the raw_iterator - raw_iterator get_iterator(raw_const_iterator it) { - return raw_iterator(it.get_node_ptr()); - } - - // Returns a non-const version of the iterator - static iterator get_iterator(const_iterator it) { - return iterator(it.my_node_ptr, it.my_list_ptr); - } - - // Returns a public iterator version of a first non-dummy internal iterator at or after - // the passed in internal iterator. - iterator first_real_iterator(raw_iterator it) - { - // Skip all dummy, internal only iterators - while (it != raw_end() && it.get_node_ptr()->is_dummy()) - ++it; - - return iterator(it.get_node_ptr(), this); - } - - // Returns a public iterator version of a first non-dummy internal iterator at or after - // the passed in internal iterator. - const_iterator first_real_iterator(raw_const_iterator it) const - { - // Skip all dummy, internal only iterators - while (it != raw_end() && it.get_node_ptr()->is_dummy()) - ++it; - - return const_iterator(it.get_node_ptr(), this); - } - - // Erase an element using the allocator - void destroy_node(nodeptr_t pnode) { - my_node_allocator.destroy(pnode); - my_node_allocator.deallocate(pnode, 1); - } - - // Try to insert a new element in the list. If insert fails, return the node that - // was inserted instead. - nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) { - new_node->my_next = current_node; - return previous->atomic_set_next(new_node, current_node); - } - - // Insert a new element between passed in iterators - std::pair try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count) - { - nodeptr_t pnode = create_node(order_key, value); - nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr()); - - if (inserted_node == pnode) - { - // If the insert succeeded, check that the order is correct and increment the element count - check_range(); - *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1)); - return std::pair(iterator(pnode, this), true); - } - else - { - // If the insert failed (element already there), then delete the new one - destroy_node(pnode); - return std::pair(end(), false); - } - } - - // Insert a new dummy element, starting search at a parent dummy element - raw_iterator insert_dummy(raw_iterator it, sokey_t order_key) - { - raw_iterator last = raw_end(); - raw_iterator where = it; - - __TBB_ASSERT(where != last, "Invalid head node"); - - ++where; - - // Create a dummy element up front, even though it may be discarded (due to concurrent insertion) - nodeptr_t dummy_node = create_node(order_key); - - for (;;) - { - __TBB_ASSERT(it != last, "Invalid head list node"); - - // If the head iterator is at the end of the list, or past the point where this dummy - // node needs to be inserted, then try to insert it. 
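
// try_insert() above is the lock-free "link or learn" step: point the new
// node at the expected successor, CAS the predecessor's next field, and on
// failure return whichever node won the race so the caller can re-search.
// A portable C++11 sketch of the same step, with std::atomic standing in for
// __TBB_CompareAndSwapW (LNode and try_link are illustrative names):

    #include <atomic>

    struct LNode { std::atomic<LNode*> next; unsigned long key; };

    // Returns true if new_node was linked between prev and expected.
    bool try_link(LNode* prev, LNode* new_node, LNode* expected) {
        new_node->next.store(expected, std::memory_order_relaxed);
        return prev->next.compare_exchange_strong(expected, new_node);
    }

// On failure the caller resumes scanning from the predecessor, exactly like
// the insert_dummy() loop below -- which is legal only because nodes are
// never removed concurrently.
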
- if (where == last || get_order_key(where) > order_key) - { - __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list"); - - // Try to insert it in the right place - nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr()); - - if (inserted_node == dummy_node) - { - // Insertion succeeded, check the list for order violations - check_range(); - return raw_iterator(dummy_node); - } - else - { - // Insertion failed: either dummy node was inserted by another thread, or - // a real element was inserted at exactly the same place as dummy node. - // Proceed with the search from the previous location where order key was - // known to be larger (note: this is legal only because there is no safe - // concurrent erase operation supported). - where = it; - ++where; - continue; - } - } - else if (get_order_key(where) == order_key) - { - // Another dummy node with the same value found, discard the new one. - destroy_node(dummy_node); - return where; - } - - // Move the iterator forward - it = where; - ++where; - } - - } - - // This erase function can handle both real and dummy nodes - void erase_node(raw_iterator previous, raw_const_iterator& where) - { - nodeptr_t pnode = (where++).get_node_ptr(); - nodeptr_t prevnode = previous.get_node_ptr(); - __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators"); - prevnode->my_next = pnode->my_next; - - destroy_node(pnode); - } - - // Erase the element (previous node needs to be passed because this is a forward only list) - iterator erase_node(raw_iterator previous, const_iterator where) - { - raw_const_iterator it = where; - erase_node(previous, it); - my_element_count--; - - return get_iterator(first_real_iterator(it)); - } - - // Move all elements from the passed in split-ordered list to this one - void move_all(self_type& source) - { - raw_const_iterator first = source.raw_begin(); - raw_const_iterator last = source.raw_end(); - - if (first == last) - return; - - nodeptr_t previous_node = my_head; - raw_const_iterator begin_iterator = first++; - - // Move all elements one by one, including dummy ones - for (raw_const_iterator it = first; it != last;) - { - nodeptr_t pnode = it.get_node_ptr(); - - nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element); - previous_node = try_insert(previous_node, dummy_node, NULL); - __TBB_ASSERT(previous_node != NULL, "Insertion must succeed"); - raw_const_iterator where = it++; - source.erase_node(get_iterator(begin_iterator), where); - } - check_range(); - } - - -private: - - // Check the list for order violations - void check_range() - { -#if TBB_USE_ASSERT - for (raw_iterator it = raw_begin(); it != raw_end(); ++it) - { - raw_iterator next_iterator = it; - ++next_iterator; - - __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! 
List order inconsistency !!!");
-        }
-#endif
-    }
-
-    typename allocator_type::template rebind<node>::other my_node_allocator; // allocator object for nodes
-    size_type my_element_count; // Total item count, not counting dummy nodes
-    nodeptr_t my_head; // pointer to head node
-};
-
-// Template class for hash compare
-template <typename Key, typename Hasher, typename Key_equality>
-class hash_compare
-{
-public:
-    hash_compare() {}
-
-    hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
-
-    hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
-
-    size_t operator()(const Key& key) const {
-        return ((size_t)my_hash_object(key));
-    }
-
-    bool operator()(const Key& key1, const Key& key2) const {
-        return (!my_key_compare_object(key1, key2));
-    }
-
-    Hasher my_hash_object; // The hash object
-    Key_equality my_key_compare_object; // The equality comparator object
-};
-
-#if _MSC_VER
-#pragma warning(push)
-#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
-#endif
-
-template <typename Traits>
-class concurrent_unordered_base : public Traits
-{
-protected:
-    // Type definitions
-    typedef concurrent_unordered_base<Traits> self_type;
-    typedef typename Traits::value_type value_type;
-    typedef typename Traits::key_type key_type;
-    typedef typename Traits::hash_compare hash_compare;
-    typedef typename Traits::value_compare value_compare;
-    typedef typename Traits::allocator_type allocator_type;
-    typedef typename allocator_type::pointer pointer;
-    typedef typename allocator_type::const_pointer const_pointer;
-    typedef typename allocator_type::reference reference;
-    typedef typename allocator_type::const_reference const_reference;
-    typedef typename allocator_type::size_type size_type;
-    typedef typename allocator_type::difference_type difference_type;
-    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
-    typedef typename solist_t::nodeptr_t nodeptr_t;
-    // Iterators that walk the entire split-order list, including dummy nodes
-    typedef typename solist_t::raw_iterator raw_iterator;
-    typedef typename solist_t::raw_const_iterator raw_const_iterator;
-    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
-    typedef typename solist_t::const_iterator const_iterator;
-    typedef iterator local_iterator;
-    typedef const_iterator const_local_iterator;
-    using Traits::my_hash_compare;
-    using Traits::get_key;
-    using Traits::allow_multimapping;
-
-private:
-    typedef std::pair<iterator, bool> pairii_t;
-    typedef std::pair<const_iterator, const_iterator> paircc_t;
-
-    static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
-    static const size_type initial_bucket_number = 8; // Initial number of buckets
-    static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
-
-protected:
-    // Constructors/Destructors
-    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
-        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
-        : Traits(hc), my_number_of_buckets(n_of_buckets), my_solist(a),
-          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
-    {
-        internal_init();
-    }
-
-    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
-        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
-    {
-        internal_copy(right);
-    }
-
-    concurrent_unordered_base(const concurrent_unordered_base& right)
-        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
-    {
-
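
// hash_compare above bundles the two user-supplied functors into one object:
// operator()(key) forwards to the hasher, while the two-argument overload
// returns *not equal* (note the negation), which is what the lookup loops in
// this class test against. A minimal instantiation sketch, with illustrative
// functors rather than anything defined in this file:

    #include <cstring>

    struct CStringHash {
        size_t operator()(const char* s) const {
            size_t h = 0;
            for (; *s; ++s) h = h * 31u + (unsigned char)*s;
            return h;
        }
    };
    struct CStringEqual {
        bool operator()(const char* a, const char* b) const {
            return std::strcmp(a, b) == 0;
        }
    };

    // hash_compare<const char*, CStringHash, CStringEqual> hc;
    // hc("abc")        -> size_t hash value
    // hc("abc", "abd") -> true, i.e. the keys are NOT equal
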
internal_init(); - internal_copy(right); - } - - concurrent_unordered_base& operator=(const concurrent_unordered_base& right) { - if (this != &right) - internal_copy(right); - return (*this); - } - - ~concurrent_unordered_base() { - // Delete all node segments - internal_clear(); - } - -public: - allocator_type get_allocator() const { - return my_solist.get_allocator(); - } - - // Size and capacity function - bool empty() const { - return my_solist.empty(); - } - - size_type size() const { - return my_solist.size(); - } - - size_type max_size() const { - return my_solist.max_size(); - } - - // Iterators - iterator begin() { - return my_solist.begin(); - } - - const_iterator begin() const { - return my_solist.begin(); - } - - iterator end() { - return my_solist.end(); - } - - const_iterator end() const { - return my_solist.end(); - } - - const_iterator cbegin() const { - return my_solist.cbegin(); - } - - const_iterator cend() const { - return my_solist.cend(); - } - - // Parallel traversal support - class const_range_type : tbb::internal::no_assign { - const concurrent_unordered_base &my_table; - raw_const_iterator my_begin_node; - raw_const_iterator my_end_node; - mutable raw_const_iterator my_midpoint_node; - public: - //! Type for size of a range - typedef typename concurrent_unordered_base::size_type size_type; - typedef typename concurrent_unordered_base::value_type value_type; - typedef typename concurrent_unordered_base::reference reference; - typedef typename concurrent_unordered_base::difference_type difference_type; - typedef typename concurrent_unordered_base::const_iterator iterator; - - //! True if range is empty. - bool empty() const {return my_begin_node == my_end_node;} - - //! True if range can be partitioned into two subranges. - bool is_divisible() const { - return my_midpoint_node != my_end_node; - } - //! Split range. - const_range_type( const_range_type &r, split ) : - my_table(r.my_table), my_end_node(r.my_end_node) - { - r.my_end_node = my_begin_node = r.my_midpoint_node; - __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); - __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); - set_midpoint(); - r.set_midpoint(); - } - //! Init range with container and grainsize specified - const_range_type( const concurrent_unordered_base &a_table ) : - my_table(a_table), my_begin_node(a_table.my_solist.begin()), - my_end_node(a_table.my_solist.end()) - { - set_midpoint(); - } - iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); } - iterator end() const { return my_table.my_solist.get_iterator(my_end_node); } - //! The grain size for this range. - size_type grainsize() const { return 1; } - - //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node. 
- void set_midpoint() const { - if( my_begin_node == my_end_node ) // not divisible - my_midpoint_node = my_end_node; - else { - sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node); - sokey_t end_key = solist_t::get_safe_order_key(my_end_node); - size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets; - while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket); - my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket )); - if( my_midpoint_node == my_begin_node ) - my_midpoint_node = my_end_node; -#if TBB_USE_ASSERT - else { - sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node); - __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" ); - __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" ); - } -#endif // TBB_USE_ASSERT - } - } - }; - - class range_type : public const_range_type { - public: - typedef typename concurrent_unordered_base::iterator iterator; - //! Split range. - range_type( range_type &r, split ) : const_range_type( r, split() ) {} - //! Init range with container and grainsize specified - range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {} - - iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); } - iterator end() const { return solist_t::get_iterator( const_range_type::end() ); } - }; - - range_type range() { - return range_type( *this ); - } - - const_range_type range() const { - return const_range_type( *this ); - } - - // Modifiers - std::pair insert(const value_type& value) { - return internal_insert(value); - } - - iterator insert(const_iterator, const value_type& value) { - // Ignore hint - return insert(value).first; - } - - template - void insert(Iterator first, Iterator last) { - for (Iterator it = first; it != last; ++it) - insert(*it); - } - - iterator unsafe_erase(const_iterator where) { - return internal_erase(where); - } - - iterator unsafe_erase(const_iterator first, const_iterator last) { - while (first != last) - unsafe_erase(first++); - return my_solist.get_iterator(first); - } - - size_type unsafe_erase(const key_type& key) { - pairii_t where = equal_range(key); - size_type item_count = internal_distance(where.first, where.second); - unsafe_erase(where.first, where.second); - return item_count; - } - - void swap(concurrent_unordered_base& right) { - if (this != &right) { - std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here - my_solist.swap(right.my_solist); - internal_swap_buckets(right); - std::swap(my_number_of_buckets, right.my_number_of_buckets); - std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size); - } - } - - // Observers - void clear() { - // Clear list - my_solist.clear(); - - // Clear buckets - internal_clear(); - } - - // Lookup - iterator find(const key_type& key) { - return internal_find(key); - } - - const_iterator find(const key_type& key) const { - return const_cast(this)->internal_find(key); - } - - size_type count(const key_type& key) const { - paircc_t answer = equal_range(key); - size_type item_count = internal_distance(answer.first, answer.second); - return item_count; - } - - std::pair equal_range(const key_type& key) { - return internal_equal_range(key); - } - - std::pair equal_range(const key_type& key) const { - return internal_equal_range(key); - } - - // Bucket interface - for debugging - size_type unsafe_bucket_count() const { - 
return my_number_of_buckets; - } - - size_type unsafe_max_bucket_count() const { - return segment_size(pointers_per_table-1); - } - - size_type unsafe_bucket_size(size_type bucket) { - size_type item_count = 0; - if (is_initialized(bucket)) { - raw_iterator it = get_bucket(bucket); - ++it; - for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it) - ++item_count; - } - return item_count; - } - - size_type unsafe_bucket(const key_type& key) const { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - return bucket; - } - - // If the bucket is initialized, return a first non-dummy element in it - local_iterator unsafe_begin(size_type bucket) { - if (!is_initialized(bucket)) - return end(); - - raw_iterator it = get_bucket(bucket); - return my_solist.first_real_iterator(it); - } - - // If the bucket is initialized, return a first non-dummy element in it - const_local_iterator unsafe_begin(size_type bucket) const - { - if (!is_initialized(bucket)) - return end(); - - raw_const_iterator it = get_bucket(bucket); - return my_solist.first_real_iterator(it); - } - - // @REVIEW: Takes O(n) - // Returns the iterator after the last non-dummy element in the bucket - local_iterator unsafe_end(size_type bucket) - { - if (!is_initialized(bucket)) - return end(); - - raw_iterator it = get_bucket(bucket); - - // Find the end of the bucket, denoted by the dummy element - do ++it; - while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); - - // Return the first real element past the end of the bucket - return my_solist.first_real_iterator(it); - } - - // @REVIEW: Takes O(n) - // Returns the iterator after the last non-dummy element in the bucket - const_local_iterator unsafe_end(size_type bucket) const - { - if (!is_initialized(bucket)) - return end(); - - raw_const_iterator it = get_bucket(bucket); - - // Find the end of the bucket, denoted by the dummy element - do ++it; - while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); - - // Return the first real element past the end of the bucket - return my_solist.first_real_iterator(it); - } - - const_local_iterator unsafe_cbegin(size_type bucket) const { - return ((const self_type *) this)->begin(); - } - - const_local_iterator unsafe_cend(size_type bucket) const { - return ((const self_type *) this)->end(); - } - - // Hash policy - float load_factor() const { - return (float) size() / (float) unsafe_bucket_count(); - } - - float max_load_factor() const { - return my_maximum_bucket_size; - } - - void max_load_factor(float newmax) { - if (newmax != newmax || newmax < 0) - tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor); - my_maximum_bucket_size = newmax; - } - - // This function is a noop, because the underlying split-ordered list - // is already sorted, so an increase in the bucket number will be - // reflected next time this bucket is touched. 
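
// Growth policy in numbers: with initial_bucket_number = 8 and
// max_load_factor() = 4.0 (see above; note the newmax != newmax guard, which
// is a NaN self-test), the table doubles once a successful insert pushes
// size()/bucket count past 4 -- see adjust_table_size() further down. And
// rehash() below only accepts powers of two, because get_parent() maps a
// bucket to its parent by stripping one bit. A quick arithmetic sketch:

    #include <cstdio>

    int main() {
        unsigned buckets = 8;
        float max_load = 4.0f;
        for (unsigned n = 1; n <= 100; ++n) {      // pretend n items were inserted
            if ((float)n / (float)buckets > max_load)
                buckets *= 2;                      // 8 -> 16 at n=33, 16 -> 32 at n=65
        }
        std::printf("%u\n", buckets);              // prints 32
        bool power_of_two = (buckets & (buckets - 1)) == 0;  // rehash()'s validity test
        return power_of_two ? 0 : 1;
    }
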
- void rehash(size_type buckets) { - size_type current_buckets = my_number_of_buckets; - - if (current_buckets > buckets) - return; - else if ( (buckets & (buckets-1)) != 0 ) - tbb::internal::throw_exception(tbb::internal::eid_invalid_buckets_number); - my_number_of_buckets = buckets; - } - -private: - - // Initialize the hash and keep the first bucket open - void internal_init() { - // Allocate an array of segment pointers - memset(my_buckets, 0, pointers_per_table * sizeof(void *)); - - // Insert the first element in the split-ordered list - raw_iterator dummy_node = my_solist.raw_begin(); - set_bucket(0, dummy_node); - } - - void internal_clear() { - for (size_type index = 0; index < pointers_per_table; ++index) { - if (my_buckets[index] != NULL) { - size_type sz = segment_size(index); - for (size_type index2 = 0; index2 < sz; ++index2) - my_allocator.destroy(&my_buckets[index][index2]); - my_allocator.deallocate(my_buckets[index], sz); - my_buckets[index] = 0; - } - } - } - - void internal_copy(const self_type& right) { - clear(); - - my_maximum_bucket_size = right.my_maximum_bucket_size; - my_number_of_buckets = right.my_number_of_buckets; - - __TBB_TRY { - insert(right.begin(), right.end()); - my_hash_compare = right.my_hash_compare; - } __TBB_CATCH(...) { - my_solist.clear(); - __TBB_RETHROW(); - } - } - - void internal_swap_buckets(concurrent_unordered_base& right) - { - // Swap all node segments - for (size_type index = 0; index < pointers_per_table; ++index) - { - raw_iterator * iterator_pointer = my_buckets[index]; - my_buckets[index] = right.my_buckets[index]; - right.my_buckets[index] = iterator_pointer; - } - } - - // Hash APIs - size_type internal_distance(const_iterator first, const_iterator last) const - { - size_type num = 0; - - for (const_iterator it = first; it != last; ++it) - ++num; - - return num; - } - - // Insert an element in the hash given its value - std::pair internal_insert(const value_type& value) - { - sokey_t order_key = (sokey_t) my_hash_compare(get_key(value)); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - size_type new_count; - order_key = split_order_key_regular(order_key); - raw_iterator it = get_bucket(bucket); - raw_iterator last = my_solist.raw_end(); - raw_iterator where = it; - - __TBB_ASSERT(where != last, "Invalid head node"); - - // First node is a dummy node - ++where; - - for (;;) - { - if (where == last || solist_t::get_order_key(where) > order_key) - { - // Try to insert it in the right place - std::pair result = my_solist.try_insert(it, where, value, order_key, &new_count); - - if (result.second) - { - // Insertion succeeded, adjust the table size, if needed - adjust_table_size(new_count, my_number_of_buckets); - return result; - } - else - { - // Insertion failed: either the same node was inserted by another thread, or - // another element was inserted at exactly the same place as this node. - // Proceed with the search from the previous location where order key was - // known to be larger (note: this is legal only because there is no safe - // concurrent erase operation supported). 
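
// internal_insert(), in progress below, is what ultimately backs the
// thread-safe insert() of the public containers built on this base class.
// A usage sketch, assuming TBB 3.0's concurrent_unordered_map wrapper and
// parallel_for are available; the Inserter functor is illustrative:

    #include "tbb/concurrent_unordered_map.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include <utility>

    typedef tbb::concurrent_unordered_map<int, int> Map;

    struct Inserter {
        Map* m;
        explicit Inserter(Map* map_) : m(map_) {}
        void operator()(const tbb::blocked_range<int>& r) const {
            for (int i = r.begin(); i != r.end(); ++i)
                m->insert(std::make_pair(i, i * i));   // concurrency-safe
        }
    };

    int main() {
        Map m;
        tbb::parallel_for(tbb::blocked_range<int>(0, 1000), Inserter(&m));
        // unsafe_erase(), by contrast, must only be called while no other
        // thread touches the container -- hence its name.
        return m.size() == 1000 ? 0 : 1;
    }
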
- where = it; - ++where; - continue; - } - } - else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0) - { - // Element already in the list, return it - return std::pair(my_solist.get_iterator(where), false); - } - - // Move the iterator forward - it = where; - ++where; - } - } - - // Find the element in the split-ordered list - iterator internal_find(const key_type& key) - { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - raw_iterator last = my_solist.raw_end(); - - for (raw_iterator it = get_bucket(bucket); it != last; ++it) - { - if (solist_t::get_order_key(it) > order_key) - { - // If the order key is smaller than the current order key, the element - // is not in the hash. - return end(); - } - else if (solist_t::get_order_key(it) == order_key) - { - // The fact that order keys match does not mean that the element is found. - // Key function comparison has to be performed to check whether this is the - // right element. If not, keep searching while order key is the same. - if (!my_hash_compare(get_key(*it), key)) - return my_solist.get_iterator(it); - } - } - - return end(); - } - - // Erase an element from the list. This is not a concurrency safe function. - iterator internal_erase(const_iterator it) - { - key_type key = get_key(*it); - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - - raw_iterator previous = get_bucket(bucket); - raw_iterator last = my_solist.raw_end(); - raw_iterator where = previous; - - __TBB_ASSERT(where != last, "Invalid head node"); - - // First node is a dummy node - ++where; - - for (;;) { - if (where == last) - return end(); - else if (my_solist.get_iterator(where) == it) - return my_solist.erase_node(previous, it); - - // Move the iterator forward - previous = where; - ++where; - } - } - - // Return the [begin, end) pair of iterators with the same key values. - // This operation makes sense only if mapping is many-to-one. - pairii_t internal_equal_range(const key_type& key) - { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - raw_iterator end_it = my_solist.raw_end(); - - for (raw_iterator it = get_bucket(bucket); it != end_it; ++it) - { - if (solist_t::get_order_key(it) > order_key) - { - // There is no element with the given key - return pairii_t(end(), end()); - } - else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key)) - { - iterator first = my_solist.get_iterator(it); - iterator last = first; - - while( last != end() && !my_hash_compare(get_key(*last), key) ) - ++last; - return pairii_t(first, last); - } - } - - return pairii_t(end(), end()); - } - - // Return the [begin, end) pair of const iterators with the same key values. - // This operation makes sense only if mapping is many-to-one. 
-    paircc_t internal_equal_range(const key_type& key) const
-    {
-        sokey_t order_key = (sokey_t) my_hash_compare(key);
-        size_type bucket = order_key % my_number_of_buckets;
-
-        // If bucket is empty, there is nothing to find
-        if (!is_initialized(bucket))
-            return paircc_t(end(), end());
-
-        order_key = split_order_key_regular(order_key);
-        raw_const_iterator end_it = my_solist.raw_end();
-
-        for (raw_const_iterator it = get_bucket(bucket); it != end_it; ++it)
-        {
-            if (solist_t::get_order_key(it) > order_key)
-            {
-                // There is no element with the given key
-                return paircc_t(end(), end());
-            }
-            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
-            {
-                const_iterator first = my_solist.get_iterator(it);
-                const_iterator last = first;
-
-                while( last != end() && !my_hash_compare(get_key(*last), key) )
-                    ++last;
-                return paircc_t(first, last);
-            }
-        }
-
-        return paircc_t(end(), end());
-    }
-
-    // Bucket APIs
-    void init_bucket(size_type bucket)
-    {
-        // Bucket 0 has no parent. Initialize it and return.
-        if (bucket == 0) {
-            internal_init();
-            return;
-        }
-
-        size_type parent_bucket = get_parent(bucket);
-
-        // The parent bucket has to be initialized before this bucket is
-        if (!is_initialized(parent_bucket))
-            init_bucket(parent_bucket);
-
-        raw_iterator parent = get_bucket(parent_bucket);
-
-        // Create a dummy first node in this bucket
-        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
-        set_bucket(bucket, dummy_node);
-    }
-
-    void adjust_table_size(size_type total_elements, size_type current_size)
-    {
-        // Grow the table by a factor of 2 if possible and needed
-        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
-        {
-            // Double the size of the hash only if size has not changed in between the loads
-            __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, 2 * current_size, current_size );
-        }
-    }
-
-    size_type get_parent(size_type bucket) const
-    {
-        // Unsets bucket's most significant turned-on bit
-        size_type msb = __TBB_Log2((uintptr_t)bucket);
-        return bucket & ~(size_type(1) << msb);
-    }
-
-
-    // Dynamic sized array (segments)
-    //! @return segment index of given index in the array
-    static size_type segment_index_of( size_type index ) {
-        return size_type( __TBB_Log2( index|1 ) );
-    }
-
-    //! @return the first array index of given segment
-    static size_type segment_base( size_type k ) {
-        return (size_type(1)<<k & ~size_type(1));
-    }
-
-    //! @return segment size
-    static size_type segment_size( size_type k ) {
-        return k ? size_type(1)<<k : 2;
-    }
-
-    // Get the bucket pointer (the bucket's dummy-node iterator)
-    raw_iterator get_bucket(size_type bucket) const {
-        size_type segment = segment_index_of(bucket);
-        bucket -= segment_base(segment);
-        __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
-        return my_buckets[segment][bucket];
-    }
-
-    // Set the bucket pointer, allocating its segment on first touch
-    void set_bucket(size_type bucket, raw_iterator dummy_head) {
-        size_type segment = segment_index_of(bucket);
-        bucket -= segment_base(segment);
-
-        if (my_buckets[segment] == NULL) {
-            size_type sz = segment_size(segment);
-            raw_iterator * new_segment = my_allocator.allocate(sz);
-            std::memset(new_segment, 0, sz*sizeof(raw_iterator));
-
-            // Only one thread may publish the new segment; losers free theirs
-            if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
-                my_allocator.deallocate(new_segment, sz);
-        }
-
-        my_buckets[segment][bucket] = dummy_head;
-    }
-
-    // Is the bucket initialized?
-    bool is_initialized(size_type bucket) const {
-        size_type segment = segment_index_of(bucket);
-        bucket -= segment_base(segment);
-
-        if (my_buckets[segment] == NULL)
-            return false;
-
-        raw_iterator it = my_buckets[segment][bucket];
-        return (it.get_node_ptr() != NULL);
-    }
-
-    // Utilities for keys
-
-    // A regular order key has its original hash value reversed and the last bit set
-    sokey_t split_order_key_regular(sokey_t order_key) const {
-        return __TBB_ReverseBits(order_key) | 0x1;
-    }
-
-    // A dummy order key has its original hash value reversed and the last bit unset
-    sokey_t split_order_key_dummy(sokey_t order_key) const {
-        return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
-    }
-
-    // Shared variables
-    size_type my_number_of_buckets; // Current number of buckets
-    solist_t my_solist; // Split-ordered list holding all the elements
-    typename allocator_type::template rebind<raw_iterator>::other my_allocator; // Allocator object for segments
-    float my_maximum_bucket_size; // Maximum size of the bucket
-    raw_iterator *my_buckets[pointers_per_table]; // The segment table
-};
-#if _MSC_VER
-#pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
-#endif
-
-//! Hash multiplier
-static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
-} // namespace internal
-//! @endcond
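
// The split_order_key_* helpers above are the heart of split-ordered hashing
// (Shalev and Shavit): reversing the bits of the hash makes all keys that
// fall into bucket b contiguous in one globally sorted list, so doubling the
// table never has to move a node. A worked 8-bit example -- the width is
// shrunk for readability; the real code reverses a full machine word:

    #include <cstdint>
    #include <cstdio>

    static std::uint8_t reverse8(std::uint8_t v) {
        std::uint8_t r = 0;
        for (int i = 0; i < 8; ++i) { r = (std::uint8_t)((r << 1) | (v & 1)); v >>= 1; }
        return r;
    }

    int main() {
        std::uint8_t hash    = 0x2B;                     // bucket = hash % table size
        std::uint8_t regular = reverse8(hash) | 0x1;     // element: reversed, low bit set
        std::uint8_t dummy   = (std::uint8_t)(reverse8(hash) & ~0x1);  // bucket marker
        std::printf("%02X %02X\n", regular, dummy);      // D5 D4: the dummy sorts
        return 0;                                        // just before its elements
    }

// (hash_multiplier above is the golden-ratio constant ~2^N/phi, used by the
// tbb_hasher functions below for multiplicative hashing of the raw key.)
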
-//! Hasher functions
-template<typename T>
-inline size_t tbb_hasher( const T& t ) {
-    return static_cast<size_t>( t ) * internal::hash_multiplier;
-}
-template<typename P>
-inline size_t tbb_hasher( P* ptr ) {
-    size_t const h = reinterpret_cast<size_t>( ptr );
-    return (h >> 3) ^ h;
-}
-template<typename E, typename S, typename A>
-inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
-    size_t h = 0;
-    for( const E* c = s.c_str(); *c; ++c )
-        h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
-    return h;
-}
-template<typename F, typename S>
-inline size_t tbb_hasher( const std::pair<F,S>& p ) {
-    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
-}
-} // namespace interface5
-using interface5::tbb_hasher;
-} // namespace tbb
-#endif// __TBB_concurrent_unordered_internal_H
diff --git a/tbb30_20100406oss/include/tbb/_tbb_windef.h b/tbb30_20100406oss/include/tbb/_tbb_windef.h
deleted file mode 100644
index 7ca10696b..000000000
--- a/tbb30_20100406oss/include/tbb/_tbb_windef.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks.
-
-    Threading Building Blocks is free software; you can redistribute it
-    and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.
-
-    Threading Building Blocks is distributed in the hope that it will be
-    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with Threading Building Blocks; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-    As a special exception, you may use this file as part of a free software
-    library without restriction.  Specifically, if other files instantiate
-    templates or use macros or inline functions from this file, or you compile
-    this file and link it with other files to produce an executable, this
-    file does not by itself cause the resulting executable to be covered by
-    the GNU General Public License.  This exception does not however
-    invalidate any other reasons why the executable file might be covered by
-    the GNU General Public License.
-*/
-
-#ifndef __TBB_tbb_windef_H
-#error Do not #include this file directly. Use "#include tbb/tbb_stddef.h" instead.
-#endif /* __TBB_tbb_windef_H */
-
-// Check that the target Windows version has all API calls required for TBB.
-// Do not increase the version in condition beyond 0x0500 without prior discussion!
-#if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0400
-#error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0400 or greater.
-#endif
-
-#if !defined(_MT)
-#error TBB requires linkage with multithreaded C/C++ runtime library. \
-       Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch.
-#endif - -// Workaround for the problem with MVSC headers failing to define namespace std -namespace std { - using ::size_t; using ::ptrdiff_t; -} - -#define __TBB_STRING_AUX(x) #x -#define __TBB_STRING(x) __TBB_STRING_AUX(x) - -// Default setting of TBB_USE_DEBUG -#ifdef TBB_USE_DEBUG -# if TBB_USE_DEBUG -# if !defined(_DEBUG) -# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0") -# endif -# else -# if defined(_DEBUG) -# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0") -# endif -# endif -#else -# ifdef _DEBUG -# define TBB_USE_DEBUG 1 -# endif -#endif - -#if __TBB_BUILD && !defined(__TBB_NO_IMPLICIT_LINKAGE) -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#endif - -#if _MSC_VER - #if !__TBB_NO_IMPLICIT_LINKAGE - #ifdef _DEBUG - #pragma comment(lib, "tbb_debug.lib") - #else - #pragma comment(lib, "tbb.lib") - #endif - #endif -#endif diff --git a/tbb30_20100406oss/include/tbb/aligned_space.h b/tbb30_20100406oss/include/tbb/aligned_space.h deleted file mode 100644 index c4b58e594..000000000 --- a/tbb30_20100406oss/include/tbb/aligned_space.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_aligned_space_H -#define __TBB_aligned_space_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" - -namespace tbb { - -//! Block of space aligned sufficiently to construct an array T with N elements. -/** The elements are not constructed or destroyed by this class. - @ingroup memory_allocation */ -template -class aligned_space { -private: - typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type; - element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)]; -public: - //! Pointer to beginning of array - T* begin() {return reinterpret_cast(this);} - - //! Pointer to one past last element in array. 
- T* end() {return begin()+N;} -}; - -} // namespace tbb - -#endif /* __TBB_aligned_space_H */ diff --git a/tbb30_20100406oss/include/tbb/atomic.h b/tbb30_20100406oss/include/tbb/atomic.h deleted file mode 100644 index dd2cdccbe..000000000 --- a/tbb30_20100406oss/include/tbb/atomic.h +++ /dev/null @@ -1,397 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_atomic_H -#define __TBB_atomic_H - -#include -#include "tbb_stddef.h" - -#if _MSC_VER -#define __TBB_LONG_LONG __int64 -#else -#define __TBB_LONG_LONG long long -#endif /* _MSC_VER */ - -#include "tbb_machine.h" - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4244 4267) -#endif - -namespace tbb { - -//! Specifies memory fencing. -enum memory_semantics { - //! For internal use only. - __TBB_full_fence, - //! Acquire fence - acquire, - //! Release fence - release -}; - -//! @cond INTERNAL -namespace internal { - -#if __GNUC__ || __SUNPRO_CC -#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a))); -#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300 -#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f; -#else -#error Do not know syntax for forcing alignment. -#endif /* __GNUC__ */ - -template -struct atomic_rep; // Primary template declared, but never defined. - -template<> -struct atomic_rep<1> { // Specialization - typedef int8_t word; - int8_t value; -}; -template<> -struct atomic_rep<2> { // Specialization - typedef int16_t word; - __TBB_DECL_ATOMIC_FIELD(int16_t,value,2) -}; -template<> -struct atomic_rep<4> { // Specialization -#if _MSC_VER && __TBB_WORDSIZE==4 - // Work-around that avoids spurious /Wp64 warnings - typedef intptr_t word; -#else - typedef int32_t word; -#endif - __TBB_DECL_ATOMIC_FIELD(int32_t,value,4) -}; -template<> -struct atomic_rep<8> { // Specialization - typedef int64_t word; - __TBB_DECL_ATOMIC_FIELD(int64_t,value,8) -}; - -template -struct atomic_traits; // Primary template declared, but not defined. 
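
// The memory_semantics enum above (__TBB_full_fence, acquire, release) maps
// onto what C++11 later standardized as memory orders. A sketch of the
// correspondence using std::atomic; the producer/consumer pair is
// illustrative, not part of this header:

    #include <atomic>

    std::atomic<int> flag(0);
    int payload = 0;

    void producer() {
        payload = 42;                                 // ordinary write...
        flag.store(1, std::memory_order_release);     // ...published with "release"
    }

    int consumer() {
        while (flag.load(std::memory_order_acquire) == 0) {} // "acquire" pairs with it
        return payload;                               // guaranteed to see 42
    }

// __TBB_full_fence corresponds to memory_order_seq_cst, the default used by
// the unsuffixed fetch_and_* operations generated by the macros below.
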
- -#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \ - template<> struct atomic_traits { \ - typedef atomic_rep::word word; \ - inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\ - return __TBB_CompareAndSwap##S##M(location,new_value,comparand); \ - } \ - inline static word fetch_and_add( volatile void* location, word addend ) { \ - return __TBB_FetchAndAdd##S##M(location,addend); \ - } \ - inline static word fetch_and_store( volatile void* location, word value ) {\ - return __TBB_FetchAndStore##S##M(location,value); \ - } \ - }; - -#define __TBB_DECL_ATOMIC_PRIMITIVES(S) \ - template \ - struct atomic_traits { \ - typedef atomic_rep::word word; \ - inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\ - return __TBB_CompareAndSwap##S(location,new_value,comparand); \ - } \ - inline static word fetch_and_add( volatile void* location, word addend ) { \ - return __TBB_FetchAndAdd##S(location,addend); \ - } \ - inline static word fetch_and_store( volatile void* location, word value ) {\ - return __TBB_FetchAndStore##S(location,value); \ - } \ - }; - -#if __TBB_DECL_FENCED_ATOMICS -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release) -#else -__TBB_DECL_ATOMIC_PRIMITIVES(1) -__TBB_DECL_ATOMIC_PRIMITIVES(2) -__TBB_DECL_ATOMIC_PRIMITIVES(4) -__TBB_DECL_ATOMIC_PRIMITIVES(8) -#endif - -//! Additive inverse of 1 for type T. -/** Various compilers issue various warnings if -1 is used with various integer types. - The baroque expression below avoids all the warnings (we hope). */ -#define __TBB_MINUS_ONE(T) (T(T(0)-T(1))) - -//! Base class that provides basic functionality for atomic without fetch_and_add. -/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor, - and can be copied/compared by memcpy/memcmp. */ -template -struct atomic_impl { -protected: - atomic_rep rep; -private: - //! Union type used to convert type T to underlying integral type. 
- union converter { - T value; - typename atomic_rep::word bits; - }; -public: - typedef T value_type; - - template - value_type fetch_and_store( value_type value ) { - converter u, w; - u.value = value; - w.bits = internal::atomic_traits::fetch_and_store(&rep.value,u.bits); - return w.value; - } - - value_type fetch_and_store( value_type value ) { - return fetch_and_store<__TBB_full_fence>(value); - } - - template - value_type compare_and_swap( value_type value, value_type comparand ) { - converter u, v, w; - u.value = value; - v.value = comparand; - w.bits = internal::atomic_traits::compare_and_swap(&rep.value,u.bits,v.bits); - return w.value; - } - - value_type compare_and_swap( value_type value, value_type comparand ) { - return compare_and_swap<__TBB_full_fence>(value,comparand); - } - - operator value_type() const volatile { // volatile qualifier here for backwards compatibility - converter w; - w.bits = __TBB_load_with_acquire( rep.value ); - return w.value; - } - -protected: - value_type store_with_release( value_type rhs ) { - converter u; - u.value = rhs; - __TBB_store_with_release(rep.value,u.bits); - return rhs; - } -}; - -//! Base class that provides basic functionality for atomic with fetch_and_add. -/** I is the underlying type. - D is the difference type. - StepType should be char if I is an integral type, and T if I is a T*. */ -template -struct atomic_impl_with_arithmetic: atomic_impl { -public: - typedef I value_type; - - template - value_type fetch_and_add( D addend ) { - return value_type(internal::atomic_traits::fetch_and_add( &this->rep.value, addend*sizeof(StepType) )); - } - - value_type fetch_and_add( D addend ) { - return fetch_and_add<__TBB_full_fence>(addend); - } - - template - value_type fetch_and_increment() { - return fetch_and_add(1); - } - - value_type fetch_and_increment() { - return fetch_and_add(1); - } - - template - value_type fetch_and_decrement() { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } - - value_type fetch_and_decrement() { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } - -public: - value_type operator+=( D addend ) { - return fetch_and_add(addend)+addend; - } - - value_type operator-=( D addend ) { - // Additive inverse of addend computed using binary minus, - // instead of unary minus, for sake of avoiding compiler warnings. - return operator+=(D(0)-addend); - } - - value_type operator++() { - return fetch_and_add(1)+1; - } - - value_type operator--() { - return fetch_and_add(__TBB_MINUS_ONE(D))-1; - } - - value_type operator++(int) { - return fetch_and_add(1); - } - - value_type operator--(int) { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } -}; - -#if __TBB_WORDSIZE == 4 -// Plaforms with 32-bit hardware require special effort for 64-bit loads and stores. 
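
// The specializations guarded below exist because a plain 64-bit load or
// store compiles to two 32-bit moves on IA-32, so another thread can observe
// a half-written ("torn") value; __TBB_Load8/__TBB_Store8 use an instruction
// with 8-byte atomicity instead. The C++11 equivalent simply delegates the
// problem to std::atomic (a sketch, not part of this header):

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uint64_t> big(0);   // atomic loads/stores even on 32-bit targets

    std::uint64_t read_it()          { return big.load(); }  // never a torn value
    void write_it(std::uint64_t v)   { big.store(v); }
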
-#if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 - -template<> -inline atomic_impl<__TBB_LONG_LONG>::operator atomic_impl<__TBB_LONG_LONG>::value_type() const volatile { - return __TBB_Load8(&rep.value); -} - -template<> -inline atomic_impl::operator atomic_impl::value_type() const volatile { - return __TBB_Load8(&rep.value); -} - -template<> -inline atomic_impl<__TBB_LONG_LONG>::value_type atomic_impl<__TBB_LONG_LONG>::store_with_release( value_type rhs ) { - __TBB_Store8(&rep.value,rhs); - return rhs; -} - -template<> -inline atomic_impl::value_type atomic_impl::store_with_release( value_type rhs ) { - __TBB_Store8(&rep.value,rhs); - return rhs; -} - -#endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */ -#endif /* __TBB_WORDSIZE==4 */ - -} /* Internal */ -//! @endcond - -//! Primary template for atomic. -/** See the Reference for details. - @ingroup synchronization */ -template -struct atomic: internal::atomic_impl { - T operator=( T rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) {this->store_with_release(rhs); return *this;} -}; - -#define __TBB_DECL_ATOMIC(T) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ - T operator=( T rhs ) {return store_with_release(rhs);} \ - atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ - }; - -#if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 -__TBB_DECL_ATOMIC(__TBB_LONG_LONG) -__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG) -#else -// Some old versions of MVSC cannot correctly compile templates with "long long". -#endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */ - -__TBB_DECL_ATOMIC(long) -__TBB_DECL_ATOMIC(unsigned long) - -#if defined(_MSC_VER) && __TBB_WORDSIZE==4 -/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. - It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) - with an operator=(U) that explicitly converts the U to a T. Types T and U should be - type synonyms on the platform. Type U should be the wider variant of T from the - perspective of /Wp64. */ -#define __TBB_DECL_ATOMIC_ALT(T,U) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ - T operator=( U rhs ) {return store_with_release(T(rhs));} \ - atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ - }; -__TBB_DECL_ATOMIC_ALT(unsigned,size_t) -__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t) -#else -__TBB_DECL_ATOMIC(unsigned) -__TBB_DECL_ATOMIC(int) -#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */ - -__TBB_DECL_ATOMIC(unsigned short) -__TBB_DECL_ATOMIC(short) -__TBB_DECL_ATOMIC(char) -__TBB_DECL_ATOMIC(signed char) -__TBB_DECL_ATOMIC(unsigned char) - -#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) -__TBB_DECL_ATOMIC(wchar_t) -#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */ - -//! Specialization for atomic with arithmetic and operator->. -template struct atomic: internal::atomic_impl_with_arithmetic { - T* operator=( T* rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) { - this->store_with_release(rhs); return *this; - } - T* operator->() const { - return (*this); - } -}; - -//! Specialization for atomic, for sake of not allowing arithmetic or operator->. 
-template<> struct atomic: internal::atomic_impl { - void* operator=( void* rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) { - this->store_with_release(rhs); return *this; - } -}; - -} // namespace tbb - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warnings 4244, 4267 are back - -#endif /* __TBB_atomic_H */ diff --git a/tbb30_20100406oss/include/tbb/blocked_range.h b/tbb30_20100406oss/include/tbb/blocked_range.h deleted file mode 100644 index 52c12cc1a..000000000 --- a/tbb30_20100406oss/include/tbb/blocked_range.h +++ /dev/null @@ -1,129 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_blocked_range_H -#define __TBB_blocked_range_H - -#include "tbb_stddef.h" - -namespace tbb { - -/** \page range_req Requirements on range concept - Class \c R implementing the concept of range must define: - - \code R::R( const R& ); \endcode Copy constructor - - \code R::~R(); \endcode Destructor - - \code bool R::is_divisible() const; \endcode True if range can be partitioned into two subranges - - \code bool R::empty() const; \endcode True if range is empty - - \code R::R( R& r, split ); \endcode Split range \c r into two subranges. -**/ - -//! A range over which to iterate. -/** @ingroup algorithms */ -template -class blocked_range { -public: - //! Type of a value - /** Called a const_iterator for sake of algorithms that need to treat a blocked_range - as an STL container. */ - typedef Value const_iterator; - - //! Type for size of a range - typedef std::size_t size_type; - - //! Construct range with default-constructed values for begin and end. - /** Requires that Value have a default constructor. */ - blocked_range() : my_end(), my_begin() {} - - //! Construct range over half-open interval [begin,end), with the given grainsize. - blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) : - my_end(end_), my_begin(begin_), my_grainsize(grainsize_) - { - __TBB_ASSERT( my_grainsize>0, "grainsize must be positive" ); - } - - //! Beginning of range. - const_iterator begin() const {return my_begin;} - - //! 
One past last value in range.
-    const_iterator end() const {return my_end;}
-
-    //! Size of the range
-    /** Unspecified if end()<begin(). */
-    size_type size() const {
-        __TBB_ASSERT( !(end()<begin()), "size() unspecified if end()<begin()" );
-        return size_type(my_end-my_begin);
-    }
-
-    //! The grain size for this range.
-    size_type grainsize() const {return my_grainsize;}
-
-    //------------------------------------------------------------------------
-    // Methods that implement Range concept
-    //------------------------------------------------------------------------
-
-    //! True if range is empty.
-    bool empty() const {return !(my_begin<my_end);}
-
-    //! True if range is divisible.
-    /** Unspecified if end()<begin(). */
-    bool is_divisible() const {return my_grainsize<size();}
-
-    //! Split range.
-    /** The new Range *this has the second half of the original *this,
-        and r has the first half. Unspecified if end()<begin()
-        or !is_divisible(). */
-    blocked_range( blocked_range& r, split ) :
-        my_end(r.my_end),
-        my_begin(do_split(r)),
-        my_grainsize(r.my_grainsize)
-    {}
-
-private:
-    /** NOTE: my_end MUST be declared before my_begin, otherwise the forking constructor will break. */
-    Value my_end;
-    Value my_begin;
-    size_type my_grainsize;
-
-    //! Auxiliary function used by forking constructor.
-    /** Using this function lets us not require that Value support assignment or default construction. */
-    static Value do_split( blocked_range& r ) {
-        __TBB_ASSERT( r.is_divisible(), "cannot split blocked_range that is not divisible" );
-        Value middle = r.my_begin + (r.my_end-r.my_begin)/2u;
-        r.my_end = middle;
-        return middle;
-    }
-
-    template<typename RowValue, typename ColValue>
-    friend class blocked_range2d;
-
-    template<typename RowValue, typename ColValue, typename PageValue>
-    friend class blocked_range3d;
-};
-
-} // namespace tbb
-
-#endif /* __TBB_blocked_range_H */
diff --git a/tbb30_20100406oss/include/tbb/blocked_range2d.h b/tbb30_20100406oss/include/tbb/blocked_range2d.h
deleted file mode 100644
index d541f4243..000000000
--- a/tbb30_20100406oss/include/tbb/blocked_range2d.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
-
-    This file is part of Threading Building Blocks.
-
-    Threading Building Blocks is free software; you can redistribute it
-    and/or modify it under the terms of the GNU General Public License
-    version 2 as published by the Free Software Foundation.
-
-    Threading Building Blocks is distributed in the hope that it will be
-    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
-    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with Threading Building Blocks; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-    As a special exception, you may use this file as part of a free software
-    library without restriction.  Specifically, if other files instantiate
-    templates or use macros or inline functions from this file, or you compile
-    this file and link it with other files to produce an executable, this
-    file does not by itself cause the resulting executable to be covered by
-    the GNU General Public License.  This exception does not however
-    invalidate any other reasons why the executable file might be covered by
-    the GNU General Public License.
-*/
-
-#ifndef __TBB_blocked_range2d_H
-#define __TBB_blocked_range2d_H
-
-#include "tbb_stddef.h"
-#include "blocked_range.h"
-
-namespace tbb {
-
-//! A 2-dimensional range that models the Range concept.
-/** @ingroup algorithms */
-template<typename RowValue, typename ColValue=RowValue>
-class blocked_range2d {
-public:
-    //! Type for size of an iteration range
-    typedef blocked_range<RowValue> row_range_type;
-    typedef blocked_range<ColValue> col_range_type;
-
-private:
-    row_range_type my_rows;
-    col_range_type my_cols;
-
-public:
-
-    blocked_range2d( RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize,
-                     ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) :
-        my_rows(row_begin,row_end,row_grainsize),
-        my_cols(col_begin,col_end,col_grainsize)
-    {
-    }
-
-    blocked_range2d( RowValue row_begin, RowValue row_end,
-                     ColValue col_begin, ColValue col_end ) :
-        my_rows(row_begin,row_end),
-        my_cols(col_begin,col_end)
-    {
-    }
-
-    //! True if range is empty
-    bool empty() const {
-        // Yes, it is a logical OR here, not AND.
-        return my_rows.empty() || my_cols.empty();
-    }
-
-    //! True if range is divisible into two pieces.
-    bool is_divisible() const {
-        return my_rows.is_divisible() || my_cols.is_divisible();
-    }
-
-    blocked_range2d( blocked_range2d& r, split ) :
-        my_rows(r.my_rows),
-        my_cols(r.my_cols)
-    {
-        if( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) {
-            my_cols.my_begin = col_range_type::do_split(r.my_cols);
-        } else {
-            my_rows.my_begin = row_range_type::do_split(r.my_rows);
-        }
-    }
-
-    //! The rows of the iteration space
-    const row_range_type& rows() const {return my_rows;}
-
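
// do_split() in blocked_range above is the whole work-division mechanism of
// TBB's loop templates: recursive halving until is_divisible(), i.e.
// size() > grainsize, stops holding. A worked trace for
// blocked_range<int>(0, 100, 25):
//
//   [0,100) size 100 > 25  -> split into [0,50) and [50,100)
//   [0,50)  size  50 > 25  -> split into [0,25) and [25,50)
//   [0,25)  size  25 <= 25 -> leaf, executed by one task
//
// The same trace as a hypothetical single-threaded sketch:

    #include <cstdio>

    static void trace_split(int b, int e, int grain, int depth) {
        if (e - b <= grain) { std::printf("%*sleaf [%d,%d)\n", depth, "", b, e); return; }
        int mid = b + (e - b) / 2;        // exactly what do_split() computes
        trace_split(b,   mid, grain, depth + 1);
        trace_split(mid, e,   grain, depth + 1);
    }

    int main() { trace_split(0, 100, 25, 0); return 0; }

-    //!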
The columns of the iteration space - const col_range_type& cols() const {return my_cols;} -}; - -} // namespace tbb - -#endif /* __TBB_blocked_range2d_H */ diff --git a/tbb30_20100406oss/include/tbb/blocked_range3d.h b/tbb30_20100406oss/include/tbb/blocked_range3d.h deleted file mode 100644 index b0bfbe0e1..000000000 --- a/tbb30_20100406oss/include/tbb/blocked_range3d.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_blocked_range3d_H -#define __TBB_blocked_range3d_H - -#include "tbb_stddef.h" -#include "blocked_range.h" - -namespace tbb { - -//! A 3-dimensional range that models the Range concept. -/** @ingroup algorithms */ -template -class blocked_range3d { -public: - //! Type for size of an iteation range - typedef blocked_range page_range_type; - typedef blocked_range row_range_type; - typedef blocked_range col_range_type; - -private: - page_range_type my_pages; - row_range_type my_rows; - col_range_type my_cols; - -public: - - blocked_range3d( PageValue page_begin, PageValue page_end, - RowValue row_begin, RowValue row_end, - ColValue col_begin, ColValue col_end ) : - my_pages(page_begin,page_end), - my_rows(row_begin,row_end), - my_cols(col_begin,col_end) - { - } - - blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize, - RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, - ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : - my_pages(page_begin,page_end,page_grainsize), - my_rows(row_begin,row_end,row_grainsize), - my_cols(col_begin,col_end,col_grainsize) - { - } - - //! True if range is empty - bool empty() const { - // Yes, it is a logical OR here, not AND. - return my_pages.empty() || my_rows.empty() || my_cols.empty(); - } - - //! True if range is divisible into two pieces. 
- bool is_divisible() const { - return my_pages.is_divisible() || my_rows.is_divisible() || my_cols.is_divisible(); - } - - blocked_range3d( blocked_range3d& r, split ) : - my_pages(r.my_pages), - my_rows(r.my_rows), - my_cols(r.my_cols) - { - if( my_pages.size()*double(my_rows.grainsize()) < my_rows.size()*double(my_pages.grainsize()) ) { - if ( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) { - my_cols.my_begin = col_range_type::do_split(r.my_cols); - } else { - my_rows.my_begin = row_range_type::do_split(r.my_rows); - } - } else { - if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) { - my_cols.my_begin = col_range_type::do_split(r.my_cols); - } else { - my_pages.my_begin = page_range_type::do_split(r.my_pages); - } - } - } - - //! The pages of the iteration space - const page_range_type& pages() const {return my_pages;} - - //! The rows of the iteration space - const row_range_type& rows() const {return my_rows;} - - //! The columns of the iteration space - const col_range_type& cols() const {return my_cols;} - -}; - -} // namespace tbb - -#endif /* __TBB_blocked_range3d_H */ diff --git a/tbb30_20100406oss/include/tbb/cache_aligned_allocator.h b/tbb30_20100406oss/include/tbb/cache_aligned_allocator.h deleted file mode 100644 index 5889682d6..000000000 --- a/tbb30_20100406oss/include/tbb/cache_aligned_allocator.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_cache_aligned_allocator_H -#define __TBB_cache_aligned_allocator_H - -#include -#include "tbb_stddef.h" - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - //! Cache/sector line size. - /** @ingroup memory_allocation */ - size_t __TBB_EXPORTED_FUNC NFS_GetLineSize(); - - //! Allocate memory on cache/sector line boundary. - /** @ingroup memory_allocation */ - void* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, void* hint ); - - //! Free memory allocated by NFS_Allocate. - /** Freeing a NULL pointer is allowed, but has no effect. - @ingroup memory_allocation */ - void __TBB_EXPORTED_FUNC NFS_Free( void* ); -} -//! 
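// Minimal usage sketch for the blocked_range3d removed above (illustrative
// names; assumes TBB 3.0 headers). Per the split constructor shown above, a
// split always halves the dimension that is largest relative to its
// grainsize, keeping sub-blocks roughly cubic in grain units.
#include <cstddef>
#include "tbb/parallel_for.h"
#include "tbb/blocked_range3d.h"

void clear_volume( float* v, size_t n_pages, size_t n_rows, size_t n_cols ) {
    tbb::parallel_for(
        tbb::blocked_range3d<size_t>( 0, n_pages, 0, n_rows, 0, n_cols ),
        [=]( const tbb::blocked_range3d<size_t>& r ) {
            for( size_t p = r.pages().begin(); p != r.pages().end(); ++p )
                for( size_t i = r.rows().begin(); i != r.rows().end(); ++i )
                    for( size_t j = r.cols().begin(); j != r.cols().end(); ++j )
                        v[(p*n_rows + i)*n_cols + j] = 0.0f;
        } );
}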
@endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template -class cache_aligned_allocator { -public: - typedef typename internal::allocator_type::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template struct rebind { - typedef cache_aligned_allocator other; - }; - - cache_aligned_allocator() throw() {} - cache_aligned_allocator( const cache_aligned_allocator& ) throw() {} - template cache_aligned_allocator(const cache_aligned_allocator&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects, starting on a cache/sector line. - pointer allocate( size_type n, const void* hint=0 ) { - // The "hint" argument is always ignored in NFS_Allocate thus const_cast shouldn't hurt - return pointer(internal::NFS_Allocate( n, sizeof(value_type), const_cast(hint) )); - } - - //! Free block of memory that starts on a cache line - void deallocate( pointer p, size_type ) { - internal::NFS_Free(p); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - return (~size_t(0)-internal::NFS_MaxLineSize)/sizeof(value_type); - } - - //! Copy-construct value at location pointed to by p. - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) {p->~value_type();} -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class cache_aligned_allocator { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template struct rebind { - typedef cache_aligned_allocator other; - }; -}; - -template -inline bool operator==( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return true;} - -template -inline bool operator!=( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return false;} - -} // namespace tbb - -#endif /* __TBB_cache_aligned_allocator_H */ diff --git a/tbb30_20100406oss/include/tbb/combinable.h b/tbb30_20100406oss/include/tbb/combinable.h deleted file mode 100644 index 551059585..000000000 --- a/tbb30_20100406oss/include/tbb/combinable.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
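// Minimal usage sketch for the cache_aligned_allocator removed above
// (assumes TBB 3.0 headers). Each allocation goes through NFS_Allocate and
// starts on a cache/sector line, so the container's backing store cannot
// false-share a line with a neighboring allocation; note it pads whole
// allocations, not individual elements.
#include <vector>
#include "tbb/cache_aligned_allocator.h"

typedef std::vector<int, tbb::cache_aligned_allocator<int> > aligned_vec;

aligned_vec make_counters() {
    return aligned_vec( 8, 0 );   // buffer begins on a cache-line boundary
}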
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_combinable_H -#define __TBB_combinable_H - -#include "enumerable_thread_specific.h" -#include "cache_aligned_allocator.h" - -namespace tbb { -/** \name combinable - **/ -//@{ -//! Thread-local storage with optional reduction -/** @ingroup containers */ - template - class combinable { - private: - typedef typename tbb::cache_aligned_allocator my_alloc; - - typedef typename tbb::enumerable_thread_specific my_ets_type; - my_ets_type my_ets; - - public: - - combinable() { } - - template - combinable( finit _finit) : my_ets(_finit) { } - - //! destructor - ~combinable() { - } - - combinable(const combinable& other) : my_ets(other.my_ets) { } - - combinable & operator=( const combinable & other) { my_ets = other.my_ets; return *this; } - - void clear() { my_ets.clear(); } - - T& local() { return my_ets.local(); } - - T& local(bool & exists) { return my_ets.local(exists); } - - // combine_func_t has signature T(T,T) or T(const T&, const T&) - template - T combine(combine_func_t f_combine) { return my_ets.combine(f_combine); } - - // combine_func_t has signature void(T) or void(const T&) - template - void combine_each(combine_func_t f_combine) { my_ets.combine_each(f_combine); } - - }; -} // namespace tbb -#endif /* __TBB_combinable_H */ diff --git a/tbb30_20100406oss/include/tbb/compat/condition_variable b/tbb30_20100406oss/include/tbb/compat/condition_variable deleted file mode 100644 index 4f132d215..000000000 --- a/tbb30_20100406oss/include/tbb/compat/condition_variable +++ /dev/null @@ -1,459 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
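// Minimal usage sketch for the combinable removed above (illustrative names;
// assumes TBB 3.0 headers and lambdas). Each thread accumulates into its own
// lazily created copy via local(); combine() reduces the copies at the end.
#include <cstddef>
#include <functional>
#include "tbb/combinable.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"

int parallel_sum( const int* a, size_t n ) {
    tbb::combinable<int> partial;                    // one int per thread
    tbb::parallel_for( tbb::blocked_range<size_t>( 0, n ),
        [&]( const tbb::blocked_range<size_t>& r ) {
            int& local = partial.local();            // this thread's copy
            for( size_t i = r.begin(); i != r.end(); ++i )
                local += a[i];
        } );
    return partial.combine( std::plus<int>() );      // reduce all copies
}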
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_condition_variable_H -#define __TBB_condition_variable_H - -#if _WIN32||_WIN64 -#include - -namespace tbb { -namespace interface5 { -namespace internal { -struct condition_variable_using_event -{ - //! Event for blocking waiting threads. - HANDLE event; - //! Protects invariants involving n_waiters, release_count, and epoch. - CRITICAL_SECTION mutex; - //! Number of threads waiting on this condition variable - int n_waiters; - //! Number of threads remaining that should no longer wait on this condition variable. - int release_count; - //! To keep threads from waking up prematurely with earlier signals. - unsigned epoch; -}; -}}} // namespace tbb::interface5::internal - -#ifndef CONDITION_VARIABLE_INIT -typedef void* CONDITION_VARIABLE; -typedef CONDITION_VARIABLE* PCONDITION_VARIABLE; -#endif - -#else /* if not _WIN32||_WIN64 */ -#include -#endif /* _WIN32||_WIN64 */ - -#include "../tbb_stddef.h" -#include "../mutex.h" -#include "../tbb_thread.h" -#include "../tbb_exception.h" -#include "../tbb_profiling.h" - -namespace tbb { - -namespace interface5 { - -// C++0x standard working draft 30.4.3 -// Lock tag types -struct defer_lock_t { }; //! do not acquire ownership of the mutex -struct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking -struct adopt_lock_t { }; //! assume the calling thread has already -const defer_lock_t defer_lock = {}; -const try_to_lock_t try_to_lock = {}; -const adopt_lock_t adopt_lock = {}; - -// C++0x standard working draft 30.4.3.1 -//! lock_guard -template -class lock_guard : tbb::internal::no_copy { -public: - //! mutex type - typedef M mutex_type; - - //! Constructor - /** precondition: If mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. */ - explicit lock_guard(mutex_type& m) : pm(m) {m.lock();} - - //! Adopt_lock constructor - /** precondition: the calling thread owns the mutex m. */ - lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {} - - //! Destructor - ~lock_guard() { pm.unlock(); } -private: - mutex_type& pm; -}; - -// C++0x standard working draft 30.4.3.2 -//! unique_lock -template -class unique_lock : tbb::internal::no_copy { - friend class condition_variable; -public: - typedef M mutex_type; - - // 30.4.3.2.1 construct/copy/destroy - // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use. - //! Constructor - /** postcondition: pm==0 && owns==false */ - unique_lock() : pm(NULL), owns(false) {} - - //! Constructor - /** precondition: if mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. If the precondition is not met, a deadlock occurs. - postcondition: pm==&m and owns==true */ - explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;} - - //! Defer_lock constructor - /** postcondition: pm==&m and owns==false */ - unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {} - - //! Try_to_lock constructor - /** precondition: if mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. If the precondition is not met, a deadlock occurs. - postcondition: pm==&m and owns==res where res is the value returned by - the call to m.try_lock(). */ - unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();} - - //! Adopt_lock constructor - /** precondition: the calling thread owns the mutex. 
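// Minimal usage sketch for the lock tag types and lock classes above
// (illustrative names; assumes TBB 3.0 headers). lock_guard is the scoped,
// non-transferable lock; defer_lock builds a unique_lock that acquires later.
#include "tbb/mutex.h"
#include "tbb/compat/condition_variable"

void append( tbb::mutex& m, int* buf, int& len, int v ) {
    tbb::interface5::lock_guard<tbb::mutex> g( m );   // locks here
    buf[len++] = v;
}                                                     // unlocks in g's destructor

void append_deferred( tbb::mutex& m, int* buf, int& len, int v ) {
    tbb::interface5::unique_lock<tbb::mutex> u( m, tbb::interface5::defer_lock );
    /* ... work that needs no lock ... */
    u.lock();                                         // acquire explicitly
    buf[len++] = v;
}                                                     // owns==true, so u unlocks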
If it does not, mutex->unlock() would fail. - postcondition: pm==&m and owns==true */ - unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {} - - //! Timed unique_lock acquisition. - /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that - it uses tbb::tick_count::interval_t to specify the time duration. */ - unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );} - - //! Destructor - ~unique_lock() { if( owns ) pm->unlock(); } - - // 30.4.3.2.2 locking - //! Lock the mutex and own it. - void lock() { - if( pm ) { - if( !owns ) { - pm->lock(); - owns = true; - } else - throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - __TBB_ASSERT( owns, NULL ); - } - - //! Try to lock the mutex. - /** If successful, note that this lock owns it. Otherwise, set it false. */ - bool try_lock() { - if( pm ) { - if( !owns ) - owns = pm->try_lock(); - else - throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - return owns; - } - - //! Try to lock the mutex. - bool try_lock_for( const tick_count::interval_t &i ); - - //! Unlock the mutex - /** And note that this lock no longer owns it. */ - void unlock() { - if( owns ) { - pm->unlock(); - owns = false; - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - __TBB_ASSERT( !owns, NULL ); - } - - // 30.4.3.2.3 modifiers - //! Swap the two unique locks - void swap(unique_lock& u) { - mutex_type* t_pm = u.pm; u.pm = pm; pm = t_pm; - bool t_owns = u.owns; u.owns = owns; owns = t_owns; - } - - //! Release control over the mutex. - mutex_type* release() { - mutex_type* o_pm = pm; - pm = NULL; - owns = false; - return o_pm; - } - - // 30.4.3.2.4 observers - //! Does this lock own the mutex? - bool owns_lock() const { return owns; } - - //! Does this lock own the mutex? - /*explicit*/ operator bool() const { return owns; } - - //! Return the mutex that this lock currently has. - mutex_type* mutex() const { return pm; } - -private: - mutex_type* pm; - bool owns; -}; - -template -bool unique_lock::try_lock_for( const tick_count::interval_t &i) -{ - const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */ - // the smallest wait-time is 0.1 milliseconds. - bool res = pm->try_lock(); - int duration_in_micro; - if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) { - tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3 - do { - this_tbb_thread::sleep(i_100); // sleep for 100 micro seconds - duration_in_micro -= unique_lock_tick; - res = pm->try_lock(); - } while( !res && duration_in_micro>unique_lock_tick ); - } - return (owns=res); -} - -//! 
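// Minimal usage sketch for try_lock_for() above (assumes TBB 3.0 headers).
// Note the documented deviation from the C++0x draft: the timeout is a
// tbb::tick_count::interval_t in seconds rather than a chrono duration, and
// the implementation below polls try_lock() in ~0.1 ms steps.
#include "tbb/mutex.h"
#include "tbb/tick_count.h"
#include "tbb/compat/condition_variable"

bool try_bump( tbb::mutex& m, int& value ) {
    tbb::interface5::unique_lock<tbb::mutex> u( m, tbb::interface5::defer_lock );
    if( !u.try_lock_for( tbb::tick_count::interval_t( 0.01 ) ) ) // ~10 ms budget
        return false;                 // still locked elsewhere; gave up
    ++value;
    return true;                      // released by u's destructor
}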
Swap the two unique locks that have the mutexes of same type -template -void swap(unique_lock& x, unique_lock& y) { x.swap( y ); } - -namespace internal { - -#if _WIN32||_WIN64 -union condvar_impl_t { - condition_variable_using_event cv_event; - CONDITION_VARIABLE cv_native; -}; - -void __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_destroy_condition_variable( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv ); -bool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL ); -#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */ -typedef pthread_cond_t condvar_impl_t; -#endif - -} // namespace internal - -//! cv_status -/** C++0x standard working draft 30.5 */ -enum cv_status { no_timeout, timeout }; - -//! condition variable -/** C++0x standard working draft 30.5.1 - @ingroup synchronization */ -class condition_variable : tbb::internal::no_copy { -public: - //! Constructor - condition_variable() { -#if _WIN32||_WIN64 - internal_initialize_condition_variable( my_cv ); -#else - pthread_cond_init( &my_cv, NULL ); -#endif - } - - //! Destructor - ~condition_variable() { - //precondition: There shall be no thread blocked on *this. -#if _WIN32||_WIN64 - internal_destroy_condition_variable( my_cv ); -#else - pthread_cond_destroy( &my_cv ); -#endif - } - - //! Notify one thread and wake it up - void notify_one() { -#if _WIN32||_WIN64 - internal_condition_variable_notify_one( my_cv ); -#else - pthread_cond_signal( &my_cv ); -#endif - } - - //! Notify all threads - void notify_all() { -#if _WIN32||_WIN64 - internal_condition_variable_notify_all( my_cv ); -#else - pthread_cond_broadcast( &my_cv ); -#endif - } - - //! Release the mutex associated with the lock and wait on this condition variable - void wait(unique_lock& lock); - - //! Wait on this condition variable while pred is false - template - void wait(unique_lock& lock, Predicate pred) { - while( !pred() ) - wait( lock ); - } - - //! Timed version of wait() - cv_status wait_for(unique_lock& lock, const tick_count::interval_t &i ); - - //! Timed version of the predicated wait - /** The loop terminates when pred() returns true or when the time duration specified by rel_time (i) has elapsed. */ - template - bool wait_for(unique_lock& lock, const tick_count::interval_t &i, Predicate pred) - { - while( !pred() ) { - cv_status st = wait_for( lock, i ); - if( st==timeout ) - return pred(); - } - return true; - } - - // C++0x standard working draft. 
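// Minimal producer/consumer sketch for the condition_variable above
// (illustrative names; assumes TBB 3.0 headers and lambdas). The predicate
// overload of wait() re-checks the condition after every wakeup, which
// absorbs spurious wakeups.
#include <deque>
#include "tbb/mutex.h"
#include "tbb/compat/condition_variable"

static std::deque<int> queue_;
static tbb::mutex queue_mutex_;
static tbb::interface5::condition_variable queue_nonempty_;

void produce( int v ) {
    {
        tbb::interface5::lock_guard<tbb::mutex> g( queue_mutex_ );
        queue_.push_back( v );
    }
    queue_nonempty_.notify_one();            // wake one waiting consumer
}

int consume() {
    tbb::interface5::unique_lock<tbb::mutex> u( queue_mutex_ );
    queue_nonempty_.wait( u, []{ return !queue_.empty(); } );
    int v = queue_.front();
    queue_.pop_front();
    return v;                                // u unlocks on destruction
}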
30.2.3 - typedef internal::condvar_impl_t* native_handle_type; - - native_handle_type native_handle() { return (native_handle_type) &my_cv; } - -private: - internal::condvar_impl_t my_cv; -}; - - -#if _WIN32||_WIN64 -inline void condition_variable::wait( unique_lock& lock ) -{ - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) { - int ec = GetLastError(); - // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT - __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL ); - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - lock.owns = true; -} - -inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) -{ - cv_status rc = no_timeout; - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait() - if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) { - int ec = GetLastError(); - if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT ) - rc = timeout; - else { - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - } - lock.owns = true; - return rc; -} -#else -#if __linux__ -#include -#else /* generic Unix */ -#include -#include -#endif - -inline void condition_variable::wait( unique_lock& lock ) -{ - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) { - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - // upon successful return, the mutex has been locked and is owned by the calling thread. 
- lock.owns = true; -} - -inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) -{ -#if __linux__ - struct timespec req; - double sec = i.seconds(); - clock_gettime( CLOCK_REALTIME, &req ); - req.tv_sec += static_cast(sec); - req.tv_nsec += static_cast( (sec - static_cast(sec))*1e9 ); -#else /* generic Unix */ - struct timeval tv; - struct timespec req; - double sec = i.seconds(); - int status = gettimeofday(&tv, NULL); - __TBB_ASSERT_EX( status==0, "gettimeofday failed" ); - req.tv_sec = tv.tv_sec + static_cast(sec); - req.tv_nsec = tv.tv_usec*1000 + static_cast( (sec - static_cast(sec))*1e9 ); -#endif /*(choice of OS) */ - - int ec; - cv_status rc = no_timeout; - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) { - if( ec==ETIMEDOUT ) - rc = timeout; - else { - __TBB_ASSERT( lock.try_lock()==false, NULL ); - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - } - lock.owns = true; - return rc; -} -#endif /* !(_WIN32||_WIN64) */ - -} // namespace interface5 - -__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable) - -} // namespace tbb - -#if TBB_IMPLEMENT_CPP0X - -namespace std { - -using tbb::interface5::defer_lock_t; -using tbb::interface5::try_to_lock_t; -using tbb::interface5::adopt_lock_t; -using tbb::interface5::defer_lock; -using tbb::interface5::try_to_lock; -using tbb::interface5::adopt_lock; -using tbb::interface5::lock_guard; -using tbb::interface5::unique_lock; -using tbb::interface5::swap; /* this is for void std::swap(unique_lock&,unique_lock&) */ -using tbb::interface5::condition_variable; -using tbb::interface5::cv_status; -using tbb::interface5::timeout; -using tbb::interface5::no_timeout; - -} // namespace std - -#endif /* TBB_IMPLEMENT_CPP0X */ - -#endif /* __TBB_condition_variable_H */ diff --git a/tbb30_20100406oss/include/tbb/compat/ppl.h b/tbb30_20100406oss/include/tbb/compat/ppl.h deleted file mode 100644 index f524abfed..000000000 --- a/tbb30_20100406oss/include/tbb/compat/ppl.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
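// Sketch of the std-injection path above: when TBB_IMPLEMENT_CPP0X is in
// effect (normally auto-detected by tbb_config.h; forced here purely for
// illustration), the using-declarations make the compat types visible under
// their standard names.
#define TBB_IMPLEMENT_CPP0X 1
#include "tbb/mutex.h"
#include "tbb/compat/condition_variable"

void wait_until_ready( std::condition_variable& cv, tbb::mutex& m, bool& ready ) {
    std::unique_lock<tbb::mutex> lock( m );   // resolves to the TBB compat type
    while( !ready )
        cv.wait( lock );
}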
-*/ - -#ifndef __TBB_compat_ppl_H -#define __TBB_compat_ppl_H - -#include "../task_group.h" -#include "../parallel_invoke.h" -#include "../parallel_for_each.h" -#include "../parallel_for.h" -#include "../tbb_exception.h" -#include "../critical_section.h" -#include "../reader_writer_lock.h" -#include "../combinable.h" - -namespace Concurrency { - - using tbb::task_handle; - using tbb::task_group_status; - using tbb::task_group; - using tbb::structured_task_group; - using tbb::invalid_multiple_scheduling; - using tbb::missing_wait; - using tbb::make_task; - - using tbb::not_complete; - using tbb::complete; - using tbb::canceled; - - using tbb::is_current_task_group_canceling; - - using tbb::parallel_invoke; - using tbb::strict_ppl::parallel_for; - using tbb::parallel_for_each; - using tbb::critical_section; - using tbb::reader_writer_lock; - using tbb::combinable; - - using tbb::improper_lock; - -} // namespace Concurrency - -#endif /* __TBB_compat_ppl_H */ diff --git a/tbb30_20100406oss/include/tbb/compat/thread b/tbb30_20100406oss/include/tbb/compat/thread deleted file mode 100644 index e4d3b329d..000000000 --- a/tbb30_20100406oss/include/tbb/compat/thread +++ /dev/null @@ -1,54 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_thread_H -#define __TBB_thread_H - -#include "../tbb_thread.h" - -#if TBB_IMPLEMENT_CPP0X - -namespace std { - -typedef tbb::tbb_thread thread; - -namespace this_thread { - using tbb::this_tbb_thread::get_id; - using tbb::this_tbb_thread::yield; - - inline void sleep_for(const tbb::tick_count::interval_t& rel_time) { - tbb::internal::thread_sleep_v3( rel_time ); - } - -} - -} - -#endif /* TBB_IMPLEMENT_CPP0X */ - -#endif /* __TBB_thread_H */ diff --git a/tbb30_20100406oss/include/tbb/concurrent_hash_map.h b/tbb30_20100406oss/include/tbb/concurrent_hash_map.h deleted file mode 100644 index 117d0c7a0..000000000 --- a/tbb30_20100406oss/include/tbb/concurrent_hash_map.h +++ /dev/null @@ -1,1375 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
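// Sketch for the compat/ppl.h shim above: PPL-style code written against
// namespace Concurrency runs on the TBB scheduler (illustrative jobs;
// assumes a lambda-capable compiler).
#include "tbb/compat/ppl.h"

void run_two_jobs() {
    Concurrency::task_group g;     // really tbb::task_group
    g.run( []{ /* job A */ } );
    g.run( []{ /* job B */ } );
    g.wait();                      // returns a task_group_status
}
// Similarly, the compat/thread header above typedefs tbb::tbb_thread as
// std::thread (plus this_thread::sleep_for) when TBB_IMPLEMENT_CPP0X is set.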
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_hash_map_H -#define __TBB_concurrent_hash_map_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // Need std::pair -#include // Need std::memset - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "cache_aligned_allocator.h" -#include "tbb_allocator.h" -#include "spin_rw_mutex.h" -#include "atomic.h" -#include "aligned_space.h" -#include "tbb_exception.h" -#include "_concurrent_unordered_internal.h" // Need tbb_hasher -#if TBB_USE_PERFORMANCE_WARNINGS -#include -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - //! ITT instrumented routine that loads pointer from location pointed to by src. - void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src ); - //! ITT instrumented routine that stores src into location pointed to by dst. - void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src ); - //! Routine that loads pointer from location pointed to by src without causing ITT to report a race. - void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src ); -} -//! @endcond - -//! hash_compare that is default argument for concurrent_hash_map -template -struct tbb_hash_compare { - static size_t hash( const Key& a ) { return tbb_hasher(a); } - static bool equal( const Key& a, const Key& b ) { return a == b; } -}; - -namespace interface4 { - - template, typename A = tbb_allocator > > - class concurrent_hash_map; - - //! @cond INTERNAL - namespace internal { - - - //! Type of a hash code. - typedef size_t hashcode_t; - //! Node base type - struct hash_map_node_base : tbb::internal::no_copy { - //! Mutex type - typedef spin_rw_mutex mutex_t; - //! Scoped lock type for mutex - typedef mutex_t::scoped_lock scoped_t; - //! Next node in chain - hash_map_node_base *next; - mutex_t mutex; - }; - //! Incompleteness flag value - static hash_map_node_base *const rehash_req = reinterpret_cast(size_t(3)); - //! Rehashed empty bucket flag - static hash_map_node_base *const empty_rehashed = reinterpret_cast(size_t(0)); - //! 
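// Sketch of a user-supplied HashCompare matching the tbb_hash_compare
// contract above: two callables, hash() and equal(), where equal keys must
// hash identically (illustrative hash function).
#include <cstddef>
#include <string>

struct string_hash_compare {
    static size_t hash( const std::string& s ) {
        size_t h = 0;
        for( size_t i = 0; i < s.size(); ++i )
            h = h*31 + static_cast<unsigned char>( s[i] );
        return h;
    }
    static bool equal( const std::string& a, const std::string& b ) {
        return a == b;
    }
};
// Usage: tbb::concurrent_hash_map<std::string, int, string_hash_compare>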
base class of concurrent_hash_map - class hash_map_base { - public: - //! Size type - typedef size_t size_type; - //! Type of a hash code. - typedef size_t hashcode_t; - //! Segment index type - typedef size_t segment_index_t; - //! Node base type - typedef hash_map_node_base node_base; - //! Bucket type - struct bucket : tbb::internal::no_copy { - //! Mutex type for buckets - typedef spin_rw_mutex mutex_t; - //! Scoped lock type for mutex - typedef mutex_t::scoped_lock scoped_t; - mutex_t mutex; - node_base *node_list; - }; - //! Count of segments in the first block - static size_type const embedded_block = 1; - //! Count of segments in the first block - static size_type const embedded_buckets = 1< my_mask; - //! Segment pointers table. Also prevents false sharing between my_mask and my_size - segments_table_t my_table; - //! Size of container in stored items - atomic my_size; // It must be in separate cache line from my_mask due to performance effects - //! Zero segment - bucket my_embedded_segment[embedded_buckets]; - - //! Constructor - hash_map_base() { - std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512 - + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8 - + embedded_buckets*sizeof(bucket) ); // n*8 or n*16 - for( size_type i = 0; i < embedded_block; i++ ) // fill the table - my_table[i] = my_embedded_segment + segment_base(i); - my_mask = embedded_buckets - 1; - __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks"); - } - - //! @return segment index of given index in the array - static segment_index_t segment_index_of( size_type index ) { - return segment_index_t( __TBB_Log2( index|1 ) ); - } - - //! @return the first array index of given segment - static segment_index_t segment_base( segment_index_t k ) { - return (segment_index_t(1)<(ptr) > size_t(63); - } - - //! Initialize buckets - static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) { - if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) ); - else for(size_type i = 0; i < sz; i++, ptr++) { - *reinterpret_cast(&ptr->mutex) = 0; - ptr->node_list = rehash_req; - } - } - - //! Add node @arg n to bucket @arg b - static void add_to_bucket( bucket *b, node_base *n ) { - __TBB_ASSERT(b->node_list != rehash_req, NULL); - n->next = b->node_list; - b->node_list = n; // its under lock and flag is set - } - - //! Exception safety helper - struct enable_segment_failsafe { - segment_ptr_t *my_segment_ptr; - enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {} - ~enable_segment_failsafe() { - if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress - } - }; - - //! 
Enable segment - void enable_segment( segment_index_t k, bool is_initial = false ) { - __TBB_ASSERT( k, "Zero segment must be embedded" ); - enable_segment_failsafe watchdog( my_table, k ); - cache_aligned_allocator alloc; - size_type sz; - __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment"); - if( k >= first_block ) { - sz = segment_size( k ); - segment_ptr_t ptr = alloc.allocate( sz ); - init_buckets( ptr, sz, is_initial ); -#if TBB_USE_THREADING_TOOLS - // TODO: actually, fence and notification are unnecessary here and below - itt_store_pointer_with_release_v3( my_table + k, ptr ); -#else - my_table[k] = ptr;// my_mask has release fence -#endif - sz <<= 1;// double it to get entire capacity of the container - } else { // the first block - __TBB_ASSERT( k == embedded_block, "Wrong segment index" ); - sz = segment_size( first_block ); - segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets ); - init_buckets( ptr, sz - embedded_buckets, is_initial ); - ptr -= segment_base(embedded_block); - for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets -#if TBB_USE_THREADING_TOOLS - itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) ); -#else - my_table[i] = ptr + segment_base(i); -#endif - } -#if TBB_USE_THREADING_TOOLS - itt_store_pointer_with_release_v3( &my_mask, (void*)(sz-1) ); -#else - my_mask = sz - 1; -#endif - watchdog.my_segment_ptr = 0; - } - - //! Get bucket by (masked) hashcode - bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere? - segment_index_t s = segment_index_of( h ); - h -= segment_base(s); - segment_ptr_t seg = my_table[s]; - __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" ); - return &seg[h]; - } - - // internal serial rehashing helper - void mark_rehashed_levels( hashcode_t h ) throw () { - segment_index_t s = segment_index_of( h ); - while( segment_ptr_t seg = my_table[++s] ) - if( seg[h].node_list == rehash_req ) { - seg[h].node_list = empty_rehashed; - mark_rehashed_levels( h + segment_base(s) ); - } - } - - //! Check for mask race - // Splitting into two functions should help inlining - inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const { - hashcode_t m_now, m_old = m; -#if TBB_USE_THREADING_TOOLS - m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - m_now = my_mask; -#endif - if( m_old != m_now ) - return check_rehashing_collision( h, m_old, m = m_now ); - return false; - } - - //! Process mask race, check for rehashing collision - bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const { - __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m - if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event - // condition above proves that 'h' has some other bits set beside 'm_old' - // find next applicable mask after m_old //TODO: look at bsl instruction - for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size - ; - m_old = (m_old<<1) - 1; // get full mask from a bit - __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL); - // check whether it is rehashing/ed -#if TBB_USE_THREADING_TOOLS - if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req ) -#else - if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req ) -#endif - return true; - } - return false; - } - - //! 
Insert a node and check for load factor. @return segment index to enable. - segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) { - size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted - add_to_bucket( b, n ); - // check load factor - if( sz >= mask ) { // TODO: add custom load_factor - segment_index_t new_seg = segment_index_of( mask+1 ); - __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated"); -#if TBB_USE_THREADING_TOOLS - if( !itt_load_pointer_v3(my_table+new_seg) -#else - if( !my_table[new_seg] -#endif - && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 ) - return new_seg; // The value must be processed - } - return 0; - } - - //! Prepare enough segments for number of buckets - void reserve(size_type buckets) { - if( !buckets-- ) return; - bool is_initial = !my_size; - for( size_type m = my_mask; buckets > m; m = my_mask ) - enable_segment( segment_index_of( m+1 ), is_initial ); - } - //! Swap hash_map_bases - void internal_swap(hash_map_base &table) { - std::swap(this->my_mask, table.my_mask); - std::swap(this->my_size, table.my_size); - for(size_type i = 0; i < embedded_buckets; i++) - std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list); - for(size_type i = embedded_block; i < pointers_per_table; i++) - std::swap(this->my_table[i], table.my_table[i]); - } - }; - - template - class hash_map_range; - - //! Meets requirements of a forward iterator for STL */ - /** Value is either the T or const T type of the container. - @ingroup containers */ - template - class hash_map_iterator - : public std::iterator - { - typedef Container map_type; - typedef typename Container::node node; - typedef hash_map_base::node_base node_base; - typedef hash_map_base::bucket bucket; - - template - friend bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend ptrdiff_t operator-( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend class hash_map_iterator; - - template - friend class hash_map_range; - - void advance_to_next_bucket() { // TODO?: refactor to iterator_base class - size_t k = my_index+1; - while( my_bucket && k <= my_map->my_mask ) { - // Following test uses 2's-complement wizardry - if( k& (k-2) ) // not the beginning of a segment - ++my_bucket; - else my_bucket = my_map->get_bucket( k ); - my_node = static_cast( my_bucket->node_list ); - if( hash_map_base::is_valid(my_node) ) { - my_index = k; return; - } - ++k; - } - my_bucket = 0; my_node = 0; my_index = k; // the end - } -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class interface4::concurrent_hash_map; -#else - public: // workaround -#endif - //! concurrent_hash_map over which we are iterating. - const Container *my_map; - - //! Index in hash table for current item - size_t my_index; - - //! Pointer to bucket - const bucket *my_bucket; - - //! Pointer to node that has current item - node *my_node; - - hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ); - - public: - //! 
Construct undefined iterator - hash_map_iterator() {} - hash_map_iterator( const hash_map_iterator &other ) : - my_map(other.my_map), - my_index(other.my_index), - my_bucket(other.my_bucket), - my_node(other.my_node) - {} - Value& operator*() const { - __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" ); - return my_node->item; - } - Value* operator->() const {return &operator*();} - hash_map_iterator& operator++(); - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } - }; - - template - hash_map_iterator::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) : - my_map(&map), - my_index(index), - my_bucket(b), - my_node( static_cast(n) ) - { - if( b && !hash_map_base::is_valid(n) ) - advance_to_next_bucket(); - } - - template - hash_map_iterator& hash_map_iterator::operator++() { - my_node = static_cast( my_node->next ); - if( !my_node ) advance_to_next_bucket(); - return *this; - } - - template - bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ) { - return i.my_node == j.my_node && i.my_map == j.my_map; - } - - template - bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ) { - return i.my_node != j.my_node || i.my_map != j.my_map; - } - - //! Range class used with concurrent_hash_map - /** @ingroup containers */ - template - class hash_map_range { - typedef typename Iterator::map_type map_type; - Iterator my_begin; - Iterator my_end; - mutable Iterator my_midpoint; - size_t my_grainsize; - //! Set my_midpoint to point approximately half way between my_begin and my_end. - void set_midpoint() const; - template friend class hash_map_range; - public: - //! Type for size of a range - typedef std::size_t size_type; - typedef typename Iterator::value_type value_type; - typedef typename Iterator::reference reference; - typedef typename Iterator::difference_type difference_type; - typedef Iterator iterator; - - //! True if range is empty. - bool empty() const {return my_begin==my_end;} - - //! True if range can be partitioned into two subranges. - bool is_divisible() const { - return my_midpoint!=my_end; - } - //! Split range. - hash_map_range( hash_map_range& r, split ) : - my_end(r.my_end), - my_grainsize(r.my_grainsize) - { - r.my_end = my_begin = r.my_midpoint; - __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); - __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); - set_midpoint(); - r.set_midpoint(); - } - //! type conversion - template - hash_map_range( hash_map_range& r) : - my_begin(r.my_begin), - my_end(r.my_end), - my_midpoint(r.my_midpoint), - my_grainsize(r.my_grainsize) - {} -#if TBB_DEPRECATED - //! Init range with iterators and grainsize specified - hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) : - my_begin(begin_), - my_end(end_), - my_grainsize(grainsize_) - { - if(!my_end.my_index && !my_end.my_bucket) // end - my_end.my_index = my_end.my_map->my_mask + 1; - set_midpoint(); - __TBB_ASSERT( grainsize_>0, "grainsize must be positive" ); - } -#endif - //! 
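// Sketch of driving parallel_for with the hash_map_range above (assumes TBB
// 3.0 headers and lambdas). Range traversal takes no per-element locks, so
// it must not race with concurrent insert/erase.
#include "tbb/concurrent_hash_map.h"
#include "tbb/parallel_for.h"
#include "tbb/atomic.h"

typedef tbb::concurrent_hash_map<int,int> table_t;

long sum_values( table_t& table ) {
    tbb::atomic<long> total;
    total = 0;
    tbb::parallel_for( table.range(),
        [&]( const table_t::range_type& r ) {
            long local = 0;
            for( table_t::range_type::iterator i = r.begin(); i != r.end(); ++i )
                local += i->second;
            total += local;            // atomic fetch-and-add per sub-range
        } );
    return total;                      // a real reduction might use parallel_reduce
}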
Init range with container and grainsize specified - hash_map_range( const map_type &map, size_type grainsize_ = 1 ) : - my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ), - my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ), - my_grainsize( grainsize_ ) - { - __TBB_ASSERT( grainsize_>0, "grainsize must be positive" ); - set_midpoint(); - } - const Iterator& begin() const {return my_begin;} - const Iterator& end() const {return my_end;} - //! The grain size for this range. - size_type grainsize() const {return my_grainsize;} - }; - - template - void hash_map_range::set_midpoint() const { - // Split by groups of nodes - size_t m = my_end.my_index-my_begin.my_index; - if( m > my_grainsize ) { - m = my_begin.my_index + m/2u; - hash_map_base::bucket *b = my_begin.my_map->get_bucket(m); - my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list); - } else { - my_midpoint = my_end; - } - __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index, - "my_begin is after my_midpoint" ); - __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index, - "my_midpoint is after my_end" ); - __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end, - "[my_begin, my_midpoint) range should not be empty" ); - } - - } // internal -//! @endcond - -//! Unordered map from Key to T. -/** concurrent_hash_map is associative container with concurrent access. - -@par Compatibility - The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). - -@par Exception Safety - - Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors. - - If exception happens during insert() operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment). - - If exception happens during operator=() operation, the container can have a part of source items, and methods size() and empty() can return wrong results. - -@par Changes since TBB 2.1 - - Replaced internal algorithm and data structure. Patent is pending. 
- - Added buckets number argument for constructor - -@par Changes since TBB 2.0 - - Fixed exception-safety - - Added template argument for allocator - - Added allocator argument in constructors - - Added constructor from a range of iterators - - Added several new overloaded insert() methods - - Added get_allocator() - - Added swap() - - Added count() - - Added overloaded erase(accessor &) and erase(const_accessor&) - - Added equal_range() [const] - - Added [const_]pointer, [const_]reference, and allocator_type types - - Added global functions: operator==(), operator!=(), and swap() - - @ingroup containers */ -template -class concurrent_hash_map : protected internal::hash_map_base { - template - friend class internal::hash_map_iterator; - - template - friend class internal::hash_map_range; - -public: - typedef Key key_type; - typedef T mapped_type; - typedef std::pair value_type; - typedef hash_map_base::size_type size_type; - typedef ptrdiff_t difference_type; - typedef value_type *pointer; - typedef const value_type *const_pointer; - typedef value_type &reference; - typedef const value_type &const_reference; - typedef internal::hash_map_iterator iterator; - typedef internal::hash_map_iterator const_iterator; - typedef internal::hash_map_range range_type; - typedef internal::hash_map_range const_range_type; - typedef Allocator allocator_type; - -protected: - friend class const_accessor; - struct node; - typedef typename Allocator::template rebind::other node_allocator_type; - node_allocator_type my_allocator; - HashCompare my_hash_compare; - - struct node : public node_base { - value_type item; - node( const Key &key ) : item(key, T()) {} - node( const Key &key, const T &t ) : item(key, t) {} - // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17 - void *operator new( size_t /*size*/, node_allocator_type &a ) { - void *ptr = a.allocate(1); - if(!ptr) - tbb::internal::throw_exception(tbb::internal::eid_bad_alloc); - return ptr; - } - // match placement-new form above to be called if exception thrown in constructor - void operator delete( void *ptr, node_allocator_type &a ) {return a.deallocate(static_cast(ptr),1); } - }; - - void delete_node( node_base *n ) { - my_allocator.destroy( static_cast(n) ); - my_allocator.deallocate( static_cast(n), 1); - } - - node *search_bucket( const key_type &key, bucket *b ) const { - node *n = static_cast( b->node_list ); - while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) ) - n = static_cast( n->next ); - __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket"); - return n; - } - - //! bucket accessor is to find, rehash, acquire a lock, and access a bucket - class bucket_accessor : public bucket::scoped_t { - bool my_is_writer; // TODO: use it from base type - bucket *my_b; - public: - bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); } - //! 
find a bucket by masked hashcode, optionally rehash, and acquire the lock - inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { - my_b = base->get_bucket( h ); -#if TBB_USE_THREADING_TOOLS - // TODO: actually, notification is unnecessary here, just hiding double-check - if( itt_load_pointer_with_acquire_v3(&my_b->node_list) == internal::rehash_req -#else - if( __TBB_load_with_acquire(my_b->node_list) == internal::rehash_req -#endif - && try_acquire( my_b->mutex, /*write=*/true ) ) - { - if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing - my_is_writer = true; - } - else bucket::scoped_t::acquire( my_b->mutex, /*write=*/my_is_writer = writer ); - __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL); - } - //! check whether bucket is locked for write - bool is_writer() { return my_is_writer; } - //! get bucket pointer - bucket *operator() () { return my_b; } - // TODO: optimize out - bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); } - }; - - // TODO refactor to hash_base - void rehash_bucket( bucket *b_new, const hashcode_t h ) { - __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)"); - __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); - __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed - hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - - bucket_accessor b_old( this, h & mask ); - - mask = (mask<<1) | 1; // get full mask for new bucket - __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL ); - restart: - for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(n)->item.first ); -#if TBB_USE_ASSERT - hashcode_t bmask = h & (mask>>1); - bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket - __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" ); -#endif - if( (c & mask) == h ) { - if( !b_old.is_writer() ) - if( !b_old.upgrade_to_writer() ) { - goto restart; // node ptr can be invalid due to concurrent erase - } - *p = n->next; // exclude from b_old - add_to_bucket( b_new, n ); - } else p = &n->next; // iterate to next item - } - } - -public: - - class accessor; - //! Combines data access, locking, and garbage collection. - class const_accessor { - friend class concurrent_hash_map; - friend class accessor; - void operator=( const accessor & ) const; // Deny access - const_accessor( const accessor & ); // Deny access - public: - //! Type of value - typedef const typename concurrent_hash_map::value_type value_type; - - //! True if result is empty. - bool empty() const {return !my_node;} - - //! Set to null - void release() { - if( my_node ) { - my_lock.release(); - my_node = 0; - } - } - - //! Return reference to associated value in hash table. - const_reference operator*() const { - __TBB_ASSERT( my_node, "attempt to dereference empty accessor" ); - return my_node->item; - } - - //! Return pointer to associated value in hash table. - const_pointer operator->() const { - return &operator*(); - } - - //! Create empty result - const_accessor() : my_node(NULL) {} - - //! Destroy result after releasing the underlying reference. 
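// Sketch for the const_accessor above: find() leaves the accessor holding a
// read lock on the element, released when the accessor is released or
// destroyed (illustrative names).
#include <cstdio>
#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<int, std::string> dict_t;

bool print_name( const dict_t& d, int key ) {
    dict_t::const_accessor a;                 // starts empty
    if( !d.find( a, key ) )
        return false;                         // key absent, nothing locked
    std::printf( "%s\n", a->second.c_str() ); // element is read-locked here
    return true;                              // lock released in a's destructor
}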
- ~const_accessor() { - my_node = NULL; // my_lock.release() is called in scoped_lock destructor - } - private: - node *my_node; - typename node::scoped_t my_lock; - hashcode_t my_hash; - }; - - //! Allows write access to elements and combines data access, locking, and garbage collection. - class accessor: public const_accessor { - public: - //! Type of value - typedef typename concurrent_hash_map::value_type value_type; - - //! Return reference to associated value in hash table. - reference operator*() const { - __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" ); - return this->my_node->item; - } - - //! Return pointer to associated value in hash table. - pointer operator->() const { - return &operator*(); - } - }; - - //! Construct empty table. - concurrent_hash_map(const allocator_type &a = allocator_type()) - : internal::hash_map_base(), my_allocator(a) - {} - - //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. - concurrent_hash_map(size_type n, const allocator_type &a = allocator_type()) - : my_allocator(a) - { - reserve( n ); - } - - //! Copy constructor - concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type()) - : internal::hash_map_base(), my_allocator(a) - { - internal_copy(table); - } - - //! Construction with copying iteration range and given allocator instance - template - concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type()) - : my_allocator(a) - { - reserve( std::distance(first, last) ); // TODO: load_factor? - internal_copy(first, last); - } - - //! Assignment - concurrent_hash_map& operator=( const concurrent_hash_map& table ) { - if( this!=&table ) { - clear(); - internal_copy(table); - } - return *this; - } - - - //! Rehashes and optionally resizes the whole table. - /** Useful to optimize performance before or after concurrent operations. - Also enables using of find() and count() concurrent methods in serial context. */ - void rehash(size_type n = 0); - - //! Clear table - void clear(); - - //! Clear table and destroy it. - ~concurrent_hash_map() { clear(); } - - //------------------------------------------------------------------------ - // Parallel algorithm support - //------------------------------------------------------------------------ - range_type range( size_type grainsize=1 ) { - return range_type( *this, grainsize ); - } - const_range_type range( size_type grainsize=1 ) const { - return const_range_type( *this, grainsize ); - } - - //------------------------------------------------------------------------ - // STL support - not thread-safe methods - //------------------------------------------------------------------------ - iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);} - iterator end() {return iterator(*this,0,0,0);} - const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);} - const_iterator end() const {return const_iterator(*this,0,0,0);} - std::pair equal_range( const Key& key ) { return internal_equal_range(key, end()); } - std::pair equal_range( const Key& key ) const { return internal_equal_range(key, end()); } - - //! Number of items in table. - size_type size() const { return my_size; } - - //! True if size()==0. - bool empty() const { return my_size == 0; } - - //! Upper bound on size. - size_type max_size() const {return (~size_type(0))/sizeof(node);} - - //! 
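// Sketch for the STL-support block above: begin()/end() take no locks and
// are explicitly not thread-safe, so plain iteration belongs in quiescent
// phases only (e.g., after all parallel work has joined).
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<int,int> table_t;

long serial_sum( const table_t& t ) {
    long s = 0;
    for( table_t::const_iterator i = t.begin(); i != t.end(); ++i )
        s += i->second;        // caller must guarantee no concurrent updates
    return s;
}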
Returns the current number of buckets - size_type bucket_count() const { return my_mask+1; } - - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! swap two instances. Iterators are invalidated - void swap(concurrent_hash_map &table); - - //------------------------------------------------------------------------ - // concurrent map operations - //------------------------------------------------------------------------ - - //! Return count of items (0 or 1) - size_type count( const Key &key ) const { - return const_cast(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false ); - } - - //! Find item and acquire a read lock on the item. - /** Return true if item is found, false otherwise. */ - bool find( const_accessor &result, const Key &key ) const { - result.release(); - return const_cast(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false ); - } - - //! Find item and acquire a write lock on the item. - /** Return true if item is found, false otherwise. */ - bool find( accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/false, key, NULL, &result, /*write=*/true ); - } - - //! Insert item (if not already present) and acquire a read lock on the item. - /** Returns true if item is new. */ - bool insert( const_accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/true, key, NULL, &result, /*write=*/false ); - } - - //! Insert item (if not already present) and acquire a write lock on the item. - /** Returns true if item is new. */ - bool insert( accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/true, key, NULL, &result, /*write=*/true ); - } - - //! Insert item by copying if there is no such key present already and acquire a read lock on the item. - /** Returns true if item is new. */ - bool insert( const_accessor &result, const value_type &value ) { - result.release(); - return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false ); - } - - //! Insert item by copying if there is no such key present already and acquire a write lock on the item. - /** Returns true if item is new. */ - bool insert( accessor &result, const value_type &value ) { - result.release(); - return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true ); - } - - //! Insert item by copying if there is no such key present already - /** Returns true if item is inserted. */ - bool insert( const value_type &value ) { - return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false ); - } - - //! Insert range [first, last) - template - void insert(I first, I last) { - for(; first != last; ++first) - insert( *first ); - } - - //! Erase item. - /** Return true if item was erased by particularly this call. */ - bool erase( const Key& key ); - - //! Erase item by const_accessor. - /** Return true if item was erased by particularly this call. */ - bool erase( const_accessor& item_accessor ) { - return exclude( item_accessor, /*readonly=*/ true ); - } - - //! Erase item by accessor. - /** Return true if item was erased by particularly this call. */ - bool erase( accessor& item_accessor ) { - return exclude( item_accessor, /*readonly=*/ false ); - } - -protected: - //! Insert or find item and optionally acquire a lock on the item. - bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ); - - //! 
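// Sketch for the insert() overloads above: inserting with a (write) accessor
// either creates the element or locks the existing one, making
// read-modify-write atomic per key (illustrative word-count use).
#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string,int> counter_t;

void count_word( counter_t& counters, const std::string& w ) {
    counter_t::accessor a;        // will hold a write lock
    counters.insert( a, w );      // new mapped value is T(), i.e. 0
    ++a->second;                  // exclusive while 'a' is alive
}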
delete item by accessor - bool exclude( const_accessor &item_accessor, bool readonly ); - - //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) - template - std::pair internal_equal_range( const Key& key, I end ) const; - - //! Copy "source" to *this, where *this must start out empty. - void internal_copy( const concurrent_hash_map& source ); - - template - void internal_copy(I first, I last); - - //! Fast find when no concurrent erasure is used. For internal use inside TBB only! - /** Return pointer to item with given key, or NULL if no such item exists. - Must not be called concurrently with erasure operations. */ - const_pointer internal_fast_find( const Key& key ) const { - hashcode_t h = my_hash_compare.hash( key ); -#if TBB_USE_THREADING_TOOLS - hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - hashcode_t m = my_mask; -#endif - node *n; - restart: - __TBB_ASSERT((m&(m+1))==0, NULL); - bucket *b = get_bucket( h & m ); -#if TBB_USE_THREADING_TOOLS - // TODO: actually, notification is unnecessary here, just hiding double-check - if( itt_load_pointer_with_acquire_v3(&b->node_list) == internal::rehash_req ) -#else - if( __TBB_load_with_acquire(b->node_list) == internal::rehash_req ) -#endif - { - bucket::scoped_t lock; - if( lock.try_acquire( b->mutex, /*write=*/true ) ) { - if( b->node_list == internal::rehash_req) - const_cast(this)->rehash_bucket( b, h & m ); //recursive rehashing - } - else lock.acquire( b->mutex, /*write=*/false ); - __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL); - } - n = search_bucket( key, b ); - if( n ) - return &n->item; - else if( check_mask_race( h, m ) ) - goto restart; - return 0; - } -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress "conditional expression is constant" warning. - #pragma warning( push ) - #pragma warning( disable: 4127 ) -#endif - -template -bool concurrent_hash_map::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) { - __TBB_ASSERT( !result || !result->my_node, NULL ); - segment_index_t grow_segment; - bool return_value; - node *n, *tmp_n = 0; - hashcode_t const h = my_hash_compare.hash( key ); -#if TBB_USE_THREADING_TOOLS - hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - hashcode_t m = my_mask; -#endif - restart: - {//lock scope - __TBB_ASSERT((m&(m+1))==0, NULL); - return_value = false; - // get bucket - bucket_accessor b( this, h & m ); - - // find a node - n = search_bucket( key, b() ); - if( op_insert ) { - // [opt] insert a key - if( !n ) { - if( !tmp_n ) { - if(t) tmp_n = new( my_allocator ) node(key, *t); - else tmp_n = new( my_allocator ) node(key); - } - if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion - // Rerun search_list, in case another thread inserted the item during the upgrade. - n = search_bucket( key, b() ); - if( is_valid(n) ) { // unfortunately, it did - b.downgrade_to_reader(); - goto exists; - } - } - if( check_mask_race(h, m) ) - goto restart; // b.release() is done in ~b(). - // insert and set flag to grow the container - grow_segment = insert_new_node( b(), n = tmp_n, m ); - tmp_n = 0; - return_value = true; - } else { - exists: grow_segment = 0; - } - } else { // find or count - if( !n ) { - if( check_mask_race( h, m ) ) - goto restart; // b.release() is done in ~b(). 
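// Illustrative sketch (not part of the original header): the accessor
// protocol that lookup() serves. The table type, key, and function name
// below are assumed for the example.
#include <string>
#include "tbb/concurrent_hash_map.h"
static void example_insert_or_bump( tbb::concurrent_hash_map<std::string,int> &table ) {
    tbb::concurrent_hash_map<std::string,int>::accessor a; // write-locking accessor
    if( table.insert( a, "hits" ) )   // true => new element; write lock held via `a`
        a->second = 1;                // initialize the freshly inserted item
    else
        ++a->second;                  // existing item, still updated under its lock
}                                     // per-element lock released by ~const_accessor()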
TODO: replace by continue - return false; - } - return_value = true; - grow_segment = 0; - } - if( !result ) goto check_growth; - // TODO: the following seems as generic/regular operation - // acquire the item - if( !result->my_lock.try_acquire( n->mutex, write ) ) { - // we are unlucky, prepare for longer wait - tbb::internal::atomic_backoff trials; - do { - if( !trials.bounded_pause() ) { - // the wait takes really long, restart the operation - b.release(); - __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" ); - __TBB_Yield(); -#if TBB_USE_THREADING_TOOLS - m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - m = my_mask; -#endif - goto restart; - } - } while( !result->my_lock.try_acquire( n->mutex, write ) ); - } - }//lock scope - result->my_node = n; - result->my_hash = h; -check_growth: - // [opt] grow the container - if( grow_segment ) - enable_segment( grow_segment ); - if( tmp_n ) // if op_insert only - delete_node( tmp_n ); - return return_value; -} - -template -template -std::pair concurrent_hash_map::internal_equal_range( const Key& key, I end_ ) const { - hashcode_t h = my_hash_compare.hash( key ); - hashcode_t m = my_mask; - __TBB_ASSERT((m&(m+1))==0, NULL); - h &= m; - bucket *b = get_bucket( h ); - while( b->node_list == internal::rehash_req ) { - m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - b = get_bucket( h &= m ); - } - node *n = search_bucket( key, b ); - if( !n ) - return std::make_pair(end_, end_); - iterator lower(*this, h, b, n), upper(lower); - return std::make_pair(lower, ++upper); -} - -template -bool concurrent_hash_map::exclude( const_accessor &item_accessor, bool readonly ) { - __TBB_ASSERT( item_accessor.my_node, NULL ); - node_base *const n = item_accessor.my_node; - item_accessor.my_node = NULL; // we ought release accessor anyway - hashcode_t const h = item_accessor.my_hash; -#if TBB_USE_THREADING_TOOLS - hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - hashcode_t m = my_mask; -#endif - do { - // get bucket - bucket_accessor b( this, h & m, /*writer=*/true ); - node_base **p = &b()->node_list; - while( *p && *p != n ) - p = &(*p)->next; - if( !*p ) { // someone else was the first - if( check_mask_race( h, m ) ) - continue; - item_accessor.my_lock.release(); - return false; - } - __TBB_ASSERT( *p == n, NULL ); - *p = n->next; // remove from container - my_size--; - break; - } while(true); - if( readonly ) // need to get exclusive lock - item_accessor.my_lock.upgrade_to_writer(); // return value means nothing here - item_accessor.my_lock.release(); - delete_node( n ); // Only one thread can delete it due to write lock on the chain_mutex - return true; -} - -template -bool concurrent_hash_map::erase( const Key &key ) { - node_base *n; - hashcode_t const h = my_hash_compare.hash( key ); -#if TBB_USE_THREADING_TOOLS - hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - hashcode_t m = my_mask; -#endif -restart: - {//lock scope - // get bucket - bucket_accessor b( this, h & m ); - search: - node_base **p = &b()->node_list; - n = *p; - while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->item.first ) ) { - p = &n->next; - n = *p; - } - if( !n ) { // not found, but mask could be changed - if( check_mask_race( h, m ) ) - goto restart; - return false; - } - else if( !b.is_writer() && !b.upgrade_to_writer() ) { - if( check_mask_race( h, m ) ) // contended upgrade, check mask - goto restart; - goto search; - 
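// Illustrative sketch (assumed names): the two erase paths implemented by
// exclude() and erase() here -- via an already-held accessor, or by key.
#include <string>
#include "tbb/concurrent_hash_map.h"
static bool example_erase( tbb::concurrent_hash_map<std::string,int> &table ) {
    tbb::concurrent_hash_map<std::string,int>::accessor a;
    if( table.find( a, "hits" ) )
        return table.erase( a );      // element already write-locked; exclude() path
    return table.erase( "hits" );     // by-key path; false if another thread erased it first
}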
} - *p = n->next; - my_size--; - } - { - typename node::scoped_t item_locker( n->mutex, /*write=*/true ); - } - // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor! - delete_node( n ); // Only one thread can delete it due to write lock on the bucket - return true; -} - -template -void concurrent_hash_map::swap(concurrent_hash_map &table) { - std::swap(this->my_allocator, table.my_allocator); - std::swap(this->my_hash_compare, table.my_hash_compare); - internal_swap(table); -} - -template -void concurrent_hash_map::rehash(size_type sz) { - reserve( sz ); // TODO: add reduction of number of buckets as well - hashcode_t mask = my_mask; - hashcode_t b = (mask+1)>>1; // size or first index of the last segment - __TBB_ASSERT((b&(b-1))==0, NULL); - bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing - for(; b <= mask; b++, bp++ ) { - node_base *n = bp->node_list; - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); - if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one - hashcode_t h = b; bucket *b_old = bp; - do { - __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); - hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - b_old = get_bucket( h &= m ); - } while( b_old->node_list == internal::rehash_req ); - // now h - is index of the root rehashed bucket b_old - mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments - for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(q)->item.first ); - if( (c & mask) != h ) { // should be rehashed - *p = q->next; // exclude from b_old - bucket *b_new = get_bucket( c & mask ); - __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" ); - add_to_bucket( b_new, q ); - } else p = &q->next; // iterate to next item - } - } - } -#if TBB_USE_PERFORMANCE_WARNINGS - int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics - static bool reported = false; -#endif -#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS - for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing - if( b & (b-2) ) ++bp; // not the beginning of a segment - else bp = get_bucket( b ); - node_base *n = bp->node_list; - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" ); -#if TBB_USE_PERFORMANCE_WARNINGS - if( n == internal::empty_rehashed ) empty_buckets++; - else if( n->next ) overpopulated_buckets++; -#endif -#if TBB_USE_ASSERT - for( ; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ) & mask; - __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" ); - } -#endif - } -#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS -#if TBB_USE_PERFORMANCE_WARNINGS - if( buckets > current_size) empty_buckets -= buckets - current_size; - else overpopulated_buckets -= current_size - buckets; // TODO: 
load_factor? - if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { - tbb::internal::runtime_warning( - "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", - typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); - reported = true; - } -#endif -} - -template -void concurrent_hash_map::clear() { - hashcode_t m = my_mask; - __TBB_ASSERT((m&(m+1))==0, NULL); -#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS -#if TBB_USE_PERFORMANCE_WARNINGS - int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics - static bool reported = false; -#endif - bucket *bp = 0; - // check consistency - for( segment_index_t b = 0; b <= m; b++ ) { - if( b & (b-2) ) ++bp; // not the beginning of a segment - else bp = get_bucket( b ); - node_base *n = bp->node_list; - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" ); -#if TBB_USE_PERFORMANCE_WARNINGS - if( n == internal::empty_rehashed ) empty_buckets++; - else if( n == internal::rehash_req ) buckets--; - else if( n->next ) overpopulated_buckets++; -#endif -#if __TBB_EXTRA_DEBUG - for(; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ); - h &= m; - __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" ); - } -#endif - } -#if TBB_USE_PERFORMANCE_WARNINGS - if( buckets > current_size) empty_buckets -= buckets - current_size; - else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? - if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { - tbb::internal::runtime_warning( - "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", - typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); - reported = true; - } -#endif -#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS - my_size = 0; - segment_index_t s = segment_index_of( m ); - __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" ); - cache_aligned_allocator alloc; - do { - __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" ); - segment_ptr_t buckets_ptr = my_table[s]; - size_type sz = segment_size( s ? s : 1 ); - for( segment_index_t i = 0; i < sz; i++ ) - for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) { - buckets_ptr[i].node_list = n->next; - delete_node( n ); - } - if( s >= first_block) // the first segment or the next - alloc.deallocate( buckets_ptr, sz ); - else if( s == embedded_block && embedded_block != first_block ) - alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets ); - if( s >= embedded_block ) my_table[s] = 0; - } while(s-- > 0); - my_mask = embedded_buckets - 1; -} - -template -void concurrent_hash_map::internal_copy( const concurrent_hash_map& source ) { - reserve( source.my_size ); // TODO: load_factor? 
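// Illustrative sketch (assumed usage): calling rehash() from a serial phase
// completes the deferred per-bucket rehashing checked for above, so a
// following concurrent phase of find()/count() never meets a rehash request.
#include "tbb/concurrent_hash_map.h"
static void example_prepare_for_reads( tbb::concurrent_hash_map<int,int> &table ) {
    table.rehash();   // n == 0: rehash in place without growing
    // ... a concurrent read-only phase may safely start here ...
}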
- hashcode_t mask = source.my_mask; - if( my_mask == mask ) { // optimized version - bucket *dst = 0, *src = 0; - bool rehash_required = false; - for( hashcode_t k = 0; k <= mask; k++ ) { - if( k & (k-2) ) ++dst,src++; // not the beginning of a segment - else { dst = get_bucket( k ); src = source.get_bucket( k ); } - __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = static_cast( src->node_list ); - if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets - rehash_required = true; - dst->node_list = internal::rehash_req; - } else for(; n; n = static_cast( n->next ) ) { - add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) ); - ++my_size; // TODO: replace by non-atomic op - } - } - if( rehash_required ) rehash(); - } else internal_copy( source.begin(), source.end() ); -} - -template -template -void concurrent_hash_map::internal_copy(I first, I last) { - hashcode_t m = my_mask; - for(; first != last; ++first) { - hashcode_t h = my_hash_compare.hash( first->first ); - bucket *b = get_bucket( h & m ); - __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = new( my_allocator ) node(first->first, first->second); - add_to_bucket( b, n ); - ++my_size; // TODO: replace by non-atomic op - } -} - -} // namespace interface4 - -using interface4::concurrent_hash_map; - - -template -inline bool operator==(const concurrent_hash_map &a, const concurrent_hash_map &b) { - if(a.size() != b.size()) return false; - typename concurrent_hash_map::const_iterator i(a.begin()), i_end(a.end()); - typename concurrent_hash_map::const_iterator j, j_end(b.end()); - for(; i != i_end; ++i) { - j = b.equal_range(i->first).first; - if( j == j_end || !(i->second == j->second) ) return false; - } - return true; -} - -template -inline bool operator!=(const concurrent_hash_map &a, const concurrent_hash_map &b) -{ return !(a == b); } - -template -inline void swap(concurrent_hash_map &a, concurrent_hash_map &b) -{ a.swap( b ); } - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4127 is back - -} // namespace tbb - -#endif /* __TBB_concurrent_hash_map_H */ diff --git a/tbb30_20100406oss/include/tbb/concurrent_queue.h b/tbb30_20100406oss/include/tbb/concurrent_queue.h deleted file mode 100644 index c137fb391..000000000 --- a/tbb30_20100406oss/include/tbb/concurrent_queue.h +++ /dev/null @@ -1,413 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_queue_H -#define __TBB_concurrent_queue_H - -#include "_concurrent_queue_internal.h" - -namespace tbb { - -namespace strict_ppl { - -//! A high-performance thread-safe non-blocking concurrent queue. -/** Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_queue: public internal::concurrent_queue_base_v3 { - template friend class internal::concurrent_queue_iterator; - - //! Allocator type - typedef typename A::template rebind::other page_allocator_type; - page_allocator_type my_allocator; - - //! Allocates a block of size n (bytes) - /*overide*/ virtual void *allocate_block( size_t n ) { - void *b = reinterpret_cast(my_allocator.allocate( n )); - if( !b ) - internal::throw_exception(internal::eid_bad_alloc); - return b; - } - - //! Deallocates block created by allocate_block. - /*override*/ virtual void deallocate_block( void *b, size_t n ) { - my_allocator.deallocate( reinterpret_cast(b), n ); - } - -public: - //! Element type in the queue. - typedef T value_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - typedef size_t size_type; - - //! Difference type for iterator - typedef ptrdiff_t difference_type; - - //! Allocator type - typedef A allocator_type; - - //! Construct empty queue - explicit concurrent_queue(const allocator_type& a = allocator_type()) : - my_allocator( a ) - { - } - - //! [begin,end) constructor - template - concurrent_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : - my_allocator( a ) - { - for( ; begin != end; ++begin ) - internal_push(&*begin); - } - - //! Copy constructor - concurrent_queue( const concurrent_queue& src, const allocator_type& a = allocator_type()) : - internal::concurrent_queue_base_v3(), my_allocator( a ) - { - assign( src ); - } - - //! Destroy queue - ~concurrent_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - internal_push( &source ); - } - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool try_pop( T& result ) { - return internal_try_pop( &result ); - } - - //! Return the number of items in the queue; thread unsafe - size_type unsafe_size() const {return this->internal_size();} - - //! Equivalent to size()==0. - bool empty() const {return this->internal_empty();} - - //! Clear the queue. not thread-safe. - void clear() ; - - //! Return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. 
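// Illustrative sketch (assumed names) for the queue declared above: push()
// never blocks, and try_pop() returns false instead of waiting.
#include "tbb/concurrent_queue.h"
static int example_drain( tbb::concurrent_queue<int> &q ) {
    q.push( 42 );                 // thread-safe, non-blocking enqueue
    int v, sum = 0;
    while( q.try_pop( v ) )       // non-blocking; false once the queue is empty
        sum += v;
    return sum;
}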
- //------------------------------------------------------------------------ - iterator unsafe_begin() {return iterator(*this);} - iterator unsafe_end() {return iterator();} - const_iterator unsafe_begin() const {return const_iterator(*this);} - const_iterator unsafe_end() const {return const_iterator();} -} ; - -template -concurrent_queue::~concurrent_queue() { - clear(); - this->internal_finish_clear(); -} - -template -void concurrent_queue::clear() { - while( !empty() ) { - T value; - this->internal_try_pop(&value); - } -} - -} // namespace strict_ppl - -//! A high-performance thread-safe blocking concurrent bounded queue. -/** This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics. - Note that method names agree with the PPL-style concurrent queue. - Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_bounded_queue: public internal::concurrent_queue_base_v3 { - template friend class internal::concurrent_queue_iterator; - - //! Allocator type - typedef typename A::template rebind::other page_allocator_type; - page_allocator_type my_allocator; - - typedef typename concurrent_queue_base_v3::padded_page padded_page; - - //! Class used to ensure exception-safety of method "pop" - class destroyer: internal::no_copy { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - T& get_ref( page& p, size_t index ) { - __TBB_ASSERT( index(static_cast(&p))->last)[index]; - } - - /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T(*static_cast(src)); - } - - /*override*/ virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) { - new( &get_ref(dst,dindex) ) T( get_ref( const_cast(src), sindex ) ); - } - - /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = from; - } - - /*overide*/ virtual page *allocate_page() { - size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T); - page *p = reinterpret_cast(my_allocator.allocate( n )); - if( !p ) - internal::throw_exception(internal::eid_bad_alloc); - return p; - } - - /*override*/ virtual void deallocate_page( page *p ) { - size_t n = sizeof(padded_page) + items_per_page*sizeof(T); - my_allocator.deallocate( reinterpret_cast(p), n ); - } - -public: - //! Element type in the queue. - typedef T value_type; - - //! Allocator type - typedef A allocator_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - /** Notice that the size_type is a signed integral type. - This is because the size can be negative if there are pending pops without corresponding pushes. */ - typedef std::ptrdiff_t size_type; - - //! Difference type for iterator - typedef std::ptrdiff_t difference_type; - - //! Construct empty queue - explicit concurrent_bounded_queue(const allocator_type& a = allocator_type()) : - concurrent_queue_base_v3( sizeof(T) ), my_allocator( a ) - { - } - - //! Copy constructor - concurrent_bounded_queue( const concurrent_bounded_queue& src, const allocator_type& a = allocator_type()) : - concurrent_queue_base_v3( sizeof(T) ), my_allocator( a ) - { - assign( src ); - } - - //! 
[begin,end) constructor - template - concurrent_bounded_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : - concurrent_queue_base_v3( sizeof(T) ), my_allocator( a ) - { - for( ; begin != end; ++begin ) - internal_push_if_not_full(&*begin); - } - - //! Destroy queue - ~concurrent_bounded_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - internal_push( &source ); - } - - //! Dequeue item from head of queue. - /** Block until an item becomes available, and then dequeue it. */ - void pop( T& destination ) { - internal_pop( &destination ); - } - - //! Enqueue an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool try_push( const T& source ) { - return internal_push_if_not_full( &source ); - } - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool try_pop( T& destination ) { - return internal_pop_if_present( &destination ); - } - - //! Return number of pushes minus number of pops. - /** Note that the result can be negative if there are pops waiting for the - corresponding pushes. The result can also exceed capacity() if there - are push operations in flight. */ - size_type size() const {return internal_size();} - - //! Equivalent to size()<=0. - bool empty() const {return internal_empty();} - - //! Maximum number of allowed elements - size_type capacity() const { - return my_capacity; - } - - //! Set the capacity - /** Setting the capacity to 0 causes subsequent try_push operations to always fail, - and subsequent push operations to block forever. */ - void set_capacity( size_type new_capacity ) { - internal_set_capacity( new_capacity, sizeof(T) ); - } - - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! clear the queue. not thread-safe. - void clear() ; - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. - //------------------------------------------------------------------------ - iterator unsafe_begin() {return iterator(*this);} - iterator unsafe_end() {return iterator();} - const_iterator unsafe_begin() const {return const_iterator(*this);} - const_iterator unsafe_end() const {return const_iterator();} - -}; - -template -concurrent_bounded_queue::~concurrent_bounded_queue() { - clear(); - internal_finish_clear(); -} - -template -void concurrent_bounded_queue::clear() { - while( !empty() ) { - T value; - internal_pop_if_present(&value); - } -} - -namespace deprecated { - -//! A high-performance thread-safe blocking concurrent bounded queue. -/** This is the pre-PPL TBB concurrent queue which support boundedness and blocking semantics. - Note that method names agree with the PPL-style concurrent queue. - Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_queue: public concurrent_bounded_queue { -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - template friend class internal::concurrent_queue_iterator; -#endif - -public: - //! 
Construct empty queue - explicit concurrent_queue(const A& a = A()) : - concurrent_bounded_queue( a ) - { - } - - //! Copy constructor - concurrent_queue( const concurrent_queue& src, const A& a = A()) : - concurrent_bounded_queue( src, a ) - { - } - - //! [begin,end) constructor - template - concurrent_queue( InputIterator b /*begin*/, InputIterator e /*end*/, const A& a = A()) : - concurrent_bounded_queue( b, e, a ) - { - } - - //! Enqueue an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool push_if_not_full( const T& source ) { - return try_push( source ); - } - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. - @deprecated Use try_pop() - */ - bool pop_if_present( T& destination ) { - return try_pop( destination ); - } - - typedef typename concurrent_bounded_queue::iterator iterator; - typedef typename concurrent_bounded_queue::const_iterator const_iterator; - // - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. - //------------------------------------------------------------------------ - iterator begin() {return this->unsafe_begin();} - iterator end() {return this->unsafe_end();} - const_iterator begin() const {return this->unsafe_begin();} - const_iterator end() const {return this->unsafe_end();} -}; - -} - - -#if TBB_DEPRECATED -using deprecated::concurrent_queue; -#else -using strict_ppl::concurrent_queue; -#endif - -} // namespace tbb - -#endif /* __TBB_concurrent_queue_H */ diff --git a/tbb30_20100406oss/include/tbb/concurrent_unordered_map.h b/tbb30_20100406oss/include/tbb/concurrent_unordered_map.h deleted file mode 100644 index 252196148..000000000 --- a/tbb30_20100406oss/include/tbb/concurrent_unordered_map.h +++ /dev/null @@ -1,241 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. 
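// Illustrative sketch (assumed names) for concurrent_bounded_queue from the
// header above: pop() blocks, and set_capacity() makes try_push() fail when full.
#include "tbb/concurrent_queue.h"
static int example_bounded() {
    tbb::concurrent_bounded_queue<int> q;
    q.set_capacity( 2 );          // try_push() fails once two items are pending
    q.push( 1 );                  // blocks only while the queue is full
    int v;
    q.pop( v );                   // blocks until an item is available
    return v;
}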
*/ - -#ifndef __TBB_concurrent_unordered_map_H -#define __TBB_concurrent_unordered_map_H - -#include "_concurrent_unordered_internal.h" - -namespace tbb -{ - -// Template class for hash compare -template -class tbb_hash -{ -public: - tbb_hash() {} - - size_t operator()(const Key& key) const - { - return tbb_hasher(key); - } -}; - -namespace interface5 { - -// Template class for hash map traits -template -class concurrent_unordered_map_traits -{ -protected: - typedef std::pair value_type; - typedef Key key_type; - typedef Hash_compare hash_compare; - typedef typename Allocator::template rebind::other allocator_type; - enum { allow_multimapping = Allow_multimapping }; - - concurrent_unordered_map_traits() : my_hash_compare() {} - concurrent_unordered_map_traits(const hash_compare& hc) : my_hash_compare(hc) {} - - class value_compare : public std::binary_function - { - friend class concurrent_unordered_map_traits; - - public: - bool operator()(const value_type& left, const value_type& right) const - { - return (my_hash_compare(left.first, right.first)); - } - - value_compare(const hash_compare& comparator) : my_hash_compare(comparator) {} - - protected: - hash_compare my_hash_compare; // the comparator predicate for keys - }; - - template - static const Key& get_key(const std::pair& value) { - return (value.first); - } - - hash_compare my_hash_compare; // the comparator predicate for keys -}; - -template , typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > > -class concurrent_unordered_map : public internal::concurrent_unordered_base< concurrent_unordered_map_traits, Allocator, false> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef internal::concurrent_unordered_base< concurrent_unordered_map_traits > base_type; - typedef concurrent_unordered_map_traits, Allocator, false> traits_type; - using traits_type::my_hash_compare; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::end; - using base_type::find; - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef T mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit concurrent_unordered_map(size_type n_of_buckets = 8, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - } - - concurrent_unordered_map(const Allocator& a) : base_type(8, key_compare(), a) - { - } - - template - concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = 8, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : 
base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - for (; first != last; ++first) - base_type::insert(*first); - } - - concurrent_unordered_map(const concurrent_unordered_map& table) : base_type(table) - { - } - - concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a) - : base_type(table, a) - { - } - - concurrent_unordered_map& operator=(const concurrent_unordered_map& table) - { - base_type::operator=(table); - return (*this); - } - - iterator unsafe_erase(const_iterator where) - { - return base_type::unsafe_erase(where); - } - - size_type unsafe_erase(const key_type& key) - { - return base_type::unsafe_erase(key); - } - - iterator unsafe_erase(const_iterator first, const_iterator last) - { - return base_type::unsafe_erase(first, last); - } - - void swap(concurrent_unordered_map& table) - { - base_type::swap(table); - } - - // Observers - hasher hash_function() const - { - return my_hash_compare.my_hash_object; - } - - key_equal key_eq() const - { - return my_hash_compare.my_key_compare_object; - } - - mapped_type& operator[](const key_type& key) - { - iterator where = find(key); - - if (where == end()) - { - where = insert(std::pair(key, mapped_type())).first; - } - - return ((*where).second); - } - - mapped_type& at(const key_type& key) - { - iterator where = find(key); - - if (where == end()) - { - tbb::internal::throw_exception(tbb::internal::eid_invalid_key); - } - - return ((*where).second); - } - - const mapped_type& at(const key_type& key) const - { - const_iterator where = find(key); - - if (where == end()) - { - tbb::internal::throw_exception(tbb::internal::eid_invalid_key); - } - - return ((*where).second); - } -}; - -} // namespace interface5 - -using interface5::concurrent_unordered_map; - -} // namespace tbb - -#endif// __TBB_concurrent_unordered_map_H diff --git a/tbb30_20100406oss/include/tbb/concurrent_vector.h b/tbb30_20100406oss/include/tbb/concurrent_vector.h deleted file mode 100644 index 8106eb494..000000000 --- a/tbb30_20100406oss/include/tbb/concurrent_vector.h +++ /dev/null @@ -1,1060 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
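// Illustrative sketch (assumed names) for concurrent_unordered_map above:
// unlike concurrent_hash_map there are no locking accessors, so the reference
// returned by operator[] needs external synchronization for concurrent writes.
#include <string>
#include "tbb/concurrent_unordered_map.h"
static int example_lookup( tbb::concurrent_unordered_map<std::string,int> &m ) {
    m["hits"] = 1;                // inserts a default-constructed value if absent
    return m.at( "hits" );        // throws (eid_invalid_key) when the key is missing
}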
-*/ - -#ifndef __TBB_concurrent_vector_H -#define __TBB_concurrent_vector_H - -#include "tbb_stddef.h" -#include "tbb_exception.h" -#include "atomic.h" -#include "cache_aligned_allocator.h" -#include "blocked_range.h" -#include "tbb_machine.h" -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if _MSC_VER==1500 && !__INTEL_COMPILER - // VS2008/VC9 seems to have an issue; limits pull in math.h - #pragma warning( push ) - #pragma warning( disable: 4985 ) -#endif -#include /* std::numeric_limits */ -#if _MSC_VER==1500 && !__INTEL_COMPILER - #pragma warning( pop ) -#endif - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4267) -#endif - -namespace tbb { - -template > -class concurrent_vector; - -//! @cond INTERNAL -namespace internal { - - //! Bad allocation marker - static void *const vector_allocation_error_flag = reinterpret_cast(size_t(63)); - - //! Routine that loads pointer from location pointed to by src without any fence, without causing ITT to report a race. - void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src ); - - //! Base class of concurrent vector implementation. - /** @ingroup containers */ - class concurrent_vector_base_v3 { - protected: - - // Basic types declarations - typedef size_t segment_index_t; - typedef size_t size_type; - - // Using enumerations due to Mac linking problems of static const variables - enum { - // Size constants - default_initial_segments = 1, // 2 initial items - //! Number of slots for segment's pointers inside the class - pointers_per_short_table = 3, // to fit into 8 words of entire structure - pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit - }; - - // Segment pointer. Can be zero-initialized - struct segment_t { - void* array; -#if TBB_USE_ASSERT - ~segment_t() { - __TBB_ASSERT( array <= internal::vector_allocation_error_flag, "should have been freed by clear" ); - } -#endif /* TBB_USE_ASSERT */ - }; - - // Data fields - - //! allocator function pointer - void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t); - - //! count of segments in the first block - atomic my_first_block; - - //! Requested size of vector - atomic my_early_size; - - //! Pointer to the segments table - atomic my_segment; - - //! embedded storage of segment pointers - segment_t my_storage[pointers_per_short_table]; - - // Methods - - concurrent_vector_base_v3() { - my_early_size = 0; - my_first_block = 0; // here is not default_initial_segments - for( segment_index_t i = 0; i < pointers_per_short_table; i++) - my_storage[i].array = NULL; - my_segment = my_storage; - } - __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3(); - - static segment_index_t segment_index_of( size_type index ) { - return segment_index_t( __TBB_Log2( index|1 ) ); - } - - static segment_index_t segment_base( segment_index_t k ) { - return (segment_index_t(1)< - class vector_iterator - { - //! concurrent_vector over which we are iterating. - Container* my_vector; - - //! Index into the vector - size_t my_index; - - //! 
Caches my_vector->internal_subscript(my_index) - /** NULL if cached value is not available */ - mutable Value* my_item; - - template - friend vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ); - - template - friend bool operator==( const vector_iterator& i, const vector_iterator& j ); - - template - friend bool operator<( const vector_iterator& i, const vector_iterator& j ); - - template - friend ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ); - - template - friend class internal::vector_iterator; - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class tbb::concurrent_vector; -#else -public: // workaround for MSVC -#endif - - vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) : - my_vector(const_cast(&vector)), - my_index(index), - my_item(static_cast(ptr)) - {} - - public: - //! Default constructor - vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {} - - vector_iterator( const vector_iterator& other ) : - my_vector(other.my_vector), - my_index(other.my_index), - my_item(other.my_item) - {} - - vector_iterator operator+( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index+offset ); - } - vector_iterator &operator+=( ptrdiff_t offset ) { - my_index+=offset; - my_item = NULL; - return *this; - } - vector_iterator operator-( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index-offset ); - } - vector_iterator &operator-=( ptrdiff_t offset ) { - my_index-=offset; - my_item = NULL; - return *this; - } - Value& operator*() const { - Value* item = my_item; - if( !item ) { - item = my_item = &my_vector->internal_subscript(my_index); - } - __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" ); - return *item; - } - Value& operator[]( ptrdiff_t k ) const { - return my_vector->internal_subscript(my_index+k); - } - Value* operator->() const {return &operator*();} - - //! Pre increment - vector_iterator& operator++() { - size_t k = ++my_index; - if( my_item ) { - // Following test uses 2's-complement wizardry - if( (k& (k-2))==0 ) { - // k is a power of two that is at least k-2 - my_item= NULL; - } else { - ++my_item; - } - } - return *this; - } - - //! Pre decrement - vector_iterator& operator--() { - __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" ); - size_t k = my_index--; - if( my_item ) { - // Following test uses 2's-complement wizardry - if( (k& (k-2))==0 ) { - // k is a power of two that is at least k-2 - my_item= NULL; - } else { - --my_item; - } - } - return *this; - } - - //! Post increment - vector_iterator operator++(int) { - vector_iterator result = *this; - operator++(); - return result; - } - - //! 
Post decrement - vector_iterator operator--(int) { - vector_iterator result = *this; - operator--(); - return result; - } - - // STL support - - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::random_access_iterator_tag iterator_category; - }; - - template - vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ) { - return vector_iterator( *v.my_vector, v.my_index+offset ); - } - - template - bool operator==( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index==j.my_index && i.my_vector == j.my_vector; - } - - template - bool operator!=( const vector_iterator& i, const vector_iterator& j ) { - return !(i==j); - } - - template - bool operator<( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index - bool operator>( const vector_iterator& i, const vector_iterator& j ) { - return j - bool operator>=( const vector_iterator& i, const vector_iterator& j ) { - return !(i - bool operator<=( const vector_iterator& i, const vector_iterator& j ) { - return !(j - ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ) { - return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index); - } - - template - class allocator_base { - public: - typedef typename A::template - rebind::other allocator_type; - allocator_type my_allocator; - - allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {} - }; - -} // namespace internal -//! @endcond - -//! Concurrent vector container -/** concurrent_vector is a container having the following main properties: - - It provides random indexed access to its elements. The index of the first element is 0. - - It ensures safe concurrent growing its size (different threads can safely append new elements). - - Adding new elements does not invalidate existing iterators and does not change indices of existing items. - -@par Compatibility - The class meets all Container Requirements and Reversible Container Requirements from - C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet - Sequence Requirements due to absence of insert() and erase() methods. - -@par Exception Safety - Methods working with memory allocation and/or new elements construction can throw an - exception if allocator fails to allocate memory or element's default constructor throws one. - Concurrent vector's element of type T must conform to the following requirements: - - Throwing an exception is forbidden for destructor of T. - - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized. - . - Otherwise, the program's behavior is undefined. -@par - If an exception happens inside growth or assignment operation, an instance of the vector becomes invalid unless it is stated otherwise in the method documentation. - Invalid state means: - - There are no guaranties that all items were initialized by a constructor. The rest of items is zero-filled, including item where exception happens. - - An invalid vector instance cannot be repaired; it is unable to grow anymore. - - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful. - - Attempt to access not allocated elements using operator[] or iterators results in access violation or segmentation fault exception, and in case of using at() method a C++ exception is thrown. - . 
- If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if one of subsequent grow operations fails. - -@par Fragmentation - Unlike an STL vector, a concurrent_vector does not move existing elements if it needs - to allocate more memory. The container is divided into a series of contiguous arrays of - elements. The first reservation, growth, or assignment operation determines the size of - the first array. Using small number of elements as initial size incurs fragmentation that - may increase element access time. Internal layout can be optimized by method compact() that - merges several smaller arrays into one solid. - -@par Changes since TBB 2.1 - - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated. - - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of vector - - Added resize() methods (not thread-safe) - - Added cbegin/cend/crbegin/crend methods - - Changed return type of methods grow* and push_back to iterator - -@par Changes since TBB 2.0 - - Implemented exception-safety guaranties - - Added template argument for allocator - - Added allocator argument in constructors - - Faster index calculation - - First growth call specifies a number of segments to be merged in the first allocation. - - Fixed memory blow up for swarm of vector's instances of small size - - Added grow_by(size_type n, const_reference t) growth using copying constructor to init new items. - - Added STL-like constructors. - - Added operators ==, < and derivatives - - Added at() method, approved for using after an exception was thrown inside the vector - - Added get_allocator() method. - - Added assign() methods - - Added compact() method to defragment first segments - - Added swap() method - - range() defaults on grainsize = 1 supporting auto grainsize algorithms. 
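// Illustrative sketch (assumed names): iterating a concurrent_vector with
// parallel_for over range(), relying on elements never being moved in memory.
#include "tbb/concurrent_vector.h"
#include "tbb/parallel_for.h"
struct ExampleScale {
    void operator()( const tbb::concurrent_vector<float>::range_type &r ) const {
        for( tbb::concurrent_vector<float>::range_type::iterator i = r.begin(); i != r.end(); ++i )
            *i *= 2.0f;   // each subrange is processed by one task
    }
};
static void example_scale( tbb::concurrent_vector<float> &v ) {
    tbb::parallel_for( v.range(), ExampleScale() ); // grainsize defaults to 1
}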
- - @ingroup containers */ -template -class concurrent_vector: protected internal::allocator_base, - private internal::concurrent_vector_base { -private: - template - class generic_range_type: public blocked_range { - public: - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef I iterator; - typedef ptrdiff_t difference_type; - generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} - template - generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} - generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} - }; - - template - friend class internal::vector_iterator; -public: - //------------------------------------------------------------------------ - // STL compatible types - //------------------------------------------------------------------------ - typedef internal::concurrent_vector_base_v3::size_type size_type; - typedef typename internal::allocator_base::allocator_type allocator_type; - - typedef T value_type; - typedef ptrdiff_t difference_type; - typedef T& reference; - typedef const T& const_reference; - typedef T *pointer; - typedef const T *const_pointer; - - typedef internal::vector_iterator iterator; - typedef internal::vector_iterator const_iterator; - -#if !defined(_MSC_VER) || _CPPLIB_VER>=300 - // Assume ISO standard definition of std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#else - // Use non-standard std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */ - - //------------------------------------------------------------------------ - // Parallel algorithm support - //------------------------------------------------------------------------ - typedef generic_range_type range_type; - typedef generic_range_type const_range_type; - - //------------------------------------------------------------------------ - // STL compatible constructors & destructors - //------------------------------------------------------------------------ - - //! Construct empty vector. - explicit concurrent_vector(const allocator_type &a = allocator_type()) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - } - - //! Copying constructor - concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_copy(vector, sizeof(T), ©_array); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! Copying constructor for vector with different allocator type - template - concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_copy(vector.internal_vector_base(), sizeof(T), ©_array); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! 
Construction with initial size specified by argument n - explicit concurrent_vector(size_type n) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array ); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance - concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type()) - : internal::allocator_base(a) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! Construction with copying iteration range and given allocator instance - template - concurrent_vector(I first, I last, const allocator_type &a = allocator_type()) - : internal::allocator_base(a) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_assign_range(first, last, static_cast::is_integer> *>(0) ); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! Assignment - concurrent_vector& operator=( const concurrent_vector& vector ) { - if( this != &vector ) - internal_assign(vector, sizeof(T), &destroy_array, &assign_array, ©_array); - return *this; - } - - //! Assignment for vector with different allocator type - template - concurrent_vector& operator=( const concurrent_vector& vector ) { - if( static_cast( this ) != static_cast( &vector ) ) - internal_assign(vector.internal_vector_base(), - sizeof(T), &destroy_array, &assign_array, ©_array); - return *this; - } - - //------------------------------------------------------------------------ - // Concurrent operations - //------------------------------------------------------------------------ - //! Grow by "delta" elements. -#if TBB_DEPRECATED - /** Returns old size. */ - size_type grow_by( size_type delta ) { - return delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size; - } -#else - /** Returns iterator pointing to the first new element. */ - iterator grow_by( size_type delta ) { - return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size); - } -#endif - - //! Grow by "delta" elements using copying constuctor. -#if TBB_DEPRECATED - /** Returns old size. */ - size_type grow_by( size_type delta, const_reference t ) { - return delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast(&t) ) : my_early_size; - } -#else - /** Returns iterator pointing to the first new element. */ - iterator grow_by( size_type delta, const_reference t ) { - return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast(&t) ) : my_early_size); - } -#endif - - //! Append minimal sequence of elements such that size()>=n. -#if TBB_DEPRECATED - /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated. - May return while other elements are being constructed by other threads. 
*/ - void grow_to_at_least( size_type n ) { - if( n ) internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL ); - }; -#else - /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated. - May return while other elements are being constructed by other threads. - Returns iterator that points to beginning of appended sequence. - If no elements were appended, returns iterator pointing to nth element. */ - iterator grow_to_at_least( size_type n ) { - size_type m=0; - if( n ) { - m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL ); - if( m>n ) m=n; - } - return iterator(*this, m); - }; -#endif - - //! Push item -#if TBB_DEPRECATED - size_type push_back( const_reference item ) -#else - /** Returns iterator pointing to the new element. */ - iterator push_back( const_reference item ) -#endif - { - size_type k; - void *ptr = internal_push_back(sizeof(T),k); - internal_loop_guide loop(1, ptr); - loop.init(&item); -#if TBB_DEPRECATED - return k; -#else - return iterator(*this, k, ptr); -#endif - } - - //! Get reference to element at given index. - /** This method is thread-safe for concurrent reads, and also while growing the vector, - as long as the calling thread has checked that index<size(). */ - reference operator[]( size_type index ) { - return internal_subscript(index); - } - - //! Get const reference to element at given index. - const_reference operator[]( size_type index ) const { - return internal_subscript(index); - } - - //! Get reference to element at given index. Throws exceptions on errors. - reference at( size_type index ) { - return internal_subscript_with_exceptions(index); - } - - //! Get const reference to element at given index. Throws exceptions on errors. - const_reference at( size_type index ) const { - return internal_subscript_with_exceptions(index); - } - - //! Get range for iterating with parallel algorithms - range_type range( size_t grainsize = 1) { - return range_type( begin(), end(), grainsize ); - } - - //! Get const range for iterating with parallel algorithms - const_range_type range( size_t grainsize = 1 ) const { - return const_range_type( begin(), end(), grainsize ); - } - //------------------------------------------------------------------------ - // Capacity - //------------------------------------------------------------------------ - //! Return size of vector. It may include elements under construction - size_type size() const { - size_type sz = my_early_size, cp = internal_capacity(); - return cp < sz ? cp : sz; - } - - //! Return true if vector is not empty or has elements under construction at least. - bool empty() const {return !my_early_size;} - - //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value. - size_type capacity() const {return internal_capacity();} - - //! Allocate enough space to grow to size n without having to allocate more memory later. - /** Like most of the methods provided for STL compatibility, this method is *not* thread safe. - The capacity afterwards may be bigger than the requested reservation. */ - void reserve( size_type n ) { - if( n ) - internal_reserve(n, sizeof(T), max_size()); - } - - //! Resize the vector. Not thread-safe. - void resize( size_type n ) { - internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array ); - } - - //! Resize the vector, copy t for new elements. Not thread-safe. 
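    // Usage sketch (illustrative commentary, not part of the original header):
    // concurrent append from a parallel loop, then single-threaded reads.
    // Appender and process() are placeholders; Appender::operator()(int i) is
    // assumed to call v.push_back(i*i).
    //
    //     tbb::concurrent_vector<int> v;
    //     tbb::parallel_for(0, 1000, Appender(v));   // thread-safe growth
    //     for (size_t i = 0; i < v.size(); ++i)      // sequential epilogue
    //         process(v[i]);
    //
    // In the non-deprecated API push_back() returns an iterator to the new
    // element, so its index is known without a racy size() query, and
    // operator[] is safe during concurrent growth once index < size() has
    // been checked, as the comments above state.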
- void resize( size_type n, const_reference t ) { - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } - -#if TBB_DEPRECATED - //! An alias for shrink_to_fit() - void compact() {shrink_to_fit();} -#endif /* TBB_DEPRECATED */ - - //! Optimize memory usage and fragmentation. - void shrink_to_fit(); - - //! Upper bound on argument to reserve. - size_type max_size() const {return (~size_type(0))/sizeof(T);} - - //------------------------------------------------------------------------ - // STL support - //------------------------------------------------------------------------ - - //! start iterator - iterator begin() {return iterator(*this,0);} - //! end iterator - iterator end() {return iterator(*this,size());} - //! start const iterator - const_iterator begin() const {return const_iterator(*this,0);} - //! end const iterator - const_iterator end() const {return const_iterator(*this,size());} - //! start const iterator - const_iterator cbegin() const {return const_iterator(*this,0);} - //! end const iterator - const_iterator cend() const {return const_iterator(*this,size());} - //! reverse start iterator - reverse_iterator rbegin() {return reverse_iterator(end());} - //! reverse end iterator - reverse_iterator rend() {return reverse_iterator(begin());} - //! reverse start const iterator - const_reverse_iterator rbegin() const {return const_reverse_iterator(end());} - //! reverse end const iterator - const_reverse_iterator rend() const {return const_reverse_iterator(begin());} - //! reverse start const iterator - const_reverse_iterator crbegin() const {return const_reverse_iterator(end());} - //! reverse end const iterator - const_reverse_iterator crend() const {return const_reverse_iterator(begin());} - //! the first item - reference front() { - __TBB_ASSERT( size()>0, NULL); - return static_cast(my_segment[0].array)[0]; - } - //! the first item const - const_reference front() const { - __TBB_ASSERT( size()>0, NULL); - return static_cast(my_segment[0].array)[0]; - } - //! the last item - reference back() { - __TBB_ASSERT( size()>0, NULL); - return internal_subscript( size()-1 ); - } - //! the last item const - const_reference back() const { - __TBB_ASSERT( size()>0, NULL); - return internal_subscript( size()-1 ); - } - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! assign n items by copying t item - void assign(size_type n, const_reference t) { - clear(); - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } - - //! assign range [first, last) - template - void assign(I first, I last) { - clear(); internal_assign_range( first, last, static_cast::is_integer> *>(0) ); - } - - //! swap two instances - void swap(concurrent_vector &vector) { - if( this != &vector ) { - concurrent_vector_base_v3::internal_swap(static_cast(vector)); - std::swap(this->my_allocator, vector.my_allocator); - } - } - - //! Clear container while keeping memory allocated. - /** To free up the memory, use in conjunction with method compact(). Not thread safe **/ - void clear() { - internal_clear(&destroy_array); - } - - //! Clear and destroy vector. - ~concurrent_vector() { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - // base class destructor call should be then - } - - const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; } -private: - //! 
Allocate k items - static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) { - return static_cast&>(vb).my_allocator.allocate(k); - } - //! Free k segments from table - void internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block); - - //! Get reference to element at given index. - T& internal_subscript( size_type index ) const; - - //! Get reference to element at given index with errors checks - T& internal_subscript_with_exceptions( size_type index ) const; - - //! assign n items by copying t - void internal_assign_n(size_type n, const_pointer p) { - internal_resize( n, sizeof(T), max_size(), static_cast(p), &destroy_array, p? &initialize_array_by : &initialize_array ); - } - - //! helper class - template class is_integer_tag; - - //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9 - template - void internal_assign_range(I first, I last, is_integer_tag *) { - internal_assign_n(static_cast(first), &static_cast(last)); - } - //! inline proxy assign by iterators - template - void internal_assign_range(I first, I last, is_integer_tag *) { - internal_assign_iterators(first, last); - } - //! assign by iterators - template - void internal_assign_iterators(I first, I last); - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n ); - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n ); - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n ); - - //! Assign n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n ); - - //! Destroy n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n ); - - //! Exception-aware helper class for filling a segment by exception-danger operators of user class - class internal_loop_guide : internal::no_copy { - public: - const pointer array; - const size_type n; - size_type i; - internal_loop_guide(size_type ntrials, void *ptr) - : array(static_cast(ptr)), n(ntrials), i(0) {} - void init() { for(; i < n; ++i) new( &array[i] ) T(); } - void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*static_cast(src)); } - void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(static_cast(src)[i]); } - void assign(const void *src) { for(; i < n; ++i) array[i] = static_cast(src)[i]; } - template void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); } - ~internal_loop_guide() { - if(i < n) // if exception raised, do zerroing on the rest of items - std::memset(array+i, 0, (n-i)*sizeof(value_type)); - } - }; -}; - -template -void concurrent_vector::shrink_to_fit() { - internal_segments_table old; - __TBB_TRY { - if( internal_compact( sizeof(T), &old, &destroy_array, ©_array ) ) - internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments - } __TBB_CATCH(...) { - if( old.first_block ) // free segment allocated for compacting. 
Only for support of exceptions in ctor of user T[ype] - internal_free_segments( old.table, 1, old.first_block ); - __TBB_RETHROW(); - } -} - -template -void concurrent_vector::internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block) { - // Free the arrays - while( k > first_block ) { - --k; - T* array = static_cast(table[k]); - table[k] = NULL; - if( array > internal::vector_allocation_error_flag ) // check for correct segment pointer - this->my_allocator.deallocate( array, segment_size(k) ); - } - T* array = static_cast(table[0]); - if( array > internal::vector_allocation_error_flag ) { - __TBB_ASSERT( first_block > 0, NULL ); - while(k > 0) table[--k] = NULL; - this->my_allocator.deallocate( array, segment_size(first_block) ); - } -} - -template -T& concurrent_vector::internal_subscript( size_type index ) const { - __TBB_ASSERT( index < my_early_size, "index out of bounds" ); - size_type j = index; - segment_index_t k = segment_base_index_of( j ); - __TBB_ASSERT( (segment_t*)my_segment != my_storage || k < pointers_per_short_table, "index is being allocated" ); - // no need in __TBB_load_with_acquire since thread works in own space or gets -#if TBB_USE_THREADING_TOOLS - T* array = static_cast( tbb::internal::itt_load_pointer_v3(&my_segment[k].array)); -#else - T* array = static_cast(my_segment[k].array); -#endif /* TBB_USE_THREADING_TOOLS */ - __TBB_ASSERT( array != internal::vector_allocation_error_flag, "the instance is broken by bad allocation. Use at() instead" ); - __TBB_ASSERT( array, "index is being allocated" ); - return array[j]; -} - -template -T& concurrent_vector::internal_subscript_with_exceptions( size_type index ) const { - if( index >= my_early_size ) - internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range - size_type j = index; - segment_index_t k = segment_base_index_of( j ); - if( (segment_t*)my_segment == my_storage && k >= pointers_per_short_table ) - internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error - void *array = my_segment[k].array; // no need in __TBB_load_with_acquire - if( array <= internal::vector_allocation_error_flag ) // check for correct segment pointer - internal::throw_exception(internal::eid_index_range_error); // throw std::range_error - return static_cast(array)[j]; -} - -template template -void concurrent_vector::internal_assign_iterators(I first, I last) { - __TBB_ASSERT(my_early_size == 0, NULL); - size_type n = std::distance(first, last); - if( !n ) return; - internal_reserve(n, sizeof(T), max_size()); - my_early_size = n; - segment_index_t k = 0; - size_type sz = segment_size( my_first_block ); - while( sz < n ) { - internal_loop_guide loop(sz, my_segment[k].array); - loop.iterate(first); - n -= sz; - if( !k ) k = my_first_block; - else { ++k; sz <<= 1; } - } - internal_loop_guide loop(n, my_segment[k].array); - loop.iterate(first); -} - -template -void concurrent_vector::initialize_array( void* begin, const void *, size_type n ) { - internal_loop_guide loop(n, begin); loop.init(); -} - -template -void concurrent_vector::initialize_array_by( void* begin, const void *src, size_type n ) { - internal_loop_guide loop(n, begin); loop.init(src); -} - -template -void concurrent_vector::copy_array( void* dst, const void* src, size_type n ) { - internal_loop_guide loop(n, dst); loop.copy(src); -} - -template -void concurrent_vector::assign_array( void* dst, const void* src, size_type n ) { - internal_loop_guide loop(n, dst); loop.assign(src); -} - -#if 
defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warning - #pragma warning (push) - #pragma warning (disable: 4189) -#endif -template -void concurrent_vector::destroy_array( void* begin, size_type n ) { - T* array = static_cast(begin); - for( size_type j=n; j>0; --j ) - array[j-1].~T(); // destructors are supposed to not throw any exceptions -} -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4189 is back - -// concurrent_vector's template functions -template -inline bool operator==(const concurrent_vector &a, const concurrent_vector &b) { - // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin()); - if(a.size() != b.size()) return false; - typename concurrent_vector::const_iterator i(a.begin()); - typename concurrent_vector::const_iterator j(b.begin()); - for(; i != a.end(); ++i, ++j) - if( !(*i == *j) ) return false; - return true; -} - -template -inline bool operator!=(const concurrent_vector &a, const concurrent_vector &b) -{ return !(a == b); } - -template -inline bool operator<(const concurrent_vector &a, const concurrent_vector &b) -{ return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); } - -template -inline bool operator>(const concurrent_vector &a, const concurrent_vector &b) -{ return b < a; } - -template -inline bool operator<=(const concurrent_vector &a, const concurrent_vector &b) -{ return !(b < a); } - -template -inline bool operator>=(const concurrent_vector &a, const concurrent_vector &b) -{ return !(a < b); } - -template -inline void swap(concurrent_vector &a, concurrent_vector &b) -{ a.swap( b ); } - -} // namespace tbb - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_Wp64) - #pragma warning (pop) -#endif // warning 4267 is back - -#endif /* __TBB_concurrent_vector_H */ diff --git a/tbb30_20100406oss/include/tbb/critical_section.h b/tbb30_20100406oss/include/tbb/critical_section.h deleted file mode 100644 index 6926770d0..000000000 --- a/tbb30_20100406oss/include/tbb/critical_section.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
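    Usage sketch for the critical_section defined below (illustrative only,
    not part of the original file):

        tbb::critical_section cs;
        void update() {
            tbb::critical_section::scoped_lock guard(cs); // locks here,
            // ... protected work ...                     // unlocks in dtor
        }

    Note that the mutex is not recursive: lock() throws if the calling
    thread already holds it, and try_lock() simply returns false in that
    case.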
-*/ - -#ifndef _TBB_CRITICAL_SECTION_H_ -#define _TBB_CRITICAL_SECTION_H_ - -#if _WIN32||_WIN64 -#include -#else -#include -#include -#endif // _WIN32||WIN64 - -#include "tbb_stddef.h" -#include "tbb_thread.h" -#include "tbb_exception.h" - -#include "tbb_profiling.h" - -namespace tbb { - - namespace internal { -class critical_section_v4 : internal::no_copy { -#if _WIN32||_WIN64 - CRITICAL_SECTION my_impl; -#else - pthread_mutex_t my_impl; -#endif - tbb_thread::id my_tid; -public: - - void __TBB_EXPORTED_METHOD internal_construct(); - - critical_section_v4() { -#if _WIN32||_WIN64 - InitializeCriticalSection(&my_impl); -#else - pthread_mutex_init(&my_impl, NULL); -#endif - internal_construct(); - } - - ~critical_section_v4() { - __TBB_ASSERT(my_tid == tbb_thread::id(), "Destroying a still-held critical section"); -#if _WIN32||_WIN64 - DeleteCriticalSection(&my_impl); -#else - pthread_mutex_destroy(&my_impl); -#endif - } - - class scoped_lock : internal::no_copy { - private: - critical_section_v4 &my_crit; - public: - scoped_lock( critical_section_v4& lock_me) :my_crit(lock_me) { - my_crit.lock(); - } - - ~scoped_lock() { - my_crit.unlock(); - } - }; - - void lock() { - tbb_thread::id local_tid = this_tbb_thread::get_id(); - if(local_tid == my_tid) throw_exception( eid_improper_lock ); -#if _WIN32||_WIN64 - EnterCriticalSection( &my_impl ); -#else - int rval = pthread_mutex_lock(&my_impl); - __TBB_ASSERT_EX(!rval, "critical_section::lock: pthread_mutex_lock failed"); -#endif - __TBB_ASSERT(my_tid == tbb_thread::id(), NULL); - my_tid = local_tid; - } - - bool try_lock() { - bool gotlock; - tbb_thread::id local_tid = this_tbb_thread::get_id(); - if(local_tid == my_tid) return false; -#if _WIN32||_WIN64 - gotlock = TryEnterCriticalSection( &my_impl ) != 0; -#else - int rval = pthread_mutex_trylock(&my_impl); - // valid returns are 0 (locked) and [EBUSY] - __TBB_ASSERT(rval == 0 || rval == EBUSY, "critical_section::trylock: pthread_mutex_trylock failed"); - gotlock = rval == 0; -#endif - if(gotlock) { - my_tid = local_tid; - } - return gotlock; - } - - void unlock() { - __TBB_ASSERT(this_tbb_thread::get_id() == my_tid, "thread unlocking critical_section is not thread that locked it"); - my_tid = tbb_thread::id(); -#if _WIN32||_WIN64 - LeaveCriticalSection( &my_impl ); -#else - int rval = pthread_mutex_unlock(&my_impl); - __TBB_ASSERT_EX(!rval, "critical_section::unlock: pthread_mutex_unlock failed"); -#endif - } - - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; -}; // critical_section_v4 -} // namespace internal -typedef internal::critical_section_v4 critical_section; - -__TBB_DEFINE_PROFILING_SET_NAME(critical_section) -} // namespace tbb -#endif // _TBB_CRITICAL_SECTION_H_ diff --git a/tbb30_20100406oss/include/tbb/enumerable_thread_specific.h b/tbb30_20100406oss/include/tbb/enumerable_thread_specific.h deleted file mode 100644 index 668c35312..000000000 --- a/tbb30_20100406oss/include/tbb/enumerable_thread_specific.h +++ /dev/null @@ -1,1051 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_enumerable_thread_specific_H -#define __TBB_enumerable_thread_specific_H - -#include "concurrent_vector.h" -#include "tbb_thread.h" -#include "cache_aligned_allocator.h" -#if __SUNPRO_CC -#include // for memcpy -#endif - -#if _WIN32||_WIN64 -#include -#else -#include -#endif - -namespace tbb { - -//! enum for selecting between single key and key-per-instance versions -enum ets_key_usage_type { ets_key_per_instance, ets_no_key }; - -namespace interface5 { - - //! @cond - namespace internal { - - template - class ets_base: tbb::internal::no_copy { - protected: -#if _WIN32||_WIN64 - typedef DWORD key_type; -#else - typedef pthread_t key_type; -#endif -#if __TBB_GCC_3_3_PROTECTED_BROKEN - public: -#endif - struct slot; - - struct array { - array* next; - size_t lg_size; - slot& at( size_t k ) { - return ((slot*)(void*)(this+1))[k]; - } - size_t size() const {return (size_t)1<>(8*sizeof(size_t)-lg_size); - } - }; - struct slot { - key_type key; - void* ptr; - bool empty() const {return !key;} - bool match( key_type k ) const {return key==k;} - bool claim( key_type k ) { - __TBB_ASSERT(sizeof(tbb::atomic)==sizeof(key_type), NULL); - __TBB_ASSERT(sizeof(void*)==sizeof(tbb::atomic*), NULL); - union { void* space; tbb::atomic* key_atomic; } helper; - helper.space = &key; - return helper.key_atomic->compare_and_swap(k,0)==0; - } - }; -#if __TBB_GCC_3_3_PROTECTED_BROKEN - protected: -#endif - - static key_type key_of_current_thread() { - tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id(); - key_type k; - memcpy( &k, &id, sizeof(k) ); - return k; - } - - //! Root of linked list of arrays of decreasing size. - /** NULL if and only if my_count==0. - Each array in the list is half the size of its predecessor. */ - atomic my_root; - atomic my_count; - virtual void* create_local() = 0; - virtual void* create_array(size_t _size) = 0; // _size in bytes - virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes - array* allocate( size_t lg_size ) { - size_t n = 1<(create_array( sizeof(array)+n*sizeof(slot) )); - a->lg_size = lg_size; - std::memset( a+1, 0, n*sizeof(slot) ); - return a; - } - void free(array* a) { - size_t n = 1<<(a->lg_size); - free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) ); - } - static size_t hash( key_type k ) { - // Multiplicative hashing. Client should use *upper* bits. 
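        // Background sketch (added commentary, not from the original source):
        // 0x9E3779B9 is approximately 2^32 / phi and 0x9E3779B97F4A7C15
        // approximately 2^64 / phi, where phi is the golden ratio; these are
        // the classic Fibonacci-hashing multipliers. The randomness of
        // k * constant accumulates in the high-order bits of the product,
        // which is why array::start() above computes
        //
        //     h >> (8 * sizeof(size_t) - lg_size)
        //
        // (the top lg_size bits) rather than the usual h & mask.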
- // casts required for Mac gcc4.* compiler -#if __TBB_WORDSIZE == 4 - return uintptr_t(k)*0x9E3779B9; -#else - return uintptr_t(k)*0x9E3779B97F4A7C15; -#endif - } - - ets_base() {my_root=NULL; my_count=0;} - virtual ~ets_base(); // g++ complains if this is not virtual... - void* table_lookup( bool& exists ); - void table_clear(); - slot& table_find( key_type k ) { - size_t h = hash(k); - array* r = my_root; - size_t mask = r->mask(); - for(size_t i = r->start(h);;i=(i+1)&mask) { - slot& s = r->at(i); - if( s.empty() || s.match(k) ) - return s; - } - } - void table_reserve_for_copy( const ets_base& other ) { - __TBB_ASSERT(!my_root,NULL); - __TBB_ASSERT(!my_count,NULL); - if( other.my_root ) { - array* a = allocate(other.my_root->lg_size); - a->next = NULL; - my_root = a; - my_count = other.my_count; - } - } - }; - - template - ets_base::~ets_base() { - __TBB_ASSERT(!my_root, NULL); - } - - template - void ets_base::table_clear() { - while( array* r = my_root ) { - my_root = r->next; - free(r); - } - my_count = 0; - } - - template - void* ets_base::table_lookup( bool& exists ) { - const key_type k = key_of_current_thread(); - - __TBB_ASSERT(k!=0,NULL); - void* found; - size_t h = hash(k); - for( array* r=my_root; r; r=r->next ) { - size_t mask=r->mask(); - for(size_t i = r->start(h); ;i=(i+1)&mask) { - slot& s = r->at(i); - if( s.empty() ) break; - if( s.match(k) ) { - if( r==my_root ) { - // Success at top level - exists = true; - return s.ptr; - } else { - // Success at some other level. Need to insert at top level. - exists = true; - found = s.ptr; - goto insert; - } - } - } - } - // Key does not yet exist - exists = false; - found = create_local(); - { - size_t c = ++my_count; - array* r = my_root; - if( !r || c>r->size()/2 ) { - size_t s = r ? r->lg_size : 2; - while( c>size_t(1)<<(s-1) ) ++s; - array* a = allocate(s); - for(;;) { - a->next = my_root; - array* new_r = my_root.compare_and_swap(a,r); - if( new_r==r ) break; - if( new_r->lg_size>=s ) { - // Another thread inserted an equal or bigger array, so our array is superfluous. - free(a); - break; - } - r = new_r; - } - } - } - insert: - // Guaranteed to be room for it, and it is not present, so search for empty slot and grab it. - array* ir = my_root; - size_t mask = ir->mask(); - for(size_t i = ir->start(h);;i=(i+1)&mask) { - slot& s = ir->at(i); - if( s.empty() ) { - if( s.claim(k) ) { - s.ptr = found; - return found; - } - } - } - }; - - //! 
Specialization that exploits native TLS - template <> - class ets_base: protected ets_base { - typedef ets_base super; -#if _WIN32||_WIN64 - typedef DWORD tls_key_t; - void create_key() { my_key = TlsAlloc(); } - void destroy_key() { TlsFree(my_key); } - void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); } - void* get_tls() { return (void *)TlsGetValue(my_key); } -#else - typedef pthread_key_t tls_key_t; - void create_key() { pthread_key_create(&my_key, NULL); } - void destroy_key() { pthread_key_delete(my_key); } - void set_tls( void * value ) const { pthread_setspecific(my_key, value); } - void* get_tls() const { return pthread_getspecific(my_key); } -#endif - tls_key_t my_key; - virtual void* create_local() = 0; - virtual void* create_array(size_t _size) = 0; // _size in bytes - virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes - public: - ets_base() {create_key();} - ~ets_base() {destroy_key();} - void* table_lookup( bool& exists ) { - void* found = get_tls(); - if( found ) { - exists=true; - } else { - found = super::table_lookup(exists); - set_tls(found); - } - return found; - } - void table_clear() { - destroy_key(); - create_key(); - super::table_clear(); - } - }; - - //! Random access iterator for traversing the thread local copies. - template< typename Container, typename Value > - class enumerable_thread_specific_iterator -#if defined(_WIN64) && defined(_MSC_VER) - // Ensure that Microsoft's internal template function _Val_type works correctly. - : public std::iterator -#endif /* defined(_WIN64) && defined(_MSC_VER) */ - { - //! current position in the concurrent_vector - - Container *my_container; - typename Container::size_type my_index; - mutable Value *my_value; - - template - friend enumerable_thread_specific_iterator operator+( ptrdiff_t offset, - const enumerable_thread_specific_iterator& v ); - - template - friend bool operator==( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ); - - template - friend bool operator<( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ); - - template - friend ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, const enumerable_thread_specific_iterator& j ); - - template - friend class enumerable_thread_specific_iterator; - - public: - - enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : - my_container(&const_cast(container)), my_index(index), my_value(NULL) {} - - //! 
Default constructor - enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {} - - template - enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator& other ) : - my_container( other.my_container ), my_index( other.my_index), my_value( const_cast(other.my_value) ) {} - - enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const { - return enumerable_thread_specific_iterator(*my_container, my_index + offset); - } - - enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) { - my_index += offset; - my_value = NULL; - return *this; - } - - enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const { - return enumerable_thread_specific_iterator( *my_container, my_index-offset ); - } - - enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) { - my_index -= offset; - my_value = NULL; - return *this; - } - - Value& operator*() const { - Value* value = my_value; - if( !value ) { - value = my_value = reinterpret_cast(&(*my_container)[my_index].value); - } - __TBB_ASSERT( value==reinterpret_cast(&(*my_container)[my_index].value), "corrupt cache" ); - return *value; - } - - Value& operator[]( ptrdiff_t k ) const { - return (*my_container)[my_index + k].value; - } - - Value* operator->() const {return &operator*();} - - enumerable_thread_specific_iterator& operator++() { - ++my_index; - my_value = NULL; - return *this; - } - - enumerable_thread_specific_iterator& operator--() { - --my_index; - my_value = NULL; - return *this; - } - - //! Post increment - enumerable_thread_specific_iterator operator++(int) { - enumerable_thread_specific_iterator result = *this; - ++my_index; - my_value = NULL; - return result; - } - - //! Post decrement - enumerable_thread_specific_iterator operator--(int) { - enumerable_thread_specific_iterator result = *this; - --my_index; - my_value = NULL; - return result; - } - - // STL support - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::random_access_iterator_tag iterator_category; - }; - - template - enumerable_thread_specific_iterator operator+( ptrdiff_t offset, - const enumerable_thread_specific_iterator& v ) { - return enumerable_thread_specific_iterator( v.my_container, v.my_index + offset ); - } - - template - bool operator==( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index==j.my_index && i.my_container == j.my_container; - } - - template - bool operator!=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(i==j); - } - - template - bool operator<( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index - bool operator>( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return j - bool operator>=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(i - bool operator<=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(j - ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index-j.my_index; - } - - template - class segmented_iterator -#if defined(_WIN64) && defined(_MSC_VER) - : public std::iterator -#endif - { - template - friend bool operator==(const 
segmented_iterator& i, const segmented_iterator& j); - - template - friend bool operator!=(const segmented_iterator& i, const segmented_iterator& j); - - template - friend class segmented_iterator; - - public: - - segmented_iterator() {my_segcont = NULL;} - - segmented_iterator( const SegmentedContainer& _segmented_container ) : - my_segcont(const_cast(&_segmented_container)), - outer_iter(my_segcont->end()) { } - - ~segmented_iterator() {} - - typedef typename SegmentedContainer::iterator outer_iterator; - typedef typename SegmentedContainer::value_type InnerContainer; - typedef typename InnerContainer::iterator inner_iterator; - - // STL support - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef typename SegmentedContainer::size_type size_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::input_iterator_tag iterator_category; - - // Copy Constructor - template - segmented_iterator(const segmented_iterator& other) : - my_segcont(other.my_segcont), - outer_iter(other.outer_iter), - // can we assign a default-constructed iterator to inner if we're at the end? - inner_iter(other.inner_iter) - {} - - // assignment - template - segmented_iterator& operator=( const segmented_iterator& other) { - if(this != &other) { - my_segcont = other.my_segcont; - outer_iter = other.outer_iter; - if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter; - } - return *this; - } - - // allow assignment of outer iterator to segmented iterator. Once it is - // assigned, move forward until a non-empty inner container is found or - // the end of the outer container is reached. - segmented_iterator& operator=(const outer_iterator& new_outer_iter) { - __TBB_ASSERT(my_segcont != NULL, NULL); - // check that this iterator points to something inside the segmented container - for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) { - if( !outer_iter->empty() ) { - inner_iter = outer_iter->begin(); - break; - } - } - return *this; - } - - // pre-increment - segmented_iterator& operator++() { - advance_me(); - return *this; - } - - // post-increment - segmented_iterator operator++(int) { - segmented_iterator tmp = *this; - operator++(); - return tmp; - } - - bool operator==(const outer_iterator& other_outer) const { - __TBB_ASSERT(my_segcont != NULL, NULL); - return (outer_iter == other_outer && - (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin())); - } - - bool operator!=(const outer_iterator& other_outer) const { - return !operator==(other_outer); - - } - - // (i)* RHS - reference operator*() const { - __TBB_ASSERT(my_segcont != NULL, NULL); - __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container"); - __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen - return *inner_iter; - } - - // i-> - pointer operator->() const { return &operator*();} - - private: - SegmentedContainer* my_segcont; - outer_iterator outer_iter; - inner_iterator inner_iter; - - void advance_me() { - __TBB_ASSERT(my_segcont != NULL, NULL); - __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers - __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty. 
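            // Sketch of the idea (illustrative only): advance the inner
            // iterator; whenever an inner container is exhausted, walk the
            // outer iterator forward to the next non-empty inner container,
            // or stop at the outer end. Client code typically reaches this
            // through flatten2d, e.g.:
            //
            //     typedef tbb::enumerable_thread_specific<std::vector<int> > Ets;
            //     Ets ets;
            //     // ... each thread appends to ets.local() ...
            //     tbb::flattened2d<Ets> flat = tbb::flatten2d(ets);
            //     int total = 0;
            //     for (tbb::flattened2d<Ets>::iterator i = flat.begin();
            //          i != flat.end(); ++i)
            //         total += *i;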
- ++inner_iter; - while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) { - inner_iter = outer_iter->begin(); - } - } - }; // segmented_iterator - - template - bool operator==( const segmented_iterator& i, - const segmented_iterator& j ) { - if(i.my_segcont != j.my_segcont) return false; - if(i.my_segcont == NULL) return true; - if(i.outer_iter != j.outer_iter) return false; - if(i.outer_iter == i.my_segcont->end()) return true; - return i.inner_iter == j.inner_iter; - } - - // != - template - bool operator!=( const segmented_iterator& i, - const segmented_iterator& j ) { - return !(i==j); - } - - // storage for initialization function pointer - template - struct callback_base { - virtual T apply( ) = 0; - virtual void destroy( ) = 0; - // need to be able to create copies of callback_base for copy constructor - virtual callback_base* make_copy() = 0; - // need virtual destructor to satisfy GCC compiler warning - virtual ~callback_base() { } - }; - - template - struct callback_leaf : public callback_base, public tbb::internal::no_copy { - typedef Functor my_callback_type; - typedef callback_leaf my_type; - typedef my_type* callback_pointer; - typedef typename tbb::tbb_allocator my_allocator_type; - Functor f; - callback_leaf( const Functor& f_) : f(f_) { - } - - static callback_pointer new_callback(const Functor& f_ ) { - void* new_void = my_allocator_type().allocate(1); - callback_pointer new_cb = new (new_void) callback_leaf(f_); // placement new - return new_cb; - } - - /* override */ callback_pointer make_copy() { - return new_callback( f ); - } - - /* override */ void destroy( ) { - callback_pointer my_ptr = this; - my_allocator_type().destroy(my_ptr); - my_allocator_type().deallocate(my_ptr,1); - } - /* override */ T apply() { return f(); } // does copy construction of returned value. - }; - - - //! Template for adding padding in order to avoid false sharing - /** ModularSize should be sizeof(U) modulo the cache line size. - All maintenance of the space will be done explicitly on push_back, - and all thread local copies must be destroyed before the concurrent - vector is deleted. - */ - template - struct ets_element { - char value[sizeof(U) + tbb::internal::NFS_MaxLineSize-ModularSize]; - void unconstruct() { - // "reinterpret_cast(&value)->~U();" causes type-punning warning with gcc 4.4, - // "U* u = reinterpret_cast(&value); u->~U();" causes unused variable warning with VS2010. - // Thus another "casting via union" hack. - __TBB_ASSERT(sizeof(void*)==sizeof(U*),NULL); - union { void* space; U* val; } helper; - helper.space = &value; - helper.val->~U(); - } - }; - - //! Partial specialization for case where no padding is needed. - template - struct ets_element { - char value[sizeof(U)]; - void unconstruct() { // Same implementation as in general case - __TBB_ASSERT(sizeof(void*)==sizeof(U*),NULL); - union { void* space; U* val; } helper; - helper.space = &value; - helper.val->~U(); - } - }; - - } // namespace internal - //! @endcond - - //! The enumerable_thread_specific container - /** enumerable_thread_specific has the following properties: - - thread-local copies are lazily created, with default, exemplar or function initialization. - - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant. - - the contained objects need not have operator=() defined if combine is not used. - - enumerable_thread_specific containers may be copy-constructed or assigned. 
- - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed. - - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods - - @par Segmented iterator - When the thread-local objects are containers with input_iterators defined, a segmented iterator may - be used to iterate over all the elements of all thread-local copies. - - @par combine and combine_each - - Both methods are defined for enumerable_thread_specific. - - combine() requires the the type T have operator=() defined. - - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.) - - Both are evaluated in serial context (the methods are assumed to be non-benign.) - - @ingroup containers */ - template , - ets_key_usage_type ETS_key_type=ets_no_key > - class enumerable_thread_specific: internal::ets_base { - - template friend class enumerable_thread_specific; - - typedef internal::ets_element padded_element; - - //! A generic range, used to create range objects from the iterators - template - class generic_range_type: public blocked_range { - public: - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef I iterator; - typedef ptrdiff_t difference_type; - generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} - template - generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} - generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} - }; - - typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type; - typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type; - - internal::callback_base *my_finit_callback; - - // need to use a pointed-to exemplar because T may not be assignable. - // using tbb_allocator instead of padded_element_allocator because we may be - // copying an exemplar from one instantiation of ETS to another with a different - // allocator. 
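    // Usage sketch (illustrative only; make_seed is a placeholder nullary
    // function returning int): the three initialization modes implemented by
    // the members below, plus the usual access and reduction calls.
    //
    //     tbb::enumerable_thread_specific<int> a;            // default-constructed locals
    //     tbb::enumerable_thread_specific<int> b(42);        // copy the exemplar 42
    //     tbb::enumerable_thread_specific<int> c(make_seed); // call make_seed() per thread
    //
    //     b.local() += 1;                           // lazily creates this thread's copy
    //     int total = b.combine(std::plus<int>());  // serial reduction over all copies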
- typedef typename tbb::tbb_allocator exemplar_allocator_type; - static padded_element * create_exemplar(const T& my_value) { - padded_element *new_exemplar = reinterpret_cast(exemplar_allocator_type().allocate(1)); - new(new_exemplar->value) T(my_value); - return new_exemplar; - } - - static padded_element *create_exemplar( ) { - padded_element *new_exemplar = reinterpret_cast(exemplar_allocator_type().allocate(1)); - new(new_exemplar->value) T( ); - return new_exemplar; - } - - static void free_exemplar(padded_element *my_ptr) { - my_ptr->unconstruct(); - exemplar_allocator_type().destroy(my_ptr); - exemplar_allocator_type().deallocate(my_ptr,1); - } - - padded_element* my_exemplar_ptr; - - internal_collection_type my_locals; - - /*override*/ void* create_local() { -#if TBB_DEPRECATED - void* lref = &my_locals[my_locals.push_back(padded_element())]; -#else - void* lref = &*my_locals.push_back(padded_element()); -#endif - if(my_finit_callback) { - new(lref) T(my_finit_callback->apply()); - } else if(my_exemplar_ptr) { - pointer t_exemp = reinterpret_cast(&(my_exemplar_ptr->value)); - new(lref) T(*t_exemp); - } else { - new(lref) T(); - } - return lref; - } - - void unconstruct_locals() { - for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) { - cvi->unconstruct(); - } - } - - typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type; - - // _size is in bytes - /*override*/ void* create_array(size_t _size) { - size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); - return array_allocator_type().allocate(nelements); - } - - /*override*/ void free_array( void* _ptr, size_t _size) { - size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); - array_allocator_type().deallocate( reinterpret_cast(_ptr),nelements); - } - - public: - - //! Basic types - typedef Allocator allocator_type; - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef T* pointer; - typedef const T* const_pointer; - typedef typename internal_collection_type::size_type size_type; - typedef typename internal_collection_type::difference_type difference_type; - - // Iterator types - typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator; - typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator; - - // Parallel range types - typedef generic_range_type< iterator > range_type; - typedef generic_range_type< const_iterator > const_range_type; - - //! Default constructor, which leads to default construction of local copies - enumerable_thread_specific() : my_finit_callback(0) { - my_exemplar_ptr = 0; - } - - //! construction with initializer method - // Finit should be a function taking 0 parameters and returning a T - template - enumerable_thread_specific( Finit _finit ) - { - my_finit_callback = internal::callback_leaf::new_callback( _finit ); - my_exemplar_ptr = 0; // don't need exemplar if function is provided - } - - //! Constuction with exemplar, which leads to copy construction of local copies - enumerable_thread_specific(const T &_exemplar) : my_finit_callback(0) { - my_exemplar_ptr = create_exemplar(_exemplar); - } - - //! 
Destructor - ~enumerable_thread_specific() { - if(my_finit_callback) { - my_finit_callback->destroy(); - } - if(my_exemplar_ptr) { - free_exemplar(my_exemplar_ptr); - } - this->clear(); // deallocation before the derived class is finished destructing - // So free(array *) is still accessible - } - - //! returns reference to local, discarding exists - reference local() { - bool exists; - return local(exists); - } - - //! Returns reference to calling thread's local copy, creating one if necessary - reference local(bool& exists) { - __TBB_ASSERT(ETS_key_type==ets_no_key,"ets_key_per_instance not yet implemented"); - void* ptr = this->table_lookup(exists); - return *(T*)ptr; - } - - //! Get the number of local copies - size_type size() const { return my_locals.size(); } - - //! true if there have been no local copies created - bool empty() const { return my_locals.empty(); } - - //! begin iterator - iterator begin() { return iterator( my_locals, 0 ); } - //! end iterator - iterator end() { return iterator(my_locals, my_locals.size() ); } - - //! begin const iterator - const_iterator begin() const { return const_iterator(my_locals, 0); } - - //! end const iterator - const_iterator end() const { return const_iterator(my_locals, my_locals.size()); } - - //! Get range for parallel algorithms - range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } - - //! Get const range for parallel algorithms - const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); } - - //! Destroys local copies - void clear() { - unconstruct_locals(); - my_locals.clear(); - this->table_clear(); - // callback is not destroyed - // exemplar is not destroyed - } - - private: - - template - void internal_copy( const enumerable_thread_specific& other); - - public: - - template - enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base () - { - internal_copy(other); - } - - enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base () - { - internal_copy(other); - } - - private: - - template - enumerable_thread_specific & - internal_assign(const enumerable_thread_specific& other) { - if(static_cast( this ) != static_cast( &other )) { - this->clear(); - if(my_finit_callback) { - my_finit_callback->destroy(); - my_finit_callback = 0; - } - if(my_exemplar_ptr) { - free_exemplar(my_exemplar_ptr); - my_exemplar_ptr = 0; - } - internal_copy( other ); - } - return *this; - } - - public: - - // assignment - enumerable_thread_specific& operator=(const enumerable_thread_specific& other) { - return internal_assign(other); - } - - template - enumerable_thread_specific& operator=(const enumerable_thread_specific& other) - { - return internal_assign(other); - } - - // combine_func_t has signature T(T,T) or T(const T&, const T&) - template - T combine(combine_func_t f_combine) { - if(begin() == end()) { - if(my_finit_callback) { - return my_finit_callback->apply(); - } - pointer local_ref = reinterpret_cast((my_exemplar_ptr->value)); - return T(*local_ref); - } - const_iterator ci = begin(); - T my_result = *ci; - while(++ci != end()) - my_result = f_combine( my_result, *ci ); - return my_result; - } - - // combine_func_t has signature void(T) or void(const T&) - template - void combine_each(combine_func_t f_combine) { - for(const_iterator ci = begin(); ci != end(); ++ci) { - f_combine( *ci ); - } - } - - }; // enumerable_thread_specific - - - template - template - void 
enumerable_thread_specific::internal_copy( const enumerable_thread_specific& other) { - typedef internal::ets_base base; - __TBB_ASSERT(my_locals.size()==0,NULL); - this->table_reserve_for_copy( other ); - for( base::array* r=other.my_root; r; r=r->next ) { - for( size_t i=0; isize(); ++i ) { - base::slot& s1 = r->at(i); - if( !s1.empty() ) { - base::slot& s2 = this->table_find(s1.key); - if( s2.empty() ) { -#if TBB_DEPRECATED - void* lref = &my_locals[my_locals.push_back(padded_element())]; -#else - void* lref = &*my_locals.push_back(padded_element()); -#endif - s2.ptr = new(lref) T(*(U*)s1.ptr); - s2.key = s1.key; - } else { - // Skip the duplicate - } - } - } - } - if(other.my_finit_callback) { - my_finit_callback = other.my_finit_callback->make_copy(); - } else { - my_finit_callback = 0; - } - if(other.my_exemplar_ptr) { - pointer local_ref = reinterpret_cast(other.my_exemplar_ptr->value); - my_exemplar_ptr = create_exemplar(*local_ref); - } else { - my_exemplar_ptr = 0; - } - } - - template< typename Container > - class flattened2d { - - // This intermediate typedef is to address issues with VC7.1 compilers - typedef typename Container::value_type conval_type; - - public: - - //! Basic types - typedef typename conval_type::size_type size_type; - typedef typename conval_type::difference_type difference_type; - typedef typename conval_type::allocator_type allocator_type; - typedef typename conval_type::value_type value_type; - typedef typename conval_type::reference reference; - typedef typename conval_type::const_reference const_reference; - typedef typename conval_type::pointer pointer; - typedef typename conval_type::const_pointer const_pointer; - - typedef typename internal::segmented_iterator iterator; - typedef typename internal::segmented_iterator const_iterator; - - flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : - my_container(const_cast(&c)), my_begin(b), my_end(e) { } - - flattened2d( const Container &c ) : - my_container(const_cast(&c)), my_begin(c.begin()), my_end(c.end()) { } - - iterator begin() { return iterator(*my_container) = my_begin; } - iterator end() { return iterator(*my_container) = my_end; } - const_iterator begin() const { return const_iterator(*my_container) = my_begin; } - const_iterator end() const { return const_iterator(*my_container) = my_end; } - - size_type size() const { - size_type tot_size = 0; - for(typename Container::const_iterator i = my_begin; i != my_end; ++i) { - tot_size += i->size(); - } - return tot_size; - } - - private: - - Container *my_container; - typename Container::const_iterator my_begin; - typename Container::const_iterator my_end; - - }; - - template - flattened2d flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) { - return flattened2d(c, b, e); - } - - template - flattened2d flatten2d(const Container &c) { - return flattened2d(c); - } - -} // interface5 - -namespace internal { -using interface5::internal::segmented_iterator; -} - -using interface5::enumerable_thread_specific; -using interface5::flattened2d; -using interface5::flatten2d; - -} // namespace tbb - -#endif diff --git a/tbb30_20100406oss/include/tbb/index.html b/tbb30_20100406oss/include/tbb/index.html deleted file mode 100644 index 7e4552e30..000000000 --- a/tbb30_20100406oss/include/tbb/index.html +++ /dev/null @@ -1,28 +0,0 @@ - - - -

Overview

-Include files for Threading Building Blocks classes and functions. - -

Directories

-machine: Include files for low-level architecture specific functionality.
-compat: Include files for source level compatibility with other frameworks.

-Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -

-* Other names and brands may be claimed as the property of others. - - diff --git a/tbb30_20100406oss/include/tbb/machine/ibm_aix51.h b/tbb30_20100406oss/include/tbb/machine/ibm_aix51.h deleted file mode 100644 index ff3103b1f..000000000 --- a/tbb30_20100406oss/include/tbb/machine/ibm_aix51.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 1 - -#include -#include -#include - -extern "C" { - -int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand); -int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand); -#define __TBB_fence_for_acquire() __TBB_machine_flush () -#define __TBB_fence_for_release() __TBB_machine_flush () - -} - -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cas_32(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cas_64(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cas_64(P,V,C) -#define __TBB_Yield() sched_yield() diff --git a/tbb30_20100406oss/include/tbb/machine/linux_common.h b/tbb30_20100406oss/include/tbb/machine/linux_common.h deleted file mode 100644 index 35c5ce758..000000000 --- a/tbb30_20100406oss/include/tbb/machine/linux_common.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#include -#include - -// Definition of __TBB_Yield() -#ifndef __TBB_Yield -#define __TBB_Yield() sched_yield() -#endif - -/* Futex definitions */ -#include - -#if defined(SYS_futex) - -#define __TBB_USE_FUTEX 1 -#include -#include -// Unfortunately, some versions of Linux do not have a header that defines FUTEX_WAIT and FUTEX_WAKE. - -#ifdef FUTEX_WAIT -#define __TBB_FUTEX_WAIT FUTEX_WAIT -#else -#define __TBB_FUTEX_WAIT 0 -#endif - -#ifdef FUTEX_WAKE -#define __TBB_FUTEX_WAKE FUTEX_WAKE -#else -#define __TBB_FUTEX_WAKE 1 -#endif - -#ifndef __TBB_ASSERT -#error machine specific headers must be included after tbb_stddef.h -#endif - -namespace tbb { - -namespace internal { - -inline int futex_wait( void *futex, int comparand ) { - int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAIT,comparand,NULL,NULL,0 ); -#if TBB_USE_ASSERT - int e = errno; - __TBB_ASSERT( r==0||r==EWOULDBLOCK||(r==-1&&(e==EAGAIN||e==EINTR)), "futex_wait failed." ); -#endif /* TBB_USE_ASSERT */ - return r; -} - -inline int futex_wakeup_one( void *futex ) { - int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,1,NULL,NULL,0 ); - __TBB_ASSERT( r==0||r==1, "futex_wakeup_one: more than one thread woken up?" ); - return r; -} - -inline int futex_wakeup_all( void *futex ) { - int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 ); - __TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" ); - return r; -} - -} /* namespace internal */ - -} /* namespace tbb */ - -#endif /* SYS_futex */ diff --git a/tbb30_20100406oss/include/tbb/machine/linux_ia32.h b/tbb30_20100406oss/include/tbb/machine/linux_ia32.h deleted file mode 100644 index e0041a151..000000000 --- a/tbb30_20100406oss/include/tbb/machine/linux_ia32.h +++ /dev/null @@ -1,261 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
diff --git a/tbb30_20100406oss/include/tbb/machine/linux_ia32.h b/tbb30_20100406oss/include/tbb/machine/linux_ia32.h deleted file mode 100644 index e0041a151..000000000 --- a/tbb30_20100406oss/include/tbb/machine/linux_ia32.h +++ /dev/null @@ -1,261 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#if !__MINGW32__ -#include "linux_common.h" -#endif - -#define __TBB_WORDSIZE 4 -#define __TBB_BIG_ENDIAN 0 - -#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory") - -inline void __TBB_rel_acq_fence() { __asm__ __volatile__("mfence": : :"memory"); } - -#if __TBB_ICC_ASM_VOLATILE_BROKEN -#define __TBB_VOLATILE -#else -#define __TBB_VOLATILE volatile -#endif - -#define __MACHINE_DECL_ATOMICS(S,T,X) \ -static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ -{ \ - T result; \ - \ - __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ - : "=a"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "q"(value), "0"(comparand), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxadd" X " %0,%1" \ - : "=r"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "0"(addend), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxchg" X " %0,%1" \ - : "=r"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "0"(value), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - -__MACHINE_DECL_ATOMICS(1,int8_t,"") -__MACHINE_DECL_ATOMICS(2,int16_t,"") -__MACHINE_DECL_ATOMICS(4,int32_t,"l") - -static inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t result; -#if __PIC__ - /* compiling position-independent code */ - // EBX register preserved for compliance with position-independent code rules on IA32 - __asm__ __volatile__ ( - "pushl %%ebx\n\t" - "movl (%%ecx),%%ebx\n\t" - "movl 4(%%ecx),%%ecx\n\t" - "lock\n\t cmpxchg8b %1\n\t" - "popl %%ebx" - : "=A"(result), "=m"(*(int64_t *)ptr) - : "m"(*(int64_t *)ptr) - , "0"(comparand) - , "c"(&value) - : "memory", "esp" -#if __INTEL_COMPILER - ,"ebx" -#endif - ); -#else /* !__PIC__ */ - union { - int64_t i64; - int32_t i32[2]; - }; - i64 = value; - __asm__ __volatile__ ( - "lock\n\t cmpxchg8b %1\n\t" - : "=A"(result), "=m"(*(__TBB_VOLATILE int64_t *)ptr) - : "m"(*(__TBB_VOLATILE int64_t *)ptr) - , "0"(comparand) - , "b"(i32[0]), "c"(i32[1]) - : "memory" - ); -#endif /* __PIC__ */ - return result; -} - -static inline int32_t __TBB_machine_lg( uint32_t x ) { - int32_t j; - __asm__ ("bsr %1,%0" : "=r"(j) : "r"(x)); - return j; -} - -static inline void __TBB_machine_or( volatile void *ptr, uint32_t addend ) { - __asm__ __volatile__("lock\norl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint32_t addend ) { - __asm__ __volatile__("lock\nandl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); -}
-static inline void __TBB_machine_pause( int32_t delay ) { - for (int32_t i = 0; i < delay; i++) { - __asm__ __volatile__("pause;"); - } - return; -} - -static inline int64_t __TBB_machine_load8 (const volatile void *ptr) { - int64_t result; - if( ((uint32_t)ptr&7u)==0 ) { - // Aligned load - __asm__ __volatile__ ( "fildq %1\n\t" - "fistpq %0" : "=m"(result) : "m"(*(const __TBB_VOLATILE uint64_t*)ptr) : "memory" ); - } else { - // Unaligned load - result = __TBB_machine_cmpswp8(const_cast<volatile void*>(ptr),0,0); - } - return result; -} - -//! Handles misaligned 8-byte store -/** Defined in tbb_misc.cpp */ -extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ); -extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ); - -static inline void __TBB_machine_store8(volatile void *ptr, int64_t value) { - if( ((uint32_t)ptr&7u)==0 ) { - // Aligned store - __asm__ __volatile__ ( "fildq %1\n\t" - "fistpq %0" : "=m"(*(__TBB_VOLATILE int64_t*)ptr) : "m"(value) : "memory" ); - } else { - // Unaligned store -#if TBB_USE_PERFORMANCE_WARNINGS - __TBB_machine_store8_slow_perf_warning(ptr); -#endif /* TBB_USE_PERFORMANCE_WARNINGS */ - __TBB_machine_store8_slow(ptr,value); - } -} - -template <typename T, size_t S> -struct __TBB_machine_load_store { - static inline T load_with_acquire(const volatile T& location) { - T to_return = location; - __asm__ __volatile__("" : : : "memory" ); // Compiler fence to keep operations from migrating upwards - return to_return; - } - - static inline void store_with_release(volatile T &location, T value) { - __asm__ __volatile__("" : : : "memory" ); // Compiler fence to keep operations from migrating upwards - location = value; - } -}; - -template <typename T> -struct __TBB_machine_load_store<T,8> { - static inline T load_with_acquire(const volatile T& location) { - T to_return = __TBB_machine_load8((const volatile void *)&location); - __asm__ __volatile__("" : : : "memory" ); // Compiler fence to keep operations from migrating upwards - return to_return; - } - - static inline void store_with_release(volatile T &location, T value) { - __asm__ __volatile__("" : : : "memory" ); // Compiler fence to keep operations from migrating downwards - __TBB_machine_store8((volatile void *)&location,(int64_t)value); - } -}; - -#undef __TBB_VOLATILE - -template <typename T> -inline T __TBB_machine_load_with_acquire(const volatile T &location) { - return __TBB_machine_load_store<T,sizeof(T)>::load_with_acquire(location); -} - -template <typename T, typename V> -inline void __TBB_machine_store_with_release(volatile T &location, V value) { - __TBB_machine_load_store<T,sizeof(T)>::store_with_release(location,value); -} - -#define __TBB_load_with_acquire(L) __TBB_machine_load_with_acquire((L)) -#define __TBB_store_with_release(L,V) __TBB_machine_store_with_release((L),(V))
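The compiler fences above are all IA-32 needs for release/acquire publication, since the hardware already keeps ordinary loads and stores ordered; modern C++ spells the same contract explicitly. A sketch of the pattern these macros exist for (the names producer/consumer are ours):

    #include <atomic>

    int payload;                      // ordinary data being published
    std::atomic<bool> ready{false};   // plays the role of the guarded location

    void producer() {
        payload = 42;
        ready.store(true, std::memory_order_release);      // __TBB_store_with_release
    }

    int consumer() {
        while (!ready.load(std::memory_order_acquire)) {}  // __TBB_load_with_acquire
        return payload;   // guaranteed to see 42 once 'ready' reads true
    }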
-// Machine specific atomic operations - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd4(P,V) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V) -#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore4(P,V) - -#define __TBB_Store8(P,V) __TBB_machine_store8(P,V) -#define __TBB_Load8(P) __TBB_machine_load8(P) - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - - -// Those we chose not to implement (they will be implemented generically using CMPSWP8) -#undef __TBB_FetchAndAdd8 -#undef __TBB_FetchAndStore8 - -// Definition of other functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Special atomic functions -#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1) - -// Use generic definitions from tbb_machine.h -#undef __TBB_TryLockByte -#undef __TBB_LockByte
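One detail worth noting from the header above: __TBB_machine_load8 reads a misaligned 64-bit value by issuing cmpswp8(ptr,0,0), a compare-and-swap whose new value equals its comparand, which never changes memory but returns the current contents atomically. The same trick restated with std::atomic (helper name ours):

    #include <atomic>
    #include <cstdint>

    // Atomic 64-bit read via CAS: if v == 0 it is replaced by 0 (a no-op);
    // either way 'expected' comes back holding the atomically observed value.
    std::int64_t atomic_load_via_cas(std::atomic<std::int64_t>& v) {
        std::int64_t expected = 0;
        v.compare_exchange_strong(expected, 0);
        return expected;
    }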
diff --git a/tbb30_20100406oss/include/tbb/machine/linux_ia64.h b/tbb30_20100406oss/include/tbb/machine/linux_ia64.h deleted file mode 100644 index cd788fecf..000000000 --- a/tbb30_20100406oss/include/tbb/machine/linux_ia64.h +++ /dev/null @@ -1,169 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include "linux_common.h" -#include <ia64intrin.h> - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 0 -#define __TBB_DECL_FENCED_ATOMICS 1 - -// Most of the functions will be in a .s file - -extern "C" { - int8_t __TBB_machine_cmpswp1__TBB_full_fence (volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_fetchadd1__TBB_full_fence (volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchadd1acquire(volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchadd1release(volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchstore1acquire(volatile void *ptr, int8_t value); - int8_t __TBB_machine_fetchstore1release(volatile void *ptr, int8_t value); - - int16_t __TBB_machine_cmpswp2__TBB_full_fence (volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_fetchadd2__TBB_full_fence (volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchadd2acquire(volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchadd2release(volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchstore2acquire(volatile void *ptr, int16_t value); - int16_t __TBB_machine_fetchstore2release(volatile void *ptr, int16_t value); - - int32_t __TBB_machine_fetchstore4__TBB_full_fence (volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchstore4acquire(volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchstore4release(volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchadd4acquire(volatile void *ptr, int32_t addend); - int32_t __TBB_machine_fetchadd4release(volatile void *ptr, int32_t addend); - - int64_t __TBB_machine_cmpswp8__TBB_full_fence (volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_fetchstore8__TBB_full_fence (volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchstore8acquire(volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchstore8release(volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchadd8acquire(volatile void *ptr, int64_t addend); - int64_t __TBB_machine_fetchadd8release(volatile void *ptr, int64_t addend); - - int8_t __TBB_machine_cmpswp1acquire(volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_cmpswp1release(volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_fetchstore1__TBB_full_fence (volatile void *ptr, int8_t value); - - int16_t __TBB_machine_cmpswp2acquire(volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_cmpswp2release(volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_fetchstore2__TBB_full_fence (volatile void *ptr, int16_t value); - - int32_t __TBB_machine_cmpswp4__TBB_full_fence (volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_cmpswp4acquire(volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_cmpswp4release(volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value); - - int64_t __TBB_machine_cmpswp8acquire(volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_cmpswp8release(volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value); - - int64_t __TBB_machine_lg(uint64_t value); - void __TBB_machine_pause(int32_t delay); - bool __TBB_machine_trylockbyte( 
volatile unsigned char &ptr ); - int64_t __TBB_machine_lockbyte( volatile unsigned char &ptr ); - - //! Retrieves the current RSE backing store pointer. IA64 specific. - void* __TBB_get_bsp(); -} - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1__TBB_full_fence(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2__TBB_full_fence(P,V,C) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1__TBB_full_fence(P,V) -#define __TBB_FetchAndAdd1acquire(P,V) __TBB_machine_fetchadd1acquire(P,V) -#define __TBB_FetchAndAdd1release(P,V) __TBB_machine_fetchadd1release(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2__TBB_full_fence(P,V) -#define __TBB_FetchAndAdd2acquire(P,V) __TBB_machine_fetchadd2acquire(P,V) -#define __TBB_FetchAndAdd2release(P,V) __TBB_machine_fetchadd2release(P,V) -#define __TBB_FetchAndAdd4acquire(P,V) __TBB_machine_fetchadd4acquire(P,V) -#define __TBB_FetchAndAdd4release(P,V) __TBB_machine_fetchadd4release(P,V) -#define __TBB_FetchAndAdd8acquire(P,V) __TBB_machine_fetchadd8acquire(P,V) -#define __TBB_FetchAndAdd8release(P,V) __TBB_machine_fetchadd8release(P,V) - -#define __TBB_FetchAndStore1acquire(P,V) __TBB_machine_fetchstore1acquire(P,V) -#define __TBB_FetchAndStore1release(P,V) __TBB_machine_fetchstore1release(P,V) -#define __TBB_FetchAndStore2acquire(P,V) __TBB_machine_fetchstore2acquire(P,V) -#define __TBB_FetchAndStore2release(P,V) __TBB_machine_fetchstore2release(P,V) -#define __TBB_FetchAndStore4acquire(P,V) __TBB_machine_fetchstore4acquire(P,V) -#define __TBB_FetchAndStore4release(P,V) __TBB_machine_fetchstore4release(P,V) -#define __TBB_FetchAndStore8acquire(P,V) __TBB_machine_fetchstore8acquire(P,V) -#define __TBB_FetchAndStore8release(P,V) __TBB_machine_fetchstore8release(P,V) - -#define __TBB_CompareAndSwap1acquire(P,V,C) __TBB_machine_cmpswp1acquire(P,V,C) -#define __TBB_CompareAndSwap1release(P,V,C) __TBB_machine_cmpswp1release(P,V,C) -#define __TBB_CompareAndSwap2acquire(P,V,C) __TBB_machine_cmpswp2acquire(P,V,C) -#define __TBB_CompareAndSwap2release(P,V,C) __TBB_machine_cmpswp2release(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4__TBB_full_fence(P,V,C) -#define __TBB_CompareAndSwap4acquire(P,V,C) __TBB_machine_cmpswp4acquire(P,V,C) -#define __TBB_CompareAndSwap4release(P,V,C) __TBB_machine_cmpswp4release(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8__TBB_full_fence(P,V,C) -#define __TBB_CompareAndSwap8acquire(P,V,C) __TBB_machine_cmpswp8acquire(P,V,C) -#define __TBB_CompareAndSwap8release(P,V,C) __TBB_machine_cmpswp8release(P,V,C) - -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4__TBB_full_fence(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8__TBB_full_fence(P,V) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1__TBB_full_fence(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2__TBB_full_fence(P,V) -#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4__TBB_full_fence(P,V) -#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8__TBB_full_fence(P,V) - -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAdd8acquire(P,1) -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAdd8release(P,-1) - -#ifndef __INTEL_COMPILER -/* Even though GCC imbues volatile loads with acquire semantics, - it sometimes moves loads over the acquire fence. The - fences defined here stop such incorrect code motion. 
*/ -#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory") -#define __TBB_rel_acq_fence() __asm__ __volatile__("mf": : :"memory") -#else -#define __TBB_release_consistency_helper() -#define __TBB_rel_acq_fence() __mf() -#endif /* __INTEL_COMPILER */ - -// Special atomic functions -#define __TBB_CompareAndSwapW(P,V,C) __TBB_CompareAndSwap8(P,V,C) -#define __TBB_FetchAndStoreW(P,V) __TBB_FetchAndStore8(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_FetchAndAdd8(P,V) -#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAdd8release(P,V) - -// Not needed -#undef __TBB_Store8 -#undef __TBB_Load8 - -// Definition of Lock functions -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) -#define __TBB_LockByte(P) __TBB_machine_lockbyte(P) - -// Definition of other utility functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - diff --git a/tbb30_20100406oss/include/tbb/machine/linux_intel64.h b/tbb30_20100406oss/include/tbb/machine/linux_intel64.h deleted file mode 100644 index aa8472c31..000000000 --- a/tbb30_20100406oss/include/tbb/machine/linux_intel64.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include "linux_common.h" - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 0 - -#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory") - -#ifndef __TBB_rel_acq_fence -inline void __TBB_rel_acq_fence() { __asm__ __volatile__("mfence": : :"memory"); } -#endif - -#define __MACHINE_DECL_ATOMICS(S,T,X) \ -static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ -{ \ - T result; \ - \ - __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ - : "=a"(result), "=m"(*(volatile T*)ptr) \ - : "q"(value), "0"(comparand), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxadd" X " %0,%1" \ - : "=r"(result),"=m"(*(volatile T*)ptr) \ - : "0"(addend), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxchg" X " %0,%1" \ - : "=r"(result),"=m"(*(volatile T*)ptr) \ - : "0"(value), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - -__MACHINE_DECL_ATOMICS(1,int8_t,"") -__MACHINE_DECL_ATOMICS(2,int16_t,"") -__MACHINE_DECL_ATOMICS(4,int32_t,"") -__MACHINE_DECL_ATOMICS(8,int64_t,"q") - -static inline int64_t __TBB_machine_lg( uint64_t x ) { - int64_t j; - __asm__ ("bsr %1,%0" : "=r"(j) : "r"(x)); - return j; -} - -static inline void __TBB_machine_or( volatile void *ptr, uint64_t addend ) { - __asm__ __volatile__("lock\norq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(addend), "m"(*(volatile uint64_t*)ptr) : "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint64_t addend ) { - __asm__ __volatile__("lock\nandq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(addend), "m"(*(volatile uint64_t*)ptr) : "memory"); -} - -static inline void __TBB_machine_pause( int32_t delay ) { - for (int32_t i = 0; i < delay; i++) { - __asm__ __volatile__("pause;"); - } - return; -} - -// Machine specific atomic operations - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd8(P,V) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V) -#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8(P,V) -#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore8(P,V) - -#define __TBB_Store8(P,V) (*P = V) -#define __TBB_Load8(P) (*P) - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -// Definition of other functions -#ifndef __TBB_Pause -#define __TBB_Pause(V) __TBB_machine_pause(V) -#endif -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Special atomic functions -#define 
__TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1) - -// Use generic definitions from tbb_machine.h -#undef __TBB_TryLockByte -#undef __TBB_LockByte
diff --git a/tbb30_20100406oss/include/tbb/machine/mac_ppc.h b/tbb30_20100406oss/include/tbb/machine/mac_ppc.h deleted file mode 100644 index 2487d2127..000000000 --- a/tbb30_20100406oss/include/tbb/machine/mac_ppc.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include <stdint.h> -#include <unistd.h> - -#include <sched.h> // sched_yield - -inline int32_t __TBB_machine_cmpswp4 (volatile void *ptr, int32_t value, int32_t comparand ) -{ - int32_t result; - - __asm__ __volatile__("sync\n" - "0: lwarx %0,0,%2\n\t" /* load w/ reservation */ - "cmpw %0,%4\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stwcx. %3,0,%2\n\t" /* store new_value */ - "bne- 0b\n" /* retry if reservation lost */ - "1: sync" /* the exit */ - : "=&r"(result), "=m"(* (int32_t*) ptr) - : "r"(ptr), "r"(value), "r"(comparand), "m"(* (int32_t*) ptr) - : "cr0"); - return result; -}
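The lwarx/stwcx. pair above can fail spuriously whenever the reservation is lost, which is precisely the contract C++ gives compare_exchange_weak. A sketch of the retry loop the assembly implements, in portable form (helper name ours):

    #include <atomic>
    #include <cstdint>

    std::int32_t cas_loop_or(std::atomic<std::int32_t>& v, std::int32_t mask) {
        std::int32_t old = v.load(std::memory_order_relaxed);
        // compare_exchange_weak may fail spuriously, exactly like a lost
        // lwarx/stwcx. reservation, so it always sits inside a retry loop.
        while (!v.compare_exchange_weak(old, old | mask)) {
        }
        return old;
    }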
%3,0,%2\n\t" /* store new_value */ - "bne- 0b\n" /* retry if reservation lost */ - "1: sync" /* the exit */ - : "=&b"(result), "=m"(* (int64_t*) ptr) - : "r"(ptr), "r"(value), "r"(comparand), "m"(* (int64_t*) ptr) - : "cr0"); - return result; -} - -#define __TBB_BIG_ENDIAN 1 - -#if defined(powerpc64) || defined(__powerpc64__) || defined(__ppc64__) -#define __TBB_WORDSIZE 8 -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#else -#define __TBB_WORDSIZE 4 -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#endif - -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_Yield() sched_yield() -#define __TBB_rel_acq_fence() __asm__ __volatile__("lwsync": : :"memory") -#define __TBB_release_consistency_helper() __TBB_rel_acq_fence() diff --git a/tbb30_20100406oss/include/tbb/machine/sunos_sparc.h b/tbb30_20100406oss/include/tbb/machine/sunos_sparc.h deleted file mode 100644 index cf90a92ed..000000000 --- a/tbb30_20100406oss/include/tbb/machine/sunos_sparc.h +++ /dev/null @@ -1,233 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-/** - * Atomic fetch and add for 64 bit values, in this case implemented by continuously checking success of atomicity - * @param ptr pointer to value to add addend to - * @param addend value to add to *ptr - * @return value at ptr before addend was added - */ -static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend){ - int64_t result; - __asm__ __volatile__ ( - "0:\t add\t %3, %4, %0\n" // do addition - "\t casx\t [%2], %3, %0\n" // cas to store result in memory - "\t cmp\t %3, %0\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added - : "=&r"(result), "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) - : "ccr", "memory"); - return result; -} - -//-------------------------------------------------------- -// Logarithm (base two, integer) -//-------------------------------------------------------- - -static inline int64_t __TBB_machine_lg( uint64_t x ) { - uint64_t count; - // one hot encode - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); - x |= (x >> 32); - // count 1's - __asm__ ("popc %1, %0" : "=r"(count) : "r"(x) ); - return count-1; -}
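The lg routine above smears the highest set bit into every lower position, then counts ones with the SPARC popc instruction. A portable restatement of the same trick, assuming a GCC-compatible compiler for __builtin_popcountll (function name ours):

    #include <cstdint>

    std::int64_t floor_log2(std::uint64_t x) {
        // One-hot smear: every bit at or below the top set bit becomes 1...
        x |= (x >> 1);
        x |= (x >> 2);
        x |= (x >> 4);
        x |= (x >> 8);
        x |= (x >> 16);
        x |= (x >> 32);
        // ...so popcount(x) - 1 equals floor(log2) of the original value.
        return __builtin_popcountll(x) - 1;
    }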
-//-------------------------------------------------------- - -static inline void __TBB_machine_or( volatile void *ptr, uint64_t addend ) { - __asm__ __volatile__ ( - "0:\t or\t %2, %3, %%g1\n" // do operation - "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory - "\t cmp\t %2, %%g1\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be combined - : "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) - : "ccr", "g1", "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint64_t addend ) { - __asm__ __volatile__ ( - "0:\t and\t %2, %3, %%g1\n" // do operation - "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory - "\t cmp\t %2, %%g1\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be combined - : "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) - : "ccr", "g1", "memory"); -} - - -static inline void __TBB_machine_pause( int32_t delay ) { - // do nothing, inlined, doesn't matter -} - -// put 0xff in memory location, return memory value, -// generic trylockbyte puts 0x01, however this is fine -// because all that matters is that 0 is unlocked -static inline bool __TBB_machine_trylockbyte(unsigned char &flag){ - unsigned char result; - __asm__ __volatile__ ( - "ldstub\t [%2], %0\n" - : "=r"(result), "=m"(flag) - : "r"(&flag), "m"(flag) - : "memory"); - return result == 0; -} - - -// Machine specific atomic operations - -//#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) // use generic version in tbb_machine.h -//#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) // use generic version in tbb_machine.h -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) - -//#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) // use generic version in tbb_machine.h -//#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) // use generic version in tbb_machine.h -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd8(P,V) - -// use generic version in tbb_machine.h -//#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -//#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -//#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V) -//#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8(P,V) -//#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore8(P,V) - -#define __TBB_Store8(P,V) (*P = V) -#define 
__TBB_Load8(P) (*P) - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -// Definition of other functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Special atomic functions -#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1) - -// Definition of Lock functions -// Repeatedly runs TryLockByte, no need to implement -#undef __TBB_LockByte - -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) - -#define __TBB_Yield() sched_yield() diff --git a/tbb30_20100406oss/include/tbb/machine/windows_ia32.h b/tbb30_20100406oss/include/tbb/machine/windows_ia32.h deleted file mode 100644 index e6e3fc1bb..000000000 --- a/tbb30_20100406oss/include/tbb/machine/windows_ia32.h +++ /dev/null @@ -1,243 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#if defined(__INTEL_COMPILER) -#define __TBB_release_consistency_helper() __asm { __asm nop } -#elif _MSC_VER >= 1300 -extern "C" void _ReadWriteBarrier(); -#pragma intrinsic(_ReadWriteBarrier) -#define __TBB_release_consistency_helper() _ReadWriteBarrier() -#else -#error Unsupported compiler - need to define __TBB_release_consistency_helper to support it -#endif - -inline void __TBB_rel_acq_fence() { __asm { __asm mfence } } - -#define __TBB_WORDSIZE 4 -#define __TBB_BIG_ENDIAN 0 - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4244 4267) -#endif - -extern "C" { - __int64 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ); - void __TBB_EXPORTED_FUNC __TBB_machine_store8 (volatile void *ptr, __int64 value ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr); -} - -template <typename T, size_t S> -struct __TBB_machine_load_store { - static inline T load_with_acquire(const volatile T& location) { - T to_return = location; - __TBB_release_consistency_helper(); - return to_return; - } - - static inline void store_with_release(volatile T &location, T value) { - __TBB_release_consistency_helper(); - location = value; - } -}; - -template <typename T> -struct __TBB_machine_load_store<T,8> { - static inline T load_with_acquire(const volatile T& location) { - return __TBB_machine_load8((volatile void *)&location); - } - - static inline void store_with_release(T &location, T value) { - __TBB_machine_store8((volatile void *)&location,(__int64)value); - } -}; - -template <typename T> -inline T __TBB_machine_load_with_acquire(const volatile T &location) { - return __TBB_machine_load_store<T,sizeof(T)>::load_with_acquire(location); -} - -template <typename T, typename V> -inline void __TBB_machine_store_with_release(T& location, V value) { - __TBB_machine_load_store<T,sizeof(T)>::store_with_release(location,value); -} - -//! Overload that exists solely to avoid /Wp64 warnings. -inline void __TBB_machine_store_with_release(size_t& location, size_t value) { - __TBB_machine_load_store<size_t,sizeof(size_t)>::store_with_release(location,value); -} - -#define __TBB_load_with_acquire(L) __TBB_machine_load_with_acquire((L)) -#define __TBB_store_with_release(L,V) __TBB_machine_store_with_release((L),(V))
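Note the split above between _ReadWriteBarrier, a compiler-only fence, and the mfence issued by __TBB_rel_acq_fence: only the latter stops x86 hardware from reordering a store with a later load. A Dekker-style sketch of the one case that needs the full fence, in modern-C++ equivalents (names ours):

    #include <atomic>

    std::atomic<int> flag0{0}, flag1{0};

    // Each thread raises its own flag, then checks the other's. Without a
    // full (seq_cst) fence the store may be reordered after the load and
    // both threads can enter; a compiler barrier alone cannot prevent that.
    bool thread0_tries_to_enter() {
        flag0.store(1, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_seq_cst);  // the mfence
        return flag1.load(std::memory_order_relaxed) == 0;
    }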
-#define __TBB_DEFINE_ATOMICS(S,T,U,A,C) \ -static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __TBB_release_consistency_helper(); \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov C , value \ - __asm mov A , comparand \ - __asm lock cmpxchg [edx], C \ - __asm mov result, A \ - } \ - __TBB_release_consistency_helper(); \ - return result; \ -} \ -\ -static inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __TBB_release_consistency_helper(); \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov A, addend \ - __asm lock xadd [edx], A \ - __asm mov result, A \ - } \ - __TBB_release_consistency_helper(); \ - return result; \ -}\ -\ -static inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __TBB_release_consistency_helper(); \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov A, value \ - __asm lock xchg [edx], A \ - __asm mov result, A \ - } \ - __TBB_release_consistency_helper(); \ - return result; \ -} - -__TBB_DEFINE_ATOMICS(1, __int8, __int8, al, cl) -__TBB_DEFINE_ATOMICS(2, __int16, __int16, ax, cx) -__TBB_DEFINE_ATOMICS(4, __int32, __int32, eax, ecx) -__TBB_DEFINE_ATOMICS(W, ptrdiff_t, ptrdiff_t, eax, ecx) - -static inline __int32 __TBB_machine_lg( unsigned __int64 i ) { - unsigned __int32 j; - __asm - { - bsr eax, i - mov j, eax - } - return j; -} - -static inline void __TBB_machine_OR( volatile void *operand, __int32 addend ) { - __asm - { - mov eax, addend - mov edx, [operand] - lock or [edx], eax - } -} - -static inline void __TBB_machine_AND( volatile void *operand, __int32 addend ) { - __asm - { - mov eax, addend - mov edx, [operand] - lock and [edx], eax - } -} - -static inline void __TBB_machine_pause (__int32 delay ) { - _asm - { - mov eax, delay - L1: - pause - add eax, -1 - jne L1 - } - return; -} - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswpW(P,V,C) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchaddW(P,V) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V) -#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8(P,V) -#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstoreW(P,V) - -// Should define this: -#define __TBB_Store8(P,V) __TBB_machine_store8(P,V) -#define __TBB_Load8(P) __TBB_machine_load8(P) -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - -// Definition of other functions -extern "C" __declspec(dllimport) int 
__stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Use generic definitions from tbb_machine.h -#undef __TBB_TryLockByte -#undef __TBB_LockByte - -#if defined(_MSC_VER)&&_MSC_VER<1400 - static inline void* __TBB_machine_get_current_teb () { - void* pteb; - __asm mov eax, fs:[0x18] - __asm mov pteb, eax - return pteb; - } -#endif - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warnings 4244, 4267 are back - diff --git a/tbb30_20100406oss/include/tbb/machine/windows_intel64.h b/tbb30_20100406oss/include/tbb/machine/windows_intel64.h deleted file mode 100644 index 8f9b74636..000000000 --- a/tbb30_20100406oss/include/tbb/machine/windows_intel64.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include <intrin.h> -#if !defined(__INTEL_COMPILER) -#pragma intrinsic(_InterlockedOr64) -#pragma intrinsic(_InterlockedAnd64) -#pragma intrinsic(_InterlockedCompareExchange) -#pragma intrinsic(_InterlockedCompareExchange64) -#pragma intrinsic(_InterlockedExchangeAdd) -#pragma intrinsic(_InterlockedExchangeAdd64) -#pragma intrinsic(_InterlockedExchange) -#pragma intrinsic(_InterlockedExchange64) -#endif /* !defined(__INTEL_COMPILER) */ - -#if defined(__INTEL_COMPILER) -#define __TBB_release_consistency_helper() __asm { __asm nop } -inline void __TBB_rel_acq_fence() { __asm { __asm mfence } } -#elif _MSC_VER >= 1300 -extern "C" void _ReadWriteBarrier(); -#pragma intrinsic(_ReadWriteBarrier) -#define __TBB_release_consistency_helper() _ReadWriteBarrier() -#pragma intrinsic(_mm_mfence) -inline void __TBB_rel_acq_fence() { _mm_mfence(); } -#endif - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 0 - -// ATTENTION: if you ever change argument types in machine-specific primitives, -// please take care of atomic_word<> specializations in tbb/atomic.h -extern "C" { - __int8 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp1 (volatile void *ptr, __int8 value, __int8 comparand ); - __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd1 (volatile void *ptr, __int8 addend ); - __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore1 (volatile void *ptr, __int8 value ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp2 (volatile void *ptr, __int16 value, __int16 comparand ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd2 (volatile void *ptr, __int16 addend ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore2 (volatile void *ptr, __int16 value ); - void __TBB_EXPORTED_FUNC __TBB_machine_pause (__int32 delay ); -} - - -#if !__INTEL_COMPILER -extern "C" unsigned char _BitScanReverse64( unsigned long* i, unsigned __int64 w ); -#pragma intrinsic(_BitScanReverse64) -#endif - -inline __int64 __TBB_machine_lg( unsigned __int64 i ) { -#if __INTEL_COMPILER - unsigned __int64 j; - __asm - { - bsr rax, i - mov j, rax - } -#else - unsigned long j; - _BitScanReverse64( &j, i ); -#endif - return j; -} - -inline void __TBB_machine_OR( volatile void *operand, intptr_t addend ) { - _InterlockedOr64((__int64*)operand, addend); -} - -inline void __TBB_machine_AND( volatile void *operand, intptr_t addend ) { - _InterlockedAnd64((__int64*)operand, addend); -} - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) _InterlockedCompareExchange( (long*) P , V , C ) -#define __TBB_CompareAndSwap8(P,V,C) _InterlockedCompareExchange64( (__int64*) P , V , C ) -#define __TBB_CompareAndSwapW(P,V,C) _InterlockedCompareExchange64( (__int64*) P , V , C ) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) -#define __TBB_FetchAndAdd4(P,V) _InterlockedExchangeAdd((long*) P , V ) -#define __TBB_FetchAndAdd8(P,V) _InterlockedExchangeAdd64((__int64*) P , V ) -#define __TBB_FetchAndAddW(P,V) _InterlockedExchangeAdd64((__int64*) P , V ) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -#define __TBB_FetchAndStore4(P,V) _InterlockedExchange((long*) P , V ) -#define __TBB_FetchAndStore8(P,V) _InterlockedExchange64((__int64*) P , V ) -#define __TBB_FetchAndStoreW(P,V) _InterlockedExchange64((__int64*) P , V )
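On this platform the macro layer is a thin veneer over MSVC compiler intrinsics. What a call site expands to in practice, shown on a hypothetical reference counter (not code from the header; MSVC-only):

    #include <intrin.h>

    // _InterlockedExchangeAdd64 returns the value *before* the addition,
    // the same convention as __TBB_FetchAndAdd8 above.
    __int64 add_ref(volatile __int64* count) {
        return _InterlockedExchangeAdd64(count, 1);
    }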
- -// Not used if wordsize == 8 -#undef __TBB_Store8 -#undef __TBB_Load8 - -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Use generic definitions from tbb_machine.h -#undef __TBB_TryLockByte -#undef __TBB_LockByte
diff --git a/tbb30_20100406oss/include/tbb/machine/xbox360_ppc.h b/tbb30_20100406oss/include/tbb/machine/xbox360_ppc.h deleted file mode 100644 index 0a3b2efeb..000000000 --- a/tbb30_20100406oss/include/tbb/machine/xbox360_ppc.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#define NONET -#define NOD3D -#include "xtl.h" -#include "ppcintrinsics.h" - -#if _MSC_VER >= 1300 -extern "C" void _ReadWriteBarrier(); -#pragma intrinsic(_ReadWriteBarrier) -#define __TBB_release_consistency_helper() _ReadWriteBarrier() -#endif - -inline void __TBB_rel_acq_fence() { __lwsync(); } - -#define __TBB_WORDSIZE 4 -#define __TBB_BIG_ENDIAN 1 - -//todo: define __TBB_DECL_FENCED_ATOMICS and define acquire/release primitives to maximize performance - -typedef __int64 int64_t; //required for definition of Store8/Load8 in atomic.h -typedef unsigned char uint8_t; //same reason - -inline __int32 __TBB_machine_cmpswp4(volatile void *ptr, __int32 value, __int32 comparand ) -{ - __lwsync(); - __int32 result = InterlockedCompareExchange((volatile LONG*)ptr, value, comparand); - __lwsync(); - return result; -} - -inline __int64 __TBB_machine_cmpswp8(volatile void *ptr, __int64 value, __int64 comparand ) -{ - __lwsync(); - __int64 result = InterlockedCompareExchange64((volatile LONG64*)ptr, value, comparand); - __lwsync(); - return result; -} - -#pragma optimize( "", off ) -inline void __TBB_machine_pause (__int32 delay ) -{ - for (__int32 i=0; i<delay; i++) - ; -} -#pragma optimize( "", on )
diff --git a/tbb30_20100406oss/include/tbb/mutex.h b/tbb30_20100406oss/include/tbb/mutex.h deleted file mode 100644 --- a/tbb30_20100406oss/include/tbb/mutex.h +++ /dev/null -#ifndef __TBB_mutex_H -#define __TBB_mutex_H - -#if _WIN32||_WIN64 - #include <windows.h> - #if !defined(_WIN32_WINNT) - // The following Windows API function is declared explicitly; - // otherwise any user would have to specify /D_WIN32_WINNT=0x0400 - extern "C" BOOL WINAPI TryEnterCriticalSection( LPCRITICAL_SECTION ); - #endif -#else /* if not _WIN32||_WIN64 */ - #include <pthread.h> -#endif /* _WIN32||_WIN64 */ - -#include <new> -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Wrapper around the platform's native reader-writer lock. -/** For testing purposes only. - @ingroup synchronization */ -class mutex { -public: - //! Construct unacquired mutex. - mutex() { -#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS - internal_construct(); -#else - #if _WIN32||_WIN64 - InitializeCriticalSection(&impl); - #else - int error_code = pthread_mutex_init(&impl,NULL); - if( error_code ) - tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed"); - #endif /* _WIN32||_WIN64*/ -#endif /* TBB_USE_ASSERT */ - }; - - ~mutex() { -#if TBB_USE_ASSERT - internal_destroy(); -#else - #if _WIN32||_WIN64 - DeleteCriticalSection(&impl); - #else - pthread_mutex_destroy(&impl); - - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - }; - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { - public: - //! Construct lock that has not acquired a mutex. - scoped_lock() : my_mutex(NULL) {}; - - //! Acquire lock on given mutex. - scoped_lock( mutex& mutex ) { - acquire( mutex ); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) - release(); - } - - //! Acquire lock on given mutex. - void acquire( mutex& mutex ) { -#if TBB_USE_ASSERT - internal_acquire(mutex); -#else - mutex.lock(); - my_mutex = &mutex; -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquire lock on given mutex. - bool try_acquire( mutex& mutex ) { -#if TBB_USE_ASSERT - return internal_try_acquire (mutex); -#else - bool result = mutex.try_lock(); - if( result ) - my_mutex = &mutex; - return result; -#endif /* TBB_USE_ASSERT */ - }
- //! Release lock - void release() { -#if TBB_USE_ASSERT - internal_release (); -#else - my_mutex->unlock(); - my_mutex = NULL; -#endif /* TBB_USE_ASSERT */ - } - - private: - //! The pointer to the current mutex to work - mutex* my_mutex; - - //! All checks from acquire using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_acquire( mutex& m ); - - //! All checks from try_acquire using mutex.state were moved here - bool __TBB_EXPORTED_METHOD internal_try_acquire( mutex& m ); - - //! All checks from release using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_release(); - - friend class mutex; - }; - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire lock - void lock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - new(tmp.begin()) scoped_lock(*this); -#else - #if _WIN32||_WIN64 - EnterCriticalSection(&impl); - #else - pthread_mutex_lock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = NULL; - return s.internal_try_acquire(*this); -#else - #if _WIN32||_WIN64 - return TryEnterCriticalSection(&impl)!=0; - #else - return pthread_mutex_trylock(&impl)==0; - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void unlock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.internal_release(); -#else - #if _WIN32||_WIN64 - LeaveCriticalSection(&impl); - #else - pthread_mutex_unlock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Return native_handle - #if _WIN32||_WIN64 - typedef LPCRITICAL_SECTION native_handle_type; - #else - typedef pthread_mutex_t* native_handle_type; - #endif - native_handle_type native_handle() { return (native_handle_type) &impl; } - - enum state_t { - INITIALIZED=0x1234, - DESTROYED=0x789A, - HELD=0x56CD - }; -private: -#if _WIN32||_WIN64 - CRITICAL_SECTION impl; - enum state_t state; -#else - pthread_mutex_t impl; -#endif /* _WIN32||_WIN64 */ - - //! All checks from mutex constructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_construct(); - - //! All checks from mutex destructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_destroy(); - -#if _WIN32||_WIN64 -public: - //! Set the internal state - void set_state( state_t to ) { state = to; } -#endif -}; - -__TBB_DEFINE_PROFILING_SET_NAME(mutex) - -} // namespace tbb - -#endif /* __TBB_mutex_H */
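Typical use of the class just deleted, for context: the scoped_lock releases the mutex on every path out of the block, including exceptions.

    #include "tbb/mutex.h"

    tbb::mutex m;
    int shared_value = 0;

    void bump() {
        tbb::mutex::scoped_lock lock(m);  // acquires m here
        ++shared_value;
    }                                     // ~scoped_lock releases m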
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_null_mutex_H -#define __TBB_null_mutex_H - -namespace tbb { - -//! A mutex which does nothing -/** A null_mutex does no operation and simulates success. - @ingroup synchronization */ -class null_mutex { - //! Deny assignment and copy construction - null_mutex( const null_mutex& ); - void operator=( const null_mutex& ); -public: - //! Represents acquisition of a mutex. - class scoped_lock { - public: - scoped_lock() {} - scoped_lock( null_mutex& ) {} - ~scoped_lock() {} - void acquire( null_mutex& ) {} - bool try_acquire( null_mutex& ) { return true; } - void release() {} - }; - - null_mutex() {} - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = true; -}; - -} - -#endif /* __TBB_null_mutex_H */ diff --git a/tbb30_20100406oss/include/tbb/null_rw_mutex.h b/tbb30_20100406oss/include/tbb/null_rw_mutex.h deleted file mode 100644 index 24623896c..000000000 --- a/tbb30_20100406oss/include/tbb/null_rw_mutex.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_null_rw_mutex_H -#define __TBB_null_rw_mutex_H - -namespace tbb { - -//! A rw mutex which does nothing -/** A null_rw_mutex is a rw mutex that does nothing and simulates successful operation. - @ingroup synchronization */ -class null_rw_mutex { - //! 
Deny assignment and copy construction - null_rw_mutex( const null_rw_mutex& ); - void operator=( const null_rw_mutex& ); -public: - //! Represents acquisition of a mutex. - class scoped_lock { - public: - scoped_lock() {} - scoped_lock( null_rw_mutex& , bool = true ) {} - ~scoped_lock() {} - void acquire( null_rw_mutex& , bool = true ) {} - bool upgrade_to_writer() { return true; } - bool downgrade_to_reader() { return true; } - bool try_acquire( null_rw_mutex& , bool = true ) { return true; } - void release() {} - }; - - null_rw_mutex() {} - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = true; -}; - -} - -#endif /* __TBB_null_rw_mutex_H */ diff --git a/tbb30_20100406oss/include/tbb/parallel_do.h b/tbb30_20100406oss/include/tbb/parallel_do.h deleted file mode 100644 index 6f91f72e4..000000000 --- a/tbb30_20100406oss/include/tbb/parallel_do.h +++ /dev/null @@ -1,508 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_do_H -#define __TBB_parallel_do_H - -#include "task.h" -#include "aligned_space.h" -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - template class parallel_do_feeder_impl; - template class do_group_task; - - //! Strips its template type argument from 'cv' and '&' qualifiers - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - // Most of the compilers remove cv-qualifiers from non-reference function argument types. - // But unfortunately there are those that don't. - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; -} // namespace internal -//! @endcond - -//! 
Class the user supplied algorithm body uses to add new tasks -/** \param Item Work item type **/ -template -class parallel_do_feeder: internal::no_copy -{ - parallel_do_feeder() {} - virtual ~parallel_do_feeder () {} - virtual void internal_add( const Item& item ) = 0; - template friend class internal::parallel_do_feeder_impl; -public: - //! Add a work item to a running parallel_do. - void add( const Item& item ) {internal_add(item);} -}; - -//! @cond INTERNAL -namespace internal { - //! For internal use only. - /** Selects one of the two possible forms of function call member operator. - @ingroup algorithms **/ - template - class parallel_do_operator_selector - { - typedef parallel_do_feeder Feeder; - template - static void internal_call( const Body& obj, A1& arg1, A2&, void (Body::*)(CvItem) const ) { - obj(arg1); - } - template - static void internal_call( const Body& obj, A1& arg1, A2& arg2, void (Body::*)(CvItem, parallel_do_feeder&) const ) { - obj(arg1, arg2); - } - - public: - template - static void call( const Body& obj, A1& arg1, A2& arg2 ) - { - internal_call( obj, arg1, arg2, &Body::operator() ); - } - }; - - //! For internal use only. - /** Executes one iteration of a do. - @ingroup algorithms */ - template - class do_iteration_task: public task - { - typedef parallel_do_feeder_impl feeder_type; - - Item my_value; - feeder_type& my_feeder; - - do_iteration_task( const Item& value, feeder_type& feeder ) : - my_value(value), my_feeder(feeder) - {} - - /*override*/ - task* execute() - { - parallel_do_operator_selector::call(*my_feeder.my_body, my_value, my_feeder); - return NULL; - } - - template friend class parallel_do_feeder_impl; - }; // class do_iteration_task - - template - class do_iteration_task_iter: public task - { - typedef parallel_do_feeder_impl feeder_type; - - Iterator my_iter; - feeder_type& my_feeder; - - do_iteration_task_iter( const Iterator& iter, feeder_type& feeder ) : - my_iter(iter), my_feeder(feeder) - {} - - /*override*/ - task* execute() - { - parallel_do_operator_selector::call(*my_feeder.my_body, *my_iter, my_feeder); - return NULL; - } - - template friend class do_group_task_forward; - template friend class do_group_task_input; - template friend class do_task_iter; - }; // class do_iteration_task_iter - - //! For internal use only. - /** Implements new task adding procedure. - @ingroup algorithms **/ - template - class parallel_do_feeder_impl : public parallel_do_feeder - { - /*override*/ - void internal_add( const Item& item ) - { - typedef do_iteration_task iteration_type; - - iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this); - - t.spawn( t ); - } - public: - const Body* my_body; - empty_task* my_barrier; - - parallel_do_feeder_impl() - { - my_barrier = new( task::allocate_root() ) empty_task(); - __TBB_ASSERT(my_barrier, "root task allocation failed"); - } - -#if __TBB_TASK_GROUP_CONTEXT - parallel_do_feeder_impl(tbb::task_group_context &context) - { - my_barrier = new( task::allocate_root(context) ) empty_task(); - __TBB_ASSERT(my_barrier, "root task allocation failed"); - } -#endif - - ~parallel_do_feeder_impl() - { - my_barrier->destroy(*my_barrier); - } - }; // class parallel_do_feeder_impl - - - //! For internal use only - /** Unpacks a block of iterations. 
- @ingroup algorithms */ - - template - class do_group_task_forward: public task - { - static const size_t max_arg_size = 4; - - typedef parallel_do_feeder_impl feeder_type; - - feeder_type& my_feeder; - Iterator my_first; - size_t my_size; - - do_group_task_forward( Iterator first, size_t size, feeder_type& feeder ) - : my_feeder(feeder), my_first(first), my_size(size) - {} - - /*override*/ task* execute() - { - typedef do_iteration_task_iter iteration_type; - __TBB_ASSERT( my_size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type( my_first, my_feeder ); - ++my_first; - if( ++k==my_size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - - template friend class do_task_iter; - }; // class do_group_task_forward - - template - class do_group_task_input: public task - { - static const size_t max_arg_size = 4; - - typedef parallel_do_feeder_impl feeder_type; - - feeder_type& my_feeder; - size_t my_size; - aligned_space my_arg; - - do_group_task_input( feeder_type& feeder ) - : my_feeder(feeder), my_size(0) - {} - - /*override*/ task* execute() - { - typedef do_iteration_task_iter iteration_type; - __TBB_ASSERT( my_size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type( my_arg.begin() + k, my_feeder ); - if( ++k==my_size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - - ~do_group_task_input(){ - for( size_t k=0; k~Item(); - } - - template friend class do_task_iter; - }; // class do_group_task_input - - //! For internal use only. - /** Gets block of iterations and packages them into a do_group_task. - @ingroup algorithms */ - template - class do_task_iter: public task - { - typedef parallel_do_feeder_impl feeder_type; - - public: - do_task_iter( Iterator first, Iterator last , feeder_type& feeder ) : - my_first(first), my_last(last), my_feeder(feeder) - {} - - private: - Iterator my_first; - Iterator my_last; - feeder_type& my_feeder; - - /* Do not merge run(xxx) and run_xxx() methods. They are separated in order - to make sure that compilers will eliminate unused argument of type xxx - (that is will not put it on stack). The sole purpose of this argument - is overload resolution. - - An alternative could be using template functions, but explicit specialization - of member function templates is not supported for non specialized class - templates. Besides template functions would always fall back to the least - efficient variant (the one for input iterators) in case of iterators having - custom tags derived from basic ones. */ - /*override*/ task* execute() - { - typedef typename std::iterator_traits::iterator_category iterator_tag; - return run( (iterator_tag*)NULL ); - } - - /** This is the most restricted variant that operates on input iterators or - iterators with unknown tags (tags not derived from the standard ones). 
**/ - inline task* run( void* ) { return run_for_input_iterator(); } - - task* run_for_input_iterator() { - typedef do_group_task_input block_type; - - block_type& t = *new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(my_feeder); - size_t k=0; - while( !(my_first == my_last) ) { - new (t.my_arg.begin() + k) Item(*my_first); - ++my_first; - if( ++k==block_type::max_arg_size ) { - if ( !(my_first == my_last) ) - recycle_to_reexecute(); - break; - } - } - if( k==0 ) { - destroy(t); - return NULL; - } else { - t.my_size = k; - return &t; - } - } - - inline task* run( std::forward_iterator_tag* ) { return run_for_forward_iterator(); } - - task* run_for_forward_iterator() { - typedef do_group_task_forward block_type; - - Iterator first = my_first; - size_t k=0; - while( !(my_first==my_last) ) { - ++my_first; - if( ++k==block_type::max_arg_size ) { - if ( !(my_first==my_last) ) - recycle_to_reexecute(); - break; - } - } - return k==0 ? NULL : new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(first, k, my_feeder); - } - - inline task* run( std::random_access_iterator_tag* ) { return run_for_random_access_iterator(); } - - task* run_for_random_access_iterator() { - typedef do_group_task_forward block_type; - typedef do_iteration_task_iter iteration_type; - - size_t k = static_cast(my_last-my_first); - if( k > block_type::max_arg_size ) { - Iterator middle = my_first + k/2; - - empty_task& c = *new( allocate_continuation() ) empty_task; - do_task_iter& b = *new( c.allocate_child() ) do_task_iter(middle, my_last, my_feeder); - recycle_as_child_of(c); - - my_last = middle; - c.set_ref_count(2); - c.spawn(b); - return this; - }else if( k != 0 ) { - task_list list; - task* t; - size_t k1=0; - for(;;) { - t = new( allocate_child() ) iteration_type(my_first, my_feeder); - ++my_first; - if( ++k1==k ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - } - return NULL; - } - }; // class do_task_iter - - //! For internal use only. - /** Implements parallel iteration over a range. - @ingroup algorithms */ - template - void run_parallel_do( Iterator first, Iterator last, const Body& body -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif - ) - { - typedef do_task_iter root_iteration_task; -#if __TBB_TASK_GROUP_CONTEXT - parallel_do_feeder_impl feeder(context); -#else - parallel_do_feeder_impl feeder; -#endif - feeder.my_body = &body; - - root_iteration_task &t = *new( feeder.my_barrier->allocate_child() ) root_iteration_task(first, last, feeder); - - feeder.my_barrier->set_ref_count(2); - feeder.my_barrier->spawn_and_wait_for_all(t); - } - - //! For internal use only. - /** Detects types of Body's operator function arguments. - @ingroup algorithms **/ - template - void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item) const -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif // __TBB_TASK_GROUP_CONTEXT - ) - { - run_parallel_do::type>( first, last, body -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); - } - - //! For internal use only. - /** Detects types of Body's operator function arguments. 
- @ingroup algorithms **/ - template - void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item, parallel_do_feeder<_Item>&) const -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif // __TBB_TASK_GROUP_CONTEXT - ) - { - run_parallel_do::type>( first, last, body -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); - } - -} // namespace internal -//! @endcond - - -/** \page parallel_do_body_req Requirements on parallel_do body - Class \c Body implementing the concept of parallel_do body must define: - - \code - B::operator()( - cv_item_type item, - parallel_do_feeder& feeder - ) const - - OR - - B::operator()( cv_item_type& item ) const - \endcode Process item. - May be invoked concurrently for the same \c this but different \c item. - - - \code item_type( const item_type& ) \endcode - Copy a work item. - - \code ~item_type() \endcode Destroy a work item -**/ - -/** \name parallel_do - See also requirements on \ref parallel_do_body_req "parallel_do Body". **/ -//@{ -//! Parallel iteration over a range, with optional addition of more work. -/** @ingroup algorithms */ -template -void parallel_do( Iterator first, Iterator last, const Body& body ) -{ - if ( first == last ) - return; -#if __TBB_TASK_GROUP_CONTEXT - task_group_context context; -#endif // __TBB_TASK_GROUP_CONTEXT - internal::select_parallel_do( first, last, body, &Body::operator() -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration over a range, with optional addition of more work and user-supplied context -/** @ingroup algorithms */ -template -void parallel_do( Iterator first, Iterator last, const Body& body, task_group_context& context ) -{ - if ( first == last ) - return; - internal::select_parallel_do( first, last, body, &Body::operator(), context ); -} -#endif // __TBB_TASK_GROUP_CONTEXT - -//@} - -} // namespace - -#endif /* __TBB_parallel_do_H */ diff --git a/tbb30_20100406oss/include/tbb/parallel_for.h b/tbb30_20100406oss/include/tbb/parallel_for.h deleted file mode 100644 index 0b3c2be6b..000000000 --- a/tbb30_20100406oss/include/tbb/parallel_for.h +++ /dev/null @@ -1,242 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
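Before the parallel_for.h deletion continues: a usage sketch of the parallel_do interface removed above. The Node type and traversal are illustrative only, but the feeder-taking operator() form matches the body requirements documented in the header:

    #include "tbb/parallel_do.h"
    #include <vector>

    struct Node { Node* left; Node* right; };   // illustrative toy type

    // A body may take a feeder as its second argument and add work on
    // the fly; select_parallel_do above detects this overload.
    struct VisitBody {
        void operator()( Node* n, tbb::parallel_do_feeder<Node*>& feeder ) const {
            if( n->left )  feeder.add(n->left);
            if( n->right ) feeder.add(n->right);
        }
    };

    void visit_all( std::vector<Node*>& roots ) {   // roots assumed non-null
        tbb::parallel_do( roots.begin(), roots.end(), VisitBody() );
    }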
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_for_H -#define __TBB_parallel_for_H - -#include "task.h" -#include "partitioner.h" -#include "blocked_range.h" -#include -#include "tbb_exception.h" - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - //! Task type used in parallel_for - /** @ingroup algorithms */ - template - class start_for: public task { - Range my_range; - const Body my_body; - typename Partitioner::partition_type my_partition; - /*override*/ task* execute(); - - //! Constructor for root task. - start_for( const Range& range, const Body& body, Partitioner& partitioner ) : - my_range(range), - my_body(body), - my_partition(partitioner) - { - } - //! Splitting constructor used to generate children. - /** this becomes left child. Newly constructed object is right child. */ - start_for( start_for& parent_, split ) : - my_range(parent_.my_range,split()), - my_body(parent_.my_body), - my_partition(parent_.my_partition,split()) - { - my_partition.set_affinity(*this); - } - //! Update affinity info, if any. - /*override*/ void note_affinity( affinity_id id ) { - my_partition.note_affinity( id ); - } - public: - static void run( const Range& range, const Body& body, const Partitioner& partitioner ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - start_for& a = *new(task::allocate_root()) start_for(range,body,const_cast(partitioner)); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. - task_group_context context; - start_for& a = *new(task::allocate_root(context)) start_for(range,body,const_cast(partitioner)); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - task::spawn_root_and_wait(a); - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, const Body& body, const Partitioner& partitioner, task_group_context& context ) { - if( !range.empty() ) { - start_for& a = *new(task::allocate_root(context)) start_for(range,body,const_cast(partitioner)); - task::spawn_root_and_wait(a); - } - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - }; - - template - task* start_for::execute() { - if( !my_range.is_divisible() || my_partition.should_execute_range(*this) ) { - my_body( my_range ); - return my_partition.continue_after_execute_range(); - } else { - empty_task& c = *new( this->allocate_continuation() ) empty_task; - recycle_as_child_of(c); - c.set_ref_count(2); - bool delay = my_partition.decide_whether_to_delay(); - start_for& b = *new( c.allocate_child() ) start_for(*this,split()); - my_partition.spawn_or_delay(delay,b); - return this; - } - } -} // namespace internal -//! @endcond - - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_for_body_req Requirements on parallel_for body - Class \c Body implementing the concept of parallel_for body must define: - - \code Body::Body( const Body& ); \endcode Copy constructor - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( Range& r ) const; \endcode Function call operator applying the body to range \c r. -**/ - -/** \name parallel_for - See also requirements on \ref range_req "Range" and \ref parallel_for_body_req "parallel_for Body". **/ -//@{ - -//! Parallel iteration over range with default partitioner. 
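A usage sketch of these overloads follows; the array-squaring body is illustrative, not part of the header, and the overload declarations resume immediately below.

    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    // Illustrative Body: squares each element of an array in parallel.
    struct SquareBody {
        float* a;
        void operator()( const tbb::blocked_range<size_t>& r ) const {
            for( size_t i = r.begin(); i != r.end(); ++i )
                a[i] *= a[i];
        }
    };

    void square_all( float* a, size_t n ) {
        SquareBody body = { a };
        tbb::parallel_for( tbb::blocked_range<size_t>(0, n), body );
    }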
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body ) {
-    internal::start_for<Range,Body,const __TBB_DEFAULT_PARTITIONER>::run(range,body,__TBB_DEFAULT_PARTITIONER());
-}
-
-//! Parallel iteration over range with simple partitioner.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) {
-    internal::start_for<Range,Body,const simple_partitioner>::run(range,body,partitioner);
-}
-
-//! Parallel iteration over range with auto_partitioner.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) {
-    internal::start_for<Range,Body,const auto_partitioner>::run(range,body,partitioner);
-}
-
-//! Parallel iteration over range with affinity_partitioner.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) {
-    internal::start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner);
-}
-
-#if __TBB_TASK_GROUP_CONTEXT
-//! Parallel iteration over range with simple partitioner and user-supplied context.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner, task_group_context& context ) {
-    internal::start_for<Range,Body,const simple_partitioner>::run(range, body, partitioner, context);
-}
-
-//! Parallel iteration over range with auto_partitioner and user-supplied context.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner, task_group_context& context ) {
-    internal::start_for<Range,Body,const auto_partitioner>::run(range, body, partitioner, context);
-}
-
-//! Parallel iteration over range with affinity_partitioner and user-supplied context.
-/** @ingroup algorithms **/
-template<typename Range, typename Body>
-void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner, task_group_context& context ) {
-    internal::start_for<Range,Body,affinity_partitioner>::run(range,body,partitioner, context);
-}
-#endif /* __TBB_TASK_GROUP_CONTEXT */
-//@}
-
-//! @cond INTERNAL
-namespace internal {
-    //! Calls the function with values from range [begin, end) with a step provided
-template <typename Function, typename Index>
-class parallel_for_body : internal::no_assign {
-    const Function &my_func;
-    const Index my_begin;
-    const Index my_step;
-public:
-    parallel_for_body( const Function& _func, Index& _begin, Index& _step)
-        : my_func(_func), my_begin(_begin), my_step(_step) {}
-
-    void operator()( tbb::blocked_range<Index>& r ) const {
-        for( Index i = r.begin(), k = my_begin + i * my_step; i < r.end(); i++, k = k + my_step)
-            my_func( k );
-    }
-};
-} // namespace internal
-//! @endcond
-
-namespace strict_ppl {
-
-//@{
-//! Parallel iteration over a range of integers with a step provided
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f) {
-    tbb::task_group_context context;
-    parallel_for(first, last, step, f, context);
-}
-template <typename Index, typename Function>
-void parallel_for(Index first, Index last, Index step, const Function& f, tbb::task_group_context &context) {
-    if (step <= 0 )
-        internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument
-    else if (last > first) {
-        // Above "else" is necessary to prevent "potential divide by zero" warning
-        Index end = (last - first) / step;
-        if (first + end * step < last) end++;
-        tbb::blocked_range<Index> range(static_cast<Index>(0), end);
-        internal::parallel_for_body<Function, Index> body(f, first, step);
-        tbb::parallel_for(range, body, tbb::auto_partitioner(), context);
-    }
-}
-//! 
Parallel iteration over a range of integers with a default step value -template -void parallel_for(Index first, Index last, const Function& f) { - tbb::task_group_context context; - parallel_for(first, last, static_cast(1), f, context); -} -template -void parallel_for(Index first, Index last, const Function& f, tbb::task_group_context &context) { - parallel_for(first, last, static_cast(1), f, context); -} - -//@} - -} // namespace strict_ppl - -using strict_ppl::parallel_for; - -} // namespace tbb - -#endif /* __TBB_parallel_for_H */ - diff --git a/tbb30_20100406oss/include/tbb/parallel_for_each.h b/tbb30_20100406oss/include/tbb/parallel_for_each.h deleted file mode 100644 index 6b8d862c0..000000000 --- a/tbb30_20100406oss/include/tbb/parallel_for_each.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_for_each_H -#define __TBB_parallel_for_each_H - -#include "parallel_do.h" - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - // The class calls user function in operator() - template - class parallel_for_each_body : internal::no_assign { - const Function &my_func; - public: - parallel_for_each_body(const Function &_func) : my_func(_func) {} - parallel_for_each_body(const parallel_for_each_body &_caller) : my_func(_caller.my_func) {} - - void operator() ( typename std::iterator_traits::value_type& value ) const { - my_func(value); - } - }; -} // namespace internal -//! @endcond - -/** \name parallel_for_each - **/ -//@{ -//! Calls function f for all items from [first, last) interval using user-supplied context -/** @ingroup algorithms */ -template -void parallel_for_each(InputIterator first, InputIterator last, const Function& f, task_group_context &context) { - internal::parallel_for_each_body body(f); - - tbb::parallel_do (first, last, body, context); -} - -//! 
Uses default context -template -void parallel_for_each(InputIterator first, InputIterator last, const Function& f) { - internal::parallel_for_each_body body(f); - - tbb::parallel_do (first, last, body); -} - -//@} - -} // namespace - -#endif /* __TBB_parallel_for_each_H */ diff --git a/tbb30_20100406oss/include/tbb/parallel_invoke.h b/tbb30_20100406oss/include/tbb/parallel_invoke.h deleted file mode 100644 index 02c3e80ef..000000000 --- a/tbb30_20100406oss/include/tbb/parallel_invoke.h +++ /dev/null @@ -1,359 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_invoke_H -#define __TBB_parallel_invoke_H - -#include "task.h" - -namespace tbb { - -//! 
@cond INTERNAL -namespace internal { - // Simple task object, executing user method - template - class function_invoker : public task{ - public: - function_invoker(const function& _function) : my_function(_function) {} - private: - const function &my_function; - /*override*/ - task* execute() - { - my_function(); - return NULL; - } - }; - - // The class spawns two or three child tasks - template - class spawner : public task { - private: - const function1& my_func1; - const function2& my_func2; - const function3& my_func3; - bool is_recycled; - - task* execute (){ - if(is_recycled){ - return NULL; - }else{ - __TBB_ASSERT(N==2 || N==3, "Number of arguments passed to spawner is wrong"); - set_ref_count(N); - recycle_as_safe_continuation(); - internal::function_invoker* invoker2 = new (allocate_child()) internal::function_invoker(my_func2); - __TBB_ASSERT(invoker2, "Child task allocation failed"); - spawn(*invoker2); - size_t n = N; // To prevent compiler warnings - if (n>2) { - internal::function_invoker* invoker3 = new (allocate_child()) internal::function_invoker(my_func3); - __TBB_ASSERT(invoker3, "Child task allocation failed"); - spawn(*invoker3); - } - my_func1(); - is_recycled = true; - return NULL; - } - } // execute - - public: - spawner(const function1& _func1, const function2& _func2, const function3& _func3) : my_func1(_func1), my_func2(_func2), my_func3(_func3), is_recycled(false) {} - }; - - // Creates and spawns child tasks - class parallel_invoke_helper : public empty_task { - public: - // Dummy functor class - class parallel_invoke_noop { - public: - void operator() () const {} - }; - // Creates a helper object with user-defined number of children expected - parallel_invoke_helper(int number_of_children) - { - set_ref_count(number_of_children + 1); - } - // Adds child task and spawns it - template - void add_child (const function &_func) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(_func); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn(*invoker); - } - - // Adds a task with multiple child tasks and spawns it - // two arguments - template - void add_children (const function1& _func1, const function2& _func2) - { - // The third argument is dummy, it is ignored actually. 
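-            // (spawner<N,...> is instantiated for two or three functors;
-            // passing parallel_invoke_noop as the third functor lets the
-            // N==2 case reuse the same template, and spawner::execute()
-            // only spawns the third invoker when N>2.)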
-            parallel_invoke_noop noop;
-            internal::spawner<2, function1, function2, parallel_invoke_noop>& sub_root = *new(allocate_child())internal::spawner<2, function1, function2, parallel_invoke_noop>(_func1, _func2, noop);
-            spawn(sub_root);
-        }
-        // three arguments
-        template <typename function1, typename function2, typename function3>
-        void add_children (const function1& _func1, const function2& _func2, const function3& _func3)
-        {
-            internal::spawner<3, function1, function2, function3>& sub_root = *new(allocate_child())internal::spawner<3, function1, function2, function3>(_func1, _func2, _func3);
-            spawn(sub_root);
-        }
-
-        // Waits for all child tasks
-        template <typename F0>
-        void run_and_finish(const F0& f0)
-        {
-            internal::function_invoker<F0>* invoker = new (allocate_child()) internal::function_invoker<F0>(f0);
-            __TBB_ASSERT(invoker, "Child task allocation failed");
-            spawn_and_wait_for_all(*invoker);
-        }
-    };
-    // The class destroys root if exception occurred as well as in normal case
-    class parallel_invoke_cleaner: internal::no_copy {
-    public:
-        parallel_invoke_cleaner(int number_of_children, tbb::task_group_context& context) : root(*new(task::allocate_root(context)) internal::parallel_invoke_helper(number_of_children))
-        {}
-        ~parallel_invoke_cleaner(){
-            root.destroy(root);
-        }
-        internal::parallel_invoke_helper& root;
-    };
-} // namespace internal
-//! @endcond
-
-/** \name parallel_invoke
-    **/
-//@{
-//! Executes a list of tasks in parallel and waits for all tasks to complete.
-/** @ingroup algorithms */
-
-// parallel_invoke with user-defined context
-// two arguments
-template<typename F0, typename F1>
-void parallel_invoke(const F0& f0, const F1& f1, tbb::task_group_context& context) {
-    internal::parallel_invoke_cleaner cleaner(2, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_child(f1);
-
-    root.run_and_finish(f0);
-}
-
-// three arguments
-template<typename F0, typename F1, typename F2>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, tbb::task_group_context& context) {
-    internal::parallel_invoke_cleaner cleaner(3, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_child(f2);
-    root.add_child(f1);
-
-    root.run_and_finish(f0);
-}
-
-// four arguments
-template<typename F0, typename F1, typename F2, typename F3>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3,
-                     tbb::task_group_context& context)
-{
-    internal::parallel_invoke_cleaner cleaner(4, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_child(f3);
-    root.add_child(f2);
-    root.add_child(f1);
-
-    root.run_and_finish(f0);
-}
-
-// five arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     tbb::task_group_context& context)
-{
-    internal::parallel_invoke_cleaner cleaner(3, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_children(f4, f3);
-    root.add_children(f2, f1);
-
-    root.run_and_finish(f0);
-}
-
-// six arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5,
-                     tbb::task_group_context& context)
-{
-    internal::parallel_invoke_cleaner cleaner(3, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_children(f5, f4, f3);
-    root.add_children(f2, f1);
-
-    root.run_and_finish(f0);
-}
-
-// seven arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     const F5& f5, const F6& f6,
-                     tbb::task_group_context& context)
-{
-    internal::parallel_invoke_cleaner cleaner(3, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_children(f6, f5, f4);
-    root.add_children(f3, f2, f1);
-
-    root.run_and_finish(f0);
-}
-
-// eight arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4,
-         typename F5, typename F6, typename F7>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     const F5& f5, const F6& f6, const F7& f7,
-                     tbb::task_group_context& context)
-{
-    internal::parallel_invoke_cleaner cleaner(4, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_children(f7, f6, f5);
-    root.add_children(f4, f3);
-    root.add_children(f2, f1);
-
-    root.run_and_finish(f0);
-}
-
-// nine arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4,
-         typename F5, typename F6, typename F7, typename F8>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     const F5& f5, const F6& f6, const F7& f7, const F8& f8,
-                     tbb::task_group_context& context)
-{
-    internal::parallel_invoke_cleaner cleaner(4, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_children(f8, f7, f6);
-    root.add_children(f5, f4, f3);
-    root.add_children(f2, f1);
-
-    root.run_and_finish(f0);
-}
-
-// ten arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4,
-         typename F5, typename F6, typename F7, typename F8, typename F9>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9,
-                     tbb::task_group_context& context)
-{
-    internal::parallel_invoke_cleaner cleaner(4, context);
-    internal::parallel_invoke_helper& root = cleaner.root;
-
-    root.add_children(f9, f8, f7);
-    root.add_children(f6, f5, f4);
-    root.add_children(f3, f2, f1);
-
-    root.run_and_finish(f0);
-}
-
-// two arguments
-template<typename F0, typename F1>
-void parallel_invoke(const F0& f0, const F1& f1) {
-    task_group_context context;
-    parallel_invoke<F0, F1>(f0, f1, context);
-}
-// three arguments
-template<typename F0, typename F1, typename F2>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2) {
-    task_group_context context;
-    parallel_invoke<F0, F1, F2>(f0, f1, f2, context);
-}
-// four arguments
-template<typename F0, typename F1, typename F2, typename F3>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3) {
-    task_group_context context;
-    parallel_invoke<F0, F1, F2, F3>(f0, f1, f2, f3, context);
-}
-// five arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4) {
-    task_group_context context;
-    parallel_invoke<F0, F1, F2, F3, F4>(f0, f1, f2, f3, f4, context);
-}
-// six arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5) {
-    task_group_context context;
-    parallel_invoke<F0, F1, F2, F3, F4, F5>(f0, f1, f2, f3, f4, f5, context);
-}
-// seven arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4, typename F5, typename F6>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     const F5& f5, const F6& f6)
-{
-    task_group_context context;
-    parallel_invoke<F0, F1, F2, F3, F4, F5, F6>(f0, f1, f2, f3, f4, f5, f6, context);
-}
-// eight arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4,
-         typename F5, typename F6, typename F7>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     const F5& f5, const F6& f6, const F7& f7)
-{
-    task_group_context context;
-    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7>(f0, f1, f2, f3, f4, f5, f6, f7, context);
-}
-// nine arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4,
-         typename F5, typename F6, typename F7, typename F8>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     const F5& f5, const F6& f6, const F7& f7, const F8& f8)
-{
-    task_group_context context;
-    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7, F8>(f0, f1, f2, f3, f4, f5, f6, f7, f8, context);
-}
-// ten arguments
-template<typename F0, typename F1, typename F2, typename F3, typename F4,
-         typename F5, typename F6, typename F7, typename F8, typename F9>
-void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4,
-                     const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9)
-{
-    task_group_context context;
-    parallel_invoke<F0, F1, F2, F3, F4, F5, F6, F7, F8, F9>(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, context);
-}
-
-//@}
-
-} // namespace
-
-#endif /* 
__TBB_parallel_invoke_H */ diff --git a/tbb30_20100406oss/include/tbb/parallel_reduce.h b/tbb30_20100406oss/include/tbb/parallel_reduce.h deleted file mode 100644 index 670b626de..000000000 --- a/tbb30_20100406oss/include/tbb/parallel_reduce.h +++ /dev/null @@ -1,387 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_reduce_H -#define __TBB_parallel_reduce_H - -#include "task.h" -#include "aligned_space.h" -#include "partitioner.h" -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - //! ITT instrumented routine that stores src into location pointed to by dst. - void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src ); - - //! ITT instrumented routine that loads pointer from location pointed to by src. - void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src ); - - template inline void parallel_reduce_store_body( T*& dst, T* src ) { -#if TBB_USE_THREADING_TOOLS - itt_store_pointer_with_release_v3(&dst,src); -#else - __TBB_store_with_release(dst,src); -#endif /* TBB_USE_THREADING_TOOLS */ - } - - template inline T* parallel_reduce_load_body( T*& src ) { -#if TBB_USE_THREADING_TOOLS - return static_cast(itt_load_pointer_with_acquire_v3(&src)); -#else - return __TBB_load_with_acquire(src); -#endif /* TBB_USE_THREADING_TOOLS */ - } - - //! 0 if root, 1 if a left child, 2 if a right child. - /** Represented as a char, not enum, for compactness. */ - typedef char reduction_context; - - //! Task type use to combine the partial results of parallel_reduce. - /** @ingroup algorithms */ - template - class finish_reduce: public task { - //! Pointer to body, or NULL if the left child has not yet finished. - Body* my_body; - bool has_right_zombie; - const reduction_context my_context; - aligned_space zombie_space; - finish_reduce( char context_ ) : - my_body(NULL), - has_right_zombie(false), - my_context(context_) - { - } - task* execute() { - if( has_right_zombie ) { - // Right child was stolen. 
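-                // The thief constructed its own Body copy in zombie_space
-                // (see start_reduce::execute below); fold that partial
-                // result into my_body, then destroy it explicitly because
-                // it was created with placement new.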
- Body* s = zombie_space.begin(); - my_body->join( *s ); - s->~Body(); - } - if( my_context==1 ) - parallel_reduce_store_body( static_cast(parent())->my_body, my_body ); - return NULL; - } - template - friend class start_reduce; - }; - - //! Task type used to split the work of parallel_reduce. - /** @ingroup algorithms */ - template - class start_reduce: public task { - typedef finish_reduce finish_type; - Body* my_body; - Range my_range; - typename Partitioner::partition_type my_partition; - reduction_context my_context; - /*override*/ task* execute(); - template - friend class finish_reduce; - - //! Constructor used for root task - start_reduce( const Range& range, Body* body, Partitioner& partitioner ) : - my_body(body), - my_range(range), - my_partition(partitioner), - my_context(0) - { - } - //! Splitting constructor used to generate children. - /** this becomes left child. Newly constructed object is right child. */ - start_reduce( start_reduce& parent_, split ) : - my_body(parent_.my_body), - my_range(parent_.my_range,split()), - my_partition(parent_.my_partition,split()), - my_context(2) - { - my_partition.set_affinity(*this); - parent_.my_context = 1; - } - //! Update affinity info, if any - /*override*/ void note_affinity( affinity_id id ) { - my_partition.note_affinity( id ); - } - -public: - static void run( const Range& range, Body& body, Partitioner& partitioner ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) ); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. - task_group_context context; - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) { - if( !range.empty() ) - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - }; - - template - task* start_reduce::execute() { - if( my_context==2 ) { - finish_type* p = static_cast(parent() ); - if( !parallel_reduce_load_body(p->my_body) ) { - my_body = new( p->zombie_space.begin() ) Body(*my_body,split()); - p->has_right_zombie = true; - } - } - if( !my_range.is_divisible() || my_partition.should_execute_range(*this) ) { - (*my_body)( my_range ); - if( my_context==1 ) - parallel_reduce_store_body(static_cast(parent())->my_body, my_body ); - return my_partition.continue_after_execute_range(); - } else { - finish_type& c = *new( allocate_continuation()) finish_type(my_context); - recycle_as_child_of(c); - c.set_ref_count(2); - bool delay = my_partition.decide_whether_to_delay(); - start_reduce& b = *new( c.allocate_child() ) start_reduce(*this,split()); - my_partition.spawn_or_delay(delay,b); - return this; - } - } - - //! Auxiliary class for parallel_reduce; for internal use only. - /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body" - using given \ref parallel_reduce_lambda_req "anonymous function objects". 
- **/ - /** @ingroup algorithms */ - template - class lambda_reduce_body { - -//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced -// (might require some performance measurements) - - const Value& identity_element; - const RealBody& my_real_body; - const Reduction& my_reduction; - Value my_value; - lambda_reduce_body& operator= ( const lambda_reduce_body& other ); - public: - lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction ) - : identity_element(identity) - , my_real_body(body) - , my_reduction(reduction) - , my_value(identity) - { } - lambda_reduce_body( const lambda_reduce_body& other ) - : identity_element(other.identity_element) - , my_real_body(other.my_real_body) - , my_reduction(other.my_reduction) - , my_value(other.my_value) - { } - lambda_reduce_body( lambda_reduce_body& other, tbb::split ) - : identity_element(other.identity_element) - , my_real_body(other.my_real_body) - , my_reduction(other.my_reduction) - , my_value(other.identity_element) - { } - void operator()(Range& range) { - my_value = my_real_body(range, const_cast(my_value)); - } - void join( lambda_reduce_body& rhs ) { - my_value = my_reduction(const_cast(my_value), const_cast(rhs.my_value)); - } - Value result() const { - return my_value; - } - }; - -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_reduce_body_req Requirements on parallel_reduce body - Class \c Body implementing the concept of parallel_reduce body must define: - - \code Body::Body( Body&, split ); \endcode Splitting constructor. - Must be able to run concurrently with operator() and method \c join - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r - and accumulating the result - - \code void Body::join( Body& b ); \endcode Join results. - The result in \c b should be merged into the result of \c this -**/ - -/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions) - TO BE DOCUMENTED -**/ - -/** \name parallel_reduce - See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/ -//@{ - -//! Parallel iteration with reduction and default partitioner. -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body ) { - internal::start_reduce::run( range, body, __TBB_DEFAULT_PARTITIONER() ); -} - -//! Parallel iteration with reduction and simple_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -//! Parallel iteration with reduction and auto_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -//! Parallel iteration with reduction and affinity_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with reduction, simple partitioner and user-supplied context. 
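The lambda_reduce_body adaptor above serves the functional overloads that appear further below; a usage sketch, with array summation as the illustrative task (the lambdas assume a C++0x-capable compiler):

    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"

    float parallel_sum( const float* a, size_t n ) {
        return tbb::parallel_reduce(
            tbb::blocked_range<size_t>(0, n),
            0.0f,                                          // identity
            [=]( const tbb::blocked_range<size_t>& r, float running ) -> float {
                for( size_t i = r.begin(); i != r.end(); ++i )
                    running += a[i];
                return running;                            // RealBody
            },
            []( float x, float y ) { return x + y; } );    // Reduction
    }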
-/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} - -//! Parallel iteration with reduction, auto_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} - -//! Parallel iteration with reduction, affinity_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/** parallel_reduce overloads that work with anonymous function objects - (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ - -//! Parallel iteration with reduction and default partitioner. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const __TBB_DEFAULT_PARTITIONER> - ::run(range, body, __TBB_DEFAULT_PARTITIONER() ); - return body.result(); -} - -//! Parallel iteration with reduction and simple_partitioner. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const simple_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const simple_partitioner> - ::run(range, body, partitioner ); - return body.result(); -} - -//! Parallel iteration with reduction and auto_partitioner -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const auto_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const auto_partitioner> - ::run( range, body, partitioner ); - return body.result(); -} - -//! Parallel iteration with reduction and affinity_partitioner -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - affinity_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,affinity_partitioner> - ::run( range, body, partitioner ); - return body.result(); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const simple_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const simple_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} - -//! 
Parallel iteration with reduction, auto_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const auto_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const auto_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} - -//! Parallel iteration with reduction, affinity_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - affinity_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,affinity_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -} // namespace tbb - -#endif /* __TBB_parallel_reduce_H */ - diff --git a/tbb30_20100406oss/include/tbb/parallel_scan.h b/tbb30_20100406oss/include/tbb/parallel_scan.h deleted file mode 100644 index 3a1963f47..000000000 --- a/tbb30_20100406oss/include/tbb/parallel_scan.h +++ /dev/null @@ -1,351 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_scan_H -#define __TBB_parallel_scan_H - -#include "task.h" -#include "aligned_space.h" -#include -#include "partitioner.h" - -namespace tbb { - -//! Used to indicate that the initial scan is being performed. -/** @ingroup algorithms */ -struct pre_scan_tag { - static bool is_final_scan() {return false;} -}; - -//! Used to indicate that the final scan is being performed. -/** @ingroup algorithms */ -struct final_scan_tag { - static bool is_final_scan() {return true;} -}; - -//! @cond INTERNAL -namespace internal { - - //! Performs final scan for a leaf - /** @ingroup algorithms */ - template - class final_sum: public task { - public: - Body body; - private: - aligned_space range; - //! Where to put result of last subrange, or NULL if not last subrange. 
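-        // (poison_pointer'ed in the constructor until finish_construction() sets it)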
- Body* stuff_last; - public: - final_sum( Body& body_ ) : - body(body_,split()) - { - poison_pointer(stuff_last); - } - ~final_sum() { - range.begin()->~Range(); - } - void finish_construction( const Range& range_, Body* stuff_last_ ) { - new( range.begin() ) Range(range_); - stuff_last = stuff_last_; - } - private: - /*override*/ task* execute() { - body( *range.begin(), final_scan_tag() ); - if( stuff_last ) - stuff_last->assign(body); - return NULL; - } - }; - - //! Split work to be done in the scan. - /** @ingroup algorithms */ - template - class sum_node: public task { - typedef final_sum final_sum_type; - public: - final_sum_type *incoming; - final_sum_type *body; - Body *stuff_last; - private: - final_sum_type *left_sum; - sum_node *left; - sum_node *right; - bool left_is_final; - Range range; - sum_node( const Range range_, bool left_is_final_ ) : - left_sum(NULL), - left(NULL), - right(NULL), - left_is_final(left_is_final_), - range(range_) - { - // Poison fields that will be set by second pass. - poison_pointer(body); - poison_pointer(incoming); - } - task* create_child( const Range& range_, final_sum_type& f, sum_node* n, final_sum_type* incoming_, Body* stuff_last_ ) { - if( !n ) { - f.recycle_as_child_of( *this ); - f.finish_construction( range_, stuff_last_ ); - return &f; - } else { - n->body = &f; - n->incoming = incoming_; - n->stuff_last = stuff_last_; - return n; - } - } - /*override*/ task* execute() { - if( body ) { - if( incoming ) - left_sum->body.reverse_join( incoming->body ); - recycle_as_continuation(); - sum_node& c = *this; - task* b = c.create_child(Range(range,split()),*left_sum,right,left_sum,stuff_last); - task* a = left_is_final ? NULL : c.create_child(range,*body,left,incoming,NULL); - set_ref_count( (a!=NULL)+(b!=NULL) ); - body = NULL; - if( a ) spawn(*b); - else a = b; - return a; - } else { - return NULL; - } - } - template - friend class start_scan; - - template - friend class finish_scan; - }; - - //! Combine partial results - /** @ingroup algorithms */ - template - class finish_scan: public task { - typedef sum_node sum_node_type; - typedef final_sum final_sum_type; - final_sum_type** const sum; - sum_node_type*& return_slot; - public: - final_sum_type* right_zombie; - sum_node_type& result; - - /*override*/ task* execute() { - __TBB_ASSERT( result.ref_count()==(result.left!=NULL)+(result.right!=NULL), NULL ); - if( result.left ) - result.left_is_final = false; - if( right_zombie && sum ) - ((*sum)->body).reverse_join(result.left_sum->body); - __TBB_ASSERT( !return_slot, NULL ); - if( right_zombie || result.right ) { - return_slot = &result; - } else { - destroy( result ); - } - if( right_zombie && !sum && !result.right ) destroy(*right_zombie); - return NULL; - } - - finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) : - sum(sum_), - return_slot(return_slot_), - right_zombie(NULL), - result(result_) - { - __TBB_ASSERT( !return_slot, NULL ); - } - }; - - //! Initial task to split the work - /** @ingroup algorithms */ - template - class start_scan: public task { - typedef sum_node sum_node_type; - typedef final_sum final_sum_type; - final_sum_type* body; - /** Non-null if caller is requesting total. */ - final_sum_type** sum; - sum_node_type** return_slot; - /** Null if computing root. 
*/ - sum_node_type* parent_sum; - bool is_final; - bool is_right_child; - Range range; - typename Partitioner::partition_type partition; - /*override*/ task* execute(); - public: - start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ ) : - body(parent_.body), - sum(parent_.sum), - return_slot(&return_slot_), - parent_sum(parent_sum_), - is_final(parent_.is_final), - is_right_child(false), - range(parent_.range,split()), - partition(parent_.partition,split()) - { - __TBB_ASSERT( !*return_slot, NULL ); - } - - start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_) : - body(&body_), - sum(NULL), - return_slot(&return_slot_), - parent_sum(NULL), - is_final(true), - is_right_child(false), - range(range_), - partition(partitioner_) - { - __TBB_ASSERT( !*return_slot, NULL ); - } - - static void run( const Range& range, Body& body, const Partitioner& partitioner ) { - if( !range.empty() ) { - typedef internal::start_scan start_pass1_type; - internal::sum_node* root = NULL; - typedef internal::final_sum final_sum_type; - final_sum_type* temp_body = new(task::allocate_root()) final_sum_type( body ); - start_pass1_type& pass1 = *new(task::allocate_root()) start_pass1_type( - /*return_slot=*/root, - range, - *temp_body, - partitioner ); - task::spawn_root_and_wait( pass1 ); - if( root ) { - root->body = temp_body; - root->incoming = NULL; - root->stuff_last = &body; - task::spawn_root_and_wait( *root ); - } else { - body.assign(temp_body->body); - temp_body->finish_construction( range, NULL ); - temp_body->destroy(*temp_body); - } - } - } - }; - - template - task* start_scan::execute() { - typedef internal::finish_scan finish_pass1_type; - finish_pass1_type* p = parent_sum ? static_cast( parent() ) : NULL; - // Inspecting p->result.left_sum would ordinarily be a race condition. - // But we inspect it only if we are not a stolen task, in which case we - // know that task assigning to p->result.left_sum has completed. - bool treat_as_stolen = is_right_child && (is_stolen_task() || body!=p->result.left_sum); - if( treat_as_stolen ) { - // Invocation is for right child that has been really stolen or needs to be virtually stolen - p->right_zombie = body = new( allocate_root() ) final_sum_type(body->body); - is_final = false; - } - task* next_task = NULL; - if( (is_right_child && !treat_as_stolen) || !range.is_divisible() || partition.should_execute_range(*this) ) { - if( is_final ) - (body->body)( range, final_scan_tag() ); - else if( sum ) - (body->body)( range, pre_scan_tag() ); - if( sum ) - *sum = body; - __TBB_ASSERT( !*return_slot, NULL ); - } else { - sum_node_type* result; - if( parent_sum ) - result = new(allocate_additional_child_of(*parent_sum)) sum_node_type(range,/*left_is_final=*/is_final); - else - result = new(task::allocate_root()) sum_node_type(range,/*left_is_final=*/is_final); - finish_pass1_type& c = *new( allocate_continuation()) finish_pass1_type(*return_slot,sum,*result); - // Split off right child - start_scan& b = *new( c.allocate_child() ) start_scan( /*return_slot=*/result->right, *this, result ); - b.is_right_child = true; - // Left child is recycling of *this. Must recycle this before spawning b, - // otherwise b might complete and decrement c.ref_count() to zero, which - // would cause c.execute() to run prematurely. 
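//
// [Illustrative aside, not part of the original header] The two-pass scan
// machinery above drives a Body such as the following running-sum sketch:
// the pre_scan_tag pass only accumulates, the final_scan_tag pass also
// writes output, and reverse_join/assign stitch partial sums back together.
// Assumes the TBB headers from this tree; `x`, `y` and the class name are
// hypothetical.
//
#include "tbb/parallel_scan.h"
#include "tbb/blocked_range.h"

class running_sum_body {
    float my_sum;
    float* const y;
    const float* const x;
public:
    running_sum_body( float y_[], const float x_[] ) : my_sum(0), y(y_), x(x_) {}
    float get_sum() const { return my_sum; }
    template<typename Tag>
    void operator()( const tbb::blocked_range<int>& r, Tag ) {
        float temp = my_sum;
        for( int i = r.begin(); i < r.end(); ++i ) {
            temp += x[i];
            if( Tag::is_final_scan() )
                y[i] = temp;                    // only the final pass writes output
        }
        my_sum = temp;
    }
    running_sum_body( running_sum_body& b, tbb::split ) : my_sum(0), y(b.y), x(b.x) {}
    void reverse_join( running_sum_body& a ) { my_sum = a.my_sum + my_sum; }
    void assign( running_sum_body& b )       { my_sum = b.my_sum; }
};
// Usage: running_sum_body body(y, x); tbb::parallel_scan( tbb::blocked_range<int>(0, n), body );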
- recycle_as_child_of(c); - c.set_ref_count(2); - c.spawn(b); - sum = &result->left_sum; - return_slot = &result->left; - is_right_child = false; - next_task = this; - parent_sum = result; - __TBB_ASSERT( !*return_slot, NULL ); - } - return next_task; - } -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_scan_body_req Requirements on parallel_scan body - Class \c Body implementing the concept of parallel_reduce body must define: - - \code Body::Body( Body&, split ); \endcode Splitting constructor. - Split \c b so that \c this and \c b can accumulate separately - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( const Range& r, pre_scan_tag ); \endcode - Preprocess iterations for range \c r - - \code void Body::operator()( const Range& r, final_scan_tag ); \endcode - Do final processing for iterations of range \c r - - \code void Body::reverse_join( Body& a ); \endcode - Merge preprocessing state of \c a into \c this, where \c a was - created earlier from \c b by b's splitting constructor -**/ - -/** \name parallel_scan - See also requirements on \ref range_req "Range" and \ref parallel_scan_body_req "parallel_scan Body". **/ -//@{ - -//! Parallel prefix with default partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body ) { - internal::start_scan::run(range,body,__TBB_DEFAULT_PARTITIONER()); -} - -//! Parallel prefix with simple_partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner ) { - internal::start_scan::run(range,body,partitioner); -} - -//! Parallel prefix with auto_partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) { - internal::start_scan::run(range,body,partitioner); -} -//@} - -} // namespace tbb - -#endif /* __TBB_parallel_scan_H */ - diff --git a/tbb30_20100406oss/include/tbb/parallel_sort.h b/tbb30_20100406oss/include/tbb/parallel_sort.h deleted file mode 100644 index 6fbbe8073..000000000 --- a/tbb30_20100406oss/include/tbb/parallel_sort.h +++ /dev/null @@ -1,227 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_sort_H -#define __TBB_parallel_sort_H - -#include "parallel_for.h" -#include "blocked_range.h" -#include -#include -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - -//! Range used in quicksort to split elements into subranges based on a value. -/** The split operation selects a splitter and places all elements less than or equal - to the value in the first range and the remaining elements in the second range. - @ingroup algorithms */ -template -class quick_sort_range: private no_assign { - - inline size_t median_of_three(const RandomAccessIterator &array, size_t l, size_t m, size_t r) const { - return comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp( array[l], array[r]) ? r : l ) ) - : ( comp(array[r], array[m]) ? m : ( comp( array[r], array[l] ) ? r : l ) ); - } - - inline size_t pseudo_median_of_nine( const RandomAccessIterator &array, const quick_sort_range &range ) const { - size_t offset = range.size/8u; - return median_of_three(array, - median_of_three(array, 0, offset, offset*2), - median_of_three(array, offset*3, offset*4, offset*5), - median_of_three(array, offset*6, offset*7, range.size - 1) ); - - } - -public: - - static const size_t grainsize = 500; - const Compare ∁ - RandomAccessIterator begin; - size_t size; - - quick_sort_range( RandomAccessIterator begin_, size_t size_, const Compare &comp_ ) : - comp(comp_), begin(begin_), size(size_) {} - - bool empty() const {return size==0;} - bool is_divisible() const {return size>=grainsize;} - - quick_sort_range( quick_sort_range& range, split ) : comp(range.comp) { - RandomAccessIterator array = range.begin; - RandomAccessIterator key0 = range.begin; - size_t m = pseudo_median_of_nine(array, range); - if (m) std::swap ( array[0], array[m] ); - - size_t i=0; - size_t j=range.size; - // Partition interval [i+1,j-1] with key *key0. - for(;;) { - __TBB_ASSERT( i -class quick_sort_pretest_body : internal::no_assign { - const Compare ∁ - -public: - quick_sort_pretest_body(const Compare &_comp) : comp(_comp) {} - - void operator()( const blocked_range& range ) const { - task &my_task = task::self(); - RandomAccessIterator my_end = range.end(); - - int i = 0; - for (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i) { - if ( i%64 == 0 && my_task.is_cancelled() ) break; - - // The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1 - if ( comp( *(k), *(k-1) ) ) { - my_task.cancel_group_execution(); - break; - } - } - } - -}; - -//! Body class used to sort elements in a range that is smaller than the grainsize. -/** @ingroup algorithms */ -template -struct quick_sort_body { - void operator()( const quick_sort_range& range ) const { - //SerialQuickSort( range.begin, range.size, range.comp ); - std::sort( range.begin, range.begin + range.size, range.comp ); - } -}; - -//! Wrapper method to initiate the sort by calling parallel_for. -/** @ingroup algorithms */ -template -void parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) { - task_group_context my_context; - const int serial_cutoff = 9; - - __TBB_ASSERT( begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?" 
); - RandomAccessIterator k; - for ( k = begin ; k != begin + serial_cutoff; ++k ) { - if ( comp( *(k+1), *k ) ) { - goto do_parallel_quick_sort; - } - } - - parallel_for( blocked_range(k+1, end), - quick_sort_pretest_body(comp), - auto_partitioner(), - my_context); - - if (my_context.is_group_execution_cancelled()) -do_parallel_quick_sort: - parallel_for( quick_sort_range(begin, end-begin, comp ), - quick_sort_body(), - auto_partitioner() ); -} - -} // namespace internal -//! @endcond - -/** \page parallel_sort_iter_req Requirements on iterators for parallel_sort - Requirements on value type \c T of \c RandomAccessIterator for \c parallel_sort: - - \code void swap( T& x, T& y ) \endcode Swaps \c x and \c y - - \code bool Compare::operator()( const T& x, const T& y ) \endcode - True if x comes before y; -**/ - -/** \name parallel_sort - See also requirements on \ref parallel_sort_iter_req "iterators for parallel_sort". **/ -//@{ - -//! Sorts the data in [begin,end) using the given comparator -/** The compare function object is used for all comparisons between elements during sorting. - The compare object must define a bool operator() function. - @ingroup algorithms **/ -template -void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp) { - const int min_parallel_size = 500; - if( end > begin ) { - if (end - begin < min_parallel_size) { - std::sort(begin, end, comp); - } else { - internal::parallel_quick_sort(begin, end, comp); - } - } -} - -//! Sorts the data in [begin,end) with a default comparator \c std::less -/** @ingroup algorithms **/ -template -inline void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end ) { - parallel_sort( begin, end, std::less< typename std::iterator_traits::value_type >() ); -} - -//! Sorts the data in the range \c [begin,end) with a default comparator \c std::less -/** @ingroup algorithms **/ -template -inline void parallel_sort( T * begin, T * end ) { - parallel_sort( begin, end, std::less< T >() ); -} -//@} - - -} // namespace tbb - -#endif - diff --git a/tbb30_20100406oss/include/tbb/parallel_while.h b/tbb30_20100406oss/include/tbb/parallel_while.h deleted file mode 100644 index 21c2bc185..000000000 --- a/tbb30_20100406oss/include/tbb/parallel_while.h +++ /dev/null @@ -1,194 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_while -#define __TBB_parallel_while - -#include "task.h" -#include - -namespace tbb { - -template -class parallel_while; - -//! @cond INTERNAL -namespace internal { - - template class while_task; - - //! For internal use only. - /** Executes one iteration of a while. - @ingroup algorithms */ - template - class while_iteration_task: public task { - const Body& my_body; - typename Body::argument_type my_value; - /*override*/ task* execute() { - my_body(my_value); - return NULL; - } - while_iteration_task( const typename Body::argument_type& value, const Body& body ) : - my_body(body), my_value(value) - {} - template friend class while_group_task; - friend class tbb::parallel_while; - }; - - //! For internal use only - /** Unpacks a block of iterations. - @ingroup algorithms */ - template - class while_group_task: public task { - static const size_t max_arg_size = 4; - const Body& my_body; - size_t size; - typename Body::argument_type my_arg[max_arg_size]; - while_group_task( const Body& body ) : my_body(body), size(0) {} - /*override*/ task* execute() { - typedef while_iteration_task iteration_type; - __TBB_ASSERT( size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type(my_arg[k],my_body); - if( ++k==size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - template friend class while_task; - }; - - //! For internal use only. - /** Gets block of iterations from a stream and packages them into a while_group_task. - @ingroup algorithms */ - template - class while_task: public task { - Stream& my_stream; - const Body& my_body; - empty_task& my_barrier; - /*override*/ task* execute() { - typedef while_group_task block_type; - block_type& t = *new( allocate_additional_child_of(my_barrier) ) block_type(my_body); - size_t k=0; - while( my_stream.pop_if_present(t.my_arg[k]) ) { - if( ++k==block_type::max_arg_size ) { - // There might be more iterations. - recycle_to_reexecute(); - break; - } - } - if( k==0 ) { - destroy(t); - return NULL; - } else { - t.size = k; - return &t; - } - } - while_task( Stream& stream, const Body& body, empty_task& barrier ) : - my_stream(stream), - my_body(body), - my_barrier(barrier) - {} - friend class tbb::parallel_while; - }; - -} // namespace internal -//! @endcond - -//! Parallel iteration over a stream, with optional addition of more work. -/** The Body b has the requirement: \n - "b(v)" \n - "b.argument_type" \n - where v is an argument_type - @ingroup algorithms */ -template -class parallel_while: internal::no_copy { -public: - //! Construct empty non-running parallel while. - parallel_while() : my_body(NULL), my_barrier(NULL) {} - - //! Destructor cleans up data members before returning. - ~parallel_while() { - if( my_barrier ) { - my_barrier->destroy(*my_barrier); - my_barrier = NULL; - } - } - - //! Type of items - typedef typename Body::argument_type value_type; - - //! Apply body.apply to each item in the stream. - /** A Stream s has the requirements \n - "S::value_type" \n - "s.pop_if_present(value) is convertible to bool */ - template - void run( Stream& stream, const Body& body ); - - //! Add a work item while running. - /** Should be executed only by body.apply or a thread spawned therefrom. 
*/ - void add( const value_type& item ); - -private: - const Body* my_body; - empty_task* my_barrier; -}; - -template -template -void parallel_while::run( Stream& stream, const Body& body ) { - using namespace internal; - empty_task& barrier = *new( task::allocate_root() ) empty_task(); - my_body = &body; - my_barrier = &barrier; - my_barrier->set_ref_count(2); - while_task& w = *new( my_barrier->allocate_child() ) while_task( stream, body, barrier ); - my_barrier->spawn_and_wait_for_all(w); - my_barrier->destroy(*my_barrier); - my_barrier = NULL; - my_body = NULL; -} - -template -void parallel_while::add( const value_type& item ) { - __TBB_ASSERT(my_barrier,"attempt to add to parallel_while that is not running"); - typedef internal::while_iteration_task iteration_type; - iteration_type& i = *new( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body); - task::self().spawn( i ); -} - -} // namespace - -#endif /* __TBB_parallel_while */ diff --git a/tbb30_20100406oss/include/tbb/partitioner.h b/tbb30_20100406oss/include/tbb/partitioner.h deleted file mode 100644 index 98db3ac4d..000000000 --- a/tbb30_20100406oss/include/tbb/partitioner.h +++ /dev/null @@ -1,228 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_partitioner_H -#define __TBB_partitioner_H - -#include "task.h" - -namespace tbb { -class affinity_partitioner; - -//! @cond INTERNAL -namespace internal { -size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor(); - -//! Defines entry points into tbb run-time library; -/** The entry points are the constructor and destructor. */ -class affinity_partitioner_base_v3: no_copy { - friend class tbb::affinity_partitioner; - //! Array that remembers affinities of tree positions to affinity_id. - /** NULL if my_size==0. */ - affinity_id* my_array; - //! Number of elements in my_array. - size_t my_size; - //! Zeros the fields. - affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {} - //! Deallocates my_array. - ~affinity_partitioner_base_v3() {resize(0);} - //! Resize my_array. - /** Retains values if resulting size is the same. */ - void __TBB_EXPORTED_METHOD resize( unsigned factor ); - friend class affinity_partition_type; -}; - -//! 
Provides default methods for partition objects without affinity. -class partition_type_base { -public: - void set_affinity( task & ) {} - void note_affinity( task::affinity_id ) {} - task* continue_after_execute_range() {return NULL;} - bool decide_whether_to_delay() {return false;} - void spawn_or_delay( bool, task& b ) { - task::spawn(b); - } -}; - -class affinity_partition_type; - -template class start_for; -template class start_reduce; -template class start_reduce_with_affinity; -template class start_scan; - -} // namespace internal -//! @endcond - -//! A simple partitioner -/** Divides the range until the range is not divisible. - @ingroup algorithms */ -class simple_partitioner { -public: - simple_partitioner() {} -private: - template friend class internal::start_for; - template friend class internal::start_reduce; - template friend class internal::start_scan; - - class partition_type: public internal::partition_type_base { - public: - bool should_execute_range(const task& ) {return false;} - partition_type( const simple_partitioner& ) {} - partition_type( const partition_type&, split ) {} - }; -}; - -//! An auto partitioner -/** The range is initial divided into several large chunks. - Chunks are further subdivided into VICTIM_CHUNKS pieces if they are stolen and divisible. - @ingroup algorithms */ -class auto_partitioner { -public: - auto_partitioner() {} - -private: - template friend class internal::start_for; - template friend class internal::start_reduce; - template friend class internal::start_scan; - - class partition_type: public internal::partition_type_base { - size_t num_chunks; - static const size_t VICTIM_CHUNKS = 4; -public: - bool should_execute_range(const task &t) { - if( num_chunks friend class internal::start_for; - template friend class internal::start_reduce; - template friend class internal::start_reduce_with_affinity; - template friend class internal::start_scan; - - typedef internal::affinity_partition_type partition_type; - friend class internal::affinity_partition_type; -}; - -//! @cond INTERNAL -namespace internal { - -class affinity_partition_type: public no_copy { - //! Must be power of two - static const unsigned factor = 16; - static const size_t VICTIM_CHUNKS = 4; - - internal::affinity_id* my_array; - task_list delay_list; - unsigned map_begin, map_end; - size_t num_chunks; -public: - affinity_partition_type( affinity_partitioner& ap ) { - __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" ); - ap.resize(factor); - my_array = ap.my_array; - map_begin = 0; - map_end = unsigned(ap.my_size); - num_chunks = internal::get_initial_auto_partitioner_divisor(); - } - affinity_partition_type(affinity_partition_type& p, split) : my_array(p.my_array) { - __TBB_ASSERT( p.map_end-p.map_beginfactor ) - d &= 0u-factor; - map_end = e; - map_begin = p.map_end = e-d; - } - - bool should_execute_range(const task &t) { - if( num_chunks < VICTIM_CHUNKS && t.is_stolen_task() ) - num_chunks = VICTIM_CHUNKS; - return num_chunks == 1; - } - - void set_affinity( task &t ) { - if( map_begin - -namespace tbb { - -class pipeline; -class filter; - -//! 
@cond INTERNAL -namespace internal { - -// The argument for PIPELINE_VERSION should be an integer between 2 and 9 -#define __TBB_PIPELINE_VERSION(x) (unsigned char)(x-2)<<1 - -typedef unsigned long Token; -typedef long tokendiff_t; -class stage_task; -class input_buffer; -class pipeline_root_task; -class pipeline_cleaner; - -} // namespace internal - -namespace interface5 { - template class filter_t; - - namespace internal { - class pipeline_proxy; - } -} - -//! @endcond - -//! A stage in a pipeline. -/** @ingroup algorithms */ -class filter: internal::no_copy { -private: - //! Value used to mark "not in pipeline" - static filter* not_in_pipeline() {return reinterpret_cast(intptr_t(-1));} - - //! The lowest bit 0 is for parallel vs. serial - static const unsigned char filter_is_serial = 0x1; - - //! 4th bit distinguishes ordered vs unordered filters. - /** The bit was not set for parallel filters in TBB 2.1 and earlier, - but is_ordered() function always treats parallel filters as out of order. */ - static const unsigned char filter_is_out_of_order = 0x1<<4; - - //! 5th bit distinguishes thread-bound and regular filters. - static const unsigned char filter_is_bound = 0x1<<5; - - //! 7th bit defines exception propagation mode expected by the application. - static const unsigned char exact_exception_propagation = -#if TBB_USE_CAPTURED_EXCEPTION - 0x0; -#else - 0x1<<7; -#endif /* TBB_USE_CAPTURED_EXCEPTION */ - - static const unsigned char current_version = __TBB_PIPELINE_VERSION(5); - static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for version -public: - enum mode { - //! processes multiple items in parallel and in no particular order - parallel = current_version | filter_is_out_of_order, - //! processes items one at a time; all such filters process items in the same order - serial_in_order = current_version | filter_is_serial, - //! processes items one at a time and in no particular order - serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order, - //! @deprecated use serial_in_order instead - serial = serial_in_order - }; -protected: - filter( bool is_serial_ ) : - next_filter_in_pipeline(not_in_pipeline()), - my_input_buffer(NULL), - my_filter_mode(static_cast((is_serial_ ? serial : parallel) | exact_exception_propagation)), - prev_filter_in_pipeline(not_in_pipeline()), - my_pipeline(NULL), - next_segment(NULL) - {} - - filter( mode filter_mode ) : - next_filter_in_pipeline(not_in_pipeline()), - my_input_buffer(NULL), - my_filter_mode(static_cast(filter_mode | exact_exception_propagation)), - prev_filter_in_pipeline(not_in_pipeline()), - my_pipeline(NULL), - next_segment(NULL) - {} - -public: - //! True if filter is serial. - bool is_serial() const { - return bool( my_filter_mode & filter_is_serial ); - } - - //! True if filter must receive stream in order. - bool is_ordered() const { - return (my_filter_mode & (filter_is_out_of_order|filter_is_serial))==filter_is_serial; - } - - //! True if filter is thread-bound. - bool is_bound() const { - return ( my_filter_mode & filter_is_bound )==filter_is_bound; - } - - //! Operate on an item from the input stream, and return item for output stream. - /** Returns NULL if filter is a sink. */ - virtual void* operator()( void* item ) = 0; - - //! Destroy filter. - /** If the filter was added to a pipeline, the pipeline must be destroyed first. */ - virtual __TBB_EXPORTED_METHOD ~filter(); - -#if __TBB_TASK_GROUP_CONTEXT - //! Destroys item if pipeline was cancelled. - /** Required to prevent memory leaks. 
- Note it can be called concurrently even for serial filters.*/ - virtual void finalize( void* /*item*/ ) {}; -#endif - -private: - //! Pointer to next filter in the pipeline. - filter* next_filter_in_pipeline; - - //! Buffer for incoming tokens, or NULL if not required. - /** The buffer is required if the filter is serial or follows a thread-bound one. */ - internal::input_buffer* my_input_buffer; - - friend class internal::stage_task; - friend class internal::pipeline_root_task; - friend class pipeline; - friend class thread_bound_filter; - - //! Storage for filter mode and dynamically checked implementation version. - const unsigned char my_filter_mode; - - //! Pointer to previous filter in the pipeline. - filter* prev_filter_in_pipeline; - - //! Pointer to the pipeline. - pipeline* my_pipeline; - - //! Pointer to the next "segment" of filters, or NULL if not required. - /** In each segment, the first filter is not thread-bound but follows a thread-bound one. */ - filter* next_segment; -}; - -//! A stage in a pipeline served by a user thread. -/** @ingroup algorithms */ -class thread_bound_filter: public filter { -public: - enum result_type { - // item was processed - success, - // item is currently not available - item_not_available, - // there are no more items to process - end_of_stream - }; -protected: - thread_bound_filter(mode filter_mode): - filter(static_cast(filter_mode | filter::filter_is_bound | filter::exact_exception_propagation)) - {} -public: - //! If a data item is available, invoke operator() on that item. - /** This interface is non-blocking. - Returns 'success' if an item was processed. - Returns 'item_not_available' if no item can be processed now - but more may arrive in the future, or if token limit is reached. - Returns 'end_of_stream' if there are no more items to process. */ - result_type __TBB_EXPORTED_METHOD try_process_item(); - - //! Wait until a data item becomes available, and invoke operator() on that item. - /** This interface is blocking. - Returns 'success' if an item was processed. - Returns 'end_of_stream' if there are no more items to process. - Never returns 'item_not_available', as it blocks until another return condition applies. */ - result_type __TBB_EXPORTED_METHOD process_item(); - -private: - //! Internal routine for item processing - result_type internal_process_item(bool is_blocking); -}; - -//! A processing pipeling that applies filters to items. -/** @ingroup algorithms */ -class pipeline { -public: - //! Construct empty pipeline. - __TBB_EXPORTED_METHOD pipeline(); - - /** Though the current implementation declares the destructor virtual, do not rely on this - detail. The virtualness is deprecated and may disappear in future versions of TBB. */ - virtual __TBB_EXPORTED_METHOD ~pipeline(); - - //! Add filter to end of pipeline. - void __TBB_EXPORTED_METHOD add_filter( filter& filter_ ); - - //! Run the pipeline to completion. - void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Run the pipeline to completion with user-supplied context. - void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context& context ); -#endif - - //! Remove all filters from the pipeline. 
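//
// [Illustrative aside, not part of the original header] Wiring the classic
// filter/pipeline API: a serial input stage feeds a parallel sink stage.
// Assumes the TBB headers from this tree; stage names and the item count
// are hypothetical.
//
#include "tbb/pipeline.h"

struct input_stage: tbb::filter {
    int count;
    input_stage() : tbb::filter( tbb::filter::serial_in_order ), count(0) {}
    /*override*/ void* operator()( void* ) {
        // Return the next item, or NULL to signal end of input.
        return count < 100 ? new int( count++ ) : NULL;
    }
};

struct square_stage: tbb::filter {
    square_stage() : tbb::filter( tbb::filter::parallel ) {}
    /*override*/ void* operator()( void* item ) {
        int* p = static_cast<int*>(item);
        *p *= *p;
        delete p;           // the sink stage owns and frees the item
        return NULL;
    }
};

// Usage sketch:
//   tbb::pipeline p;
//   input_stage in; square_stage sq;
//   p.add_filter( in ); p.add_filter( sq );
//   p.run( /*max_number_of_live_tokens=*/8 );
//   p.clear();           // remove the filters before they go out of scope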
- void __TBB_EXPORTED_METHOD clear(); - -private: - friend class internal::stage_task; - friend class internal::pipeline_root_task; - friend class filter; - friend class thread_bound_filter; - friend class internal::pipeline_cleaner; - friend class tbb::interface5::internal::pipeline_proxy; - - //! Pointer to first filter in the pipeline. - filter* filter_list; - - //! Pointer to location where address of next filter to be added should be stored. - filter* filter_end; - - //! task who's reference count is used to determine when all stages are done. - task* end_counter; - - //! Number of idle tokens waiting for input stage. - atomic input_tokens; - - //! Global counter of tokens - atomic token_counter; - - //! False until fetch_input returns NULL. - bool end_of_input; - - //! True if the pipeline contains a thread-bound filter; false otherwise. - bool has_thread_bound_filters; - - //! Remove filter from pipeline. - void remove_filter( filter& filter_ ); - - //! Not used, but retained to satisfy old export files. - void __TBB_EXPORTED_METHOD inject_token( task& self ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Does clean up if pipeline is cancelled or exception occured - void clear_filters(); -#endif -}; - -//------------------------------------------------------------------------ -// Support for lambda-friendly parallel_pipeline interface -//------------------------------------------------------------------------ - -namespace interface5 { - -namespace internal { - template class concrete_filter; -} - -class flow_control { - bool is_pipeline_stopped; - flow_control() { is_pipeline_stopped = false; } - template friend class internal::concrete_filter; -public: - void stop() { is_pipeline_stopped = true; } -}; - -//! @cond INTERNAL -namespace internal { - -template -class concrete_filter: public tbb::filter { - Body my_body; - - /*override*/ void* operator()(void* input) { - T* temp_input = (T*)input; - // Call user's operator()() here - void* output = (void*) new U(my_body(*temp_input)); - delete temp_input; - return output; - } - -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -template -class concrete_filter: public filter { - Body my_body; - - /*override*/void* operator()(void*) { - flow_control control; - U temp_output = my_body(control); - void* output = control.is_pipeline_stopped ? NULL : (void*) new U(temp_output); - return output; - } -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -template -class concrete_filter: public filter { - Body my_body; - - /*override*/ void* operator()(void* input) { - T* temp_input = (T*)input; - my_body(*temp_input); - delete temp_input; - return NULL; - } -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -template -class concrete_filter: public filter { - Body my_body; - - /** Override privately because it is always called virtually */ - /*override*/ void* operator()(void*) { - flow_control control; - my_body(control); - void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1; - return output; - } -public: - concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -//! The class that represents an object of the pipeline for parallel_pipeline(). -/** It primarily serves as RAII class that deletes heap-allocated filter instances. 
*/ -class pipeline_proxy { - tbb::pipeline my_pipe; -public: - pipeline_proxy( const filter_t& filter_chain ); - ~pipeline_proxy() { - while( filter* f = my_pipe.filter_list ) - delete f; // filter destructor removes it from the pipeline - } - tbb::pipeline* operator->() { return &my_pipe; } -}; - -//! Abstract base class that represents a node in a parse tree underlying a filter_t. -/** These nodes are always heap-allocated and can be shared by filter_t objects. */ -class filter_node: tbb::internal::no_copy { - /** Count must be atomic because it is hidden state for user, but might be shared by threads. */ - tbb::atomic ref_count; -protected: - filter_node() { - ref_count = 0; -#ifdef __TBB_TEST_FILTER_NODE_COUNT - ++(__TBB_TEST_FILTER_NODE_COUNT); -#endif - } -public: - //! Add concrete_filter to pipeline - virtual void add_to( pipeline& ) = 0; - //! Increment reference count - void add_ref() {++ref_count;} - //! Decrement reference count and delete if it becomes zero. - void remove_ref() { - __TBB_ASSERT(ref_count>0,"ref_count underflow"); - if( --ref_count==0 ) - delete this; - } - virtual ~filter_node() { -#ifdef __TBB_TEST_FILTER_NODE_COUNT - --(__TBB_TEST_FILTER_NODE_COUNT); -#endif - } -}; - -//! Node in parse tree representing result of make_filter. -template -class filter_node_leaf: public filter_node { - const tbb::filter::mode mode; - const Body& body; - /*override*/void add_to( pipeline& p ) { - concrete_filter* f = new concrete_filter(mode,body); - p.add_filter( *f ); - } -public: - filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {} -}; - -//! Node in parse tree representing join of two filters. -class filter_node_join: public filter_node { - friend class filter_node; // to suppress GCC 3.2 warnings - filter_node& left; - filter_node& right; - /*override*/~filter_node_join() { - left.remove_ref(); - right.remove_ref(); - } - /*override*/void add_to( pipeline& p ) { - left.add_to(p); - right.add_to(p); - } -public: - filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) { - left.add_ref(); - right.add_ref(); - } -}; - -} // namespace internal -//! @endcond - -template -filter_t make_filter(tbb::filter::mode mode, const Body& body) { - return new internal::filter_node_leaf(mode, body); -} - -template -filter_t operator& (const filter_t& left, const filter_t& right) { - __TBB_ASSERT(left.root,"cannot use default-constructed filter_t as left argument of '&'"); - __TBB_ASSERT(right.root,"cannot use default-constructed filter_t as right argument of '&'"); - return new internal::filter_node_join(*left.root,*right.root); -} - -//! Class representing a chain of type-safe pipeline filters -template -class filter_t { - typedef internal::filter_node filter_node; - filter_node* root; - filter_t( filter_node* root_ ) : root(root_) { - root->add_ref(); - } - friend class internal::pipeline_proxy; - template - friend filter_t make_filter(tbb::filter::mode, const Body& ); - template - friend filter_t operator& (const filter_t& , const filter_t& ); -public: - filter_t() : root(NULL) {} - filter_t( const filter_t& rhs ) : root(rhs.root) { - if( root ) root->add_ref(); - } - template - filter_t( tbb::filter::mode mode, const Body& body ) : - root( new internal::filter_node_leaf(mode, body) ) { - root->add_ref(); - } - - void operator=( const filter_t& rhs ) { - // Order of operations below carefully chosen so that reference counts remain correct - // in unlikely event that remove_ref throws exception. 
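//
// [Illustrative aside, not part of the original header] The filter_t nodes
// being defined here are composed with operator& and executed by
// parallel_pipeline. A minimal sketch, assuming the TBB headers from this
// tree and a lambda-capable compiler; names are hypothetical.
//
#include "tbb/pipeline.h"

void run_square_pipeline() {
    int counter = 0;
    tbb::parallel_pipeline( /*max_number_of_live_tokens=*/8,
        tbb::make_filter<void,int>( tbb::filter::serial_in_order,
            [&counter]( tbb::flow_control& fc ) -> int {
                if( counter >= 100 ) { fc.stop(); return 0; }   // end of input
                return counter++;
            } )
      & tbb::make_filter<int,int>( tbb::filter::parallel,
            []( int x ) { return x * x; } )
      & tbb::make_filter<int,void>( tbb::filter::serial_in_order,
            []( int /*x*/ ) { /* consume the result */ } ) );
}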
- filter_node* old = root; - root = rhs.root; - if( root ) root->add_ref(); - if( old ) old->remove_ref(); - } - ~filter_t() { - if( root ) root->remove_ref(); - } - void clear() { - // Like operator= with filter_t() on right side. - if( root ) { - filter_node* old = root; - root = NULL; - old->remove_ref(); - } - } -}; - -inline internal::pipeline_proxy::pipeline_proxy( const filter_t& filter_chain ) : my_pipe() { - __TBB_ASSERT( filter_chain.root, "cannot apply parallel_pipeline to default-constructed filter_t" ); - filter_chain.root->add_to(my_pipe); -} - -inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t& filter_chain -#if __TBB_TASK_GROUP_CONTEXT - , tbb::task_group_context& context -#endif - ) { - internal::pipeline_proxy pipe(filter_chain); - // tbb::pipeline::run() is called via the proxy - pipe->run(max_number_of_live_tokens -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif - ); -} - -#if __TBB_TASK_GROUP_CONTEXT -inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t& filter_chain) { - tbb::task_group_context context; - parallel_pipeline(max_number_of_live_tokens, filter_chain, context); -} -#endif // __TBB_TASK_GROUP_CONTEXT - -} // interface5 - -using interface5::flow_control; -using interface5::filter_t; -using interface5::make_filter; -using interface5::parallel_pipeline; - -} // tbb - -#endif /* __TBB_pipeline_H */ diff --git a/tbb30_20100406oss/include/tbb/queuing_mutex.h b/tbb30_20100406oss/include/tbb/queuing_mutex.h deleted file mode 100644 index ec9832c78..000000000 --- a/tbb30_20100406oss/include/tbb/queuing_mutex.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_queuing_mutex_H -#define __TBB_queuing_mutex_H - -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Queuing lock with local-only spinning. 
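//
// [Illustrative aside, not part of the original header] Typical use of the
// queuing_mutex declared below, via its scoped_lock; the shared counter is
// hypothetical.
//
#include "tbb/queuing_mutex.h"

tbb::queuing_mutex counter_mutex;   // hypothetical shared mutex
long shared_counter = 0;            // hypothetical shared state

void increment_counter() {
    // The lock is released when scoped_lock leaves scope, even on exception.
    tbb::queuing_mutex::scoped_lock lock( counter_mutex );
    ++shared_counter;
}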
-/** @ingroup synchronization */ -class queuing_mutex { -public: - //! Construct unacquired mutex. - queuing_mutex() { - q_tail = NULL; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - //! Initialize fields to mean "no lock held". - void initialize() { - mutex = NULL; -#if TBB_USE_ASSERT - internal::poison_pointer(next); -#endif /* TBB_USE_ASSERT */ - } - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() {initialize();} - - //! Acquire lock on given mutex. - scoped_lock( queuing_mutex& m ) { - initialize(); - acquire(m); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m ); - - //! Acquire lock on given mutex if free (i.e. non-blocking) - bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m ); - - //! Release lock. - void __TBB_EXPORTED_METHOD release(); - - private: - //! The pointer to the mutex owned, or NULL if not holding a mutex. - queuing_mutex* mutex; - - //! The pointer to the next competitor for a mutex - scoped_lock *next; - - //! The local spin-wait variable - /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of - zero-initialization. Defining it as an entire word instead of - a byte seems to help performance slightly. */ - uintptr_t going; - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; - - friend class scoped_lock; -private: - //! The last competitor requesting the lock - atomic q_tail; - -}; - -__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex) - -} // namespace tbb - -#endif /* __TBB_queuing_mutex_H */ diff --git a/tbb30_20100406oss/include/tbb/queuing_rw_mutex.h b/tbb30_20100406oss/include/tbb/queuing_rw_mutex.h deleted file mode 100644 index 5e3547807..000000000 --- a/tbb30_20100406oss/include/tbb/queuing_rw_mutex.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_queuing_rw_mutex_H -#define __TBB_queuing_rw_mutex_H - -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Reader-writer lock with local-only spinning. -/** Adapted from Krieger, Stumm, et al. pseudocode at - http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93 - @ingroup synchronization */ -class queuing_rw_mutex { -public: - //! Construct unacquired mutex. - queuing_rw_mutex() { - q_tail = NULL; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-NULL - ~queuing_rw_mutex() { -#if TBB_USE_ASSERT - __TBB_ASSERT( !q_tail, "destruction of an acquired mutex"); -#endif - } - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - //! Initialize fields - void initialize() { - mutex = NULL; -#if TBB_USE_ASSERT - state = 0xFF; // Set to invalid state - internal::poison_pointer(next); - internal::poison_pointer(prev); -#endif /* TBB_USE_ASSERT */ - } - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() {initialize();} - - //! Acquire lock on given mutex. - scoped_lock( queuing_rw_mutex& m, bool write=true ) { - initialize(); - acquire(m,write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( queuing_rw_mutex& m, bool write=true ); - - //! Try acquire lock on given mutex. - bool try_acquire( queuing_rw_mutex& m, bool write=true ); - - //! Release lock. - void release(); - - //! Upgrade reader to become a writer. - /** Returns true if the upgrade happened without re-acquiring the lock and false if opposite */ - bool upgrade_to_writer(); - - //! Downgrade writer to become a reader. - bool downgrade_to_reader(); - - private: - //! The pointer to the current mutex to work - queuing_rw_mutex* mutex; - - //! The pointer to the previous and next competitors for a mutex - scoped_lock * prev, * next; - - typedef unsigned char state_t; - - //! State of the request: reader, writer, active reader, other service states - atomic state; - - //! The local spin-wait variable - /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */ - unsigned char going; - - //! A tiny internal lock - unsigned char internal_lock; - - //! Acquire the internal lock - void acquire_internal_lock(); - - //! Try to acquire the internal lock - /** Returns true if lock was successfully acquired. */ - bool try_acquire_internal_lock(); - - //! Release the internal lock - void release_internal_lock(); - - //! Wait for internal lock to be released - void wait_for_release_of_internal_lock(); - - //! 
A helper function - void unblock_or_wait_on_internal_lock( uintptr_t ); - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; - -private: - //! The last competitor requesting the lock - atomic q_tail; - -}; - -__TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex) - -} // namespace tbb - -#endif /* __TBB_queuing_rw_mutex_H */ diff --git a/tbb30_20100406oss/include/tbb/reader_writer_lock.h b/tbb30_20100406oss/include/tbb/reader_writer_lock.h deleted file mode 100644 index 3a639693b..000000000 --- a/tbb30_20100406oss/include/tbb/reader_writer_lock.h +++ /dev/null @@ -1,240 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_reader_writer_lock_H -#define __TBB_reader_writer_lock_H - -#include "tbb_thread.h" -#include "tbb_allocator.h" -#include "atomic.h" - -namespace tbb { -namespace interface5 { -//! Writer-preference reader-writer lock with local-only spinning on readers. -/** Loosely adapted from Mellor-Crummey and Scott pseudocode at - http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp - @ingroup synchronization */ - class reader_writer_lock : tbb::internal::no_copy { - public: - friend class scoped_lock; - friend class scoped_lock_read; - //! Status type for nodes associated with lock instances - /** waiting_nonblocking: the wait state for nonblocking lock - instances; for writes, these transition straight to active - states; for reads, these are unused. - - waiting: the start and spin state for all lock instances; these will - transition to active state when appropriate. Non-blocking write locks - transition from this state to waiting_nonblocking immediately. - - active: the active state means that the lock instance holds - the lock; it will transition to invalid state during node deletion - - invalid: the end state for all nodes; this is set in the - destructor so if we encounter this state, we are looking at - memory that has already been freed - - The state diagrams below describe the status transitions. 
- Single arrows indicate that the thread that owns the node is - responsible for the transition; double arrows indicate that - any thread could make the transition. - - State diagram for scoped_lock status: - - waiting ----------> waiting_nonblocking - | _____________/ | - V V V - active -----------------> invalid - - State diagram for scoped_lock_read status: - - waiting - | - V - active ----------------->invalid - - */ - enum status_t { waiting_nonblocking, waiting, active, invalid }; - - //! Constructs a new reader_writer_lock - reader_writer_lock() { - internal_construct(); - } - - //! Destructs a reader_writer_lock object - ~reader_writer_lock() { - internal_destroy(); - } - - //! The scoped lock pattern for write locks - /** Scoped locks help avoid the common problem of forgetting to release the lock. - This type is also serves as the node for queuing locks. */ - class scoped_lock : tbb::internal::no_copy { - public: - friend class reader_writer_lock; - - //! Construct with blocking attempt to acquire write lock on the passed-in lock - scoped_lock(reader_writer_lock& lock) { - internal_construct(lock); - } - - //! Destructor, releases the write lock - ~scoped_lock() { - internal_destroy(); - } - - void* operator new(size_t s) { - return tbb::internal::allocate_via_handler_v3(s); - } - void operator delete(void* p) { - tbb::internal::deallocate_via_handler_v3(p); - } - - private: - //! The pointer to the mutex to lock - reader_writer_lock *mutex; - //! The next queued competitor for the mutex - scoped_lock* next; - //! Status flag of the thread associated with this node - atomic status; - - //! Construct scoped_lock that is not holding lock - scoped_lock(); - - void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); - void __TBB_EXPORTED_METHOD internal_destroy(); - }; - - //! The scoped lock pattern for read locks - class scoped_lock_read : tbb::internal::no_copy { - public: - friend class reader_writer_lock; - - //! Construct with blocking attempt to acquire read lock on the passed-in lock - scoped_lock_read(reader_writer_lock& lock) { - internal_construct(lock); - } - - //! Destructor, releases the read lock - ~scoped_lock_read() { - internal_destroy(); - } - - void* operator new(size_t s) { - return tbb::internal::allocate_via_handler_v3(s); - } - void operator delete(void* p) { - tbb::internal::deallocate_via_handler_v3(p); - } - - private: - //! The pointer to the mutex to lock - reader_writer_lock *mutex; - //! The next queued competitor for the mutex - scoped_lock_read *next; - //! Status flag of the thread associated with this node - atomic status; - - //! Construct scoped_lock_read that is not holding lock - scoped_lock_read(); - - void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); - void __TBB_EXPORTED_METHOD internal_destroy(); - }; - - //! Acquires the reader_writer_lock for write. - /** If the lock is currently held in write mode by another - context, the writer will block by spinning on a local - variable. Exceptions thrown: improper_lock The context tries - to acquire a reader_writer_lock that it already has write - ownership of.*/ - void __TBB_EXPORTED_METHOD lock(); - - //! Tries to acquire the reader_writer_lock for write. - /** This function does not block. Return Value: True or false, - depending on whether the lock is acquired or not. If the lock - is already held by this acquiring context, try_lock() returns - false. */ - bool __TBB_EXPORTED_METHOD try_lock(); - - //! Acquires the reader_writer_lock for read. 
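//
// [Illustrative aside, not part of the original header] Read and write
// scoped locks on the reader_writer_lock being defined here; the shared
// value is hypothetical.
//
#include "tbb/reader_writer_lock.h"

tbb::reader_writer_lock rw_lock;    // hypothetical shared lock
int shared_value = 0;               // hypothetical shared state

int read_value() {
    // Multiple readers may hold the lock concurrently.
    tbb::reader_writer_lock::scoped_lock_read reader( rw_lock );
    return shared_value;
}

void write_value( int v ) {
    // Writers get exclusive access and are preferred over waiting readers.
    tbb::reader_writer_lock::scoped_lock writer( rw_lock );
    shared_value = v;
}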
- /** If the lock is currently held by a writer, this reader will - block and wait until the writers are done. Exceptions thrown: - improper_lock The context tries to acquire a - reader_writer_lock that it already has write ownership of. */ - void __TBB_EXPORTED_METHOD lock_read(); - - //! Tries to acquire the reader_writer_lock for read. - /** This function does not block. Return Value: True or false, - depending on whether the lock is acquired or not. */ - bool __TBB_EXPORTED_METHOD try_lock_read(); - - //! Releases the reader_writer_lock - void __TBB_EXPORTED_METHOD unlock(); - - private: - void __TBB_EXPORTED_METHOD internal_construct(); - void __TBB_EXPORTED_METHOD internal_destroy(); - - //! Attempts to acquire write lock - /** If unavailable, spins in blocking case, returns false in non-blocking case. */ - bool start_write(scoped_lock *); - //! Sets writer_head to w and attempts to unblock - void set_next_writer(scoped_lock *w); - //! Relinquishes write lock to next waiting writer or group of readers - void end_write(scoped_lock *); - //! Checks if current thread holds write lock - bool is_current_writer(); - - //! Attempts to acquire read lock - /** If unavailable, spins in blocking case, returns false in non-blocking case. */ - void start_read(scoped_lock_read *); - //! Unblocks pending readers - void unblock_readers(); - //! Relinquishes read lock by decrementing counter; last reader wakes pending writer - void end_read(); - - //! The list of pending readers - atomic<scoped_lock_read*> reader_head; - //! The list of pending writers - atomic<scoped_lock*> writer_head; - //! The last node in the list of pending writers - atomic<scoped_lock*> writer_tail; - //! Writer that owns the mutex; tbb_thread::id() otherwise. - tbb_thread::id my_current_writer; - //! Status of mutex - atomic<uintptr_t> rdr_count_and_flags; -}; - -} // namespace interface5 - -using interface5::reader_writer_lock; - -} // namespace tbb - -#endif /* __TBB_reader_writer_lock_H */
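For orientation before the next deleted header: a minimal usage sketch of the reader_writer_lock interface removed above. The shared variable and both function names are illustrative, not part of the header.

    #include "tbb/reader_writer_lock.h"

    tbb::reader_writer_lock rw_mutex;   // guards shared_value
    int shared_value = 0;               // illustrative shared state

    void update() {
        // The constructor blocks until the write lock is acquired;
        // the destructor releases it, so early returns stay safe.
        tbb::reader_writer_lock::scoped_lock writer(rw_mutex);
        ++shared_value;
    }

    int observe() {
        // Readers may run concurrently, but the lock is writer-preference:
        // a pending writer holds new readers off.
        tbb::reader_writer_lock::scoped_lock_read reader(rw_mutex);
        return shared_value;
    }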
diff --git a/tbb30_20100406oss/include/tbb/recursive_mutex.h b/tbb30_20100406oss/include/tbb/recursive_mutex.h deleted file mode 100644 index ddf428368..000000000 --- a/tbb30_20100406oss/include/tbb/recursive_mutex.h +++ /dev/null @@ -1,245 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_recursive_mutex_H -#define __TBB_recursive_mutex_H - -#if _WIN32||_WIN64 - #include <windows.h> - #if !defined(_WIN32_WINNT) - // The following Windows API function is declared explicitly; - // otherwise any user would have to specify /D_WIN32_WINNT=0x0400 - extern "C" BOOL WINAPI TryEnterCriticalSection( LPCRITICAL_SECTION ); - #endif -#else /* if not _WIN32||_WIN64 */ - #include <pthread.h> -#endif /* _WIN32||_WIN64 */ - -#include <new> -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" - -namespace tbb { -//! Mutex that allows recursive mutex acquisition. -/** Mutex that allows recursive mutex acquisition. - @ingroup synchronization */ -class recursive_mutex { -public: - //! Construct unacquired recursive_mutex. - recursive_mutex() { -#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS - internal_construct(); -#else - #if _WIN32||_WIN64 - InitializeCriticalSection(&impl); - #else - pthread_mutexattr_t mtx_attr; - int error_code = pthread_mutexattr_init( &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed"); - - pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE ); - error_code = pthread_mutex_init( &impl, &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed"); - - pthread_mutexattr_destroy( &mtx_attr ); - #endif /* _WIN32||_WIN64*/ -#endif /* TBB_USE_ASSERT */ - }; - - ~recursive_mutex() { -#if TBB_USE_ASSERT - internal_destroy(); -#else - #if _WIN32||_WIN64 - DeleteCriticalSection(&impl); - #else - pthread_mutex_destroy(&impl); - - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - }; - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - public: - //! Construct lock that has not acquired a recursive_mutex. - scoped_lock() : my_mutex(NULL) {}; - - //! Acquire lock on given mutex. - scoped_lock( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - my_mutex = &mutex; -#endif /* TBB_USE_ASSERT */ - acquire( mutex ); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) - release(); - } - - //! Acquire lock on given mutex. - void acquire( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - internal_acquire( mutex ); -#else - my_mutex = &mutex; - mutex.lock(); -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquire lock on given recursive_mutex. - bool try_acquire( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - return internal_try_acquire( mutex ); -#else - bool result = mutex.try_lock(); - if( result ) - my_mutex = &mutex; - return result; -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void release() { -#if TBB_USE_ASSERT - internal_release(); -#else - my_mutex->unlock(); - my_mutex = NULL; -#endif /* TBB_USE_ASSERT */ - } - - private: - //! The pointer to the current recursive_mutex to work with - recursive_mutex* my_mutex; - - //! All checks from acquire using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m ); - - //! All checks from try_acquire using mutex.state were moved here - bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m ); - - //!
All checks from release using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_release(); - - friend class recursive_mutex; - }; - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = false; - - // C++0x compatibility interface - - //! Acquire lock - void lock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - new(tmp.begin()) scoped_lock(*this); -#else - #if _WIN32||_WIN64 - EnterCriticalSection(&impl); - #else - pthread_mutex_lock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); -#else - #if _WIN32||_WIN64 - return TryEnterCriticalSection(&impl)!=0; - #else - return pthread_mutex_trylock(&impl)==0; - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void unlock() { -#if TBB_USE_ASSERT - aligned_space<scoped_lock,1> tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.internal_release(); -#else - #if _WIN32||_WIN64 - LeaveCriticalSection(&impl); - #else - pthread_mutex_unlock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Return native_handle - #if _WIN32||_WIN64 - typedef LPCRITICAL_SECTION native_handle_type; - #else - typedef pthread_mutex_t* native_handle_type; - #endif - native_handle_type native_handle() { return (native_handle_type) &impl; } - -private: -#if _WIN32||_WIN64 - CRITICAL_SECTION impl; - enum state_t { - INITIALIZED=0x1234, - DESTROYED=0x789A, - } state; -#else - pthread_mutex_t impl; -#endif /* _WIN32||_WIN64 */ - - //! All checks from mutex constructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_construct(); - - //! All checks from mutex destructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_destroy(); -}; - -__TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex) - -} // namespace tbb - -#endif /* __TBB_recursive_mutex_H */
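A short sketch of the recursive acquisition that the recursive_mutex removed above permits; both function names are illustrative. A non-recursive mutex would deadlock on the nested acquire.

    #include "tbb/recursive_mutex.h"

    tbb::recursive_mutex list_mutex;

    void append_one() {
        // Second acquisition on the same thread: legal for a
        // recursive_mutex, a deadlock for a plain mutex.
        tbb::recursive_mutex::scoped_lock inner(list_mutex);
        // ... touch the shared structure ...
    }

    void append_many() {
        tbb::recursive_mutex::scoped_lock outer(list_mutex);
        append_one();   // re-enters list_mutex on the same thread
    }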
diff --git a/tbb30_20100406oss/include/tbb/scalable_allocator.h b/tbb30_20100406oss/include/tbb/scalable_allocator.h deleted file mode 100644 index 2293803a7..000000000 --- a/tbb30_20100406oss/include/tbb/scalable_allocator.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_scalable_allocator_H -#define __TBB_scalable_allocator_H -/** @file */ - -#include <stddef.h> /* Need ptrdiff_t and size_t from here. */ - -#if !defined(__cplusplus) && __ICC==1100 - #pragma warning (push) - #pragma warning (disable: 991) -#endif - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#if _MSC_VER >= 1400 -#define __TBB_EXPORTED_FUNC __cdecl -#else -#define __TBB_EXPORTED_FUNC -#endif - -/** The "malloc" analogue to allocate block of memory of size bytes. - * @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_malloc (size_t size); - -/** The "free" analogue to discard a previously allocated piece of memory. - @ingroup memory_allocation */ -void __TBB_EXPORTED_FUNC scalable_free (void* ptr); - -/** The "realloc" analogue complementing scalable_malloc. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_realloc (void* ptr, size_t size); - -/** The "calloc" analogue complementing scalable_malloc. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_calloc (size_t nobj, size_t size); - -/** The "posix_memalign" analogue. - @ingroup memory_allocation */ -int __TBB_EXPORTED_FUNC scalable_posix_memalign (void** memptr, size_t alignment, size_t size); - -/** The "_aligned_malloc" analogue. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_aligned_malloc (size_t size, size_t alignment); - -/** The "_aligned_realloc" analogue. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_aligned_realloc (void* ptr, size_t size, size_t alignment); - -/** The "_aligned_free" analogue. - @ingroup memory_allocation */ -void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr); - -/** The analogue of _msize/malloc_size/malloc_usable_size. - Returns the usable size of a memory block previously allocated by scalable_*, - or 0 (zero) if ptr does not point to such a block. - @ingroup memory_allocation */ -size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - -#ifdef __cplusplus - -#include <new> /* To use new with the placement argument */ - -/* Ensure that including this header does not cause implicit linkage with TBB */ -#ifndef __TBB_NO_IMPLICIT_LINKAGE - #define __TBB_NO_IMPLICIT_LINKAGE 1 - #include "tbb_stddef.h" - #undef __TBB_NO_IMPLICIT_LINKAGE -#else - #include "tbb_stddef.h" -#endif - - -namespace tbb { - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif
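The C-level interface declared above can be used without any of the C++ machinery that follows; a minimal sketch, with error handling reduced to the allocation check:

    #include <stdio.h>
    #include "tbb/scalable_allocator.h"

    int main(void) {
        /* Allocated from the scalable per-thread pools, not the global heap. */
        double* buf = (double*)scalable_malloc(100 * sizeof(double));
        if (!buf) return 1;
        printf("usable size: %u bytes\n", (unsigned)scalable_msize(buf));
        scalable_free(buf);
        return 0;
    }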
-//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template<typename T> -class scalable_allocator { -public: - typedef typename internal::allocator_type<T>::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template<class U> struct rebind { - typedef scalable_allocator<U> other; - }; - - scalable_allocator() throw() {} - scalable_allocator( const scalable_allocator& ) throw() {} - template<typename U> scalable_allocator(const scalable_allocator<U>&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects. - pointer allocate( size_type n, const void* /*hint*/ =0 ) { - return static_cast<pointer>( scalable_malloc( n * sizeof(value_type) ) ); - } - - //! Free previously allocated block of memory - void deallocate( pointer p, size_type ) { - scalable_free( p ); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - size_type absolutemax = static_cast<size_type>(-1) / sizeof (value_type); - return (absolutemax > 0 ? absolutemax : 1); - } - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} - void destroy( pointer p ) {p->~value_type();} -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator<void>, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class scalable_allocator<void> { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template<class U> struct rebind { - typedef scalable_allocator<U> other; - }; -}; - -template<typename T, typename U> -inline bool operator==( const scalable_allocator<T>&, const scalable_allocator<U>& ) {return true;} - -template<typename T, typename U> -inline bool operator!=( const scalable_allocator<T>&, const scalable_allocator<U>& ) {return false;} - -} // namespace tbb - -#if _MSC_VER - #if __TBB_BUILD && !defined(__TBBMALLOC_NO_IMPLICIT_LINKAGE) - #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 - #endif - - #if !__TBBMALLOC_NO_IMPLICIT_LINKAGE - #ifdef _DEBUG - #pragma comment(lib, "tbbmalloc_debug.lib") - #else - #pragma comment(lib, "tbbmalloc.lib") - #endif - #endif - - -#endif - -#endif /* __cplusplus */ - -#if !defined(__cplusplus) && __ICC==1100 - #pragma warning (pop) -#endif // ICC 11.0 warning 991 is back - -#endif /* __TBB_scalable_allocator_H */
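On the C++ side, the allocator removed above is a drop-in replacement for std::allocator; a minimal sketch in the C++03 style of the header:

    #include <vector>
    #include "tbb/scalable_allocator.h"

    int main() {
        // Every element of v is carved out of the scalable memory pools
        // via scalable_malloc/scalable_free instead of the global heap.
        std::vector<int, tbb::scalable_allocator<int> > v;
        for (int i = 0; i < 1000; ++i)
            v.push_back(i);
        return 0;
    }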
diff --git a/tbb30_20100406oss/include/tbb/spin_mutex.h b/tbb30_20100406oss/include/tbb/spin_mutex.h deleted file mode 100644 index a88a14a0e..000000000 --- a/tbb30_20100406oss/include/tbb/spin_mutex.h +++ /dev/null @@ -1,192 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_spin_mutex_H -#define __TBB_spin_mutex_H - -#include <cstddef> -#include <new> -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! A lock that occupies a single byte. -/** A spin_mutex is a spin mutex that fits in a single byte. - It should be used only for locking short critical sections - (typically <20 instructions) when fairness is not an issue. - If zero-initialized, the mutex is considered unheld. - @ingroup synchronization */ -class spin_mutex { - //! 0 if lock is released, 1 if lock is acquired. - unsigned char flag; - -public: - //! Construct unacquired lock. - /** Equivalent to zero-initialization of *this. */ - spin_mutex() : flag(0) { -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! Represents acquisition of a mutex. - class scoped_lock : internal::no_copy { - private: - //! Points to currently held mutex, or NULL if no lock is held. - spin_mutex* my_mutex; - - //! Value to store into spin_mutex::flag to unlock the mutex. - uintptr_t my_unlock_value; - - //! Like acquire, but with ITT instrumentation. - void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m ); - - //! Like try_acquire, but with ITT instrumentation. - bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m ); - - //! Like release, but with ITT instrumentation. - void __TBB_EXPORTED_METHOD internal_release(); - - friend class spin_mutex; - - public: - //! Construct without acquiring a mutex. - scoped_lock() : my_mutex(NULL), my_unlock_value(0) {} - - //! Construct and acquire lock on a mutex. - scoped_lock( spin_mutex& m ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - my_mutex=NULL; - internal_acquire(m); -#else - my_unlock_value = __TBB_LockByte(m.flag); - my_mutex=&m; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } - - //! Acquire lock. - void acquire( spin_mutex& m ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_acquire(m); -#else - my_unlock_value = __TBB_LockByte(m.flag); - my_mutex = &m; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_acquire( spin_mutex& m ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - return internal_try_acquire(m); -#else - bool result = __TBB_TryLockByte(m.flag); - if( result ) { - my_unlock_value = 0; - my_mutex = &m; - } - return result; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } - - //! Release lock - void release() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_release(); -#else - __TBB_store_with_release(my_mutex->flag, static_cast<unsigned char>(my_unlock_value)); - my_mutex = NULL; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //!
Destroy lock. If holding a lock, releases the lock first. - ~scoped_lock() { - if( my_mutex ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_release(); -#else - __TBB_store_with_release(my_mutex->flag, static_cast<unsigned char>(my_unlock_value)); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - } - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire lock - void lock() { -#if TBB_USE_THREADING_TOOLS - aligned_space<scoped_lock,1> tmp; - new(tmp.begin()) scoped_lock(*this); -#else - __TBB_LockByte(flag); -#endif /* TBB_USE_THREADING_TOOLS*/ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { -#if TBB_USE_THREADING_TOOLS - aligned_space<scoped_lock,1> tmp; - return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); -#else - return __TBB_TryLockByte(flag); -#endif /* TBB_USE_THREADING_TOOLS*/ - } - - //! Release lock - void unlock() { -#if TBB_USE_THREADING_TOOLS - aligned_space<scoped_lock,1> tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.my_unlock_value = 0; - s.internal_release(); -#else - __TBB_store_with_release(flag, 0); -#endif /* TBB_USE_THREADING_TOOLS */ - } - - friend class scoped_lock; -}; - -__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex) - -} // namespace tbb - -#endif /* __TBB_spin_mutex_H */
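A minimal usage sketch of the spin_mutex removed above; the shared counter and function name are illustrative:

    #include "tbb/spin_mutex.h"

    tbb::spin_mutex count_mutex;
    long count = 0;     // illustrative shared state

    void bump() {
        // Keep the critical section tiny: a waiting thread spins,
        // so this lock only pays off for sections of a few instructions.
        tbb::spin_mutex::scoped_lock guard(count_mutex);
        ++count;
    }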
diff --git a/tbb30_20100406oss/include/tbb/spin_rw_mutex.h b/tbb30_20100406oss/include/tbb/spin_rw_mutex.h deleted file mode 100644 index 38b3a1fb1..000000000 --- a/tbb30_20100406oss/include/tbb/spin_rw_mutex.h +++ /dev/null @@ -1,228 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_spin_rw_mutex_H -#define __TBB_spin_rw_mutex_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" - -namespace tbb { - -class spin_rw_mutex_v3; -typedef spin_rw_mutex_v3 spin_rw_mutex; - -//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference -/** @ingroup synchronization */ -class spin_rw_mutex_v3 { - //! @cond INTERNAL - - //! Internal acquire write lock. - bool __TBB_EXPORTED_METHOD internal_acquire_writer(); - - //! Out of line code for releasing a write lock. - /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - void __TBB_EXPORTED_METHOD internal_release_writer(); - - //! Internal acquire read lock. - void __TBB_EXPORTED_METHOD internal_acquire_reader(); - - //! Internal upgrade reader to become a writer. - bool __TBB_EXPORTED_METHOD internal_upgrade(); - - //! Out of line code for downgrading a writer to a reader. - /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - void __TBB_EXPORTED_METHOD internal_downgrade(); - - //! Internal release read lock. - void __TBB_EXPORTED_METHOD internal_release_reader(); - - //! Internal try_acquire write lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_writer(); - - //! Internal try_acquire read lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_reader(); - - //! @endcond -public: - //! Construct unacquired mutex. - spin_rw_mutex_v3() : state(0) { -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - -#if TBB_USE_ASSERT - //! Destructor asserts that the mutex is not acquired, i.e. that state is zero. - ~spin_rw_mutex_v3() { - __TBB_ASSERT( !state, "destruction of an acquired mutex"); - }; -#endif /* TBB_USE_ASSERT */ - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() : mutex(NULL), is_writer(false) {} - - //! Acquire lock on given mutex. - scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) { - acquire(m, write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - is_writer = write; - mutex = &m; - if( write ) mutex->internal_acquire_writer(); - else mutex->internal_acquire_reader(); - } - - //! Upgrade reader to become a writer. - /** Returns true if the upgrade happened without re-acquiring the lock and false otherwise */ - bool upgrade_to_writer() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( !is_writer, "not a reader" ); - is_writer = true; - return mutex->internal_upgrade(); - } - - //! Release lock. - void release() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - spin_rw_mutex *m = mutex; - mutex = NULL; -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - if( is_writer ) m->internal_release_writer(); - else m->internal_release_reader(); -#else - if( is_writer ) __TBB_AtomicAND( &m->state, READERS ); - else __TBB_FetchAndAddWrelease( &m->state, -(intptr_t)ONE_READER); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Downgrade writer to become a reader. - bool downgrade_to_reader() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( is_writer, "not a writer" ); - mutex->internal_downgrade(); -#else - __TBB_FetchAndAddW( &mutex->state, ((intptr_t)ONE_READER-WRITER)); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - is_writer = false; - - return true; - } - - //! Try acquire lock on given mutex.
- bool try_acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - bool result; - is_writer = write; - result = write? m.internal_try_acquire_writer() - : m.internal_try_acquire_reader(); - if( result ) - mutex = &m; - return result; - } - - private: - //! The pointer to the current mutex that is held, or NULL if no mutex is held. - spin_rw_mutex* mutex; - - //! If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock. - /** Not defined if not holding a lock. */ - bool is_writer; - }; - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire writer lock - void lock() {internal_acquire_writer();} - - //! Try acquiring writer lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() {return internal_try_acquire_writer();} - - //! Release lock - void unlock() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - if( state&WRITER ) internal_release_writer(); - else internal_release_reader(); -#else - if( state&WRITER ) __TBB_AtomicAND( &state, READERS ); - else __TBB_FetchAndAddWrelease( &state, -(intptr_t)ONE_READER); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - // Methods for reader locks that resemble ISO C++0x compatibility methods. - - //! Acquire reader lock - void lock_read() {internal_acquire_reader();} - - //! Try acquiring reader lock (non-blocking) - /** Return true if reader lock acquired; false otherwise. */ - bool try_lock_read() {return internal_try_acquire_reader();} - -private: - typedef intptr_t state_t; - static const state_t WRITER = 1; - static const state_t WRITER_PENDING = 2; - static const state_t READERS = ~(WRITER | WRITER_PENDING); - static const state_t ONE_READER = 4; - static const state_t BUSY = WRITER | READERS; - //! State of lock - /** Bit 0 = writer is holding lock - Bit 1 = request by a writer to acquire lock (hint to readers to wait) - Bit 2..N = number of readers holding lock */ - state_t state; - - void __TBB_EXPORTED_METHOD internal_construct(); -}; - -__TBB_DEFINE_PROFILING_SET_NAME(spin_rw_mutex) - -} // namespace tbb - -#endif /* __TBB_spin_rw_mutex_H */ diff --git a/tbb30_20100406oss/include/tbb/task.h b/tbb30_20100406oss/include/tbb/task.h deleted file mode 100644 index 900f099ab..000000000 --- a/tbb30_20100406oss/include/tbb/task.h +++ /dev/null @@ -1,846 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_task_H -#define __TBB_task_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" - -typedef struct ___itt_caller *__itt_caller; - -namespace tbb { - -class task; -class task_list; - -#if __TBB_TASK_GROUP_CONTEXT -class task_group_context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -// MSVC does not allow taking the address of a member that was defined -// privately in task_base and made public in class task via a using declaration. -#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3) -#define __TBB_TASK_BASE_ACCESS public -#else -#define __TBB_TASK_BASE_ACCESS private -#endif - -namespace internal { - - class allocate_additional_child_of_proxy: no_assign { - //! No longer used, but retained for binary layout compatibility. Always NULL. - task* self; - task& parent; - public: - explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {} - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - -} - -namespace interface5 { - namespace internal { - //! Base class for methods that became static in TBB 3.0. - /** TBB's evolution caused the "this" argument for several methods to become obsolete. - However, for backwards binary compatibility, the new methods need distinct names, - otherwise the One Definition Rule would be broken. Hence the new methods are - defined in this private base class, and then exposed in class task via - using declarations. */ - class task_base: tbb::internal::no_copy { - __TBB_TASK_BASE_ACCESS: - friend class tbb::task; - - //! Schedule task for execution when a worker becomes available. - static void spawn( task& t ); - - //! Spawn multiple tasks and clear list. - static void spawn( task_list& list ); - - //! Like allocate_child, except that task's parent becomes "t", not this. - /** Typically used in conjunction with schedule_to_reexecute to implement while loops. - Atomically increments the reference count of t.parent() */ - static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) { - return tbb::internal::allocate_additional_child_of_proxy(t); - } - - //! Destroy a task. - /** Usually, calling this method is unnecessary, because a task is - implicitly deleted after its execute() method runs. However, - sometimes a task needs to be explicitly deallocated, such as - when a root task is used as the parent in spawn_and_wait_for_all. */ - static void __TBB_EXPORTED_FUNC destroy( task& victim ); - }; - } // internal -} // interface5 - -//! @cond INTERNAL -namespace internal { - - class scheduler: no_copy { - public: - //! For internal use only - virtual void spawn( task& first, task*& next ) = 0; - - //! For internal use only - virtual void wait_for_all( task& parent, task* child ) = 0; - - //! For internal use only - virtual void spawn_root_and_wait( task& first, task*& next ) = 0; - - //! Pure virtual destructor; - // Have to have it just to shut up overzealous compilation warnings - virtual ~scheduler() = 0; -#if __TBB_ARENA_PER_MASTER - - //! 
For internal use only - virtual void enqueue( task& t, void* reserved ) = 0; -#endif /* __TBB_ARENA_PER_MASTER */ - }; - - //! A reference count - /** Should always be non-negative. A signed type is used so that underflow can be detected. */ - typedef intptr_t reference_count; - - //! An id as used for specifying affinity. - typedef unsigned short affinity_id; - -#if __TBB_TASK_GROUP_CONTEXT - struct context_list_node_t { - context_list_node_t *my_prev, - *my_next; - }; - - class allocate_root_with_context_proxy: no_assign { - task_group_context& my_context; - public: - allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {} - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - class allocate_root_proxy: no_assign { - public: - static task& __TBB_EXPORTED_FUNC allocate( size_t size ); - static void __TBB_EXPORTED_FUNC free( task& ); - }; - - class allocate_continuation_proxy: no_assign { - public: - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - class allocate_child_proxy: no_assign { - public: - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - //! Memory prefix to a task object. - /** This class is internal to the library. - Do not reference it directly, except within the library itself. - Fields are ordered in way that preserves backwards compatibility and yields - good packing on typical 32-bit and 64-bit platforms. - @ingroup task_scheduling */ - class task_prefix { - private: - friend class tbb::task; - friend class tbb::interface5::internal::task_base; - friend class tbb::task_list; - friend class internal::scheduler; - friend class internal::allocate_root_proxy; - friend class internal::allocate_child_proxy; - friend class internal::allocate_continuation_proxy; - friend class internal::allocate_additional_child_of_proxy; - -#if __TBB_TASK_GROUP_CONTEXT - //! Shared context that is used to communicate asynchronous state changes - /** Currently it is used to broadcast cancellation requests generated both - by users and as the result of unhandled exceptions in the task::execute() - methods. */ - task_group_context *context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! The scheduler that allocated the task, or NULL if the task is big. - /** Small tasks are pooled by the scheduler that allocated the task. - If a scheduler needs to free a small task allocated by another scheduler, - it returns the task to that other scheduler. This policy avoids - memory space blowup issues for memory allocators that allocate from - thread-specific pools. */ - scheduler* origin; - - //! The scheduler that owns the task. - scheduler* owner; - - //! The task whose reference count includes me. - /** In the "blocking style" of programming, this field points to the parent task. - In the "continuation-passing style" of programming, this field points to the - continuation of the parent. */ - tbb::task* parent; - - //! Reference count used for synchronization. - /** In the "continuation-passing style" of programming, this field is - the difference of the number of allocated children minus the - number of children that have completed. - In the "blocking style" of programming, this field is one more than the difference. */ - reference_count ref_count; - - //! Obsolete. 
Used to be scheduling depth before TBB 2.2 - /** Retained only for the sake of backward binary compatibility. **/ - int depth; - - //! A task::state_type, stored as a byte for compactness. - /** This state is exposed to users via method task::state(). */ - unsigned char state; - - //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness. - /** 0x0 -> version 1.0 task - 0x1 -> version >=2.1 task - 0x20 -> task_proxy - 0x40 -> task has live ref_count - 0x80 -> a stolen task */ - unsigned char extra_state; - - affinity_id affinity; - - //! "next" field for list of task - tbb::task* next; - - //! The task corresponding to this task_prefix. - tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);} - }; - -} // namespace internal -//! @endcond - -#if __TBB_TASK_GROUP_CONTEXT - -#if TBB_USE_CAPTURED_EXCEPTION - class tbb_exception; -#else - namespace internal { - class tbb_exception_ptr; - } -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -//! Used to form groups of tasks -/** @ingroup task_scheduling - The context services explicit cancellation requests from user code, and unhandled - exceptions intercepted during task execution. Intercepting an exception results - in generating internal cancellation requests (which are processed in exactly the - same way as external ones). - - The context is associated with one or more root tasks and defines the cancellation - group that includes all the descendants of the corresponding root task(s). Association - is established when a context object is passed as an argument to the task::allocate_root() - method. See task_group_context::task_group_context for more details. - - The context can be bound to another one, and other contexts can be bound to it, - forming a tree-like structure: parent -> this -> children. Arrows here designate - cancellation propagation direction. If a task in a cancellation group is canceled, - all the other tasks in this group and groups bound to it (as children) get canceled too. - - IMPLEMENTATION NOTE: - When adding new members to task_group_context or changing types of existing ones, - update the size of both padding buffers (_leading_padding and _trailing_padding) - appropriately. See also VERSIONING NOTE at the constructor definition below. **/ -class task_group_context : internal::no_copy { -private: -#if TBB_USE_CAPTURED_EXCEPTION - typedef tbb_exception exception_container_type; -#else - typedef internal::tbb_exception_ptr exception_container_type; -#endif - - enum version_traits_word_layout { - traits_offset = 16, - version_mask = 0xFFFF, - traits_mask = 0xFFFFul << traits_offset - }; - -public: - enum kind_type { - isolated, - bound - }; - - enum traits_type { - exact_exception = 0x0001ul << traits_offset, - concurrent_wait = 0x0004ul << traits_offset, -#if TBB_USE_CAPTURED_EXCEPTION - default_traits = 0 -#else - default_traits = exact_exception -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - }; - -private: - union { - //! Flavor of this context: bound or isolated. - kind_type my_kind; - uintptr_t _my_kind_aligner; - }; - - //! Pointer to the context of the parent cancellation group. NULL for isolated contexts. - task_group_context *my_parent; - - //! Used to form the thread specific list of contexts without additional memory allocation. - /** A context is included into the list of the current thread when its binding to - its parent happens. Any context can be present in the list of one thread only. **/ - internal::context_list_node_t my_node; - - //!
Used to set and maintain stack stitching point for Intel Performance Tools. - __itt_caller itt_caller; - - //! Leading padding protecting accesses to frequently used members from false sharing. - /** Read accesses to the field my_cancellation_requested are on the hot path inside - the scheduler. This padding ensures that this field never shares the same cache - line with a local variable that is frequently written to. **/ - char _leading_padding[internal::NFS_MaxLineSize - - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t) - - sizeof(__itt_caller)]; - - //! Specifies whether cancellation was requested for this task group. - uintptr_t my_cancellation_requested; - - //! Version for run-time checks and behavioral traits of the context. - /** Version occupies low 16 bits, and traits (zero or more ORed enumerators - from the traits_type enumerations) take the next 16 bits. - Original (zeroth) version of the context did not support any traits. **/ - uintptr_t my_version_and_traits; - - //! Pointer to the container storing exception being propagated across this task group. - exception_container_type *my_exception; - - //! Scheduler that registered this context in its thread specific list. - /** This field is not terribly necessary, but it allows a small performance - benefit by avoiding thread local storage. We do not care - about the extra memory it takes since this data structure is excessively padded anyway. **/ - void *my_owner; - - //! Trailing padding protecting accesses to frequently used members from false sharing - /** \sa _leading_padding **/ - char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)]; - -public: - //! Default & binding constructor. - /** By default a bound context is created. That is, this context will be bound - (as child) to the context of the task calling task::allocate_root(this_context) - method. Cancellation requests passed to the parent context are propagated - to all the contexts bound to it. - - If task_group_context::isolated is used as the argument, then the tasks associated - with this context will never be affected by events in any other context. - - Creating isolated contexts involves much less overhead, but they have limited - utility. Normally when an exception occurs in an algorithm that has nested - ones running, it is desirable to have all the nested algorithms canceled - as well. Such a behavior requires nested algorithms to use bound contexts. - - There is one good place where using isolated algorithms is beneficial. It is - a master thread. That is, if a particular algorithm is invoked directly from - the master thread (not from a TBB task), supplying it with an explicitly - created isolated context will result in a faster algorithm startup. - - VERSIONING NOTE: - Implementation(s) of task_group_context constructor(s) cannot be made - entirely out-of-line because the run-time version must be set by the user - code. This will become critically important for binary compatibility, if - we ever have to change the size of the context object. - - Boosting the runtime version will also be necessary whenever new fields - are introduced in the currently unused padding areas or the meaning of - the existing fields is changed or extended. **/ - task_group_context ( kind_type relation_with_parent = bound, - uintptr_t traits = default_traits ) - : my_kind(relation_with_parent) - , my_version_and_traits(1 | traits) - { - init(); - } - - __TBB_EXPORTED_METHOD ~task_group_context (); - - //!
Forcefully reinitializes the context after the task tree it was associated with is completed. - /** Because the method assumes that all the tasks that used to be associated with - this context have already finished, calling it while the context is still - in use somewhere in the task hierarchy leads to undefined behavior. - - IMPORTANT: This method is not thread safe! - - The method does not change the context's parent if it is set. **/ - void __TBB_EXPORTED_METHOD reset (); - - //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. - /** \return false if cancellation has already been requested, true otherwise. - - Note that canceling never fails. When false is returned, it just means that - another thread (or this one) has already sent cancellation request to this - context or to one of its ancestors (if this context is bound). It is guaranteed - that when this method is concurrently called on the same not yet cancelled - context, true will be returned by one and only one invocation. **/ - bool __TBB_EXPORTED_METHOD cancel_group_execution (); - - //! Returns true if the context received cancellation request. - bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const; - - //! Records the pending exception, and cancels the task group. - /** May be called only from inside a catch-block. If the context is already - canceled, does nothing. - The method brings the task group associated with this context exactly into - the state it would be in, if one of its tasks threw the currently pending - exception during its execution. In other words, it emulates the actions - of the scheduler's dispatch loop exception handler. **/ - void __TBB_EXPORTED_METHOD register_pending_exception (); - -protected: - //! Out-of-line part of the constructor. - /** Singled out to ensure backward binary compatibility of the future versions. **/ - void __TBB_EXPORTED_METHOD init (); - -private: - friend class task; - friend class internal::allocate_root_with_context_proxy; - - static const kind_type binding_required = bound; - static const kind_type binding_completed = kind_type(bound+1); - static const kind_type detached = kind_type(binding_completed+1); - static const kind_type dying = kind_type(detached+1); - - //! Checks if any of the ancestors has a cancellation request outstanding, - //! and propagates it back to descendants. - void propagate_cancellation_from_ancestors (); - - //! For debugging purposes only. - bool is_alive () { -#if TBB_USE_DEBUG - return my_version_and_traits != 0xDeadBeef; -#else - return true; -#endif /* TBB_USE_DEBUG */ - } -}; // class task_group_context - -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Base class for user-defined tasks. -/** @ingroup task_scheduling */ -class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { - - //! Set reference count - void __TBB_EXPORTED_METHOD internal_set_ref_count( int count ); - - //! Decrement reference count and return its new value. - internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count(); - -protected: - //! Default constructor. - task() {prefix().extra_state=1;} - -public: - //! Destructor. - virtual ~task() {} - - //! Should be overridden by derived classes. - virtual task* execute() = 0; - - //! Enumeration of task states that the scheduler considers. - enum state_type { - //! task is running, and will be destroyed after method execute() completes. - executing, - //! task to be rescheduled. - reexecute, - //! 
task is in ready pool, or is going to be put there, or was just taken off. - ready, - //! task object is freshly allocated or recycled. - allocated, - //! task object is on free list, or is going to be put there, or was just taken off. - freed, - //! task to be recycled as continuation - recycle - }; - - //------------------------------------------------------------------------ - // Allocating tasks - //------------------------------------------------------------------------ - - //! Returns proxy for overloaded new that allocates a root task. - static internal::allocate_root_proxy allocate_root() { - return internal::allocate_root_proxy(); - } - -#if __TBB_TASK_GROUP_CONTEXT - //! Returns proxy for overloaded new that allocates a root task associated with user supplied context. - static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) { - return internal::allocate_root_with_context_proxy(ctx); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! Returns proxy for overloaded new that allocates a continuation task of *this. - /** The continuation's parent becomes the parent of *this. */ - internal::allocate_continuation_proxy& allocate_continuation() { - return *reinterpret_cast<internal::allocate_continuation_proxy*>(this); - } - - //! Returns proxy for overloaded new that allocates a child task of *this. - internal::allocate_child_proxy& allocate_child() { - return *reinterpret_cast<internal::allocate_child_proxy*>(this); - } - - //! Define recommended static form via import from base class. - using task_base::allocate_additional_child_of; - -#if __TBB_DEPRECATED_TASK_INTERFACE - //! Destroy a task. - /** Usually, calling this method is unnecessary, because a task is - implicitly deleted after its execute() method runs. However, - sometimes a task needs to be explicitly deallocated, such as - when a root task is used as the parent in spawn_and_wait_for_all. */ - void __TBB_EXPORTED_METHOD destroy( task& t ); -#else /* !__TBB_DEPRECATED_TASK_INTERFACE */ - //! Define recommended static form via import from base class. - using task_base::destroy; -#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */ - - //------------------------------------------------------------------------ - // Recycling of tasks - //------------------------------------------------------------------------ - - //! Change this to be a continuation of its former self. - /** The caller must guarantee that the task's refcount does not become zero until - after the method execute() returns. Typically, this is done by having - method execute() return a pointer to a child of the task. If the guarantee - cannot be made, use method recycle_as_safe_continuation instead. - - Because of the hazard, this method may be deprecated in the future. */ - void recycle_as_continuation() { - __TBB_ASSERT( prefix().state==executing, "execute not running?" ); - prefix().state = allocated; - } - - //! Recommended to use, safe variant of recycle_as_continuation - /** For safety, it requires additional increment of ref_count. - With no descendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */ - void recycle_as_safe_continuation() { - __TBB_ASSERT( prefix().state==executing, "execute not running?" ); - prefix().state = recycle; - } - - //! Change this to be a child of new_parent.
- void recycle_as_child_of( task& new_parent ) { - internal::task_prefix& p = prefix(); - __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" ); - __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" ); - __TBB_ASSERT( p.parent==NULL, "parent must be null" ); - __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" ); - __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" ); - p.state = allocated; - p.parent = &new_parent; -#if __TBB_TASK_GROUP_CONTEXT - p.context = new_parent.prefix().context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - } - - //! Schedule this for reexecution after current execute() returns. - /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */ - void recycle_to_reexecute() { - __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" ); - __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" ); - prefix().state = reexecute; - } - - // All depth-related methods are obsolete, and are retained for the sake - // of backward source compatibility only - intptr_t depth() const {return 0;} - void set_depth( intptr_t ) {} - void add_to_depth( int ) {} - - - //------------------------------------------------------------------------ - // Spawning and blocking - //------------------------------------------------------------------------ - - //! Set reference count - void set_ref_count( int count ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_set_ref_count(count); -#else - prefix().ref_count = count; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Atomically increment reference count. - /** Has acquire semantics */ - void increment_ref_count() { - __TBB_FetchAndIncrementWacquire( &prefix().ref_count ); - } - - //! Atomically decrement reference count. - /** Has release semantics. */ - int decrement_ref_count() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - return int(internal_decrement_ref_count()); -#else - return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Define recommended static forms via import from base class. - using task_base::spawn; - - //! Similar to spawn followed by wait_for_all, but more efficient. - void spawn_and_wait_for_all( task& child ) { - prefix().owner->wait_for_all( *this, &child ); - } - - //! Similar to spawn followed by wait_for_all, but more efficient. - void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list ); - - //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it. - static void spawn_root_and_wait( task& root ) { - root.prefix().owner->spawn_root_and_wait( root, root.prefix().next ); - } - - //! Spawn root tasks on list and wait for all of them to finish. - /** If there are more tasks than worker threads, the tasks are spawned in - order of front to back. */ - static void spawn_root_and_wait( task_list& root_list ); - - //! Wait for reference count to become one, and set reference count to zero. - /** Works on tasks while waiting. */ - void wait_for_all() { - prefix().owner->wait_for_all( *this, NULL ); - } - -#if __TBB_ARENA_PER_MASTER - //! Enqueue task for starvation-resistant execution. - static void enqueue( task& t ) { - t.prefix().owner->enqueue( t, NULL ); - } - -#endif /* __TBB_ARENA_PER_MASTER */ - //! 
The innermost task being executed or destroyed by the current thread at the moment. - static task& __TBB_EXPORTED_FUNC self(); - - //! task on whose behalf this task is working, or NULL if this is a root. - task* parent() const {return prefix().parent;} - -#if __TBB_TASK_GROUP_CONTEXT - //! Shared context that is used to communicate asynchronous state changes - task_group_context* context() {return prefix().context;} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! True if task was stolen from the task pool of another thread. - bool is_stolen_task() const { - return (prefix().extra_state & 0x80)!=0; - } - - //------------------------------------------------------------------------ - // Debugging - //------------------------------------------------------------------------ - - //! Current execution state - state_type state() const {return state_type(prefix().state);} - - //! The internal reference count. - int ref_count() const { -#if TBB_USE_ASSERT - internal::reference_count ref_count_ = prefix().ref_count; - __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error"); -#endif - return int(prefix().ref_count); - } - - //! Obsolete, and only retained for the sake of backward compatibility. Always returns true. - bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const; - - //------------------------------------------------------------------------ - // Affinity - //------------------------------------------------------------------------ - - //! An id as used for specifying affinity. - /** Guaranteed to be integral type. Value of 0 means no affinity. */ - typedef internal::affinity_id affinity_id; - - //! Set affinity for this task. - void set_affinity( affinity_id id ) {prefix().affinity = id;} - - //! Current affinity of this task - affinity_id affinity() const {return prefix().affinity;} - - //! Invoked by scheduler to notify task that it ran on unexpected thread. - /** Invoked before method execute() runs, if task is stolen, or task has - affinity but will be executed on another thread. - - The default action does nothing. */ - virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. - /** \return false if cancellation has already been requested, true otherwise. **/ - bool cancel_group_execution () { return prefix().context->cancel_group_execution(); } - - //! Returns true if the context received cancellation request. - bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -private: - friend class interface5::internal::task_base; - friend class task_list; - friend class internal::scheduler; - friend class internal::allocate_root_proxy; -#if __TBB_TASK_GROUP_CONTEXT - friend class internal::allocate_root_with_context_proxy; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - friend class internal::allocate_continuation_proxy; - friend class internal::allocate_child_proxy; - friend class internal::allocate_additional_child_of_proxy; - - //! Get reference to corresponding task_prefix. - /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/ - internal::task_prefix& prefix( internal::version_tag* = NULL ) const { - return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1]; - } -}; // class task - -//! task that does nothing. Useful for synchronization.
-//! task that does nothing. Useful for synchronization. -/** @ingroup task_scheduling */ -class empty_task: public task { - /*override*/ task* execute() { - return NULL; - } -}; - -//! A list of children. -/** Used for method task::spawn_children - @ingroup task_scheduling */ -class task_list: internal::no_copy { -private: - task* first; - task** next_ptr; - friend class task; - friend class interface5::internal::task_base; -public: - //! Construct empty list - task_list() : first(NULL), next_ptr(&first) {} - - //! Destroys the list, but does not destroy the task objects. - ~task_list() {} - - //! True if list is empty; false otherwise. - bool empty() const {return !first;} - - //! Push task onto back of list. - void push_back( task& task ) { - task.prefix().next = NULL; - *next_ptr = &task; - next_ptr = &task.prefix().next; - } - - //! Pop the front task from the list. - task& pop_front() { - __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" ); - task* result = first; - first = result->prefix().next; - if( !first ) next_ptr = &first; - return *result; - } - - //! Clear the list - void clear() { - first=NULL; - next_ptr=&first; - } -}; - -inline void interface5::internal::task_base::spawn( task& t ) { - t.prefix().owner->spawn( t, t.prefix().next ); -} - -inline void interface5::internal::task_base::spawn( task_list& list ) { - if( task* t = list.first ) { - t->prefix().owner->spawn( *t, *list.next_ptr ); - list.clear(); - } -} - -inline void task::spawn_root_and_wait( task_list& root_list ) { - if( task* t = root_list.first ) { - t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr ); - root_list.clear(); - } -} - -} // namespace tbb - -inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) { - return &tbb::internal::allocate_root_proxy::allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) { - tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) ); -} - -#if __TBB_TASK_GROUP_CONTEXT -inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) { - p.free( *static_cast<tbb::task*>(task) ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) { - p.free( *static_cast<tbb::task*>(task) ); -} - -inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) { - p.free( *static_cast<tbb::task*>(task) ); -} - -inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) { - p.free( *static_cast<tbb::task*>(task) ); -} - -#endif /* __TBB_task_H */ diff --git a/tbb30_20100406oss/include/tbb/task_group.h b/tbb30_20100406oss/include/tbb/task_group.h deleted file mode 100644 index d24569044..000000000 --- a/tbb30_20100406oss/include/tbb/task_group.h +++ /dev/null @@ -1,243 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks.
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_task_group_H -#define __TBB_task_group_H - -#include "task.h" -#include "tbb_exception.h" - -namespace tbb { - -namespace internal { - template<typename F> class task_handle_task; -} - -template<typename F> -class task_handle : internal::no_assign { - template<typename _F> friend class internal::task_handle_task; - - static const intptr_t scheduled = 0x1; - - F my_func; - intptr_t my_state; - - void mark_scheduled () { - // The check here is intentionally lax to avoid the impact of an interlocked operation - if ( my_state & scheduled ) - internal::throw_exception( internal::eid_invalid_multiple_scheduling ); - my_state |= scheduled; - } -public: - task_handle( const F& f ) : my_func(f), my_state(0) {} - - void operator() () const { my_func(); } -}; - -enum task_group_status { - not_complete, - complete, - canceled -}; - -namespace internal { - -// Suppress gratuitous warnings from icc 11.0 when lambda expressions are used in instances of function_task. -//#pragma warning(disable: 588) - -template<typename F> -class function_task : public task { - F my_func; - /*override*/ task* execute() { - my_func(); - return NULL; - } -public: - function_task( const F& f ) : my_func(f) {} -}; - -template<typename F> -class task_handle_task : public task { - task_handle<F>& my_handle; - /*override*/ task* execute() { - my_handle(); - return NULL; - } -public: - task_handle_task( task_handle<F>& h ) : my_handle(h) { h.mark_scheduled(); } -}; - -class task_group_base : internal::no_copy { -protected: - empty_task* my_root; - task_group_context my_context; - - task& owner () { return *my_root; } - - template<typename F> - task_group_status internal_run_and_wait( F& f ) { - __TBB_TRY { - if ( !my_context.is_group_execution_cancelled() ) - f(); - } __TBB_CATCH( ...
) { - my_context.register_pending_exception(); - } - return wait(); - } - - template<typename F, typename Task> - void internal_run( F& f ) { - owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) ); - } - -public: - task_group_base( uintptr_t traits = 0 ) - : my_context(task_group_context::bound, task_group_context::default_traits | traits) - { - my_root = new( task::allocate_root(my_context) ) empty_task; - my_root->set_ref_count(1); - } - - template<typename F> - void run( task_handle<F>& h ) { - internal_run< task_handle<F>, internal::task_handle_task<F> >( h ); - } - - task_group_status wait() { - __TBB_TRY { - my_root->wait_for_all(); - } __TBB_CATCH( ... ) { - my_context.reset(); - __TBB_RETHROW(); - } - if ( my_context.is_group_execution_cancelled() ) { - my_context.reset(); - return canceled; - } - return complete; - } - - bool is_canceling() { - return my_context.is_group_execution_cancelled(); - } - - void cancel() { - my_context.cancel_group_execution(); - } -}; // class task_group_base - -} // namespace internal - -class task_group : public internal::task_group_base { -public: - task_group () : task_group_base( task_group_context::concurrent_wait ) {} - - ~task_group() __TBB_TRY { - __TBB_ASSERT( my_root->ref_count() != 0, NULL ); - if( my_root->ref_count() > 1 ) - my_root->wait_for_all(); - owner().destroy(*my_root); - } -#if TBB_USE_EXCEPTIONS - catch (...) { - owner().destroy(*my_root); - throw; - } -#endif /* TBB_USE_EXCEPTIONS */ - -#if __SUNPRO_CC - template<typename F> - void run( task_handle<F>& h ) { - internal_run< task_handle<F>, internal::task_handle_task<F> >( h ); - } -#else - using task_group_base::run; -#endif - - template<typename F> - void run( const F& f ) { - internal_run< const F, internal::function_task<F> >( f ); - } - - template<typename F> - task_group_status run_and_wait( const F& f ) { - return internal_run_and_wait( f ); - } - - template<typename F> - task_group_status run_and_wait( task_handle<F>& h ) { - return internal_run_and_wait< task_handle<F> >( h ); - } -}; // class task_group - -class structured_task_group : public internal::task_group_base { -public: - ~structured_task_group() { - if( my_root->ref_count() > 1 ) { - bool stack_unwinding_in_progress = std::uncaught_exception(); - // Always attempt to do proper cleanup to avoid inevitable memory corruption - // in case of missing wait (for the sake of better testability & debuggability) - if ( !is_canceling() ) - cancel(); - my_root->wait_for_all(); - owner().destroy(*my_root); - if ( !stack_unwinding_in_progress ) - internal::throw_exception( internal::eid_missing_wait ); - } - else { - if( my_root->ref_count() == 1 ) - my_root->set_ref_count(0); - owner().destroy(*my_root); - } - } - - template<typename F> - task_group_status run_and_wait ( task_handle<F>& h ) { - return internal_run_and_wait< task_handle<F> >( h ); - } - - task_group_status wait() { - task_group_status res = task_group_base::wait(); - my_root->set_ref_count(1); - return res; - } -}; // class structured_task_group - -inline -bool is_current_task_group_canceling() { - return task::self().is_cancelled(); -} - -template<typename F> -task_handle<F> make_task( const F& f ) { - return task_handle<F>( f ); -} - -} // namespace tbb - -#endif /* __TBB_task_group_H */ diff --git a/tbb30_20100406oss/include/tbb/task_scheduler_init.h b/tbb30_20100406oss/include/tbb/task_scheduler_init.h deleted file mode 100644 index 458afb26f..000000000 --- a/tbb30_20100406oss/include/tbb/task_scheduler_init.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks.
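The task_group wrapper removed above hides the reference-count bookkeeping of raw tasks. A minimal usage sketch, assuming a C++03 functor since these 2010-era headers predate lambdas; Work is illustrative:

    #include "tbb/task_group.h"

    struct Work {
        int id;
        explicit Work( int i ) : id(i) {}
        void operator()() const { /* process item `id` */ }
    };

    int run_two_jobs() {
        tbb::task_group g;
        g.run( Work(1) );   // spawned asynchronously
        g.run( Work(2) );
        // wait() rethrows any task exception and reports cancellation.
        return g.wait() == tbb::canceled ? 1 : 0;
    }

structured_task_group is the stricter variant: as its destructor above shows, a missing wait() is trapped and reported via eid_missing_wait rather than silently ignored.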
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_task_scheduler_init_H -#define __TBB_task_scheduler_init_H - -#include "tbb_stddef.h" - -namespace tbb { - -typedef std::size_t stack_size_type; - -//! @cond INTERNAL -namespace internal { - //! Internal to library. Should not be used by clients. - /** @ingroup task_scheduling */ - class scheduler; -} // namespace internal -//! @endcond - -//! Class representing reference to tbb scheduler. -/** A thread must construct a task_scheduler_init, and keep it alive, - during the time that it uses the services of class task. - @ingroup task_scheduling */ -class task_scheduler_init: internal::no_copy { - /** NULL if not currently initialized. */ - internal::scheduler* my_scheduler; -public: - - //! Typedef for number of threads that is automatic. - static const int automatic = -1; - - //! Argument to initialize() or constructor that causes initialization to be deferred. - static const int deferred = -2; - - //! Ensure that scheduler exists for this thread - /** A value of -1 lets tbb decide on the number of threads, which is typically - the number of hardware threads. For production code, the default value of -1 - should be used, particularly if the client code is mixed with third party clients - that might also use tbb. - - The number_of_threads is ignored if any other task_scheduler_inits - currently exist. A thread may construct multiple task_scheduler_inits. - Doing so does no harm because the underlying scheduler is reference counted. */ - void __TBB_EXPORTED_METHOD initialize( int number_of_threads=automatic ); - - //! The overloaded method with stack size parameter - /** Overloading is necessary to preserve ABI compatibility */ - void __TBB_EXPORTED_METHOD initialize( int number_of_threads, stack_size_type thread_stack_size ); - - //! Inverse of method initialize. - void __TBB_EXPORTED_METHOD terminate(); - - //! Shorthand for default constructor followed by call to intialize(number_of_threads). - task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0 ) : my_scheduler(NULL) { - initialize( number_of_threads, thread_stack_size ); - } - - //! Destroy scheduler for this thread if thread has no other live task_scheduler_inits. - ~task_scheduler_init() { - if( my_scheduler ) - terminate(); - internal::poison_pointer( my_scheduler ); - } - //! 
Returns the number of threads tbb scheduler would create if initialized by default. - /** Result returned by this method does not depend on whether the scheduler - has already been initialized. - - Because tbb 2.0 does not support blocking tasks yet, you may use this method - to boost the number of threads in the tbb's internal pool, if your tasks are - doing I/O operations. The optimal number of additional threads depends on how - much time your tasks spend in the blocked state. */ - static int __TBB_EXPORTED_FUNC default_num_threads (); - - //! Returns true if scheduler is active (initialized); false otherwise - bool is_active() const { return my_scheduler != NULL; } -}; - -} // namespace tbb - -#endif /* __TBB_task_scheduler_init_H */ diff --git a/tbb30_20100406oss/include/tbb/task_scheduler_observer.h b/tbb30_20100406oss/include/tbb/task_scheduler_observer.h deleted file mode 100644 index 61003e524..000000000 --- a/tbb30_20100406oss/include/tbb/task_scheduler_observer.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_task_scheduler_observer_H -#define __TBB_task_scheduler_observer_H - -#include "atomic.h" - -#if __TBB_SCHEDULER_OBSERVER - -namespace tbb { - -namespace internal { - -class observer_proxy; - -class task_scheduler_observer_v3 { - friend class observer_proxy; - observer_proxy* my_proxy; - atomic<intptr_t> my_busy_count; -public: - //! Enable or disable observation - void __TBB_EXPORTED_METHOD observe( bool state=true ); - - //! True if observation is enabled; false otherwise. - bool is_observing() const {return my_proxy!=NULL;} - - //! Construct observer with observation disabled. - task_scheduler_observer_v3() : my_proxy(NULL) {my_busy_count=0;} - - //! Called by a thread before its first steal since observation became enabled - virtual void on_scheduler_entry( bool /*is_worker*/ ) {} - - //! Called by a thread when it no longer takes part in task stealing. - virtual void on_scheduler_exit( bool /*is_worker*/ ) {} - - //!
Destructor - virtual ~task_scheduler_observer_v3() {observe(false);} -}; - -} // namespace internal - -typedef internal::task_scheduler_observer_v3 task_scheduler_observer; - -} // namespace tbb - -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#endif /* __TBB_task_scheduler_observer_H */ diff --git a/tbb30_20100406oss/include/tbb/tbb.h b/tbb30_20100406oss/include/tbb/tbb.h deleted file mode 100644 index 9c5ac0ff3..000000000 --- a/tbb30_20100406oss/include/tbb/tbb.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_H -#define __TBB_tbb_H - -/** - This header bulk-includes declarations or definitions of all the functionality - provided by TBB (save for malloc dependent headers). - - If you use only a few TBB constructs, consider including specific headers only. - Any header listed below can be included independently of others. 
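To make the two scheduler-facing classes above concrete, a minimal sketch that pins the pool size and attaches an observer; the printf bodies are illustrative:

    #include "tbb/task_scheduler_init.h"
    #include "tbb/task_scheduler_observer.h"
    #include <cstdio>

    struct LoggingObserver : tbb::task_scheduler_observer {
        LoggingObserver() { observe( true ); }  // start observing
        /*override*/ void on_scheduler_entry( bool is_worker ) {
            std::printf( "thread enters pool (worker=%d)\n", int(is_worker) );
        }
        /*override*/ void on_scheduler_exit( bool is_worker ) {
            std::printf( "thread leaves pool (worker=%d)\n", int(is_worker) );
        }
    };

    int main() {
        LoggingObserver observer;
        tbb::task_scheduler_init init( 4 );  // explicit pool size instead of automatic
        // ... run parallel work while `init` stays alive ...
        return 0;  // ~task_scheduler_init() terminates the scheduler
    }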
-**/ - -#include "aligned_space.h" -#include "atomic.h" -#include "blocked_range.h" -#include "blocked_range2d.h" -#include "blocked_range3d.h" -#include "cache_aligned_allocator.h" -#include "combinable.h" -#include "concurrent_unordered_map.h" -#include "concurrent_hash_map.h" -#include "concurrent_queue.h" -#include "concurrent_vector.h" -#include "critical_section.h" -#include "enumerable_thread_specific.h" -#include "mutex.h" -#include "null_mutex.h" -#include "null_rw_mutex.h" -#include "parallel_do.h" -#include "parallel_for.h" -#include "parallel_for_each.h" -#include "parallel_invoke.h" -#include "parallel_reduce.h" -#include "parallel_scan.h" -#include "parallel_sort.h" -#include "partitioner.h" -#include "pipeline.h" -#include "queuing_mutex.h" -#include "queuing_rw_mutex.h" -#include "reader_writer_lock.h" -#include "recursive_mutex.h" -#include "spin_mutex.h" -#include "spin_rw_mutex.h" -#include "task.h" -#include "task_group.h" -#include "task_scheduler_init.h" -#include "task_scheduler_observer.h" -#include "tbb_allocator.h" -#include "tbb_exception.h" -#include "tbb_thread.h" -#include "tick_count.h" - -#endif /* __TBB_tbb_H */ diff --git a/tbb30_20100406oss/include/tbb/tbb_allocator.h b/tbb30_20100406oss/include/tbb/tbb_allocator.h deleted file mode 100644 index 008422df3..000000000 --- a/tbb30_20100406oss/include/tbb/tbb_allocator.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_allocator_H -#define __TBB_tbb_allocator_H - -#include "tbb_stddef.h" -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - //! Deallocates memory using FreeHandler - /** The function uses scalable_free if scalable allocator is available and free if not*/ - void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p ); - - //! 
Allocates memory using MallocHandler - /** The function uses scalable_malloc if scalable allocator is available and malloc if not*/ - void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n ); - - //! Returns true if standard malloc/free are used to work with memory. - bool __TBB_EXPORTED_FUNC is_malloc_used_v3(); -} -//! @endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The class selects the best memory allocation mechanism available - from scalable_malloc and standard malloc. - The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template -class tbb_allocator { -public: - typedef typename internal::allocator_type::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template struct rebind { - typedef tbb_allocator other; - }; - - //! Specifies current allocator - enum malloc_type { - scalable, - standard - }; - - tbb_allocator() throw() {} - tbb_allocator( const tbb_allocator& ) throw() {} - template tbb_allocator(const tbb_allocator&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects. - pointer allocate( size_type n, const void* /*hint*/ = 0) { - return pointer(internal::allocate_via_handler_v3( n * sizeof(value_type) )); - } - - //! Free previously allocated block of memory. - void deallocate( pointer p, size_type ) { - internal::deallocate_via_handler_v3(p); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - size_type max = static_cast(-1) / sizeof (value_type); - return (max > 0 ? max : 1); - } - - //! Copy-construct value at location pointed to by p. - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) {p->~value_type();} - - //! Returns current allocator - static malloc_type allocator_type() { - return internal::is_malloc_used_v3() ? standard : scalable; - } -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class tbb_allocator { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template struct rebind { - typedef tbb_allocator other; - }; -}; - -template -inline bool operator==( const tbb_allocator&, const tbb_allocator& ) {return true;} - -template -inline bool operator!=( const tbb_allocator&, const tbb_allocator& ) {return false;} - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The class is an adapter over an actual allocator that fills the allocation - using memset function with template argument C as the value. - The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. 
- @ingroup memory_allocation */ -template class Allocator = tbb_allocator> -class zero_allocator : public Allocator -{ -public: - typedef Allocator base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - typedef typename base_allocator_type::reference reference; - typedef typename base_allocator_type::const_reference const_reference; - typedef typename base_allocator_type::size_type size_type; - typedef typename base_allocator_type::difference_type difference_type; - template struct rebind { - typedef zero_allocator other; - }; - - zero_allocator() throw() { } - zero_allocator(const zero_allocator &a) throw() : base_allocator_type( a ) { } - template - zero_allocator(const zero_allocator &a) throw() : base_allocator_type( Allocator( a ) ) { } - - pointer allocate(const size_type n, const void *hint = 0 ) { - pointer ptr = base_allocator_type::allocate( n, hint ); - std::memset( ptr, 0, n * sizeof(value_type) ); - return ptr; - } -}; - -//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template class Allocator> -class zero_allocator : public Allocator { -public: - typedef Allocator base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - template struct rebind { - typedef zero_allocator other; - }; -}; - -template class B1, typename T2, template class B2> -inline bool operator==( const zero_allocator &a, const zero_allocator &b) { - return static_cast< B1 >(a) == static_cast< B2 >(b); -} -template class B1, typename T2, template class B2> -inline bool operator!=( const zero_allocator &a, const zero_allocator &b) { - return static_cast< B1 >(a) != static_cast< B2 >(b); -} - -} // namespace tbb - -#endif /* __TBB_tbb_allocator_H */ diff --git a/tbb30_20100406oss/include/tbb/tbb_config.h b/tbb30_20100406oss/include/tbb/tbb_config.h deleted file mode 100644 index 6f26e47f1..000000000 --- a/tbb30_20100406oss/include/tbb/tbb_config.h +++ /dev/null @@ -1,198 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
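Both allocator templates above plug into standard containers unchanged. A minimal sketch; whether scalable_malloc actually backs the allocations is decided at run time, which the allocator_type() query exposes:

    #include "tbb/tbb_allocator.h"
    #include <vector>

    int main() {
        // Uses scalable_malloc when the tbbmalloc library is present, malloc otherwise.
        std::vector< int, tbb::tbb_allocator<int> > v( 1000, 42 );

        // zero_allocator additionally memsets each allocation to zero,
        // handy for lazily initialized flag arrays.
        std::vector< int, tbb::zero_allocator<int> > flags( 1000 );

        return tbb::tbb_allocator<int>::allocator_type()
               == tbb::tbb_allocator<int>::scalable ? 0 : 1;
    }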
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_config_H -#define __TBB_tbb_config_H - -/** This header is supposed to contain macro definitions and C style comments only. - The macros defined here are intended to control such aspects of TBB build as - - compilation modes - - feature sets - - workarounds presence -**/ - -/** Compilation modes **/ - -#ifndef TBB_USE_DEBUG -#ifdef TBB_DO_ASSERT -#define TBB_USE_DEBUG TBB_DO_ASSERT -#else -#define TBB_USE_DEBUG 0 -#endif /* TBB_DO_ASSERT */ -#else -#define TBB_DO_ASSERT TBB_USE_DEBUG -#endif /* TBB_USE_DEBUG */ - -#ifndef TBB_USE_ASSERT -#ifdef TBB_DO_ASSERT -#define TBB_USE_ASSERT TBB_DO_ASSERT -#else -#define TBB_USE_ASSERT TBB_USE_DEBUG -#endif /* TBB_DO_ASSERT */ -#endif /* TBB_USE_ASSERT */ - -#ifndef TBB_USE_THREADING_TOOLS -#ifdef TBB_DO_THREADING_TOOLS -#define TBB_USE_THREADING_TOOLS TBB_DO_THREADING_TOOLS -#else -#define TBB_USE_THREADING_TOOLS TBB_USE_DEBUG -#endif /* TBB_DO_THREADING_TOOLS */ -#endif /* TBB_USE_THREADING_TOOLS */ - -#ifndef TBB_USE_PERFORMANCE_WARNINGS -#ifdef TBB_PERFORMANCE_WARNINGS -#define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS -#else -#define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG -#endif /* TBB_PEFORMANCE_WARNINGS */ -#endif /* TBB_USE_PERFORMANCE_WARNINGS */ - -#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX) - #if TBB_USE_EXCEPTIONS - #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. - #elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 0 - #endif -#elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 1 -#endif - -#ifndef TBB_IMPLEMENT_CPP0X -/** By default, use C++0x classes if available **/ -#if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__ -#define TBB_IMPLEMENT_CPP0X 0 -#else -#define TBB_IMPLEMENT_CPP0X 1 -#endif -#endif /* TBB_IMPLEMENT_CPP0X */ - -/** Feature sets **/ - -#ifndef __TBB_COUNT_TASK_NODES - #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT -#endif - -#ifndef __TBB_TASK_GROUP_CONTEXT -#define __TBB_TASK_GROUP_CONTEXT 1 -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#ifndef __TBB_SCHEDULER_OBSERVER -#define __TBB_SCHEDULER_OBSERVER 1 -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#ifndef __TBB_ARENA_PER_MASTER -#define __TBB_ARENA_PER_MASTER 1 -#endif /* __TBB_ARENA_PER_MASTER */ - -/* TODO: The following condition should be extended as soon as new compilers/runtimes - with std::exception_ptr support appear. */ -#define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && (__GNUC__==4 && __GNUC_MINOR__>=4)) - - -#ifndef TBB_USE_CAPTURED_EXCEPTION - #if __TBB_EXCEPTION_PTR_PRESENT - #define TBB_USE_CAPTURED_EXCEPTION 0 - #else - #define TBB_USE_CAPTURED_EXCEPTION 1 - #endif -#else /* defined TBB_USE_CAPTURED_EXCEPTION */ - #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT - #error Current runtime does not support std::exception_ptr. Set TBB_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb::captured_exception. 
- #endif -#endif /* defined TBB_USE_CAPTURED_EXCEPTION */ - - -#ifndef __TBB_DEFAULT_PARTITIONER -#if TBB_DEPRECATED -/** Default partitioner for parallel loop templates in TBB 1.0-2.1 */ -#define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner -#else -/** Default partitioner for parallel loop templates in TBB 2.2 */ -#define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner -#endif /* TBB_DEFAULT_PARTITIONER */ -#endif /* !defined(__TBB_DEFAULT_PARTITIONER */ - -/** Workarounds presence **/ - -#if __GNUC__==4 && __GNUC_MINOR__>=4 && !defined(__INTEL_COMPILER) - #define __TBB_GCC_WARNING_SUPPRESSION_ENABLED 1 -#endif - -/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by - the bugs in compilers, standard or OS specific libraries. They should be - removed as soon as the corresponding bugs are fixed or the buggy OS/compiler - versions go out of the support list. -**/ - -#if _MSC_VER && __INTEL_COMPILER && (__INTEL_COMPILER<1110 || __INTEL_COMPILER==1110 && __INTEL_COMPILER_BUILD_DATE < 20091012) - /** Necessary to avoid ICL error (or warning in non-strict mode): - "exception specification for implicitly declared virtual destructor is - incompatible with that of overridden one". **/ - #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 -#endif - -#if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER) - /** VS2005 and earlier do not allow declaring template class as a friend - of classes defined in other namespaces. **/ - #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 -#endif - -#if __GLIBC__==2 && __GLIBC_MINOR__==3 || __MINGW32__ - //! Macro controlling EH usages in TBB tests - /** Some older versions of glibc crash when exception handling happens concurrently. **/ - #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 -#endif - -#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 - /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/ - #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 -#endif - -#if __GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMPILER) - /** A bug in GCC 3.3 with access to nested classes declared in protected area */ - #define __TBB_GCC_3_3_PROTECTED_BROKEN 1 -#endif - -#if __FreeBSD__ - /** A bug in FreeBSD 8.0 results in kernel panic when there is contention - on a mutex created with this attribute. **/ - #define __TBB_PRIO_INHERIT_BROKEN 1 - - /** A bug in FreeBSD 8.0 results in test hanging when an exception occurs - during (concurrent?) object construction by means of placement new operator. **/ - #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 -#endif /* __FreeBSD__ */ - -#if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) - /** The Intel compiler for IA-32 (Linux|Mac OS X) crashes or generates - incorrect code when __asm__ arguments have a cast to volatile. **/ - #define __TBB_ICC_ASM_VOLATILE_BROKEN 1 -#endif - - -#endif /* __TBB_tbb_config_H */ diff --git a/tbb30_20100406oss/include/tbb/tbb_exception.h b/tbb30_20100406oss/include/tbb/tbb_exception.h deleted file mode 100644 index 3e1eecd85..000000000 --- a/tbb30_20100406oss/include/tbb/tbb_exception.h +++ /dev/null @@ -1,365 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
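The tbb_config.h macros above only supply defaults for anything the user has not already defined, so overrides must precede the first TBB include. A minimal sketch, equivalent to passing -DTBB_USE_DEBUG=1 -DTBB_USE_ASSERT=1 on the compile line:

    // Must come before any TBB header; tbb_config.h keeps user definitions.
    #define TBB_USE_DEBUG 1    // enable debug mode
    #define TBB_USE_ASSERT 1   // enable internal assertions
    #include "tbb/tbb.h"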
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_exception_H -#define __TBB_exception_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if __SUNPRO_CC -#include // required to construct std exception classes -#endif - -namespace tbb { - -//! Exception for concurrent containers -class bad_last_alloc : public std::bad_alloc { -public: - /*override*/ const char* what() const throw(); -#if __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN - /*override*/ ~bad_last_alloc() throw() {} -#endif -}; - -//! Exception for PPL locks -class improper_lock : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -//! Exception for missing wait on structured_task_group -class missing_wait : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -//! Exception for repeated scheduling of the same task_handle -class invalid_multiple_scheduling : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -namespace internal { -//! Obsolete -void __TBB_EXPORTED_FUNC throw_bad_last_alloc_exception_v4(); - -enum exception_id { - eid_bad_alloc = 1, - eid_bad_last_alloc, - eid_nonpositive_step, - eid_out_of_range, - eid_segment_range_error, - eid_index_range_error, - eid_missing_wait, - eid_invalid_multiple_scheduling, - eid_improper_lock, - eid_possible_deadlock, - eid_operation_not_permitted, - eid_condvar_wait_failed, - eid_invalid_load_factor, - eid_invalid_buckets_number, - eid_invalid_swap, - eid_reservation_length_error, - eid_invalid_key, - //! The last enumerator tracks the number of defined IDs. It must remain the last one. - /** When adding new IDs, place them immediately _before_ this comment (that is - _after_ all the existing IDs. NEVER insert new IDs between the existing ones. **/ - eid_max -}; - -//! Gathers all throw operators in one place. -/** Its purpose is to minimize code bloat that can be caused by throw operators - scattered in multiple places, especially in templates. **/ -void __TBB_EXPORTED_FUNC throw_exception_v4 ( exception_id ); - -//! 
Versionless convenience wrapper for throw_exception_v4() -inline void throw_exception ( exception_id eid ) { throw_exception_v4(eid); } - -} // namespace internal -} // namespace tbb - -#if __TBB_TASK_GROUP_CONTEXT -#include "tbb_allocator.h" -#include -#include -#include - -namespace tbb { - -//! Interface to be implemented by all exceptions TBB recognizes and propagates across the threads. -/** If an unhandled exception of the type derived from tbb::tbb_exception is intercepted - by the TBB scheduler in one of the worker threads, it is delivered to and re-thrown in - the root thread. The root thread is the thread that has started the outermost algorithm - or root task sharing the same task_group_context with the guilty algorithm/task (the one - that threw the exception first). - - Note: when documentation mentions workers with respect to exception handling, - masters are implied as well, because they are completely equivalent in this context. - Consequently a root thread can be master or worker thread. - - NOTE: In case of nested algorithms or complex task hierarchies when the nested - levels share (explicitly or by means of implicit inheritance) the task group - context of the outermost level, the exception may be (re-)thrown multiple times - (ultimately - in each worker on each nesting level) before reaching the root - thread at the outermost level. IMPORTANT: if you intercept an exception derived - from this class on a nested level, you must re-throw it in the catch block by means - of the "throw;" operator. - - TBB provides two implementations of this interface: tbb::captured_exception and - template class tbb::movable_exception. See their declarations for more info. **/ -class tbb_exception : public std::exception -{ - /** No operator new is provided because the TBB usage model assumes dynamic - creation of the TBB exception objects only by means of applying move() - operation on an exception thrown out of TBB scheduler. **/ - void* operator new ( size_t ); - -public: - //! Creates and returns pointer to the deep copy of this exception object. - /** Move semantics is allowed. **/ - virtual tbb_exception* move () throw() = 0; - - //! Destroys objects created by the move() method. - /** Frees memory and calls destructor for this exception object. - Can and must be used only on objects created by the move method. **/ - virtual void destroy () throw() = 0; - - //! Throws this exception object. - /** Make sure that if you have several levels of derivation from this interface - you implement or override this method on the most derived level. The implementation - is as simple as "throw *this;". Failure to do this will result in exception - of a base class type being thrown. **/ - virtual void throw_self () = 0; - - //! Returns RTTI name of the originally intercepted exception - virtual const char* name() const throw() = 0; - - //! Returns the result of originally intercepted exception's what() method. - virtual const char* what() const throw() = 0; - - /** Operator delete is provided only to allow using existing smart pointers - with TBB exception objects obtained as the result of applying move() - operation on an exception thrown out of TBB scheduler. - - When overriding method move() make sure to override operator delete as well - if memory is allocated not by TBB's scalable allocator. **/ - void operator delete ( void* p ) { - internal::deallocate_via_handler_v3(p); - } -}; - -//! This class is used by TBB to propagate information about unhandled exceptions into the root thread. 
-/** Exception of this type is thrown by TBB in the root thread (thread that started a parallel - algorithm ) if an unhandled exception was intercepted during the algorithm execution in one - of the workers. - \sa tbb::tbb_exception **/ -class captured_exception : public tbb_exception -{ -public: - captured_exception ( const captured_exception& src ) - : tbb_exception(src), my_dynamic(false) - { - set(src.my_exception_name, src.my_exception_info); - } - - captured_exception ( const char* name_, const char* info ) - : my_dynamic(false) - { - set(name_, info); - } - - __TBB_EXPORTED_METHOD ~captured_exception () throw() { - clear(); - } - - captured_exception& operator= ( const captured_exception& src ) { - if ( this != &src ) { - clear(); - set(src.my_exception_name, src.my_exception_info); - } - return *this; - } - - /*override*/ - captured_exception* __TBB_EXPORTED_METHOD move () throw(); - - /*override*/ - void __TBB_EXPORTED_METHOD destroy () throw(); - - /*override*/ - void throw_self () { __TBB_THROW(*this); } - - /*override*/ - const char* __TBB_EXPORTED_METHOD name() const throw(); - - /*override*/ - const char* __TBB_EXPORTED_METHOD what() const throw(); - - void __TBB_EXPORTED_METHOD set ( const char* name, const char* info ) throw(); - void __TBB_EXPORTED_METHOD clear () throw(); - -private: - //! Used only by method clone(). - captured_exception() {} - - //! Functionally equivalent to {captured_exception e(name,info); return e.clone();} - static captured_exception* allocate ( const char* name, const char* info ); - - bool my_dynamic; - const char* my_exception_name; - const char* my_exception_info; -}; - -//! Template that can be used to implement exception that transfers arbitrary ExceptionData to the root thread -/** Code using TBB can instantiate this template with an arbitrary ExceptionData type - and throw this exception object. Such exceptions are intercepted by the TBB scheduler - and delivered to the root thread (). 
- \sa tbb::tbb_exception **/ -template -class movable_exception : public tbb_exception -{ - typedef movable_exception self_type; - -public: - movable_exception ( const ExceptionData& data_ ) - : my_exception_data(data_) - , my_dynamic(false) - , my_exception_name( -#if TBB_USE_EXCEPTIONS - typeid(self_type).name() -#else /* !TBB_USE_EXCEPTIONS */ - "movable_exception" -#endif /* !TBB_USE_EXCEPTIONS */ - ) - {} - - movable_exception ( const movable_exception& src ) throw () - : tbb_exception(src) - , my_exception_data(src.my_exception_data) - , my_dynamic(false) - , my_exception_name(src.my_exception_name) - {} - - ~movable_exception () throw() {} - - const movable_exception& operator= ( const movable_exception& src ) { - if ( this != &src ) { - my_exception_data = src.my_exception_data; - my_exception_name = src.my_exception_name; - } - return *this; - } - - ExceptionData& data () throw() { return my_exception_data; } - - const ExceptionData& data () const throw() { return my_exception_data; } - - /*override*/ const char* name () const throw() { return my_exception_name; } - - /*override*/ const char* what () const throw() { return "tbb::movable_exception"; } - - /*override*/ - movable_exception* move () throw() { - void* e = internal::allocate_via_handler_v3(sizeof(movable_exception)); - if ( e ) { - ::new (e) movable_exception(*this); - ((movable_exception*)e)->my_dynamic = true; - } - return (movable_exception*)e; - } - /*override*/ - void destroy () throw() { - __TBB_ASSERT ( my_dynamic, "Method destroy can be called only on dynamically allocated movable_exceptions" ); - if ( my_dynamic ) { - this->~movable_exception(); - internal::deallocate_via_handler_v3(this); - } - } - /*override*/ - void throw_self () { __TBB_THROW( *this ); } - -protected: - //! User data - ExceptionData my_exception_data; - -private: - //! Flag specifying whether this object has been dynamically allocated (by the move method) - bool my_dynamic; - - //! RTTI name of this class - /** We rely on the fact that RTTI names are static string constants. **/ - const char* my_exception_name; -}; - -#if !TBB_USE_CAPTURED_EXCEPTION -namespace internal { - -//! Exception container that preserves the exact copy of the original exception -/** This class can be used only when the appropriate runtime support (mandated - by C++0x) is present **/ -class tbb_exception_ptr { - std::exception_ptr my_ptr; - -public: - static tbb_exception_ptr* allocate (); - static tbb_exception_ptr* allocate ( const tbb_exception& tag ); - //! This overload uses move semantics (i.e. it empties src) - static tbb_exception_ptr* allocate ( captured_exception& src ); - - //! Destroys this objects - /** Note that objects of this type can be created only by the allocate() method. **/ - void destroy () throw(); - - //! Throws the contained exception . 
- void throw_self () { std::rethrow_exception(my_ptr); } - -private: - tbb_exception_ptr ( const std::exception_ptr& src ) : my_ptr(src) {} - tbb_exception_ptr ( const captured_exception& src ) : my_ptr(std::copy_exception(src)) {} -}; // class tbb::internal::tbb_exception_ptr - -} // namespace internal -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -} // namespace tbb - -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#endif /* __TBB_exception_H */ diff --git a/tbb30_20100406oss/include/tbb/tbb_machine.h b/tbb30_20100406oss/include/tbb/tbb_machine.h deleted file mode 100644 index aa1306583..000000000 --- a/tbb30_20100406oss/include/tbb/tbb_machine.h +++ /dev/null @@ -1,642 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
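How the exception machinery above looks from the root thread, as a minimal sketch; MyData and the algorithm placeholder are illustrative. With exact propagation available (TBB_USE_CAPTURED_EXCEPTION==0) the original exception type is rethrown; otherwise a tbb::captured_exception carrying only name() and what() arrives:

    #include "tbb/tbb_exception.h"
    #include <cstdio>

    struct MyData { int code; };

    void worker_body() {
        MyData d = { 42 };
        // Thrown in a worker; the scheduler transfers it to the root thread.
        throw tbb::movable_exception<MyData>( d );
    }

    void root_thread() {
        try {
            // ... run a parallel algorithm whose tasks call worker_body() ...
        } catch( tbb::movable_exception<MyData>& e ) {
            std::printf( "worker failed, code=%d\n", e.data().code );
        } catch( tbb::captured_exception& e ) {
            // Approximate copy: only the RTTI name and what() text survive.
            std::printf( "%s: %s\n", e.name(), e.what() );
        }
    }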
-*/ - -#ifndef __TBB_machine_H -#define __TBB_machine_H - -#include "tbb_stddef.h" - -#if _WIN32||_WIN64 - -#ifdef _MANAGED -#pragma managed(push, off) -#endif - -#if __MINGW32__ -#include "machine/linux_ia32.h" -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#elif defined(_M_IX86) -#include "machine/windows_ia32.h" -#elif defined(_M_AMD64) -#include "machine/windows_intel64.h" -#elif _XBOX -#include "machine/xbox360_ppc.h" -#else -#error Unsupported platform -#endif - -#ifdef _MANAGED -#pragma managed(pop) -#endif - -#elif __linux__ || __FreeBSD__ - -#if __i386__ -#include "machine/linux_ia32.h" -#elif __x86_64__ -#include "machine/linux_intel64.h" -#elif __ia64__ -#include "machine/linux_ia64.h" -#endif - -#elif __APPLE__ - -#if __i386__ -#include "machine/linux_ia32.h" -#elif __x86_64__ -#include "machine/linux_intel64.h" -#elif __POWERPC__ -#include "machine/mac_ppc.h" -#endif - -#elif _AIX - -#include "machine/ibm_aix51.h" - -#elif __sun || __SUNPRO_CC - -#define __asm__ asm -#define __volatile__ volatile -#if __i386 || __i386__ -#include "machine/linux_ia32.h" -#elif __x86_64__ -#include "machine/linux_intel64.h" -#elif __sparc -#include "machine/sunos_sparc.h" -#endif - -#endif - -#if !defined(__TBB_CompareAndSwap4) \ - || !defined(__TBB_CompareAndSwap8) \ - || !defined(__TBB_Yield) \ - || !defined(__TBB_release_consistency_helper) -#error Minimal requirements for tbb_machine.h not satisfied -#endif - -#ifndef __TBB_load_with_acquire - //! Load with acquire semantics; i.e., no following memory operation can move above the load. - template - inline T __TBB_load_with_acquire(const volatile T& location) { - T temp = location; - __TBB_release_consistency_helper(); - return temp; - } -#endif - -#ifndef __TBB_store_with_release - //! Store with release semantics; i.e., no prior memory operation can move below the store. - template - inline void __TBB_store_with_release(volatile T& location, V value) { - __TBB_release_consistency_helper(); - location = T(value); - } -#endif - -#ifndef __TBB_Pause - inline void __TBB_Pause(int32_t) { - __TBB_Yield(); - } -#endif - -namespace tbb { -namespace internal { - -//! Class that implements exponential backoff. -/** See implementation of spin_wait_while_eq for an example. */ -class atomic_backoff { - //! Time delay, in units of "pause" instructions. - /** Should be equal to approximately the number of "pause" instructions - that take the same time as an context switch. */ - static const int32_t LOOPS_BEFORE_YIELD = 16; - int32_t count; -public: - atomic_backoff() : count(1) {} - - //! Pause for a while. - void pause() { - if( count<=LOOPS_BEFORE_YIELD ) { - __TBB_Pause(count); - // Pause twice as long the next time. - count*=2; - } else { - // Pause is so long that we might as well yield CPU to scheduler. - __TBB_Yield(); - } - } - - // pause for a few times and then return false immediately. - bool bounded_pause() { - if( count<=LOOPS_BEFORE_YIELD ) { - __TBB_Pause(count); - // Pause twice as long the next time. - count*=2; - return true; - } else { - return false; - } - } - - void reset() { - count = 1; - } -}; - -//! Spin WHILE the value of the variable is equal to a given value -/** T and U should be comparable types. */ -template -void spin_wait_while_eq( const volatile T& location, U value ) { - atomic_backoff backoff; - while( location==value ) backoff.pause(); -} - -//! Spin UNTIL the value of the variable is equal to a given value -/** T and U should be comparable types. 
*/ -template -void spin_wait_until_eq( const volatile T& location, const U value ) { - atomic_backoff backoff; - while( location!=value ) backoff.pause(); -} - -// T should be unsigned, otherwise sign propagation will break correctness of bit manipulations. -// S should be either 1 or 2, for the mask calculation to work correctly. -// Together, these rules limit applicability of Masked CAS to unsigned char and unsigned short. -template -inline T __TBB_MaskedCompareAndSwap (volatile T *ptr, T value, T comparand ) { - volatile uint32_t * base = (uint32_t*)( (uintptr_t)ptr & ~(uintptr_t)0x3 ); -#if __TBB_BIG_ENDIAN - const uint8_t bitoffset = uint8_t( 8*( 4-S - (uintptr_t(ptr) & 0x3) ) ); -#else - const uint8_t bitoffset = uint8_t( 8*((uintptr_t)ptr & 0x3) ); -#endif - const uint32_t mask = ( (1<<(S*8)) - 1 )<> bitoffset); -} - -template -inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand ) { - return __TBB_CompareAndSwapW((T *)ptr,value,comparand); -} - -template<> -inline uint8_t __TBB_CompareAndSwapGeneric <1,uint8_t> (volatile void *ptr, uint8_t value, uint8_t comparand ) { -#ifdef __TBB_CompareAndSwap1 - return __TBB_CompareAndSwap1(ptr,value,comparand); -#else - return __TBB_MaskedCompareAndSwap<1,uint8_t>((volatile uint8_t *)ptr,value,comparand); -#endif -} - -template<> -inline uint16_t __TBB_CompareAndSwapGeneric <2,uint16_t> (volatile void *ptr, uint16_t value, uint16_t comparand ) { -#ifdef __TBB_CompareAndSwap2 - return __TBB_CompareAndSwap2(ptr,value,comparand); -#else - return __TBB_MaskedCompareAndSwap<2,uint16_t>((volatile uint16_t *)ptr,value,comparand); -#endif -} - -template<> -inline uint32_t __TBB_CompareAndSwapGeneric <4,uint32_t> (volatile void *ptr, uint32_t value, uint32_t comparand ) { - return __TBB_CompareAndSwap4(ptr,value,comparand); -} - -template<> -inline uint64_t __TBB_CompareAndSwapGeneric <8,uint64_t> (volatile void *ptr, uint64_t value, uint64_t comparand ) { - return __TBB_CompareAndSwap8(ptr,value,comparand); -} - -template -inline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) { - atomic_backoff b; - T result; - for(;;) { - result = *reinterpret_cast(ptr); - // __TBB_CompareAndSwapGeneric presumed to have full fence. - if( __TBB_CompareAndSwapGeneric ( ptr, result+addend, result )==result ) - break; - b.pause(); - } - return result; -} - -template -inline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) { - atomic_backoff b; - T result; - for(;;) { - result = *reinterpret_cast(ptr); - // __TBB_CompareAndSwapGeneric presumed to have full fence. - if( __TBB_CompareAndSwapGeneric ( ptr, value, result )==result ) - break; - b.pause(); - } - return result; -} - -// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as -// strict as type T. Type type should have a trivial default constructor and destructor, so that -// arrays of that type can be declared without initializers. -// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands -// to a type bigger than T. -// The default definition here works on machines where integers are naturally aligned and the -// strictest alignment is 16. 
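The generic fetch-and-add and fetch-and-store fallbacks earlier in this hunk are the canonical compare-and-swap retry loop. The same shape over C++11 atomics, substituted here for the __TBB_* machine primitives so the sketch stays self-contained:

    #include <atomic>

    // Read the location, try to CAS in the updated value, retry on contention.
    // TBB's version additionally pauses via atomic_backoff between failed attempts.
    int fetch_and_add( std::atomic<int>& loc, int addend ) {
        int observed = loc.load();
        while( !loc.compare_exchange_weak( observed, observed + addend ) ) {
            // compare_exchange_weak reloads `observed` on failure.
        }
        return observed;
    }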
-#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict - -#if __GNUC__ || __SUNPRO_CC -struct __TBB_machine_type_with_strictest_alignment { - int member[4]; -} __attribute__((aligned(16))); -#elif _MSC_VER -__declspec(align(16)) struct __TBB_machine_type_with_strictest_alignment { - int member[4]; -}; -#else -#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T) or __TBB_machine_type_with_strictest_alignment -#endif - -template struct type_with_alignment {__TBB_machine_type_with_strictest_alignment member;}; -template<> struct type_with_alignment<1> { char member; }; -template<> struct type_with_alignment<2> { uint16_t member; }; -template<> struct type_with_alignment<4> { uint32_t member; }; -template<> struct type_with_alignment<8> { uint64_t member; }; - -#if _MSC_VER||defined(__GNUC__)&&__GNUC__==3 && __GNUC_MINOR__<=2 -//! Work around for bug in GNU 3.2 and MSVC compilers. -/** Bug is that compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated. - The work-around forces instantiation by forcing computation of sizeof(T) before __alignof(T). */ -template -struct work_around_alignment_bug { -#if _MSC_VER - static const size_t alignment = __alignof(T); -#else - static const size_t alignment = __alignof__(T); -#endif -}; -#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment::alignment> -#elif __GNUC__ || __SUNPRO_CC -#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__alignof__(T)> -#else -#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) __TBB_machine_type_with_strictest_alignment -#endif -#endif /* ____TBB_TypeWithAlignmentAtLeastAsStrict */ - -// Template class here is to avoid instantiation of the static data for modules that don't use it -template -struct reverse { - static const T byte_table[256]; -}; -// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed -// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost. 
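A minimal sketch of the on-the-fly computation that the byte_table comment just above mentions, using the standard shift-and-mask trick (not part of the removed header):

    // Reverse one byte in three swap stages: bits, bit pairs, then nibbles.
    inline unsigned char reverse_byte( unsigned char b ) {
        b = (unsigned char)( ((b & 0x55) << 1) | ((b & 0xAA) >> 1) );
        b = (unsigned char)( ((b & 0x33) << 2) | ((b & 0xCC) >> 2) );
        b = (unsigned char)( ((b & 0x0F) << 4) | ((b & 0xF0) >> 4) );
        return b;  // e.g. reverse_byte(0x01) == 0x80, matching byte_table[0x01]
    }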
-// Template class here is to avoid instantiation of the static data for modules that don't use it -template<typename T> -struct reverse { - static const T byte_table[256]; -}; -// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed -// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost. -template<typename T> -const T reverse<T>::byte_table[256] = { - 0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0, - 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, - 0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4, - 0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC, - 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, - 0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA, - 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6, - 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, - 0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1, - 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9, - 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, - 0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD, - 0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3, - 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, - 0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7, - 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF -}; - -} // namespace internal -} // namespace tbb - -#ifndef __TBB_CompareAndSwap1 -#define __TBB_CompareAndSwap1 tbb::internal::__TBB_CompareAndSwapGeneric<1,uint8_t> -#endif - -#ifndef __TBB_CompareAndSwap2 -#define __TBB_CompareAndSwap2 tbb::internal::__TBB_CompareAndSwapGeneric<2,uint16_t> -#endif - -#ifndef __TBB_CompareAndSwapW -#define __TBB_CompareAndSwapW tbb::internal::__TBB_CompareAndSwapGeneric<sizeof(ptrdiff_t),ptrdiff_t> -#endif - -#ifndef __TBB_FetchAndAdd1 -#define __TBB_FetchAndAdd1 tbb::internal::__TBB_FetchAndAddGeneric<1,uint8_t> -#endif - -#ifndef __TBB_FetchAndAdd2 -#define __TBB_FetchAndAdd2 tbb::internal::__TBB_FetchAndAddGeneric<2,uint16_t> -#endif - -#ifndef __TBB_FetchAndAdd4 -#define __TBB_FetchAndAdd4 tbb::internal::__TBB_FetchAndAddGeneric<4,uint32_t> -#endif - -#ifndef __TBB_FetchAndAdd8 -#define __TBB_FetchAndAdd8 tbb::internal::__TBB_FetchAndAddGeneric<8,uint64_t> -#endif - -#ifndef __TBB_FetchAndAddW -#define __TBB_FetchAndAddW tbb::internal::__TBB_FetchAndAddGeneric<sizeof(ptrdiff_t),ptrdiff_t> -#endif - -#ifndef __TBB_FetchAndStore1 -#define __TBB_FetchAndStore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,uint8_t> -#endif - -#ifndef __TBB_FetchAndStore2 -#define __TBB_FetchAndStore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,uint16_t> -#endif - -#ifndef __TBB_FetchAndStore4 -#define __TBB_FetchAndStore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,uint32_t> -#endif - -#ifndef __TBB_FetchAndStore8 -#define __TBB_FetchAndStore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,uint64_t> -#endif - -#ifndef __TBB_FetchAndStoreW -#define __TBB_FetchAndStoreW tbb::internal::__TBB_FetchAndStoreGeneric<sizeof(ptrdiff_t),ptrdiff_t> -#endif - -#if __TBB_DECL_FENCED_ATOMICS - -#ifndef __TBB_CompareAndSwap1__TBB_full_fence -#define __TBB_CompareAndSwap1__TBB_full_fence __TBB_CompareAndSwap1 -#endif -#ifndef __TBB_CompareAndSwap1acquire -#define __TBB_CompareAndSwap1acquire __TBB_CompareAndSwap1__TBB_full_fence -#endif -#ifndef __TBB_CompareAndSwap1release -#define
__TBB_CompareAndSwap1release __TBB_CompareAndSwap1__TBB_full_fence -#endif - -#ifndef __TBB_CompareAndSwap2__TBB_full_fence -#define __TBB_CompareAndSwap2__TBB_full_fence __TBB_CompareAndSwap2 -#endif -#ifndef __TBB_CompareAndSwap2acquire -#define __TBB_CompareAndSwap2acquire __TBB_CompareAndSwap2__TBB_full_fence -#endif -#ifndef __TBB_CompareAndSwap2release -#define __TBB_CompareAndSwap2release __TBB_CompareAndSwap2__TBB_full_fence -#endif - -#ifndef __TBB_CompareAndSwap4__TBB_full_fence -#define __TBB_CompareAndSwap4__TBB_full_fence __TBB_CompareAndSwap4 -#endif -#ifndef __TBB_CompareAndSwap4acquire -#define __TBB_CompareAndSwap4acquire __TBB_CompareAndSwap4__TBB_full_fence -#endif -#ifndef __TBB_CompareAndSwap4release -#define __TBB_CompareAndSwap4release __TBB_CompareAndSwap4__TBB_full_fence -#endif - -#ifndef __TBB_CompareAndSwap8__TBB_full_fence -#define __TBB_CompareAndSwap8__TBB_full_fence __TBB_CompareAndSwap8 -#endif -#ifndef __TBB_CompareAndSwap8acquire -#define __TBB_CompareAndSwap8acquire __TBB_CompareAndSwap8__TBB_full_fence -#endif -#ifndef __TBB_CompareAndSwap8release -#define __TBB_CompareAndSwap8release __TBB_CompareAndSwap8__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndAdd1__TBB_full_fence -#define __TBB_FetchAndAdd1__TBB_full_fence __TBB_FetchAndAdd1 -#endif -#ifndef __TBB_FetchAndAdd1acquire -#define __TBB_FetchAndAdd1acquire __TBB_FetchAndAdd1__TBB_full_fence -#endif -#ifndef __TBB_FetchAndAdd1release -#define __TBB_FetchAndAdd1release __TBB_FetchAndAdd1__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndAdd2__TBB_full_fence -#define __TBB_FetchAndAdd2__TBB_full_fence __TBB_FetchAndAdd2 -#endif -#ifndef __TBB_FetchAndAdd2acquire -#define __TBB_FetchAndAdd2acquire __TBB_FetchAndAdd2__TBB_full_fence -#endif -#ifndef __TBB_FetchAndAdd2release -#define __TBB_FetchAndAdd2release __TBB_FetchAndAdd2__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndAdd4__TBB_full_fence -#define __TBB_FetchAndAdd4__TBB_full_fence __TBB_FetchAndAdd4 -#endif -#ifndef __TBB_FetchAndAdd4acquire -#define __TBB_FetchAndAdd4acquire __TBB_FetchAndAdd4__TBB_full_fence -#endif -#ifndef __TBB_FetchAndAdd4release -#define __TBB_FetchAndAdd4release __TBB_FetchAndAdd4__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndAdd8__TBB_full_fence -#define __TBB_FetchAndAdd8__TBB_full_fence __TBB_FetchAndAdd8 -#endif -#ifndef __TBB_FetchAndAdd8acquire -#define __TBB_FetchAndAdd8acquire __TBB_FetchAndAdd8__TBB_full_fence -#endif -#ifndef __TBB_FetchAndAdd8release -#define __TBB_FetchAndAdd8release __TBB_FetchAndAdd8__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndStore1__TBB_full_fence -#define __TBB_FetchAndStore1__TBB_full_fence __TBB_FetchAndStore1 -#endif -#ifndef __TBB_FetchAndStore1acquire -#define __TBB_FetchAndStore1acquire __TBB_FetchAndStore1__TBB_full_fence -#endif -#ifndef __TBB_FetchAndStore1release -#define __TBB_FetchAndStore1release __TBB_FetchAndStore1__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndStore2__TBB_full_fence -#define __TBB_FetchAndStore2__TBB_full_fence __TBB_FetchAndStore2 -#endif -#ifndef __TBB_FetchAndStore2acquire -#define __TBB_FetchAndStore2acquire __TBB_FetchAndStore2__TBB_full_fence -#endif -#ifndef __TBB_FetchAndStore2release -#define __TBB_FetchAndStore2release __TBB_FetchAndStore2__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndStore4__TBB_full_fence -#define __TBB_FetchAndStore4__TBB_full_fence __TBB_FetchAndStore4 -#endif -#ifndef __TBB_FetchAndStore4acquire -#define __TBB_FetchAndStore4acquire __TBB_FetchAndStore4__TBB_full_fence -#endif -#ifndef 
__TBB_FetchAndStore4release -#define __TBB_FetchAndStore4release __TBB_FetchAndStore4__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndStore8__TBB_full_fence -#define __TBB_FetchAndStore8__TBB_full_fence __TBB_FetchAndStore8 -#endif -#ifndef __TBB_FetchAndStore8acquire -#define __TBB_FetchAndStore8acquire __TBB_FetchAndStore8__TBB_full_fence -#endif -#ifndef __TBB_FetchAndStore8release -#define __TBB_FetchAndStore8release __TBB_FetchAndStore8__TBB_full_fence -#endif - -#endif // __TBB_DECL_FENCED_ATOMICS - -// Special atomic functions -#ifndef __TBB_FetchAndAddWrelease -#define __TBB_FetchAndAddWrelease __TBB_FetchAndAddW -#endif - -#ifndef __TBB_FetchAndIncrementWacquire -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) -#endif - -#ifndef __TBB_FetchAndDecrementWrelease -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1)) -#endif - -#if __TBB_WORDSIZE==4 -// On 32-bit platforms, "atomic.h" requires definition of __TBB_Store8 and __TBB_Load8 -#ifndef __TBB_Store8 -inline void __TBB_Store8 (volatile void *ptr, int64_t value) { - tbb::internal::atomic_backoff b; - for(;;) { - int64_t result = *(int64_t *)ptr; - if( __TBB_CompareAndSwap8(ptr,value,result)==result ) break; - b.pause(); - } -} -#endif - -#ifndef __TBB_Load8 -inline int64_t __TBB_Load8 (const volatile void *ptr) { - int64_t result = *(int64_t *)ptr; - result = __TBB_CompareAndSwap8((volatile void *)ptr,result,result); - return result; -} -#endif -#endif /* __TBB_WORDSIZE==4 */ - -#ifndef __TBB_Log2 -inline intptr_t __TBB_Log2( uintptr_t x ) { - if( x==0 ) return -1; - intptr_t result = 0; - uintptr_t tmp; -#if __TBB_WORDSIZE>=8 - if( (tmp = x>>32) ) { x=tmp; result += 32; } -#endif - if( (tmp = x>>16) ) { x=tmp; result += 16; } - if( (tmp = x>>8) ) { x=tmp; result += 8; } - if( (tmp = x>>4) ) { x=tmp; result += 4; } - if( (tmp = x>>2) ) { x=tmp; result += 2; } - return (x&2)? 
result+1: result; -} -#endif - -#ifndef __TBB_AtomicOR -inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) { - tbb::internal::atomic_backoff b; - for(;;) { - uintptr_t tmp = *(volatile uintptr_t *)operand; - uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp); - if( result==tmp ) break; - b.pause(); - } -} -#endif - -#ifndef __TBB_AtomicAND -inline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) { - tbb::internal::atomic_backoff b; - for(;;) { - uintptr_t tmp = *(volatile uintptr_t *)operand; - uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp); - if( result==tmp ) break; - b.pause(); - } -} -#endif - -#ifndef __TBB_TryLockByte -inline bool __TBB_TryLockByte( unsigned char &flag ) { - return __TBB_CompareAndSwap1(&flag,1,0)==0; -} -#endif - -#ifndef __TBB_LockByte -inline uintptr_t __TBB_LockByte( unsigned char& flag ) { - if ( !__TBB_TryLockByte(flag) ) { - tbb::internal::atomic_backoff b; - do { - b.pause(); - } while ( !__TBB_TryLockByte(flag) ); - } - return 0; -} -#endif - -#ifndef __TBB_ReverseByte -inline unsigned char __TBB_ReverseByte(unsigned char src) { - return tbb::internal::reverse<unsigned char>::byte_table[src]; -} -#endif - -template<typename T> -T __TBB_ReverseBits(T src) -{ - T dst; - unsigned char *original = (unsigned char *) &src; - unsigned char *reversed = (unsigned char *) &dst; - - for( int i = sizeof(T)-1; i >= 0; i-- ) - reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] ); - - return dst; -} - -#endif /* __TBB_machine_H */
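__TBB_TryLockByte and __TBB_LockByte above form a classic test-and-set spin lock over a single byte. A minimal sketch of the same shape in portable C++11 (illustrative only; the names are invented, and TBB's version additionally applies exponential backoff via atomic_backoff):

    #include <atomic>

    std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;

    bool try_lock_byte() {
        // true if we transitioned the flag from clear (unlocked) to set (locked)
        return !lock_flag.test_and_set( std::memory_order_acquire );
    }

    void lock_byte() {
        while( !try_lock_byte() )
            ; // spin; insert a pause/backoff here under contention
    }

    void unlock_byte() {
        lock_flag.clear( std::memory_order_release );
    }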
diff --git a/tbb30_20100406oss/include/tbb/tbb_profiling.h b/tbb30_20100406oss/include/tbb/tbb_profiling.h deleted file mode 100644 index c3bbb5125..000000000 --- a/tbb30_20100406oss/include/tbb/tbb_profiling.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_profiling_H -#define __TBB_profiling_H - -// Check if the tools support is enabled -#if (_WIN32||_WIN64||__linux__) && !__MINGW32__ && TBB_USE_THREADING_TOOLS - -#if _WIN32||_WIN64 -#include <stdlib.h> /* mbstowcs_s */ -#endif -#include "tbb_stddef.h" - -namespace tbb { - namespace internal { -#if _WIN32||_WIN64 - void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name ); - inline size_t multibyte_to_widechar( wchar_t* wcs, const char* mbs, size_t bufsize) { -#if _MSC_VER>=1400 - size_t len; - mbstowcs_s( &len, wcs, bufsize, mbs, _TRUNCATE ); - return len; // mbstowcs_s counts null terminator -#else - size_t len = mbstowcs( wcs, mbs, bufsize ); - if(wcs && len!=size_t(-1) ) - wcs[len<bufsize?len:bufsize-1] = wchar_t('\0'); - return len+1; // mbstowcs does not count null terminator -#endif - } diff --git a/tbb30_20100406oss/include/tbb/tbb_stddef.h b/tbb30_20100406oss/include/tbb/tbb_stddef.h deleted file mode 100644 --- a/tbb30_20100406oss/include/tbb/tbb_stddef.h +++ /dev/null -/** - * \mainpage Main Page - * - * Click the tabs above for information about the - * - Modules (groups of functionality) implemented by the library - * - Classes provided by the library - * - Files constituting the library. - * . - * Please note that a significant part of TBB functionality is implemented in the form of - * template functions, descriptions of which are not accessible on the Classes - * tab. Use Modules or Namespace/Namespace Members - * tabs to find them. - * - * Additional pieces of information can be found here - * - \subpage concepts - * . - */ - -/** \page concepts TBB concepts - - A concept is a set of requirements to a type, which are necessary and sufficient - for the type to model a particular behavior or a set of behaviors. Some concepts - are specific to a particular algorithm (e.g. algorithm body), while other ones - are common to several algorithms (e.g. range concept). - - All TBB algorithms make use of different classes implementing various concepts. - Implementation classes are supplied by the user as type arguments of template - parameters and/or as objects passed as function call arguments. The library - provides predefined implementations of some concepts (e.g. several kinds of - \ref range_req "ranges"), while other ones must always be implemented by the user. - - TBB defines a set of minimal requirements each concept must conform to.
Here is - the list of different concepts hyperlinked to the corresponding requirements specifications: - - \subpage range_req - - \subpage parallel_do_body_req - - \subpage parallel_for_body_req - - \subpage parallel_reduce_body_req - - \subpage parallel_scan_body_req - - \subpage parallel_sort_iter_req -**/ - -// Define preprocessor symbols used to determine architecture -#if _WIN32||_WIN64 -# if defined(_M_AMD64) -# define __TBB_x86_64 1 -# elif defined(_M_IA64) -# define __TBB_ipf 1 -# elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support -# define __TBB_x86_32 1 -# endif -#else /* Assume generic Unix */ -# if !__linux__ && !__APPLE__ -# define __TBB_generic_os 1 -# endif -# if __x86_64__ -# define __TBB_x86_64 1 -# elif __ia64__ -# define __TBB_ipf 1 -# elif __i386__||__i386 // __i386 is for Sun OS -# define __TBB_x86_32 1 -# else -# define __TBB_generic_arch 1 -# endif -#endif - -#if _MSC_VER -// define the parts of stdint.h that are needed, but put them inside tbb::internal -namespace tbb { -namespace internal { - typedef __int8 int8_t; - typedef __int16 int16_t; - typedef __int32 int32_t; - typedef __int64 int64_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - typedef unsigned __int64 uint64_t; -} // namespace internal -} // namespace tbb -#else -#include <stdint.h> -#endif /* _MSC_VER */ - -#if _MSC_VER >=1400 -#define __TBB_EXPORTED_FUNC __cdecl -#define __TBB_EXPORTED_METHOD __thiscall -#else -#define __TBB_EXPORTED_FUNC -#define __TBB_EXPORTED_METHOD -#endif - -#include <cstddef> /* Need size_t and ptrdiff_t (the latter on Windows only) from here. */ - -#if _MSC_VER -#define __TBB_tbb_windef_H -#include "_tbb_windef.h" -#undef __TBB_tbb_windef_H -#endif - -#include "tbb_config.h" - -//! The namespace tbb contains all components of the library. -namespace tbb { - //! Type for an assertion handler - typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment ); - -#if TBB_USE_ASSERT - -//! Assert that x is true. -/** If x is false, print assertion failure message. - If the comment argument is not NULL, it is printed as part of the failure message. - The comment argument has no other effect. */ -#define __TBB_ASSERT(predicate,message) ((predicate)?((void)0):tbb::assertion_failure(__FILE__,__LINE__,#predicate,message)) -#define __TBB_ASSERT_EX __TBB_ASSERT - - //! Set assertion handler and return previous value of it. - assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler ); - - //! Process an assertion failure. - /** Normally called from __TBB_ASSERT macro. - If assertion handler is null, print message for assertion failure and abort. - Otherwise call the assertion handler. */ - void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment ); - -#else - -//! No-op version of __TBB_ASSERT. -#define __TBB_ASSERT(predicate,comment) ((void)0) -//! "Extended" version is useful to suppress warnings if a variable is only used with an assert -#define __TBB_ASSERT_EX(predicate,comment) ((void)(1 && (predicate))) - -#endif /* TBB_USE_ASSERT */
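A hypothetical usage sketch of the assertion macro defined above (the function and message are invented for illustration). With TBB_USE_ASSERT unset, the check compiles down to ((void)0) and costs nothing:

    #define TBB_USE_ASSERT 1 // must precede the include to enable checking
    #include "tbb/tbb_stddef.h"

    int scale( int value, int divisor ) {
        __TBB_ASSERT( divisor != 0, "divisor must be non-zero" );
        return value / divisor;
    }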
- */ -extern "C" int __TBB_EXPORTED_FUNC TBB_runtime_interface_version(); - -//! Dummy type that distinguishes splitting constructor from copy constructor. -/** - * See description of parallel_for and parallel_reduce for example usages. - * @ingroup algorithms - */ -class split { -}; - -/** - * @cond INTERNAL - * @brief Identifiers declared inside namespace internal should never be used directly by client code. - */ -namespace internal { - -using std::size_t; - -//! Compile-time constant that is upper bound on cache line/sector size. -/** It should be used only in situations where having a compile-time upper - bound is more useful than a run-time exact answer. - @ingroup memory_allocation */ -const size_t NFS_MaxLineSize = 128; - -template -struct padded_base : T { - char pad[NFS_MaxLineSize - sizeof(T) % NFS_MaxLineSize]; -}; -template struct padded_base : T {}; - -//! Pads type T to fill out to a multiple of cache line size. -template -struct padded : padded_base {}; - -//! Extended variant of the standard offsetof macro -/** The standard offsetof macro is not sufficient for TBB as it can be used for - POD-types only. The constant 0x1000 (not NULL) is necessary to appease GCC. **/ -#define __TBB_offsetof(class_name, member_name) \ - ((ptrdiff_t)&(reinterpret_cast(0x1000)->member_name) - 0x1000) - -//! Returns address of the object containing a member with the given name and address -#define __TBB_get_object_ref(class_name, member_name, member_addr) \ - (*reinterpret_cast((char*)member_addr - __TBB_offsetof(class_name, member_name))) - -//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info -void __TBB_EXPORTED_FUNC handle_perror( int error_code, const char* aux_info ); - -#if TBB_USE_EXCEPTIONS - #define __TBB_TRY try - #define __TBB_CATCH(e) catch(e) - #define __TBB_THROW(e) throw e - #define __TBB_RETHROW() throw -#else /* !TBB_USE_EXCEPTIONS */ - inline bool __TBB_false() { return false; } - #define __TBB_TRY - #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() ) - #define __TBB_THROW(e) ((void)0) - #define __TBB_RETHROW() ((void)0) -#endif /* !TBB_USE_EXCEPTIONS */ - -//! Report a runtime warning. -void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... ); - -#if TBB_USE_ASSERT -//! Set p to invalid pointer value. -template -inline void poison_pointer( T* & p ) { - p = reinterpret_cast(-1); -} -#else -template -inline void poison_pointer( T* ) {/*do nothing*/} -#endif /* TBB_USE_ASSERT */ - -//! Base class for types that should not be assigned. -class no_assign { - // Deny assignment - void operator=( const no_assign& ); -public: -#if __GNUC__ - //! Explicitly define default construction, because otherwise gcc issues gratuitous warning. - no_assign() {} -#endif /* __GNUC__ */ -}; - -//! Base class for types that should not be copied or assigned. -class no_copy: no_assign { - //! Deny copy construction - no_copy( const no_copy& ); -public: - //! Allow default construction - no_copy() {} -}; - -//! Class for determining type of std::allocator::value_type. -template -struct allocator_type { - typedef T value_type; -}; - -#if _MSC_VER -//! Microsoft std::allocator has non-standard extension that strips const from a type. -template -struct allocator_type { - typedef T value_type; -}; -#endif - -// Struct to be used as a version tag for inline functions. -/** Version tag can be necessary to prevent loader on Linux from using the wrong - symbol in debug builds (when inline functions are compiled as out-of-line). 
- -// Struct to be used as a version tag for inline functions. -/** Version tag can be necessary to prevent loader on Linux from using the wrong - symbol in debug builds (when inline functions are compiled as out-of-line). -**/ -struct version_tag_v3 {}; - -typedef version_tag_v3 version_tag; - -} // internal -//! @endcond - -} // tbb - -#endif /* RC_INVOKED */ -#endif /* __TBB_tbb_stddef_H */ diff --git a/tbb30_20100406oss/include/tbb/tbb_thread.h b/tbb30_20100406oss/include/tbb/tbb_thread.h deleted file mode 100644 index 0f878ff1f..000000000 --- a/tbb30_20100406oss/include/tbb/tbb_thread.h +++ /dev/null @@ -1,306 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_thread_H -#define __TBB_tbb_thread_H - -#if _WIN32||_WIN64 -#include <windows.h> -#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI -#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* ) -#else -#define __TBB_NATIVE_THREAD_ROUTINE void* -#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* ) -#include <pthread.h> -#endif // _WIN32||_WIN64 - -#include "tbb_stddef.h" -#include "tick_count.h" -#include <exception> // Need std::terminate from here. - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <iosfwd> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - class tbb_thread_v3; - -} // namespace internal - -void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ); - -namespace internal { - - //! Allocate a closure - void* __TBB_EXPORTED_FUNC allocate_closure_v3( size_t size ); - //! Free a closure allocated by allocate_closure_v3 - void __TBB_EXPORTED_FUNC free_closure_v3( void* ); - - struct thread_closure_base { - void* operator new( size_t size ) {return allocate_closure_v3(size);} - void operator delete( void* ptr ) {free_closure_v3(ptr);} - }; - - template<class F> struct thread_closure_0: thread_closure_base { - F function; - - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_0 *self = static_cast<thread_closure_0*>(c); - __TBB_TRY { - self->function(); - } __TBB_CATCH( ... ) { - std::terminate(); - } - delete self; - return 0; - } - thread_closure_0( const F& f ) : function(f) {} - }; - //! Structure used to pass user function with 1 argument to thread.
- template<class F, class X> struct thread_closure_1: thread_closure_base { - F function; - X arg1; - //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_1 *self = static_cast<thread_closure_1*>(c); - __TBB_TRY { - self->function(self->arg1); - } __TBB_CATCH( ... ) { - std::terminate(); - } - delete self; - return 0; - } - thread_closure_1( const F& f, const X& x ) : function(f), arg1(x) {} - }; - template<class F, class X, class Y> struct thread_closure_2: thread_closure_base { - F function; - X arg1; - Y arg2; - //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_2 *self = static_cast<thread_closure_2*>(c); - __TBB_TRY { - self->function(self->arg1, self->arg2); - } __TBB_CATCH( ... ) { - std::terminate(); - } - delete self; - return 0; - } - thread_closure_2( const F& f, const X& x, const Y& y ) : function(f), arg1(x), arg2(y) {} - }; - - //! Versioned thread class. - class tbb_thread_v3 { - tbb_thread_v3(const tbb_thread_v3&); // = delete; // Deny access - public: -#if _WIN32||_WIN64 - typedef HANDLE native_handle_type; -#else - typedef pthread_t native_handle_type; -#endif // _WIN32||_WIN64 - - class id; - //! Constructs a thread object that does not represent a thread of execution. - tbb_thread_v3() : my_handle(0) -#if _WIN32||_WIN64 - , my_thread_id(0) -#endif // _WIN32||_WIN64 - {} - - //! Constructs an object and executes f() in a new thread - template <class F> explicit tbb_thread_v3(F f) { - typedef internal::thread_closure_0<F> closure_type; - internal_start(closure_type::start_routine, new closure_type(f)); - } - //! Constructs an object and executes f(x) in a new thread - template <class F, class X> tbb_thread_v3(F f, X x) { - typedef internal::thread_closure_1<F,X> closure_type; - internal_start(closure_type::start_routine, new closure_type(f,x)); - } - //! Constructs an object and executes f(x,y) in a new thread - template <class F, class X, class Y> tbb_thread_v3(F f, X x, Y y) { - typedef internal::thread_closure_2<F,X,Y> closure_type; - internal_start(closure_type::start_routine, new closure_type(f,x,y)); - } - - tbb_thread_v3& operator=(tbb_thread_v3& x) { - if (joinable()) detach(); - my_handle = x.my_handle; - x.my_handle = 0; -#if _WIN32||_WIN64 - my_thread_id = x.my_thread_id; - x.my_thread_id = 0; -#endif // _WIN32||_WIN64 - return *this; - } - void swap( tbb_thread_v3& t ) {tbb::swap( *this, t );} - bool joinable() const {return my_handle!=0; } - //! The completion of the thread represented by *this happens before join() returns. - void __TBB_EXPORTED_METHOD join(); - //! When detach() returns, *this no longer represents the possibly continuing thread of execution. - void __TBB_EXPORTED_METHOD detach(); - ~tbb_thread_v3() {if( joinable() ) detach();} - inline id get_id() const; - native_handle_type native_handle() { return my_handle; } - - //! The number of hardware thread contexts. - static unsigned __TBB_EXPORTED_FUNC hardware_concurrency(); - private: - native_handle_type my_handle; -#if _WIN32||_WIN64 - DWORD my_thread_id; -#endif // _WIN32||_WIN64 - - /** Runs start_routine(closure) on another thread and sets my_handle to the handle of the created thread.
*/ - void __TBB_EXPORTED_METHOD internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), - void* closure ); - friend void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ); - friend void tbb::swap( tbb_thread_v3& t1, tbb_thread_v3& t2 ); - }; - - class tbb_thread_v3::id { -#if _WIN32||_WIN64 - DWORD my_id; - id( DWORD id_ ) : my_id(id_) {} -#else - pthread_t my_id; - id( pthread_t id_ ) : my_id(id_) {} -#endif // _WIN32||_WIN64 - friend class tbb_thread_v3; - public: - id() : my_id(0) {} - - friend bool operator==( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator!=( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator<( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator<=( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator>( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator>=( tbb_thread_v3::id x, tbb_thread_v3::id y ); - - template <class charT, class traits> - friend std::basic_ostream<charT, traits>& - operator<< (std::basic_ostream<charT, traits> &out, - tbb_thread_v3::id id) - { - out << id.my_id; - return out; - } - friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); - }; // tbb_thread_v3::id - - tbb_thread_v3::id tbb_thread_v3::get_id() const { -#if _WIN32||_WIN64 - return id(my_thread_id); -#else - return id(my_handle); -#endif // _WIN32||_WIN64 - } - void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ); - tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); - void __TBB_EXPORTED_FUNC thread_yield_v3(); - void __TBB_EXPORTED_FUNC thread_sleep_v3(const tick_count::interval_t &i); - - inline bool operator==(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id == y.my_id; - } - inline bool operator!=(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id != y.my_id; - } - inline bool operator<(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id < y.my_id; - } - inline bool operator<=(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id <= y.my_id; - } - inline bool operator>(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id > y.my_id; - } - inline bool operator>=(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id >= y.my_id; - } - -} // namespace internal - -//! Users reference thread class by name tbb_thread -typedef internal::tbb_thread_v3 tbb_thread; - -using internal::operator==; -using internal::operator!=; -using internal::operator<; -using internal::operator>; -using internal::operator<=; -using internal::operator>=; - -inline void move( tbb_thread& t1, tbb_thread& t2 ) { - internal::move_v3(t1, t2); -} - -inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) { - tbb::tbb_thread::native_handle_type h = t1.my_handle; - t1.my_handle = t2.my_handle; - t2.my_handle = h; -#if _WIN32||_WIN64 - DWORD i = t1.my_thread_id; - t1.my_thread_id = t2.my_thread_id; - t2.my_thread_id = i; -#endif /* _WIN32||_WIN64 */ -}
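For context, a hypothetical program using this wrapper: the two-argument constructor packs the function and its argument into a thread_closure_1 and runs f(x) on the new thread.

    #include "tbb/tbb_thread.h"
    #include <cstdio>

    void greet( const char* who ) { std::printf( "hello from %s\n", who ); }

    int main() {
        tbb::tbb_thread t( greet, "a tbb_thread" ); // runs greet("a tbb_thread")
        t.join(); // wait for completion; an unjoined thread is detached by the destructor
        return 0;
    }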
- -namespace this_tbb_thread { - inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); } - //! Offers the operating system the opportunity to schedule another thread. - inline void yield() { internal::thread_yield_v3(); } - //! The current thread blocks at least until the time specified. - inline void sleep(const tick_count::interval_t &i) { - internal::thread_sleep_v3(i); - } -} // namespace this_tbb_thread - -} // namespace tbb - -#endif /* __TBB_tbb_thread_H */ diff --git a/tbb30_20100406oss/include/tbb/tbbmalloc_proxy.h b/tbb30_20100406oss/include/tbb/tbbmalloc_proxy.h deleted file mode 100644 index f15ca12e1..000000000 --- a/tbb30_20100406oss/include/tbb/tbbmalloc_proxy.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/* -Replacing the standard memory allocation routines in Microsoft* C/C++ RTL -(malloc/free, global new/delete, etc.) with the TBB memory allocator. - -Include the following header in a source file of any binary that is loaded during -application startup: - -#include "tbb/tbbmalloc_proxy.h" - -or add the following parameters to the linker options for the binary that is -loaded during application startup. It can be either an exe-file or a dll. - -For win32: -tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy" -For win64: -tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy" -*/ - -#ifndef __TBB_tbbmalloc_proxy_H -#define __TBB_tbbmalloc_proxy_H - -#if _MSC_VER - -#ifdef _DEBUG - #pragma comment(lib, "tbbmalloc_proxy_debug.lib") -#else - #pragma comment(lib, "tbbmalloc_proxy.lib") -#endif - -#if defined(_WIN64) - #pragma comment(linker, "/include:__TBB_malloc_proxy") -#else - #pragma comment(linker, "/include:___TBB_malloc_proxy") -#endif - -#else -/* Primarily to support MinGW */ - -extern "C" void __TBB_malloc_proxy(); -struct __TBB_malloc_proxy_caller { - __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); } -} volatile __TBB_malloc_proxy_helper_object; - -#endif // _MSC_VER - -#endif //__TBB_tbbmalloc_proxy_H diff --git a/tbb30_20100406oss/include/tbb/tick_count.h b/tbb30_20100406oss/include/tbb/tick_count.h deleted file mode 100644 index a3d25d5a4..000000000 --- a/tbb30_20100406oss/include/tbb/tick_count.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation.
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tick_count_H -#define __TBB_tick_count_H - -#include "tbb_stddef.h" - -#if _WIN32||_WIN64 -#include <windows.h> -#elif __linux__ -#include <ctime> -#else /* generic Unix */ -#include <sys/time.h> -#endif /* (choice of OS) */ - -namespace tbb { - -//! Absolute timestamp -/** @ingroup timing */ -class tick_count { -public: - //! Relative time interval. - class interval_t { - long long value; - explicit interval_t( long long value_ ) : value(value_) {} - public: - //! Construct a time interval representing zero time duration - interval_t() : value(0) {}; - - //! Construct a time interval representing sec seconds time duration - explicit interval_t( double sec ); - - //! Return the length of a time interval in seconds - double seconds() const; - - friend class tbb::tick_count; - - //! Extract the intervals from the tick_counts and subtract them. - friend interval_t operator-( const tick_count& t1, const tick_count& t0 ); - - //! Add two intervals. - friend interval_t operator+( const interval_t& i, const interval_t& j ) { - return interval_t(i.value+j.value); - } - - //! Subtract two intervals. - friend interval_t operator-( const interval_t& i, const interval_t& j ) { - return interval_t(i.value-j.value); - } - - //! Accumulation operator - interval_t& operator+=( const interval_t& i ) {value += i.value; return *this;} - - //! Subtraction operator - interval_t& operator-=( const interval_t& i ) {value -= i.value; return *this;} - }; - - //! Construct an absolute timestamp initialized to zero. - tick_count() : my_count(0) {}; - - //! Return current time. - static tick_count now(); - - //! 
Subtract two timestamps to get the time interval between them. - friend interval_t operator-( const tick_count& t1, const tick_count& t0 ); - -private: - long long my_count; -}; - -inline tick_count tick_count::now() { - tick_count result; -#if _WIN32||_WIN64 - LARGE_INTEGER qpcnt; - QueryPerformanceCounter(&qpcnt); - result.my_count = qpcnt.QuadPart; -#elif __linux__ - struct timespec ts; -#if TBB_USE_ASSERT - int status = -#endif /* TBB_USE_ASSERT */ - clock_gettime( CLOCK_REALTIME, &ts ); - __TBB_ASSERT( status==0, "CLOCK_REALTIME not supported" ); - result.my_count = static_cast<long long>(1000000000UL)*static_cast<long long>(ts.tv_sec) + static_cast<long long>(ts.tv_nsec); -#else /* generic Unix */ - struct timeval tv; -#if TBB_USE_ASSERT - int status = -#endif /* TBB_USE_ASSERT */ - gettimeofday(&tv, NULL); - __TBB_ASSERT( status==0, "gettimeofday failed" ); - result.my_count = static_cast<long long>(1000000)*static_cast<long long>(tv.tv_sec) + static_cast<long long>(tv.tv_usec); -#endif /*(choice of OS) */ - return result; -} - -inline tick_count::interval_t::interval_t( double sec ) -{ -#if _WIN32||_WIN64 - LARGE_INTEGER qpfreq; - QueryPerformanceFrequency(&qpfreq); - value = static_cast<long long>(sec*qpfreq.QuadPart); -#elif __linux__ - value = static_cast<long long>(sec*1E9); -#else /* generic Unix */ - value = static_cast<long long>(sec*1E6); -#endif /* (choice of OS) */ -} - -inline tick_count::interval_t operator-( const tick_count& t1, const tick_count& t0 ) { - return tick_count::interval_t( t1.my_count-t0.my_count ); -} - -inline double tick_count::interval_t::seconds() const { -#if _WIN32||_WIN64 - LARGE_INTEGER qpfreq; - QueryPerformanceFrequency(&qpfreq); - return value/(double)qpfreq.QuadPart; -#elif __linux__ - return value*1E-9; -#else /* generic Unix */ - return value*1E-6; -#endif /* (choice of OS) */ -} - -} // namespace tbb - -#endif /* __TBB_tick_count_H */
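A minimal usage sketch for the tick_count class removed here (the loop is a stand-in for real work; names are illustrative):

    #include "tbb/tick_count.h"
    #include <cstdio>

    int main() {
        tbb::tick_count t0 = tbb::tick_count::now();
        volatile double x = 0;
        for( long i = 0; i < 10000000; ++i ) x = x + 1; // stand-in workload
        tbb::tick_count t1 = tbb::tick_count::now();
        std::printf( "work took %g seconds\n", (t1 - t0).seconds() );
        return 0;
    }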
diff --git a/tbb30_20100406oss/lib/ia32/vc10/irml/irml.lib b/tbb30_20100406oss/lib/ia32/vc10/irml/irml.lib deleted file mode 100644 index f5e7a8f26..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/irml/irml.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/irml/irml_debug.lib b/tbb30_20100406oss/lib/ia32/vc10/irml/irml_debug.lib deleted file mode 100644 index 093dee374..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/irml/irml_debug.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/irml_c/irml.lib b/tbb30_20100406oss/lib/ia32/vc10/irml_c/irml.lib deleted file mode 100644 index d84350400..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/irml_c/irml.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/irml_c/irml_debug.lib b/tbb30_20100406oss/lib/ia32/vc10/irml_c/irml_debug.lib deleted file mode 100644 index 848f0b313..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/irml_c/irml_debug.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/tbb.lib b/tbb30_20100406oss/lib/ia32/vc10/tbb.lib deleted file mode 100644 index 07e13c0f3..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/tbb.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/tbb_debug.lib b/tbb30_20100406oss/lib/ia32/vc10/tbb_debug.lib deleted file mode 100644 index 82d7eb45a..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/tbb_debug.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc.lib b/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc.lib deleted file mode 100644 index 8ef126946..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_debug.lib b/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_debug.lib deleted file mode 100644 index 96ba0e4a6..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_debug.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_proxy.lib b/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_proxy.lib deleted file mode 100644 index 7a153f0d7..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_proxy.lib and /dev/null differ diff --git a/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_proxy_debug.lib b/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_proxy_debug.lib deleted file mode 100644 index 5487ddaa9..000000000 Binary files a/tbb30_20100406oss/lib/ia32/vc10/tbbmalloc_proxy_debug.lib and /dev/null differ