/***
 * Copyright (C) Microsoft. All rights reserved.
 * Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
 *
 * =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
 *
 * PPL interfaces
 *
 * For the latest on this and related APIs, please see: https://github.com/Microsoft/cpprestsdk
 *
 * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
 ****/

#pragma once

#ifndef _PPLXINTERFACE_H
#define _PPLXINTERFACE_H

#if (defined(_MSC_VER) && (_MSC_VER >= 1800)) && !CPPREST_FORCE_PPLX
#error This file must not be included for Visual Studio 12 or later
#endif

#if defined(_CRTBLD)
#elif defined(_WIN32)
#if (_MSC_VER >= 1700)
#define _USE_REAL_ATOMICS
#endif
#else // GCC compiler
#define _USE_REAL_ATOMICS
#endif

#include <memory>
#ifdef _USE_REAL_ATOMICS
#include <atomic>
#endif

#define _pplx_cdecl __cdecl

namespace pplx
{
/// <summary>
///     An elementary abstraction for a task, defined as <c>void (__cdecl * TaskProc_t)(void *)</c>. A <c>TaskProc</c>
///     is called to invoke the body of a task.
/// </summary>
/**/
typedef void(_pplx_cdecl* TaskProc_t)(void*);

/// <summary>
///     Scheduler Interface
/// </summary>
struct __declspec(novtable) scheduler_interface
{
    virtual void schedule(TaskProc_t, _In_ void*) = 0;
};

/// <summary>
///     Represents a pointer to a scheduler. This class exists to allow the
///     specification of a shared lifetime by using shared_ptr or just
///     a plain reference by using a raw pointer.
/// </summary>
struct scheduler_ptr
{
    /// <summary>
    /// Creates a scheduler pointer from a shared_ptr to a scheduler
    /// </summary>
    explicit scheduler_ptr(std::shared_ptr<scheduler_interface> scheduler) : m_sharedScheduler(std::move(scheduler))
    {
        m_scheduler = m_sharedScheduler.get();
    }

    /// <summary>
    /// Creates a scheduler pointer from a raw pointer to a scheduler
    /// </summary>
    explicit scheduler_ptr(_In_opt_ scheduler_interface* pScheduler) : m_scheduler(pScheduler) {}

    /// <summary>
    /// Behaves like a pointer
    /// </summary>
    scheduler_interface* operator->() const { return get(); }

    /// <summary>
    /// Returns the raw pointer to the scheduler
    /// </summary>
    scheduler_interface* get() const { return m_scheduler; }

    /// <summary>
    /// Tests whether the scheduler pointer is non-null
    /// </summary>
    operator bool() const { return get() != nullptr; }

private:
    std::shared_ptr<scheduler_interface> m_sharedScheduler;
    scheduler_interface* m_scheduler;
};
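
// Example (illustrative only, not part of the shipped interface): a minimal
// sketch of how scheduler_interface and scheduler_ptr fit together. The names
// `inline_scheduler`, `task_body`, and `sample` below are hypothetical; a real
// scheduler would typically queue the TaskProc_t onto a thread pool rather
// than running it inline.
//
//     struct inline_scheduler : pplx::scheduler_interface
//     {
//         // Runs the task body immediately on the calling thread.
//         void schedule(pplx::TaskProc_t proc, _In_ void* param) override { proc(param); }
//     };
//
//     static void task_body(void* /*context*/) { /* do work */ }
//
//     void sample()
//     {
//         auto impl = std::make_shared<inline_scheduler>();
//         pplx::scheduler_ptr owning(impl);            // shares the scheduler's lifetime
//         pplx::scheduler_ptr borrowed(impl.get());    // plain, non-owning reference
//         if (owning)
//         {
//             owning->schedule(&task_body, nullptr);
//         }
//     }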

/// <summary>
///     Describes the execution status of a <c>task_group</c> or <c>structured_task_group</c> object. A value of this
///     type is returned by numerous methods that wait on tasks scheduled to a task group to complete.
/// </summary>
/// <seealso cref="task_group Class"/>
/// <seealso cref="task_group::wait Method"/>
/// <seealso cref="task_group::run_and_wait Method"/>
/// <seealso cref="structured_task_group Class"/>
/// <seealso cref="structured_task_group::wait Method"/>
/// <seealso cref="structured_task_group::run_and_wait Method"/>
/**/
enum task_group_status
{
    /// <summary>
    ///     The tasks queued to the <c>task_group</c> object have not completed. Note that this value is not presently
    ///     returned by the Concurrency Runtime.
    /// </summary>
    /**/
    not_complete,

    /// <summary>
    ///     The tasks queued to the <c>task_group</c> or <c>structured_task_group</c> object completed successfully.
    /// </summary>
    /**/
    completed,

    /// <summary>
    ///     The <c>task_group</c> or <c>structured_task_group</c> object was canceled. One or more tasks may not have
    ///     executed.
    /// </summary>
    /**/
    canceled
};

namespace details
{
/// <summary>
/// Atomics
/// </summary>
#ifdef _USE_REAL_ATOMICS
typedef std::atomic<long> atomic_long;
typedef std::atomic<size_t> atomic_size_t;

template<typename _T>
_T atomic_compare_exchange(std::atomic<_T>& _Target, _T _Exchange, _T _Comparand)
{
    _T _Result = _Comparand;
    _Target.compare_exchange_strong(_Result, _Exchange);
    return _Result;
}

template<typename _T>
_T atomic_exchange(std::atomic<_T>& _Target, _T _Value)
{
    return _Target.exchange(_Value);
}

template<typename _T>
_T atomic_increment(std::atomic<_T>& _Target)
{
    return _Target.fetch_add(1) + 1;
}

template<typename _T>
_T atomic_decrement(std::atomic<_T>& _Target)
{
    return _Target.fetch_sub(1) - 1;
}

template<typename _T>
_T atomic_add(std::atomic<_T>& _Target, _T value)
{
    return _Target.fetch_add(value) + value;
}
#else // not _USE_REAL_ATOMICS
typedef long volatile atomic_long;
typedef size_t volatile atomic_size_t;

template<class T>
inline T atomic_exchange(T volatile& _Target, T _Value)
{
    return _InterlockedExchange(&_Target, _Value);
}

inline long atomic_increment(long volatile& _Target) { return _InterlockedIncrement(&_Target); }

inline long atomic_add(long volatile& _Target, long value) { return _InterlockedExchangeAdd(&_Target, value) + value; }

inline size_t atomic_increment(size_t volatile& _Target)
{
#if (defined(_M_IX86) || defined(_M_ARM))
    return static_cast<size_t>(_InterlockedIncrement(reinterpret_cast<long volatile*>(&_Target)));
#else
    return static_cast<size_t>(_InterlockedIncrement64(reinterpret_cast<__int64 volatile*>(&_Target)));
#endif
}

inline long atomic_decrement(long volatile& _Target) { return _InterlockedDecrement(&_Target); }

inline size_t atomic_decrement(size_t volatile& _Target)
{
#if (defined(_M_IX86) || defined(_M_ARM))
    return static_cast<size_t>(_InterlockedDecrement(reinterpret_cast<long volatile*>(&_Target)));
#else
    return static_cast<size_t>(_InterlockedDecrement64(reinterpret_cast<__int64 volatile*>(&_Target)));
#endif
}

inline long atomic_compare_exchange(long volatile& _Target, long _Exchange, long _Comparand)
{
    return _InterlockedCompareExchange(&_Target, _Exchange, _Comparand);
}

inline size_t atomic_compare_exchange(size_t volatile& _Target, size_t _Exchange, size_t _Comparand)
{
#if (defined(_M_IX86) || defined(_M_ARM))
    return static_cast<size_t>(_InterlockedCompareExchange(
        reinterpret_cast<long volatile*>(&_Target), static_cast<long>(_Exchange), static_cast<long>(_Comparand)));
#else
    return static_cast<size_t>(_InterlockedCompareExchange64(reinterpret_cast<__int64 volatile*>(&_Target),
                                                             static_cast<__int64>(_Exchange),
                                                             static_cast<__int64>(_Comparand)));
#endif
}
#endif // _USE_REAL_ATOMICS

} // namespace details

} // namespace pplx

#endif // _PPLXINTERFACE_H
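
// Example (illustrative only): how the details:: atomic helpers behave under
// either implementation above. Note that, unlike std::atomic's fetch_add,
// atomic_increment/atomic_decrement/atomic_add return the *new* value, while
// atomic_compare_exchange follows the InterlockedCompareExchange convention
// and returns the value observed *before* the exchange. The function name
// `refcount_demo` is hypothetical.
//
//     void refcount_demo()
//     {
//         pplx::details::atomic_long count(0);
//
//         long after = pplx::details::atomic_increment(count); // after == 1
//
//         // CAS: set count to 5 only if it is currently 1; returns the prior value.
//         long observed = pplx::details::atomic_compare_exchange(count, 5L, 1L);
//         // observed == 1 here, so the exchange succeeded and count is now 5.
//
//         pplx::details::atomic_decrement(count); // returns the decremented value, 4
//     }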