mirror of https://github.com/VCMP-SqMod/SqMod.git

Initial preparations for CURL and Discord integration.

Sandu Liviu Catalin
2021-01-27 07:27:48 +02:00
parent 8257eb61d6
commit 95705e87c8
1751 changed files with 440547 additions and 854 deletions

View File: detail/impl/buffer_sequence_adapter.ipp

@@ -0,0 +1,118 @@
//
// detail/impl/buffer_sequence_adapter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP
#define ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
#include <robuffer.h>
#include <windows.storage.streams.h>
#include <wrl/implements.h>
#include "asio/detail/buffer_sequence_adapter.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class winrt_buffer_impl :
public Microsoft::WRL::RuntimeClass<
Microsoft::WRL::RuntimeClassFlags<
Microsoft::WRL::RuntimeClassType::WinRtClassicComMix>,
ABI::Windows::Storage::Streams::IBuffer,
Windows::Storage::Streams::IBufferByteAccess>
{
public:
explicit winrt_buffer_impl(const asio::const_buffer& b)
{
bytes_ = const_cast<byte*>(static_cast<const byte*>(b.data()));
length_ = b.size();
capacity_ = b.size();
}
explicit winrt_buffer_impl(const asio::mutable_buffer& b)
{
bytes_ = static_cast<byte*>(b.data());
length_ = 0;
capacity_ = b.size();
}
~winrt_buffer_impl()
{
}
STDMETHODIMP Buffer(byte** value)
{
*value = bytes_;
return S_OK;
}
STDMETHODIMP get_Capacity(UINT32* value)
{
*value = capacity_;
return S_OK;
}
STDMETHODIMP get_Length(UINT32 *value)
{
*value = length_;
return S_OK;
}
STDMETHODIMP put_Length(UINT32 value)
{
if (value > capacity_)
return E_INVALIDARG;
length_ = value;
return S_OK;
}
private:
byte* bytes_;
UINT32 length_;
UINT32 capacity_;
};
void buffer_sequence_adapter_base::init_native_buffer(
buffer_sequence_adapter_base::native_buffer_type& buf,
const asio::mutable_buffer& buffer)
{
std::memset(&buf, 0, sizeof(native_buffer_type));
Microsoft::WRL::ComPtr<IInspectable> insp
= Microsoft::WRL::Make<winrt_buffer_impl>(buffer);
buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get());
}
void buffer_sequence_adapter_base::init_native_buffer(
buffer_sequence_adapter_base::native_buffer_type& buf,
const asio::const_buffer& buffer)
{
std::memset(&buf, 0, sizeof(native_buffer_type));
Microsoft::WRL::ComPtr<IInspectable> insp
= Microsoft::WRL::Make<winrt_buffer_impl>(buffer);
Platform::Object^ buf_obj = reinterpret_cast<Platform::Object^>(insp.Get());
buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get());
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP
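
For orientation: the adapter above only bridges asio's two plain buffer types into a WinRT IBuffer, reporting length 0 for a writable buffer and length == size for a read-only one. A minimal sketch of how such buffers are created with asio's public API (illustrative code, not part of the commit):

// Sketch: creating the asio buffer types consumed by winrt_buffer_impl.
// Uses only the public asio buffer API; names are illustrative.
#include "asio/buffer.hpp"
#include <vector>

void buffer_example()
{
  std::vector<char> storage(512);

  // mutable_buffer: writable region. The adapter reports length 0 and
  // capacity 512, so WinRT code can fill it and then call put_Length().
  asio::mutable_buffer out = asio::buffer(storage);

  // const_buffer: read-only region. The adapter reports length == size,
  // so WinRT code can consume the bytes immediately.
  asio::const_buffer in = asio::buffer(storage.data(), storage.size());

  (void)out; (void)in;
}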

View File: detail/impl/descriptor_ops.ipp

@@ -0,0 +1,608 @@
//
// detail/impl/descriptor_ops.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
#define ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cerrno>
#include "asio/detail/descriptor_ops.hpp"
#include "asio/error.hpp"
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
namespace descriptor_ops {
int open(const char* path, int flags, asio::error_code& ec)
{
int result = ::open(path, flags);
get_last_error(ec, result < 0);
return result;
}
int close(int d, state_type& state, asio::error_code& ec)
{
int result = 0;
if (d != -1)
{
result = ::close(d);
get_last_error(ec, result < 0);
if (result != 0
&& (ec == asio::error::would_block
|| ec == asio::error::try_again))
{
// According to UNIX Network Programming Vol. 1, it is possible for
// close() to fail with EWOULDBLOCK under certain circumstances. What
// isn't clear is the state of the descriptor after this error. The one
// current OS where this behaviour is seen, Windows, says that the socket
// remains open. Therefore we'll put the descriptor back into blocking
// mode and have another attempt at closing it.
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int flags = ::fcntl(d, F_GETFL, 0);
if (flags >= 0)
::fcntl(d, F_SETFL, flags & ~O_NONBLOCK);
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = 0;
::ioctl(d, FIONBIO, &arg);
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
state &= ~non_blocking;
result = ::close(d);
get_last_error(ec, result < 0);
}
}
return result;
}
bool set_user_non_blocking(int d, state_type& state,
bool value, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return false;
}
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = ::fcntl(d, F_SETFL, flag);
get_last_error(ec, result < 0);
}
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = ::ioctl(d, FIONBIO, &arg);
get_last_error(ec, result < 0);
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
if (result >= 0)
{
if (value)
state |= user_set_non_blocking;
else
{
// Clearing the user-set non-blocking mode always overrides any
// internally-set non-blocking flag. Any subsequent asynchronous
// operations will need to re-enable non-blocking I/O.
state &= ~(user_set_non_blocking | internal_non_blocking);
}
return true;
}
return false;
}
bool set_internal_non_blocking(int d, state_type& state,
bool value, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return false;
}
if (!value && (state & user_set_non_blocking))
{
// It does not make sense to clear the internal non-blocking flag if the
// user still wants non-blocking behaviour. Return an error and let the
// caller figure out whether to update the user-set non-blocking flag.
ec = asio::error::invalid_argument;
return false;
}
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = ::fcntl(d, F_SETFL, flag);
get_last_error(ec, result < 0);
}
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = ::ioctl(d, FIONBIO, &arg);
get_last_error(ec, result < 0);
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
if (result >= 0)
{
if (value)
state |= internal_non_blocking;
else
state &= ~internal_non_blocking;
return true;
}
return false;
}
std::size_t sync_read(int d, state_type state, buf* bufs,
std::size_t count, bool all_empty, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (all_empty)
{
ec.assign(0, ec.category());
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::readv(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_read1(int d, state_type state, void* data,
std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (size == 0)
{
ec.assign(0, ec.category());
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::read(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_read(int d, buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::readv(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check for end of stream.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_read1(int d, void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::read(d, data, size);
get_last_error(ec, bytes < 0);
// Check for end of stream.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
std::size_t sync_write(int d, state_type state, const buf* bufs,
std::size_t count, bool all_empty, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (all_empty)
{
ec.assign(0, ec.category());
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::writev(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_write1(int d, state_type state, const void* data,
std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (size == 0)
{
ec.assign(0, ec.category());
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::write(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_write(int d, const buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::writev(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_write1(int d, const void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::write(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
int ioctl(int d, state_type& state, long cmd,
ioctl_arg_type* arg, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::ioctl(d, cmd, arg);
get_last_error(ec, result < 0);
if (result >= 0)
{
// When updating the non-blocking mode we always perform the ioctl syscall,
// even if the flags would otherwise indicate that the descriptor is
// already in the correct state. This ensures that the underlying
// descriptor is put into the state that has been requested by the user. If
// the ioctl syscall was successful then we need to update the flags to
// match.
if (cmd == static_cast<long>(FIONBIO))
{
if (*arg)
{
state |= user_set_non_blocking;
}
else
{
// Clearing the non-blocking mode always overrides any internally-set
// non-blocking flag. Any subsequent asynchronous operations will need
// to re-enable non-blocking I/O.
state &= ~(user_set_non_blocking | internal_non_blocking);
}
}
}
return result;
}
int fcntl(int d, int cmd, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::fcntl(d, cmd);
get_last_error(ec, result < 0);
return result;
}
int fcntl(int d, int cmd, long arg, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::fcntl(d, cmd, arg);
get_last_error(ec, result < 0);
return result;
}
int poll_read(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLIN;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_write(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLOUT;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_error(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLPRI | POLLERR | POLLHUP;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
} // namespace descriptor_ops
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
#endif // ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
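
All of the read/write helpers above share one POSIX idiom: retry on EINTR, and on EWOULDBLOCK/EAGAIN either report "try again" (the non_blocking_* variants) or poll until ready (the sync_* variants). A standalone sketch of that idiom with the asio error-code plumbing stripped (hypothetical helper name, error handling reduced):

// Sketch of the retry pattern used by sync_read1() above, in plain POSIX.
#include <cerrno>
#include <poll.h>
#include <unistd.h>

ssize_t blocking_read(int fd, void* data, size_t size)
{
  for (;;)
  {
    ssize_t bytes = ::read(fd, data, size);
    if (bytes >= 0)
      return bytes; // success (0 means end of stream)
    if (errno == EINTR)
      continue; // interrupted by a signal: retry immediately
    if (errno != EWOULDBLOCK && errno != EAGAIN)
      return -1; // hard error
    // Descriptor is non-blocking and not ready: wait until readable.
    pollfd fds = { fd, POLLIN, 0 };
    if (::poll(&fds, 1, -1) < 0 && errno != EINTR)
      return -1;
  }
}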

View File: detail/impl/dev_poll_reactor.hpp

@@ -0,0 +1,91 @@
//
// detail/impl/dev_poll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_DEV_POLL)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Time_Traits>
void dev_poll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
template <typename Time_Traits>
void dev_poll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void dev_poll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
interrupter_.interrupt();
}
template <typename Time_Traits>
std::size_t dev_poll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void dev_poll_reactor::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_DEV_POLL)
#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
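
Note the pattern in schedule_timer() above: the interrupter is only tripped when the new timer becomes the earliest deadline, because only then does the reactor's current wait need recomputing. A simplified sketch of why enqueue_timer() can report that (hypothetical, reduced types; the real timer_queue is more elaborate):

// Sketch of the "earliest" flag returned by enqueue_timer().
#include <map>

struct simple_timer_queue
{
  std::multimap<long, void*> timers_; // deadline -> pending operation

  // Returns true if the new timer expires before all existing ones,
  // i.e. the reactor's current wait timeout is now stale.
  bool enqueue_timer(long deadline, void* op)
  {
    bool earliest = timers_.empty() || deadline < timers_.begin()->first;
    timers_.emplace(deadline, op);
    return earliest;
  }
};

The same structure appears in epoll_reactor.hpp below, which calls update_timeout() instead of interrupting directly.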

View File: detail/impl/dev_poll_reactor.ipp

@@ -0,0 +1,446 @@
//
// detail/impl/dev_poll_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_DEV_POLL)
#include "asio/detail/dev_poll_reactor.hpp"
#include "asio/detail/assert.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
dev_poll_reactor::dev_poll_reactor(asio::execution_context& ctx)
: asio::detail::execution_context_service_base<dev_poll_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(),
dev_poll_fd_(do_dev_poll_create()),
interrupter_(),
shutdown_(false)
{
// Add the interrupter's descriptor to /dev/poll.
::pollfd ev = { 0, 0, 0 };
ev.fd = interrupter_.read_descriptor();
ev.events = POLLIN | POLLERR;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
}
dev_poll_reactor::~dev_poll_reactor()
{
shutdown();
::close(dev_poll_fd_);
}
void dev_poll_reactor::shutdown()
{
asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].get_all_operations(ops);
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void dev_poll_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
detail::mutex::scoped_lock lock(mutex_);
if (dev_poll_fd_ != -1)
::close(dev_poll_fd_);
dev_poll_fd_ = -1;
dev_poll_fd_ = do_dev_poll_create();
interrupter_.recreate();
// Add the interrupter's descriptor to /dev/poll.
::pollfd ev = { 0, 0, 0 };
ev.fd = interrupter_.read_descriptor();
ev.events = POLLIN | POLLERR;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
// Re-register all descriptors with /dev/poll. The changes will be written
// to the /dev/poll descriptor the next time the reactor is run.
for (int i = 0; i < max_ops; ++i)
{
reactor_op_queue<socket_type>::iterator iter = op_queue_[i].begin();
reactor_op_queue<socket_type>::iterator end = op_queue_[i].end();
for (; iter != end; ++iter)
{
::pollfd& pending_ev = add_pending_event_change(iter->first);
pending_ev.events |= POLLERR | POLLHUP;
switch (i)
{
case read_op: pending_ev.events |= POLLIN; break;
case write_op: pending_ev.events |= POLLOUT; break;
case except_op: pending_ev.events |= POLLPRI; break;
default: break;
}
}
}
interrupter_.interrupt();
}
}
void dev_poll_reactor::init_task()
{
scheduler_.init_task();
}
int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&)
{
return 0;
}
int dev_poll_reactor::register_internal_descriptor(int op_type,
socket_type descriptor, per_descriptor_data&, reactor_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue_[op_type].enqueue_operation(descriptor, op);
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLERR | POLLHUP;
switch (op_type)
{
case read_op: ev.events |= POLLIN; break;
case write_op: ev.events |= POLLOUT; break;
case except_op: ev.events |= POLLPRI; break;
default: break;
}
interrupter_.interrupt();
return 0;
}
void dev_poll_reactor::move_descriptor(socket_type,
dev_poll_reactor::per_descriptor_data&,
dev_poll_reactor::per_descriptor_data&)
{
}
void dev_poll_reactor::start_op(int op_type, socket_type descriptor,
dev_poll_reactor::per_descriptor_data&, reactor_op* op,
bool is_continuation, bool allow_speculative)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
post_immediate_completion(op, is_continuation);
return;
}
if (allow_speculative)
{
if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor))
{
if (!op_queue_[op_type].has_operation(descriptor))
{
if (op->perform())
{
lock.unlock();
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
}
}
bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
scheduler_.work_started();
if (first)
{
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLERR | POLLHUP;
if (op_type == read_op
|| op_queue_[read_op].has_operation(descriptor))
ev.events |= POLLIN;
if (op_type == write_op
|| op_queue_[write_op].has_operation(descriptor))
ev.events |= POLLOUT;
if (op_type == except_op
|| op_queue_[except_op].has_operation(descriptor))
ev.events |= POLLPRI;
interrupter_.interrupt();
}
}
void dev_poll_reactor::cancel_ops(socket_type descriptor,
dev_poll_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void dev_poll_reactor::deregister_descriptor(socket_type descriptor,
dev_poll_reactor::per_descriptor_data&, bool)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// Remove the descriptor from /dev/poll.
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLREMOVE;
interrupter_.interrupt();
// Cancel any outstanding operations associated with the descriptor.
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void dev_poll_reactor::deregister_internal_descriptor(
socket_type descriptor, dev_poll_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// Remove the descriptor from /dev/poll. Since this function is only called
// during a fork, we can apply the change immediately.
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLREMOVE;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
// Destroy all operations associated with the descriptor.
op_queue<operation> ops;
asio::error_code ec;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].cancel_operations(descriptor, ops, ec);
}
void dev_poll_reactor::cleanup_descriptor_data(
dev_poll_reactor::per_descriptor_data&)
{
}
void dev_poll_reactor::run(long usec, op_queue<operation>& ops)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// We can return immediately if there's no work to do and the reactor is
// not supposed to block.
if (usec == 0 && op_queue_[read_op].empty() && op_queue_[write_op].empty()
&& op_queue_[except_op].empty() && timer_queues_.all_empty())
return;
// Write the pending event registration changes to the /dev/poll descriptor.
std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size();
if (events_size > 0)
{
errno = 0;
int result = ::write(dev_poll_fd_,
&pending_event_changes_[0], events_size);
if (result != static_cast<int>(events_size))
{
asio::error_code ec = asio::error_code(
errno, asio::error::get_system_category());
for (std::size_t i = 0; i < pending_event_changes_.size(); ++i)
{
int descriptor = pending_event_changes_[i].fd;
for (int j = 0; j < max_ops; ++j)
op_queue_[j].cancel_operations(descriptor, ops, ec);
}
}
pending_event_changes_.clear();
pending_event_change_index_.clear();
}
// Calculate timeout.
int timeout;
if (usec == 0)
timeout = 0;
else
{
timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);
timeout = get_timeout(timeout);
}
lock.unlock();
// Block on the /dev/poll descriptor.
::pollfd events[128] = { { 0, 0, 0 } };
::dvpoll dp = { 0, 0, 0 };
dp.dp_fds = events;
dp.dp_nfds = 128;
dp.dp_timeout = timeout;
int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp);
lock.lock();
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
int descriptor = events[i].fd;
if (descriptor == interrupter_.read_descriptor())
{
interrupter_.reset();
}
else
{
bool more_reads = false;
bool more_writes = false;
bool more_except = false;
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
if (events[i].events & (POLLPRI | POLLERR | POLLHUP))
more_except =
op_queue_[except_op].perform_operations(descriptor, ops);
else
more_except = op_queue_[except_op].has_operation(descriptor);
if (events[i].events & (POLLIN | POLLERR | POLLHUP))
more_reads = op_queue_[read_op].perform_operations(descriptor, ops);
else
more_reads = op_queue_[read_op].has_operation(descriptor);
if (events[i].events & (POLLOUT | POLLERR | POLLHUP))
more_writes = op_queue_[write_op].perform_operations(descriptor, ops);
else
more_writes = op_queue_[write_op].has_operation(descriptor);
if ((events[i].events & (POLLERR | POLLHUP)) != 0
&& !more_except && !more_reads && !more_writes)
{
// If we have an event and no operations associated with the
// descriptor then we need to delete the descriptor from /dev/poll.
// The poll operation can produce POLLHUP or POLLERR events when there
// is no operation pending, so if we do not remove the descriptor we
// can end up in a tight polling loop.
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLREMOVE;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
}
else
{
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLERR | POLLHUP;
if (more_reads)
ev.events |= POLLIN;
if (more_writes)
ev.events |= POLLOUT;
if (more_except)
ev.events |= POLLPRI;
ev.revents = 0;
int result = ::write(dev_poll_fd_, &ev, sizeof(ev));
if (result != sizeof(ev))
{
asio::error_code ec(errno,
asio::error::get_system_category());
for (int j = 0; j < max_ops; ++j)
op_queue_[j].cancel_operations(descriptor, ops, ec);
}
}
}
}
timer_queues_.get_ready_timers(ops);
}
void dev_poll_reactor::interrupt()
{
interrupter_.interrupt();
}
int dev_poll_reactor::do_dev_poll_create()
{
int fd = ::open("/dev/poll", O_RDWR);
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "/dev/poll");
}
return fd;
}
void dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
int dev_poll_reactor::get_timeout(int msec)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
const int max_msec = 5 * 60 * 1000;
return timer_queues_.wait_duration_msec(
(msec < 0 || max_msec < msec) ? max_msec : msec);
}
void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor,
const asio::error_code& ec)
{
bool need_interrupt = false;
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
need_interrupt = op_queue_[i].cancel_operations(
descriptor, ops, ec) || need_interrupt;
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor)
{
hash_map<int, std::size_t>::iterator iter
= pending_event_change_index_.find(descriptor);
if (iter == pending_event_change_index_.end())
{
std::size_t index = pending_event_changes_.size();
pending_event_changes_.reserve(pending_event_changes_.size() + 1);
pending_event_change_index_.insert(std::make_pair(descriptor, index));
pending_event_changes_.push_back(::pollfd());
pending_event_changes_[index].fd = descriptor;
pending_event_changes_[index].revents = 0;
return pending_event_changes_[index];
}
else
{
return pending_event_changes_[iter->second];
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_DEV_POLL)
#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
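
The reactor above speaks Solaris's /dev/poll protocol: registrations are pollfd records written to the device (POLLREMOVE deregisters), and the DP_POLL ioctl blocks for ready descriptors. A bare-bones sketch of that protocol outside the reactor (Solaris-only; illustrative, error handling trimmed):

// Sketch of the raw /dev/poll protocol used by dev_poll_reactor.
#include <sys/devpoll.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int devpoll_example(int watched_fd)
{
  int dp = ::open("/dev/poll", O_RDWR);
  if (dp == -1)
    return -1;

  // Register interest by writing a pollfd; POLLREMOVE would deregister.
  pollfd reg = { watched_fd, POLLIN, 0 };
  ::write(dp, &reg, sizeof(reg));

  // Wait for events with the DP_POLL ioctl.
  pollfd events[8];
  dvpoll dvp = { events, 8, -1 }; // dp_fds, dp_nfds, dp_timeout
  int n = ::ioctl(dp, DP_POLL, &dvp);

  ::close(dp);
  return n; // number of ready descriptors, or -1 on error
}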

View File: detail/impl/epoll_reactor.hpp

@@ -0,0 +1,89 @@
//
// detail/impl/epoll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#if defined(ASIO_HAS_EPOLL)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Time_Traits>
void epoll_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
template <typename Time_Traits>
void epoll_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void epoll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
update_timeout();
}
template <typename Time_Traits>
std::size_t epoll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void epoll_reactor::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EPOLL)
#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP

View File: detail/impl/epoll_reactor.ipp

@@ -0,0 +1,787 @@
//
// detail/impl/epoll_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EPOLL)
#include <cstddef>
#include <sys/epoll.h>
#include "asio/detail/epoll_reactor.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#if defined(ASIO_HAS_TIMERFD)
# include <sys/timerfd.h>
#endif // defined(ASIO_HAS_TIMERFD)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
epoll_reactor::epoll_reactor(asio::execution_context& ctx)
: execution_context_service_base<epoll_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_REGISTRATION, scheduler_.concurrency_hint())),
interrupter_(),
epoll_fd_(do_epoll_create()),
timer_fd_(do_timerfd_create()),
shutdown_(false),
registered_descriptors_mutex_(mutex_.enabled())
{
// Add the interrupter's descriptor to epoll.
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
interrupter_.interrupt();
// Add the timer descriptor to epoll.
if (timer_fd_ != -1)
{
ev.events = EPOLLIN | EPOLLERR;
ev.data.ptr = &timer_fd_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
}
}
epoll_reactor::~epoll_reactor()
{
if (epoll_fd_ != -1)
close(epoll_fd_);
if (timer_fd_ != -1)
close(timer_fd_);
}
void epoll_reactor::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
while (descriptor_state* state = registered_descriptors_.first())
{
for (int i = 0; i < max_ops; ++i)
ops.push(state->op_queue_[i]);
state->shutdown_ = true;
registered_descriptors_.free(state);
}
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void epoll_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
if (epoll_fd_ != -1)
::close(epoll_fd_);
epoll_fd_ = -1;
epoll_fd_ = do_epoll_create();
if (timer_fd_ != -1)
::close(timer_fd_);
timer_fd_ = -1;
timer_fd_ = do_timerfd_create();
interrupter_.recreate();
// Add the interrupter's descriptor to epoll.
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
interrupter_.interrupt();
// Add the timer descriptor to epoll.
if (timer_fd_ != -1)
{
ev.events = EPOLLIN | EPOLLERR;
ev.data.ptr = &timer_fd_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
}
update_timeout();
// Re-register all descriptors with epoll.
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
for (descriptor_state* state = registered_descriptors_.first();
state != 0; state = state->next_)
{
ev.events = state->registered_events_;
ev.data.ptr = state;
int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev);
if (result != 0)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "epoll re-registration");
}
}
}
}
void epoll_reactor::init_task()
{
scheduler_.init_task();
}
int epoll_reactor::register_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
descriptor_data->try_speculative_[i] = true;
}
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;
descriptor_data->registered_events_ = ev.events;
ev.data.ptr = descriptor_data;
int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
if (result != 0)
{
if (errno == EPERM)
{
// This file descriptor type is not supported by epoll. However, if it is
// a regular file then operations on it will not block. We will allow
// this descriptor to be used and fail later if an operation on it would
// otherwise require a trip through the reactor.
descriptor_data->registered_events_ = 0;
return 0;
}
return errno;
}
return 0;
}
int epoll_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
for (int i = 0; i < max_ops; ++i)
descriptor_data->try_speculative_[i] = true;
}
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;
descriptor_data->registered_events_ = ev.events;
ev.data.ptr = descriptor_data;
int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
if (result != 0)
return errno;
return 0;
}
void epoll_reactor::move_descriptor(socket_type,
epoll_reactor::per_descriptor_data& target_descriptor_data,
epoll_reactor::per_descriptor_data& source_descriptor_data)
{
target_descriptor_data = source_descriptor_data;
source_descriptor_data = 0;
}
void epoll_reactor::start_op(int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative)
{
if (!descriptor_data)
{
op->ec_ = asio::error::bad_descriptor;
post_immediate_completion(op, is_continuation);
return;
}
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
{
post_immediate_completion(op, is_continuation);
return;
}
if (descriptor_data->op_queue_[op_type].empty())
{
if (allow_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
{
if (descriptor_data->try_speculative_[op_type])
{
if (reactor_op::status status = op->perform())
{
if (status == reactor_op::done_and_exhausted)
if (descriptor_data->registered_events_ != 0)
descriptor_data->try_speculative_[op_type] = false;
descriptor_lock.unlock();
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
if (descriptor_data->registered_events_ == 0)
{
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
if (op_type == write_op)
{
if ((descriptor_data->registered_events_ & EPOLLOUT) == 0)
{
epoll_event ev = { 0, { 0 } };
ev.events = descriptor_data->registered_events_ | EPOLLOUT;
ev.data.ptr = descriptor_data;
if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0)
{
descriptor_data->registered_events_ |= ev.events;
}
else
{
op->ec_ = asio::error_code(errno,
asio::error::get_system_category());
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
}
}
else if (descriptor_data->registered_events_ == 0)
{
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
else
{
if (op_type == write_op)
{
descriptor_data->registered_events_ |= EPOLLOUT;
}
epoll_event ev = { 0, { 0 } };
ev.events = descriptor_data->registered_events_;
ev.data.ptr = descriptor_data;
epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
}
}
descriptor_data->op_queue_[op_type].push(op);
scheduler_.work_started();
}
void epoll_reactor::cancel_ops(socket_type,
epoll_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void epoll_reactor::deregister_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
if (closing)
{
// The descriptor will be automatically removed from the epoll set when
// it is closed.
}
else if (descriptor_data->registered_events_ != 0)
{
epoll_event ev = { 0, { 0 } };
epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
}
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
scheduler_.post_deferred_completions(ops);
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
epoll_event ev = { 0, { 0 } };
epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
ops.push(descriptor_data->op_queue_[i]);
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void epoll_reactor::cleanup_descriptor_data(
per_descriptor_data& descriptor_data)
{
if (descriptor_data)
{
free_descriptor_state(descriptor_data);
descriptor_data = 0;
}
}
void epoll_reactor::run(long usec, op_queue<operation>& ops)
{
// This code relies on the fact that the scheduler queues the reactor task
// behind all descriptor operations generated by this function. This means
// that by the time we reach this point, any previously returned descriptor
// operations have already been dequeued. Therefore it is now safe for us to
// reuse and return them for the scheduler to queue again.
// Calculate timeout. Check the timer queues only if timerfd is not in use.
int timeout;
if (usec == 0)
timeout = 0;
else
{
timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);
if (timer_fd_ == -1)
{
mutex::scoped_lock lock(mutex_);
timeout = get_timeout(timeout);
}
}
// Block on the epoll descriptor.
epoll_event events[128];
int num_events = epoll_wait(epoll_fd_, events, 128, timeout);
#if defined(ASIO_ENABLE_HANDLER_TRACKING)
// Trace the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = events[i].data.ptr;
if (ptr == &interrupter_)
{
// Ignore.
}
# if defined(ASIO_HAS_TIMERFD)
else if (ptr == &timer_fd_)
{
// Ignore.
}
# endif // defined(ASIO_HAS_TIMERFD)
else
{
unsigned event_mask = 0;
if ((events[i].events & EPOLLIN) != 0)
event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT;
if ((events[i].events & EPOLLOUT))
event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT;
if ((events[i].events & (EPOLLERR | EPOLLHUP)) != 0)
event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT;
ASIO_HANDLER_REACTOR_EVENTS((context(),
reinterpret_cast<uintmax_t>(ptr), event_mask));
}
}
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
#if defined(ASIO_HAS_TIMERFD)
bool check_timers = (timer_fd_ == -1);
#else // defined(ASIO_HAS_TIMERFD)
bool check_timers = true;
#endif // defined(ASIO_HAS_TIMERFD)
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = events[i].data.ptr;
if (ptr == &interrupter_)
{
// No need to reset the interrupter since we're leaving the descriptor
// in a ready-to-read state and relying on edge-triggered notifications
// to make it so that we only get woken up when the descriptor's epoll
// registration is updated.
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ == -1)
check_timers = true;
#else // defined(ASIO_HAS_TIMERFD)
check_timers = true;
#endif // defined(ASIO_HAS_TIMERFD)
}
#if defined(ASIO_HAS_TIMERFD)
else if (ptr == &timer_fd_)
{
check_timers = true;
}
#endif // defined(ASIO_HAS_TIMERFD)
else
{
// The descriptor operation doesn't count as work in and of itself, so we
// don't call work_started() here. This still allows the scheduler to
// stop if the only remaining operations are descriptor operations.
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
if (!ops.is_enqueued(descriptor_data))
{
descriptor_data->set_ready_events(events[i].events);
ops.push(descriptor_data);
}
else
{
descriptor_data->add_ready_events(events[i].events);
}
}
}
if (check_timers)
{
mutex::scoped_lock common_lock(mutex_);
timer_queues_.get_ready_timers(ops);
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ != -1)
{
itimerspec new_timeout;
itimerspec old_timeout;
int flags = get_timeout(new_timeout);
timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
}
#endif // defined(ASIO_HAS_TIMERFD)
}
}
void epoll_reactor::interrupt()
{
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev);
}
int epoll_reactor::do_epoll_create()
{
#if defined(EPOLL_CLOEXEC)
int fd = epoll_create1(EPOLL_CLOEXEC);
#else // defined(EPOLL_CLOEXEC)
int fd = -1;
errno = EINVAL;
#endif // defined(EPOLL_CLOEXEC)
if (fd == -1 && (errno == EINVAL || errno == ENOSYS))
{
fd = epoll_create(epoll_size);
if (fd != -1)
::fcntl(fd, F_SETFD, FD_CLOEXEC);
}
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "epoll");
}
return fd;
}
int epoll_reactor::do_timerfd_create()
{
#if defined(ASIO_HAS_TIMERFD)
# if defined(TFD_CLOEXEC)
int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
# else // defined(TFD_CLOEXEC)
int fd = -1;
errno = EINVAL;
# endif // defined(TFD_CLOEXEC)
if (fd == -1 && errno == EINVAL)
{
fd = timerfd_create(CLOCK_MONOTONIC, 0);
if (fd != -1)
::fcntl(fd, F_SETFD, FD_CLOEXEC);
}
return fd;
#else // defined(ASIO_HAS_TIMERFD)
return -1;
#endif // defined(ASIO_HAS_TIMERFD)
}
epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_IO, scheduler_.concurrency_hint()));
}
void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
registered_descriptors_.free(s);
}
void epoll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
void epoll_reactor::update_timeout()
{
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ != -1)
{
itimerspec new_timeout;
itimerspec old_timeout;
int flags = get_timeout(new_timeout);
timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
return;
}
#endif // defined(ASIO_HAS_TIMERFD)
interrupt();
}
int epoll_reactor::get_timeout(int msec)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
const int max_msec = 5 * 60 * 1000;
return timer_queues_.wait_duration_msec(
(msec < 0 || max_msec < msec) ? max_msec : msec);
}
#if defined(ASIO_HAS_TIMERFD)
int epoll_reactor::get_timeout(itimerspec& ts)
{
ts.it_interval.tv_sec = 0;
ts.it_interval.tv_nsec = 0;
long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
ts.it_value.tv_sec = usec / 1000000;
ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1;
return usec ? 0 : TFD_TIMER_ABSTIME;
}
#endif // defined(ASIO_HAS_TIMERFD)
struct epoll_reactor::perform_io_cleanup_on_block_exit
{
explicit perform_io_cleanup_on_block_exit(epoll_reactor* r)
: reactor_(r), first_op_(0)
{
}
~perform_io_cleanup_on_block_exit()
{
if (first_op_)
{
// Post the remaining completed operations for invocation.
if (!ops_.empty())
reactor_->scheduler_.post_deferred_completions(ops_);
// A user-initiated operation has completed, but there's no need to
// explicitly call work_finished() here. Instead, we'll take advantage of
// the fact that the scheduler will call work_finished() once we return.
}
else
{
// No user-initiated operations have completed, so we need to compensate
// for the work_finished() call that the scheduler will make once this
// operation returns.
reactor_->scheduler_.compensating_work_started();
}
}
epoll_reactor* reactor_;
op_queue<operation> ops_;
operation* first_op_;
};
epoll_reactor::descriptor_state::descriptor_state(bool locking)
: operation(&epoll_reactor::descriptor_state::do_complete),
mutex_(locking)
{
}
operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
{
mutex_.lock();
perform_io_cleanup_on_block_exit io_cleanup(reactor_);
mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock);
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
for (int j = max_ops - 1; j >= 0; --j)
{
if (events & (flag[j] | EPOLLERR | EPOLLHUP))
{
try_speculative_[j] = true;
while (reactor_op* op = op_queue_[j].front())
{
if (reactor_op::status status = op->perform())
{
op_queue_[j].pop();
io_cleanup.ops_.push(op);
if (status == reactor_op::done_and_exhausted)
{
try_speculative_[j] = false;
break;
}
}
else
break;
}
}
}
// The first operation will be returned for completion now. The others will
// be posted for later by the io_cleanup object's destructor.
io_cleanup.first_op_ = io_cleanup.ops_.front();
io_cleanup.ops_.pop();
return io_cleanup.first_op_;
}
void epoll_reactor::descriptor_state::do_complete(
void* owner, operation* base,
const asio::error_code& ec, std::size_t bytes_transferred)
{
if (owner)
{
descriptor_state* descriptor_data = static_cast<descriptor_state*>(base);
uint32_t events = static_cast<uint32_t>(bytes_transferred);
if (operation* op = descriptor_data->perform_io(events))
{
op->complete(owner, ec, 0);
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EPOLL)
#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
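
The registration strategy above is one-shot and edge-triggered: each descriptor is added once with EPOLLET and a full event mask, and readiness is then consumed speculatively until the next edge arrives. A minimal standalone sketch of that epoll cycle (illustrative; error handling trimmed):

// Sketch of the edge-triggered epoll cycle used by epoll_reactor above.
#include <sys/epoll.h>
#include <unistd.h>

int epoll_example(int fd)
{
  int ep = ::epoll_create1(EPOLL_CLOEXEC);
  if (ep == -1)
    return -1;

  // One-time registration for all event types, edge-triggered: the fd
  // is reported only when its readiness changes, so the consumer must
  // drain it (e.g. read until EAGAIN) before waiting again.
  epoll_event ev = {};
  ev.events = EPOLLIN | EPOLLOUT | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLET;
  ev.data.fd = fd;
  ::epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev);

  epoll_event events[128];
  int n = ::epoll_wait(ep, events, 128, -1);

  ::close(ep);
  return n; // number of ready descriptors, or -1 on error
}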

View File: detail/impl/eventfd_select_interrupter.ipp

@@ -0,0 +1,171 @@
//
// detail/impl/eventfd_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EVENTFD)
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# include <asm/unistd.h>
#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# include <sys/eventfd.h>
#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
#include "asio/detail/cstdint.hpp"
#include "asio/detail/eventfd_select_interrupter.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
eventfd_select_interrupter::eventfd_select_interrupter()
{
open_descriptors();
}
void eventfd_select_interrupter::open_descriptors()
{
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0);
if (read_descriptor_ != -1)
{
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
}
#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
write_descriptor_ = read_descriptor_ =
::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
# else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
errno = EINVAL;
write_descriptor_ = read_descriptor_ = -1;
# endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
if (read_descriptor_ == -1 && errno == EINVAL)
{
write_descriptor_ = read_descriptor_ = ::eventfd(0, 0);
if (read_descriptor_ != -1)
{
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
}
}
#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
if (read_descriptor_ == -1)
{
int pipe_fds[2];
if (pipe(pipe_fds) == 0)
{
read_descriptor_ = pipe_fds[0];
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
write_descriptor_ = pipe_fds[1];
::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "eventfd_select_interrupter");
}
}
}
eventfd_select_interrupter::~eventfd_select_interrupter()
{
close_descriptors();
}
void eventfd_select_interrupter::close_descriptors()
{
if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_)
::close(write_descriptor_);
if (read_descriptor_ != -1)
::close(read_descriptor_);
}
void eventfd_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = -1;
read_descriptor_ = -1;
open_descriptors();
}
void eventfd_select_interrupter::interrupt()
{
uint64_t counter(1UL);
int result = ::write(write_descriptor_, &counter, sizeof(uint64_t));
(void)result;
}
bool eventfd_select_interrupter::reset()
{
if (write_descriptor_ == read_descriptor_)
{
for (;;)
{
// Only perform one read. The kernel maintains an atomic counter.
uint64_t counter(0);
errno = 0;
int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t));
if (bytes_read < 0 && errno == EINTR)
continue;
return true;
}
}
else
{
for (;;)
{
// Clear all data from the pipe.
char data[1024];
int bytes_read = ::read(read_descriptor_, data, sizeof(data));
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (errno == EINTR)
continue;
if (errno == EWOULDBLOCK)
return true;
if (errno == EAGAIN)
return true;
return false;
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EVENTFD)
#endif // ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
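
The interrupter above is a classic self-wake primitive: writing any non-zero value to an eventfd makes it readable, and a single 8-byte read atomically returns and resets the kernel counter, so one read clears any number of queued interrupts. A standalone sketch (illustrative; error handling trimmed):

// Sketch of the eventfd wake/reset pattern used above.
#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>

int eventfd_example()
{
  int efd = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (efd == -1)
    return -1;

  // interrupt(): any non-zero add makes the descriptor readable.
  uint64_t one = 1;
  ::write(efd, &one, sizeof(one));

  // reset(): a single read returns the accumulated counter and zeroes
  // it, clearing every pending interrupt in one call.
  uint64_t counter = 0;
  ::read(efd, &counter, sizeof(counter));

  ::close(efd);
  return counter > 0 ? 0 : -1;
}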

View File: detail/impl/handler_tracking.ipp

@@ -0,0 +1,396 @@
//
// detail/impl/handler_tracking.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
#define ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_CUSTOM_HANDLER_TRACKING)
// The handler tracking implementation is provided by the user-specified header.
#elif defined(ASIO_ENABLE_HANDLER_TRACKING)
#include <cstdarg>
#include <cstdio>
#include "asio/detail/handler_tracking.hpp"
#if defined(ASIO_HAS_BOOST_DATE_TIME)
# include "asio/time_traits.hpp"
#elif defined(ASIO_HAS_CHRONO)
# include "asio/detail/chrono.hpp"
# include "asio/detail/chrono_time_traits.hpp"
# include "asio/wait_traits.hpp"
#endif // defined(ASIO_HAS_BOOST_DATE_TIME)
#if defined(ASIO_WINDOWS_RUNTIME)
# include "asio/detail/socket_types.hpp"
#elif !defined(ASIO_WINDOWS)
# include <unistd.h>
#endif // !defined(ASIO_WINDOWS)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct handler_tracking_timestamp
{
uint64_t seconds;
uint64_t microseconds;
handler_tracking_timestamp()
{
#if defined(ASIO_HAS_BOOST_DATE_TIME)
boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
boost::posix_time::time_duration now =
boost::posix_time::microsec_clock::universal_time() - epoch;
#elif defined(ASIO_HAS_CHRONO)
typedef chrono_time_traits<chrono::system_clock,
asio::wait_traits<chrono::system_clock> > traits_helper;
traits_helper::posix_time_duration now(
chrono::system_clock::now().time_since_epoch());
#endif
seconds = static_cast<uint64_t>(now.total_seconds());
microseconds = static_cast<uint64_t>(now.total_microseconds() % 1000000);
}
};
struct handler_tracking::tracking_state
{
static_mutex mutex_;
uint64_t next_id_;
tss_ptr<completion>* current_completion_;
tss_ptr<location>* current_location_;
};
handler_tracking::tracking_state* handler_tracking::get_state()
{
static tracking_state state = { ASIO_STATIC_MUTEX_INIT, 1, 0, 0 };
return &state;
}
void handler_tracking::init()
{
static tracking_state* state = get_state();
state->mutex_.init();
static_mutex::scoped_lock lock(state->mutex_);
if (state->current_completion_ == 0)
state->current_completion_ = new tss_ptr<completion>;
if (state->current_location_ == 0)
state->current_location_ = new tss_ptr<location>;
}
handler_tracking::location::location(
const char* file, int line, const char* func)
: file_(file),
line_(line),
func_(func),
next_(*get_state()->current_location_)
{
if (file_)
*get_state()->current_location_ = this;
}
handler_tracking::location::~location()
{
if (file_)
*get_state()->current_location_ = next_;
}
void handler_tracking::creation(execution_context&,
handler_tracking::tracked_handler& h,
const char* object_type, void* object,
uintmax_t /*native_handle*/, const char* op_name)
{
static tracking_state* state = get_state();
static_mutex::scoped_lock lock(state->mutex_);
h.id_ = state->next_id_++;
lock.unlock();
handler_tracking_timestamp timestamp;
uint64_t current_id = 0;
if (completion* current_completion = *state->current_completion_)
current_id = current_completion->id_;
for (location* current_location = *state->current_location_;
current_location; current_location = current_location->next_)
{
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u^%I64u|%s%s%.80s%s(%.80s:%d)\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu^%llu|%s%s%.80s%s(%.80s:%d)\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, h.id_,
current_location == *state->current_location_ ? "in " : "called from ",
current_location->func_ ? "'" : "",
current_location->func_ ? current_location->func_ : "",
current_location->func_ ? "' " : "",
current_location->file_, current_location->line_);
}
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, h.id_, object_type, object, op_name);
}
handler_tracking::completion::completion(
const handler_tracking::tracked_handler& h)
: id_(h.id_),
invoked_(false),
next_(*get_state()->current_completion_)
{
*get_state()->current_completion_ = this;
}
handler_tracking::completion::~completion()
{
if (id_)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%c%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%c%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
invoked_ ? '!' : '~', id_);
}
*get_state()->current_completion_ = next_;
}
void handler_tracking::completion::invocation_begin()
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds, id_);
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value());
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, std::size_t bytes_transferred)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(),
static_cast<uint64_t>(bytes_transferred));
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, int signal_number)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(), signal_number);
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, const char* arg)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(), arg);
invoked_ = true;
}
void handler_tracking::completion::invocation_end()
{
if (id_)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|<%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|<%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds, id_);
id_ = 0;
}
}
void handler_tracking::operation(execution_context&,
const char* object_type, void* object,
uintmax_t /*native_handle*/, const char* op_name)
{
static tracking_state* state = get_state();
handler_tracking_timestamp timestamp;
  uint64_t current_id = 0;
if (completion* current_completion = *state->current_completion_)
current_id = current_completion->id_;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, object_type, object, op_name);
}
void handler_tracking::reactor_registration(execution_context& /*context*/,
uintmax_t /*native_handle*/, uintmax_t /*registration*/)
{
}
void handler_tracking::reactor_deregistration(execution_context& /*context*/,
uintmax_t /*native_handle*/, uintmax_t /*registration*/)
{
}
void handler_tracking::reactor_events(execution_context& /*context*/,
uintmax_t /*native_handle*/, unsigned /*events*/)
{
}
void handler_tracking::reactor_operation(
const tracked_handler& h, const char* op_name,
const asio::error_code& ec)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
h.id_, op_name, ec.category().name(), ec.value());
}
void handler_tracking::reactor_operation(
const tracked_handler& h, const char* op_name,
const asio::error_code& ec, std::size_t bytes_transferred)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d,bytes_transferred=%I64u\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d,bytes_transferred=%llu\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
h.id_, op_name, ec.category().name(), ec.value(),
static_cast<uint64_t>(bytes_transferred));
}
void handler_tracking::write_line(const char* format, ...)
{
  using namespace std; // For vsprintf (or equivalent).
va_list args;
va_start(args, format);
char line[256] = "";
#if defined(ASIO_HAS_SECURE_RTL)
int length = vsprintf_s(line, sizeof(line), format, args);
#else // defined(ASIO_HAS_SECURE_RTL)
int length = vsprintf(line, format, args);
#endif // defined(ASIO_HAS_SECURE_RTL)
va_end(args);
#if defined(ASIO_WINDOWS_RUNTIME)
wchar_t wline[256] = L"";
mbstowcs_s(0, wline, sizeof(wline) / sizeof(wchar_t), line, length);
::OutputDebugStringW(wline);
#elif defined(ASIO_WINDOWS)
HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE);
DWORD bytes_written = 0;
::WriteFile(stderr_handle, line, length, &bytes_written, 0);
#else // defined(ASIO_WINDOWS)
::write(STDERR_FILENO, line, length);
#endif // defined(ASIO_WINDOWS)
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
#endif // ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
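
When ASIO_ENABLE_HANDLER_TRACKING is defined, the code above writes
line-oriented records of the form @asio|<seconds>.<useconds>|<event>|<details>
to standard error. A small filter like the sketch below (not part of this
commit) can split those fields for post-processing; the field layout is taken
from the write_line() format strings above.

// track_filter.cpp - minimal sketch: split "@asio|...|...|..." records
// piped in on standard input.
#include <iostream>
#include <sstream>
#include <string>

int main()
{
  std::string line;
  while (std::getline(std::cin, line))
  {
    if (line.compare(0, 6, "@asio|") != 0)
      continue; // not a tracking record
    std::istringstream fields(line.substr(6));
    std::string timestamp, event, details;
    std::getline(fields, timestamp, '|');
    std::getline(fields, event, '|');
    std::getline(fields, details);
    std::cout << event << " -> " << details << '\n';
  }
}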


@ -0,0 +1,93 @@
//
// detail/impl/kqueue_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_KQUEUE)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Time_Traits>
void kqueue_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
// Remove a timer queue from the reactor.
template <typename Time_Traits>
void kqueue_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void kqueue_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
interrupt();
}
template <typename Time_Traits>
std::size_t kqueue_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void kqueue_reactor::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_KQUEUE)
#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
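
These templates are the internal half of the timer story: schedule_timer()
enqueues the wait operation and wakes the reactor only when the new deadline
becomes the earliest one, while cancel_timer() completes pending waits with
operation_aborted. From the public API the same path is driven as in this
sketch (not part of this commit; assumes the vendored standalone asio headers
are on the include path).

// timer_demo.cpp - public-API view of schedule_timer()/cancel_timer().
#include <asio/io_context.hpp>
#include <asio/steady_timer.hpp>
#include <chrono>
#include <iostream>

int main()
{
  asio::io_context ctx;

  asio::steady_timer timer(ctx, std::chrono::milliseconds(100));
  timer.async_wait([](const asio::error_code& ec)
  {
    // ec is asio::error::operation_aborted if the timer was cancelled.
    std::cout << (ec ? "cancelled" : "fired") << '\n';
  });

  ctx.run(); // drives the reactor and its timer queues
}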


@ -0,0 +1,570 @@
//
// detail/impl/kqueue_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_KQUEUE)
#include "asio/detail/kqueue_reactor.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#if defined(__NetBSD__)
# include <sys/param.h>
#endif
#include "asio/detail/push_options.hpp"
#if defined(__NetBSD__) && __NetBSD_Version__ < 999001500
# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
EV_SET(ev, ident, filt, flags, fflags, data, \
reinterpret_cast<intptr_t>(static_cast<void*>(udata)))
#else
# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
EV_SET(ev, ident, filt, flags, fflags, data, udata)
#endif
namespace asio {
namespace detail {
kqueue_reactor::kqueue_reactor(asio::execution_context& ctx)
: execution_context_service_base<kqueue_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_REGISTRATION, scheduler_.concurrency_hint())),
kqueue_fd_(do_kqueue_create()),
interrupter_(),
shutdown_(false),
registered_descriptors_mutex_(mutex_.enabled())
{
struct kevent events[1];
ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
{
asio::error_code error(errno,
asio::error::get_system_category());
asio::detail::throw_error(error);
}
}
kqueue_reactor::~kqueue_reactor()
{
close(kqueue_fd_);
}
void kqueue_reactor::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
while (descriptor_state* state = registered_descriptors_.first())
{
for (int i = 0; i < max_ops; ++i)
ops.push(state->op_queue_[i]);
state->shutdown_ = true;
registered_descriptors_.free(state);
}
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void kqueue_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
// The kqueue descriptor is automatically closed in the child.
kqueue_fd_ = -1;
kqueue_fd_ = do_kqueue_create();
interrupter_.recreate();
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue interrupter registration");
}
// Re-register all descriptors with kqueue.
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
for (descriptor_state* state = registered_descriptors_.first();
state != 0; state = state->next_)
{
if (state->num_kevents_ > 0)
{
ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_,
EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_,
EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);
if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue re-registration");
}
}
}
}
}
void kqueue_reactor::init_task()
{
scheduler_.init_task();
}
int kqueue_reactor::register_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
mutex::scoped_lock lock(descriptor_data->mutex_);
descriptor_data->descriptor_ = descriptor;
descriptor_data->num_kevents_ = 0;
descriptor_data->shutdown_ = false;
return 0;
}
int kqueue_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
mutex::scoped_lock lock(descriptor_data->mutex_);
descriptor_data->descriptor_ = descriptor;
descriptor_data->num_kevents_ = 1;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
struct kevent events[1];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
return errno;
return 0;
}
void kqueue_reactor::move_descriptor(socket_type,
kqueue_reactor::per_descriptor_data& target_descriptor_data,
kqueue_reactor::per_descriptor_data& source_descriptor_data)
{
target_descriptor_data = source_descriptor_data;
source_descriptor_data = 0;
}
void kqueue_reactor::start_op(int op_type, socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative)
{
if (!descriptor_data)
{
op->ec_ = asio::error::bad_descriptor;
post_immediate_completion(op, is_continuation);
return;
}
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
{
post_immediate_completion(op, is_continuation);
return;
}
if (descriptor_data->op_queue_[op_type].empty())
{
static const int num_kevents[max_ops] = { 1, 2, 1 };
if (allow_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
{
if (op->perform())
{
descriptor_lock.unlock();
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
if (descriptor_data->num_kevents_ < num_kevents[op_type])
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1)
{
descriptor_data->num_kevents_ = num_kevents[op_type];
}
else
{
op->ec_ = asio::error_code(errno,
asio::error::get_system_category());
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
}
}
else
{
if (descriptor_data->num_kevents_ < num_kevents[op_type])
descriptor_data->num_kevents_ = num_kevents[op_type];
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
}
}
descriptor_data->op_queue_[op_type].push(op);
scheduler_.work_started();
}
void kqueue_reactor::cancel_ops(socket_type,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void kqueue_reactor::deregister_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
if (closing)
{
// The descriptor will be automatically removed from the kqueue when it
// is closed.
}
else
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor,
EVFILT_READ, EV_DELETE, 0, 0, 0);
ASIO_KQUEUE_EV_SET(&events[1], descriptor,
EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
}
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
scheduler_.post_deferred_completions(ops);
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor,
EVFILT_READ, EV_DELETE, 0, 0, 0);
ASIO_KQUEUE_EV_SET(&events[1], descriptor,
EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
ops.push(descriptor_data->op_queue_[i]);
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void kqueue_reactor::cleanup_descriptor_data(
per_descriptor_data& descriptor_data)
{
if (descriptor_data)
{
free_descriptor_state(descriptor_data);
descriptor_data = 0;
}
}
void kqueue_reactor::run(long usec, op_queue<operation>& ops)
{
mutex::scoped_lock lock(mutex_);
// Determine how long to block while waiting for events.
timespec timeout_buf = { 0, 0 };
timespec* timeout = usec ? get_timeout(usec, timeout_buf) : &timeout_buf;
lock.unlock();
// Block on the kqueue descriptor.
struct kevent events[128];
int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout);
#if defined(ASIO_ENABLE_HANDLER_TRACKING)
// Trace the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = reinterpret_cast<void*>(events[i].udata);
if (ptr != &interrupter_)
{
unsigned event_mask = 0;
switch (events[i].filter)
{
case EVFILT_READ:
event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT;
break;
case EVFILT_WRITE:
event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT;
break;
}
if ((events[i].flags & (EV_ERROR | EV_OOBAND)) != 0)
event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT;
ASIO_HANDLER_REACTOR_EVENTS((context(),
reinterpret_cast<uintmax_t>(ptr), event_mask));
}
}
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = reinterpret_cast<void*>(events[i].udata);
if (ptr == &interrupter_)
{
interrupter_.reset();
}
else
{
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (events[i].filter == EVFILT_WRITE
&& descriptor_data->num_kevents_ == 2
&& descriptor_data->op_queue_[write_op].empty())
{
// Some descriptor types, like serial ports, don't seem to support
// EV_CLEAR with EVFILT_WRITE. Since we have no pending write
// operations we'll remove the EVFILT_WRITE registration here so that
// we don't end up in a tight spin.
struct kevent delete_events[1];
ASIO_KQUEUE_EV_SET(&delete_events[0],
descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0);
descriptor_data->num_kevents_ = 1;
}
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
#if defined(__NetBSD__)
static const unsigned int filter[max_ops] =
#else
static const int filter[max_ops] =
#endif
{ EVFILT_READ, EVFILT_WRITE, EVFILT_READ };
for (int j = max_ops - 1; j >= 0; --j)
{
if (events[i].filter == filter[j])
{
if (j != except_op || events[i].flags & EV_OOBAND)
{
while (reactor_op* op = descriptor_data->op_queue_[j].front())
{
if (events[i].flags & EV_ERROR)
{
op->ec_ = asio::error_code(
static_cast<int>(events[i].data),
asio::error::get_system_category());
descriptor_data->op_queue_[j].pop();
ops.push(op);
}
if (op->perform())
{
descriptor_data->op_queue_[j].pop();
ops.push(op);
}
else
break;
}
}
}
}
}
}
lock.lock();
timer_queues_.get_ready_timers(ops);
}
void kqueue_reactor::interrupt()
{
interrupter_.interrupt();
}
int kqueue_reactor::do_kqueue_create()
{
int fd = ::kqueue();
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue");
}
return fd;
}
kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
return registered_descriptors_.alloc(ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_IO, scheduler_.concurrency_hint()));
}
void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
registered_descriptors_.free(s);
}
void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
timespec* kqueue_reactor::get_timeout(long usec, timespec& ts)
{
  // By default we will wait no longer than 5 minutes. This ensures that any
  // changes to the system clock are detected after no more than 5 minutes.
const long max_usec = 5 * 60 * 1000 * 1000;
usec = timer_queues_.wait_duration_usec(
(usec < 0 || max_usec < usec) ? max_usec : usec);
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
return &ts;
}
} // namespace detail
} // namespace asio
#undef ASIO_KQUEUE_EV_SET
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_KQUEUE)
#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
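
Stripped of the per-descriptor bookkeeping, the reactor's event loop above is
plain kqueue: register interest with EV_ADD | EV_CLEAR, block in kevent() with
a computed timeout, then dispatch using the udata cookie. A standalone sketch
of that core loop (not part of this commit; BSD/macOS only, watching stdin):

// kqueue_demo.cpp - the reactor's core loop with the bookkeeping removed.
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <cstdio>

int main()
{
  int kq = ::kqueue();
  if (kq == -1)
    return 1;

  // Register edge-triggered read interest, as register_descriptor() does.
  struct kevent change;
  EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, 0);
  if (::kevent(kq, &change, 1, 0, 0, 0) == -1)
    return 1;

  // Block for events, bounded by a timeout as in run().
  struct kevent events[16];
  timespec timeout = { 5, 0 };
  int num_events = ::kevent(kq, 0, 0, events, 16, &timeout);
  for (int i = 0; i < num_events; ++i)
    std::printf("fd %d readable, %ld bytes pending\n",
        static_cast<int>(events[i].ident),
        static_cast<long>(events[i].data));
  ::close(kq);
}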


@ -0,0 +1,74 @@
//
// detail/impl/null_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_NULL_EVENT_IPP
#define ASIO_DETAIL_IMPL_NULL_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include <thread>
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# include "asio/detail/socket_types.hpp"
#else
# include <unistd.h>
# if defined(__hpux)
# include <sys/time.h>
# endif
# if !defined(__hpux) || defined(__SELECT)
# include <sys/select.h>
# endif
#endif
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void null_event::do_wait()
{
#if defined(ASIO_WINDOWS_RUNTIME)
std::this_thread::sleep_until((std::chrono::steady_clock::time_point::max)());
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
::Sleep(INFINITE);
#else
::pause();
#endif
}
void null_event::do_wait_for_usec(long usec)
{
#if defined(ASIO_WINDOWS_RUNTIME)
std::this_thread::sleep_for(std::chrono::microseconds(usec));
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
::Sleep(usec / 1000);
#elif defined(__hpux) && defined(__SELECT)
timespec ts;
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
::pselect(0, 0, 0, 0, &ts, 0);
#else
timeval tv;
tv.tv_sec = usec / 1000000;
tv.tv_usec = usec % 1000000;
::select(0, 0, 0, 0, &tv);
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_NULL_EVENT_IPP
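
The fallback branch of do_wait_for_usec() relies on a classic trick: select()
with no descriptors is simply a portable sub-second sleep. A standalone sketch
(not part of this commit; POSIX):

// usleep_demo.cpp - zero-descriptor select() as a microsecond sleep.
#include <sys/select.h>
#include <cstdio>

void sleep_usec(long usec)
{
  timeval tv;
  tv.tv_sec = usec / 1000000;
  tv.tv_usec = usec % 1000000;
  ::select(0, 0, 0, 0, &tv); // no fds, so it returns only when tv expires
}

int main()
{
  std::printf("sleeping 250ms...\n");
  sleep_usec(250000);
  std::printf("done\n");
}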


@ -0,0 +1,129 @@
//
// detail/impl/pipe_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS_RUNTIME)
#if !defined(ASIO_WINDOWS)
#if !defined(__CYGWIN__)
#if !defined(__SYMBIAN32__)
#if !defined(ASIO_HAS_EVENTFD)
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "asio/detail/pipe_select_interrupter.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
pipe_select_interrupter::pipe_select_interrupter()
{
open_descriptors();
}
void pipe_select_interrupter::open_descriptors()
{
int pipe_fds[2];
if (pipe(pipe_fds) == 0)
{
read_descriptor_ = pipe_fds[0];
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
write_descriptor_ = pipe_fds[1];
::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
#if defined(FD_CLOEXEC)
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
#endif // defined(FD_CLOEXEC)
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "pipe_select_interrupter");
}
}
pipe_select_interrupter::~pipe_select_interrupter()
{
close_descriptors();
}
void pipe_select_interrupter::close_descriptors()
{
if (read_descriptor_ != -1)
::close(read_descriptor_);
if (write_descriptor_ != -1)
::close(write_descriptor_);
}
void pipe_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = -1;
read_descriptor_ = -1;
open_descriptors();
}
void pipe_select_interrupter::interrupt()
{
char byte = 0;
signed_size_type result = ::write(write_descriptor_, &byte, 1);
(void)result;
}
bool pipe_select_interrupter::reset()
{
for (;;)
{
char data[1024];
signed_size_type bytes_read = ::read(read_descriptor_, data, sizeof(data));
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (errno == EINTR)
continue;
if (errno == EWOULDBLOCK || errno == EAGAIN)
return true;
return false;
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_HAS_EVENTFD)
#endif // !defined(__SYMBIAN32__)
#endif // !defined(__CYGWIN__)
#endif // !defined(ASIO_WINDOWS)
#endif // !defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
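
This is the classic self-pipe trick: one byte written to the pipe makes the
read end selectable, waking the blocked select() call, and reset() drains
whatever has accumulated. A standalone sketch (not part of this commit;
POSIX):

// self_pipe_demo.cpp - the self-pipe wake-up used above, in isolation.
#include <fcntl.h>
#include <sys/select.h>
#include <unistd.h>
#include <cstdio>

int main()
{
  int fds[2];
  if (::pipe(fds) != 0)
    return 1;
  ::fcntl(fds[0], F_SETFL, O_NONBLOCK);
  ::fcntl(fds[1], F_SETFL, O_NONBLOCK);

  char byte = 0;
  (void)::write(fds[1], &byte, 1); // interrupt()

  fd_set readable;
  FD_ZERO(&readable);
  FD_SET(fds[0], &readable);
  timeval tv = { 5, 0 };
  std::printf("select returned %d\n",
      ::select(fds[0] + 1, &readable, 0, 0, &tv));

  char buf[1024]; // reset(): drain the pipe
  while (::read(fds[0], buf, sizeof(buf)) > 0) {}
  ::close(fds[0]);
  ::close(fds[1]);
}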


@ -0,0 +1,63 @@
//
// detail/impl/posix_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
#define ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_event.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_event::posix_event()
: state_(0)
{
#if (defined(__MACH__) && defined(__APPLE__)) \
|| (defined(__ANDROID__) && (__ANDROID_API__ < 21))
int error = ::pthread_cond_init(&cond_, 0);
#else // (defined(__MACH__) && defined(__APPLE__))
// || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
::pthread_condattr_t attr;
int error = ::pthread_condattr_init(&attr);
if (error == 0)
{
error = ::pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
if (error == 0)
error = ::pthread_cond_init(&cond_, &attr);
::pthread_condattr_destroy(&attr);
}
#endif // (defined(__MACH__) && defined(__APPLE__))
// || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
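
The condattr dance above matters because timed waits should be measured
against CLOCK_MONOTONIC where the platform allows it, so that wall-clock
adjustments cannot stretch or shrink timeouts. A standalone sketch (not part
of this commit; requires pthread_condattr_setclock, so not macOS or old
Android):

// cond_demo.cpp - a condition variable timed against the monotonic clock.
#include <pthread.h>
#include <time.h>
#include <cstdio>

int main()
{
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  pthread_cond_t cond;

  pthread_condattr_t attr;
  ::pthread_condattr_init(&attr);
  ::pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  ::pthread_cond_init(&cond, &attr);
  ::pthread_condattr_destroy(&attr);

  // Absolute deadline one second from now on the monotonic clock.
  timespec abs_time;
  ::clock_gettime(CLOCK_MONOTONIC, &abs_time);
  abs_time.tv_sec += 1;

  ::pthread_mutex_lock(&mutex);
  int result = ::pthread_cond_timedwait(&cond, &mutex, &abs_time);
  ::pthread_mutex_unlock(&mutex);
  std::printf("timedwait returned %d (ETIMEDOUT expected)\n", result);
}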


@ -0,0 +1,46 @@
//
// detail/impl/posix_mutex.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
#define ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_mutex.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_mutex::posix_mutex()
{
int error = ::pthread_mutex_init(&mutex_, 0);
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "mutex");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP


@ -0,0 +1,84 @@
//
// detail/impl/posix_thread.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
#define ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_thread.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_thread::~posix_thread()
{
if (!joined_)
::pthread_detach(thread_);
}
void posix_thread::join()
{
if (!joined_)
{
::pthread_join(thread_, 0);
joined_ = true;
}
}
std::size_t posix_thread::hardware_concurrency()
{
#if defined(_SC_NPROCESSORS_ONLN)
long result = sysconf(_SC_NPROCESSORS_ONLN);
if (result > 0)
return result;
#endif // defined(_SC_NPROCESSORS_ONLN)
return 0;
}
void posix_thread::start_thread(func_base* arg)
{
int error = ::pthread_create(&thread_, 0,
asio_detail_posix_thread_function, arg);
if (error != 0)
{
delete arg;
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread");
}
}
void* asio_detail_posix_thread_function(void* arg)
{
posix_thread::auto_func_base_ptr func = {
static_cast<posix_thread::func_base*>(arg) };
func.ptr->run();
return 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
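
start_thread() hands a heap-allocated functor to a plain C trampoline, which
runs it and then releases it (the auto_func_base_ptr guard above; simplified
to a plain delete here). A standalone sketch of the same pattern (not part of
this commit; func_base and hello_func are illustrative names):

// thread_demo.cpp - heap functor plus C trampoline, as in start_thread().
#include <pthread.h>
#include <cstdio>

struct func_base
{
  virtual ~func_base() {}
  virtual void run() = 0;
};

struct hello_func : func_base
{
  void run() { std::printf("hello from thread\n"); }
};

extern "C" void* trampoline(void* arg)
{
  func_base* func = static_cast<func_base*>(arg);
  func->run();
  delete func; // the thread owns the functor once created
  return 0;
}

int main()
{
  pthread_t thread;
  if (::pthread_create(&thread, 0, trampoline, new hello_func) != 0)
    return 1;
  ::pthread_join(thread, 0);
}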


@ -0,0 +1,46 @@
//
// detail/impl/posix_tss_ptr.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
#define ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_tss_ptr.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void posix_tss_ptr_create(pthread_key_t& key)
{
int error = ::pthread_key_create(&key, 0);
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "tss");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
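
pthread_key_create() gives each thread its own slot for the same key; the
tss_ptr<> wrapper used by the handler-tracking code is a thin layer over
pthread_getspecific()/pthread_setspecific(). A standalone sketch (not part of
this commit):

// tss_demo.cpp - per-thread values through one pthread key.
#include <pthread.h>
#include <cstdio>

static pthread_key_t key;

extern "C" void* worker(void* arg)
{
  ::pthread_setspecific(key, arg); // visible to this thread only
  std::printf("thread sees %s\n",
      static_cast<const char*>(::pthread_getspecific(key)));
  return 0;
}

int main()
{
  ::pthread_key_create(&key, 0);
  pthread_t t1, t2;
  ::pthread_create(&t1, 0, worker, const_cast<char*>("one"));
  ::pthread_create(&t2, 0, worker, const_cast<char*>("two"));
  ::pthread_join(t1, 0);
  ::pthread_join(t2, 0);
  ::pthread_key_delete(key);
}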


@ -0,0 +1,223 @@
//
// detail/impl/reactive_descriptor_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
#define ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
#include "asio/error.hpp"
#include "asio/detail/reactive_descriptor_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
reactive_descriptor_service::reactive_descriptor_service(
execution_context& context)
: execution_context_service_base<reactive_descriptor_service>(context),
reactor_(asio::use_service<reactor>(context))
{
reactor_.init_task();
}
void reactive_descriptor_service::shutdown()
{
}
void reactive_descriptor_service::construct(
reactive_descriptor_service::implementation_type& impl)
{
impl.descriptor_ = -1;
impl.state_ = 0;
}
void reactive_descriptor_service::move_construct(
reactive_descriptor_service::implementation_type& impl,
reactive_descriptor_service::implementation_type& other_impl)
ASIO_NOEXCEPT
{
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
reactor_.move_descriptor(impl.descriptor_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_descriptor_service::move_assign(
reactive_descriptor_service::implementation_type& impl,
reactive_descriptor_service& other_service,
reactive_descriptor_service::implementation_type& other_impl)
{
destroy(impl);
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
other_service.reactor_.move_descriptor(impl.descriptor_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_descriptor_service::destroy(
reactive_descriptor_service::implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
(impl.state_ & descriptor_ops::possible_dup) == 0);
asio::error_code ignored_ec;
descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
}
asio::error_code reactive_descriptor_service::assign(
reactive_descriptor_service::implementation_type& impl,
const native_handle_type& native_descriptor, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
if (int err = reactor_.register_descriptor(
native_descriptor, impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
return ec;
}
impl.descriptor_ = native_descriptor;
impl.state_ = descriptor_ops::possible_dup;
ec = asio::error_code();
return ec;
}
asio::error_code reactive_descriptor_service::close(
reactive_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
(impl.state_ & descriptor_ops::possible_dup) == 0);
descriptor_ops::close(impl.descriptor_, impl.state_, ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
// http://lkml.org/lkml/2005/9/10/129
// We'll just have to assume that other OSes follow the same behaviour.)
construct(impl);
return ec;
}
reactive_descriptor_service::native_handle_type
reactive_descriptor_service::release(
reactive_descriptor_service::implementation_type& impl)
{
native_handle_type descriptor = impl.descriptor_;
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "release"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
construct(impl);
}
return descriptor;
}
asio::error_code reactive_descriptor_service::cancel(
reactive_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "cancel"));
reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_);
ec = asio::error_code();
return ec;
}
void reactive_descriptor_service::start_op(
reactive_descriptor_service::implementation_type& impl,
int op_type, reactor_op* op, bool is_continuation,
bool is_non_blocking, bool noop)
{
if (!noop)
{
if ((impl.state_ & descriptor_ops::non_blocking) ||
descriptor_ops::set_internal_non_blocking(
impl.descriptor_, impl.state_, true, op->ec_))
{
reactor_.start_op(op_type, impl.descriptor_,
impl.reactor_data_, op, is_continuation, is_non_blocking);
return;
}
}
reactor_.post_immediate_completion(op, is_continuation);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
#endif // ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
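
The public face of this service is asio::posix::stream_descriptor: its
constructor and assign() register an existing fd with the reactor (marking it
as a possible duplicate), and release() deregisters it and hands the raw fd
back, exactly as implemented above. A sketch of that round trip (not part of
this commit; assumes a POSIX build of the vendored asio):

// descriptor_demo.cpp - assign()/release() through the public API.
#include <asio/buffer.hpp>
#include <asio/io_context.hpp>
#include <asio/posix/stream_descriptor.hpp>
#include <fcntl.h>
#include <unistd.h>

int main()
{
  asio::io_context ctx;

  int fd = ::open("/dev/zero", O_RDONLY | O_NONBLOCK);
  if (fd < 0)
    return 1;

  // Construction with a native handle calls assign() internally.
  asio::posix::stream_descriptor desc(ctx, fd);

  char buf[16];
  asio::error_code ec;
  desc.read_some(asio::buffer(buf), ec);

  // release() deregisters the fd and returns ownership to the caller.
  int raw = desc.release();
  ::close(raw);
}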


@ -0,0 +1,149 @@
//
// detail/impl/reactive_serial_port_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP
#define ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_SERIAL_PORT)
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#include <cstring>
#include "asio/detail/reactive_serial_port_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
reactive_serial_port_service::reactive_serial_port_service(
execution_context& context)
: execution_context_service_base<reactive_serial_port_service>(context),
descriptor_service_(context)
{
}
void reactive_serial_port_service::shutdown()
{
descriptor_service_.shutdown();
}
asio::error_code reactive_serial_port_service::open(
reactive_serial_port_service::implementation_type& impl,
const std::string& device, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
descriptor_ops::state_type state = 0;
int fd = descriptor_ops::open(device.c_str(),
O_RDWR | O_NONBLOCK | O_NOCTTY, ec);
if (fd < 0)
return ec;
int s = descriptor_ops::fcntl(fd, F_GETFL, ec);
if (s >= 0)
s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec);
if (s < 0)
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
return ec;
}
// Set up default serial port options.
termios ios;
s = ::tcgetattr(fd, &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s >= 0)
{
#if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)
::cfmakeraw(&ios);
#else
ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK
| ISTRIP | INLCR | IGNCR | ICRNL | IXON);
ios.c_oflag &= ~OPOST;
ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
ios.c_cflag &= ~(CSIZE | PARENB);
ios.c_cflag |= CS8;
#endif
ios.c_iflag |= IGNPAR;
ios.c_cflag |= CREAD | CLOCAL;
s = ::tcsetattr(fd, TCSANOW, &ios);
descriptor_ops::get_last_error(ec, s < 0);
}
if (s < 0)
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
return ec;
}
// We're done. Take ownership of the serial port descriptor.
if (descriptor_service_.assign(impl, fd, ec))
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
}
return ec;
}
asio::error_code reactive_serial_port_service::do_set_option(
reactive_serial_port_service::implementation_type& impl,
reactive_serial_port_service::store_function_type store,
const void* option, asio::error_code& ec)
{
termios ios;
int s = ::tcgetattr(descriptor_service_.native_handle(impl), &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s < 0)
return ec;
if (store(option, ios, ec))
return ec;
s = ::tcsetattr(descriptor_service_.native_handle(impl), TCSANOW, &ios);
descriptor_ops::get_last_error(ec, s < 0);
return ec;
}
asio::error_code reactive_serial_port_service::do_get_option(
const reactive_serial_port_service::implementation_type& impl,
reactive_serial_port_service::load_function_type load,
void* option, asio::error_code& ec) const
{
termios ios;
int s = ::tcgetattr(descriptor_service_.native_handle(impl), &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s < 0)
return ec;
return load(option, ios, ec);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#endif // defined(ASIO_HAS_SERIAL_PORT)
#endif // ASIO_DETAIL_IMPL_REACTIVE_SERIAL_PORT_SERVICE_IPP
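
The raw-mode setup inside open() is ordinary termios programming: disable
echo and line editing, ignore bytes with parity errors, and enable the
receiver. The same configuration, standalone (not part of this commit; the
device path and baud rate are illustrative):

// serial_raw_demo.cpp - the termios raw-mode setup performed by open().
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>
#include <cstdio>

int main()
{
  int fd = ::open("/dev/ttyUSB0", O_RDWR | O_NONBLOCK | O_NOCTTY);
  if (fd < 0)
  {
    std::perror("open");
    return 1;
  }

  termios ios;
  if (::tcgetattr(fd, &ios) < 0)
    return 1;

  ::cfmakeraw(&ios);             // no echo, no line editing, 8-bit clean
  ios.c_iflag |= IGNPAR;         // ignore bytes with parity errors
  ios.c_cflag |= CREAD | CLOCAL; // enable receiver, ignore modem lines
  ::cfsetispeed(&ios, B115200);
  ::cfsetospeed(&ios, B115200);

  if (::tcsetattr(fd, TCSANOW, &ios) < 0)
    return 1;
  std::printf("port configured\n");
  ::close(fd);
}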


@ -0,0 +1,300 @@
//
// detail/impl/reactive_socket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_HAS_IOCP) \
&& !defined(ASIO_WINDOWS_RUNTIME)
#include "asio/detail/reactive_socket_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
reactive_socket_service_base::reactive_socket_service_base(
execution_context& context)
: reactor_(use_service<reactor>(context))
{
reactor_.init_task();
}
void reactive_socket_service_base::base_shutdown()
{
}
void reactive_socket_service_base::construct(
reactive_socket_service_base::base_implementation_type& impl)
{
impl.socket_ = invalid_socket;
impl.state_ = 0;
}
void reactive_socket_service_base::base_move_construct(
reactive_socket_service_base::base_implementation_type& impl,
reactive_socket_service_base::base_implementation_type& other_impl)
ASIO_NOEXCEPT
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
reactor_.move_descriptor(impl.socket_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_socket_service_base::base_move_assign(
reactive_socket_service_base::base_implementation_type& impl,
reactive_socket_service_base& other_service,
reactive_socket_service_base::base_implementation_type& other_impl)
{
destroy(impl);
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
other_service.reactor_.move_descriptor(impl.socket_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_socket_service_base::destroy(
reactive_socket_service_base::base_implementation_type& impl)
{
if (impl.socket_ != invalid_socket)
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "close"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
(impl.state_ & socket_ops::possible_dup) == 0);
asio::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
}
asio::error_code reactive_socket_service_base::close(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "close"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
(impl.state_ & socket_ops::possible_dup) == 0);
socket_ops::close(impl.socket_, impl.state_, false, ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
// http://lkml.org/lkml/2005/9/10/129
// We'll just have to assume that other OSes follow the same behaviour. The
// known exception is when Windows's closesocket() function fails with
  // WSAEWOULDBLOCK, but this case is handled inside socket_ops::close().)
construct(impl);
return ec;
}
socket_type reactive_socket_service_base::release(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return invalid_socket;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "release"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, false);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
socket_type sock = impl.socket_;
construct(impl);
ec = asio::error_code();
return sock;
}
asio::error_code reactive_socket_service_base::cancel(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "cancel"));
reactor_.cancel_ops(impl.socket_, impl.reactor_data_);
ec = asio::error_code();
return ec;
}
asio::error_code reactive_socket_service_base::do_open(
reactive_socket_service_base::base_implementation_type& impl,
int af, int type, int protocol, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
socket_holder sock(socket_ops::socket(af, type, protocol, ec));
if (sock.get() == invalid_socket)
return ec;
if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
return ec;
}
impl.socket_ = sock.release();
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
ec = asio::error_code();
return ec;
}
asio::error_code reactive_socket_service_base::do_assign(
reactive_socket_service_base::base_implementation_type& impl, int type,
const reactive_socket_service_base::native_handle_type& native_socket,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
if (int err = reactor_.register_descriptor(
native_socket, impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
return ec;
}
impl.socket_ = native_socket;
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.state_ |= socket_ops::possible_dup;
ec = asio::error_code();
return ec;
}
void reactive_socket_service_base::start_op(
reactive_socket_service_base::base_implementation_type& impl,
int op_type, reactor_op* op, bool is_continuation,
bool is_non_blocking, bool noop)
{
if (!noop)
{
if ((impl.state_ & socket_ops::non_blocking)
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
reactor_.start_op(op_type, impl.socket_,
impl.reactor_data_, op, is_continuation, is_non_blocking);
return;
}
}
reactor_.post_immediate_completion(op, is_continuation);
}
void reactive_socket_service_base::start_accept_op(
reactive_socket_service_base::base_implementation_type& impl,
reactor_op* op, bool is_continuation, bool peer_is_open)
{
if (!peer_is_open)
start_op(impl, reactor::read_op, op, is_continuation, true, false);
else
{
op->ec_ = asio::error::already_open;
reactor_.post_immediate_completion(op, is_continuation);
}
}
void reactive_socket_service_base::start_connect_op(
reactive_socket_service_base::base_implementation_type& impl,
reactor_op* op, bool is_continuation,
const socket_addr_type* addr, size_t addrlen)
{
if ((impl.state_ & socket_ops::non_blocking)
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
{
if (op->ec_ == asio::error::in_progress
|| op->ec_ == asio::error::would_block)
{
op->ec_ = asio::error_code();
reactor_.start_op(reactor::connect_op, impl.socket_,
impl.reactor_data_, op, is_continuation, false);
return;
}
}
}
reactor_.post_immediate_completion(op, is_continuation);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_HAS_IOCP)
// && !defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
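
start_connect_op() is the standard non-blocking connect: attempt the connect
immediately, and when it fails with EINPROGRESS (or EWOULDBLOCK), register for
writability and read the final status once the socket becomes writable. The
same sequence without the reactor (not part of this commit; the loopback
address is a placeholder):

// nb_connect_demo.cpp - non-blocking connect, as in start_connect_op().
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>

int main()
{
  int fd = ::socket(AF_INET, SOCK_STREAM, 0);
  ::fcntl(fd, F_SETFL, O_NONBLOCK);

  sockaddr_in addr = {};
  addr.sin_family = AF_INET;
  addr.sin_port = htons(80);
  ::inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

  if (::connect(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) != 0
      && errno == EINPROGRESS)
  {
    // Where the reactor queues a connect_op, we just select for writability.
    fd_set writable;
    FD_ZERO(&writable);
    FD_SET(fd, &writable);
    timeval tv = { 5, 0 };
    if (::select(fd + 1, 0, &writable, 0, &tv) == 1)
    {
      int err = 0;
      socklen_t len = sizeof(err);
      ::getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
      std::printf("connect finished, SO_ERROR=%d\n", err);
    }
  }
  ::close(fd);
}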


@ -0,0 +1,158 @@
//
// detail/impl/resolver_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/resolver_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class resolver_service_base::work_scheduler_runner
{
public:
work_scheduler_runner(scheduler_impl& work_scheduler)
: work_scheduler_(work_scheduler)
{
}
void operator()()
{
asio::error_code ec;
work_scheduler_.run(ec);
}
private:
scheduler_impl& work_scheduler_;
};
resolver_service_base::resolver_service_base(execution_context& context)
: scheduler_(asio::use_service<scheduler_impl>(context)),
work_scheduler_(new scheduler_impl(context, -1, false)),
work_thread_(0)
{
work_scheduler_->work_started();
}
resolver_service_base::~resolver_service_base()
{
base_shutdown();
}
void resolver_service_base::base_shutdown()
{
if (work_scheduler_.get())
{
work_scheduler_->work_finished();
work_scheduler_->stop();
if (work_thread_.get())
{
work_thread_->join();
work_thread_.reset();
}
work_scheduler_.reset();
}
}
void resolver_service_base::base_notify_fork(
execution_context::fork_event fork_ev)
{
if (work_thread_.get())
{
if (fork_ev == execution_context::fork_prepare)
{
work_scheduler_->stop();
work_thread_->join();
work_thread_.reset();
}
}
else if (fork_ev != execution_context::fork_prepare)
{
work_scheduler_->restart();
}
}
void resolver_service_base::construct(
resolver_service_base::implementation_type& impl)
{
impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
}
void resolver_service_base::destroy(
resolver_service_base::implementation_type& impl)
{
ASIO_HANDLER_OPERATION((scheduler_.context(),
"resolver", &impl, 0, "cancel"));
impl.reset();
}
void resolver_service_base::move_construct(implementation_type& impl,
implementation_type& other_impl)
{
impl = ASIO_MOVE_CAST(implementation_type)(other_impl);
}
void resolver_service_base::move_assign(implementation_type& impl,
resolver_service_base&, implementation_type& other_impl)
{
destroy(impl);
impl = ASIO_MOVE_CAST(implementation_type)(other_impl);
}
void resolver_service_base::cancel(
resolver_service_base::implementation_type& impl)
{
ASIO_HANDLER_OPERATION((scheduler_.context(),
"resolver", &impl, 0, "cancel"));
impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
}
void resolver_service_base::start_resolve_op(resolve_op* op)
{
if (ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
scheduler_.concurrency_hint()))
{
start_work_thread();
scheduler_.work_started();
work_scheduler_->post_immediate_completion(op, false);
}
else
{
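    // A non-locking (thread-unsafe) scheduler cannot accept completions
    // posted from a background thread, so the resolve is failed immediately
    // instead of being handed to the worker.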
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, false);
}
}
void resolver_service_base::start_work_thread()
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (!work_thread_.get())
{
work_thread_.reset(new asio::detail::thread(
work_scheduler_runner(*work_scheduler_)));
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
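A note on the file above: async_resolve() must not stall the caller's event loop on a blocking getaddrinfo() call, so the service funnels resolve operations into a private scheduler drained by one lazily started worker thread. A minimal caller-side sketch, assuming standalone asio is on the include path (resolver_example and the host/port literals are illustrative only):

#include <asio.hpp>
#include <iostream>

int resolver_example()
{
  asio::io_context ioc;
  asio::ip::tcp::resolver resolver(ioc);
  resolver.async_resolve("example.com", "80",
      [](const asio::error_code& ec,
          asio::ip::tcp::resolver::results_type results)
      {
        if (!ec)
          for (const auto& entry : results)
            std::cout << entry.endpoint() << "\n";
      });
  // run() stays busy until the worker thread posts the completion back.
  ioc.run();
  return 0;
}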


@ -0,0 +1,654 @@
//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define ASIO_DETAIL_IMPL_SCHEDULER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/concurrency_hint.hpp"
#include "asio/detail/event.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/reactor.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/scheduler_thread_info.hpp"
#include "asio/detail/signal_blocker.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class scheduler::thread_function
{
public:
explicit thread_function(scheduler* s)
: this_(s)
{
}
void operator()()
{
asio::error_code ec;
this_->run(ec);
}
private:
scheduler* this_;
};
struct scheduler::task_cleanup
{
~task_cleanup()
{
if (this_thread_->private_outstanding_work > 0)
{
asio::detail::increment(
scheduler_->outstanding_work_,
this_thread_->private_outstanding_work);
}
this_thread_->private_outstanding_work = 0;
// Enqueue the completed operations and reinsert the task at the end of
// the operation queue.
lock_->lock();
scheduler_->task_interrupted_ = true;
scheduler_->op_queue_.push(this_thread_->private_op_queue);
scheduler_->op_queue_.push(&scheduler_->task_operation_);
}
scheduler* scheduler_;
mutex::scoped_lock* lock_;
thread_info* this_thread_;
};
struct scheduler::work_cleanup
{
~work_cleanup()
{
if (this_thread_->private_outstanding_work > 1)
{
asio::detail::increment(
scheduler_->outstanding_work_,
this_thread_->private_outstanding_work - 1);
}
else if (this_thread_->private_outstanding_work < 1)
{
scheduler_->work_finished();
}
this_thread_->private_outstanding_work = 0;
#if defined(ASIO_HAS_THREADS)
if (!this_thread_->private_op_queue.empty())
{
lock_->lock();
scheduler_->op_queue_.push(this_thread_->private_op_queue);
}
#endif // defined(ASIO_HAS_THREADS)
}
scheduler* scheduler_;
mutex::scoped_lock* lock_;
thread_info* this_thread_;
};
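// Both cleanup helpers above do their bookkeeping in destructors so that the
// shared counters and queues are restored even when a handler exits by
// throwing; do_run_one() below relies on this when it completes operations
// with the lock released.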
scheduler::scheduler(asio::execution_context& ctx,
int concurrency_hint, bool own_thread)
: asio::detail::execution_context_service_base<scheduler>(ctx),
one_thread_(concurrency_hint == 1
|| !ASIO_CONCURRENCY_HINT_IS_LOCKING(
SCHEDULER, concurrency_hint)
|| !ASIO_CONCURRENCY_HINT_IS_LOCKING(
REACTOR_IO, concurrency_hint)),
mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
SCHEDULER, concurrency_hint)),
task_(0),
task_interrupted_(true),
outstanding_work_(0),
stopped_(false),
shutdown_(false),
concurrency_hint_(concurrency_hint),
thread_(0)
{
ASIO_HANDLER_TRACKING_INIT;
if (own_thread)
{
++outstanding_work_;
asio::detail::signal_blocker sb;
thread_ = new asio::detail::thread(thread_function(this));
}
}
scheduler::~scheduler()
{
if (thread_)
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
stop_all_threads(lock);
lock.unlock();
thread_->join();
delete thread_;
}
}
void scheduler::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
if (thread_)
stop_all_threads(lock);
lock.unlock();
// Join thread to ensure task operation is returned to queue.
if (thread_)
{
thread_->join();
delete thread_;
thread_ = 0;
}
// Destroy handler objects.
while (!op_queue_.empty())
{
operation* o = op_queue_.front();
op_queue_.pop();
if (o != &task_operation_)
o->destroy();
}
// Reset to initial state.
task_ = 0;
}
void scheduler::init_task()
{
mutex::scoped_lock lock(mutex_);
if (!shutdown_ && !task_)
{
task_ = &use_service<reactor>(this->context());
op_queue_.push(&task_operation_);
wake_one_thread_and_unlock(lock);
}
}
std::size_t scheduler::run(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
std::size_t n = 0;
for (; do_run_one(lock, this_thread, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
}
std::size_t scheduler::run_one(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
return do_run_one(lock, this_thread, ec);
}
std::size_t scheduler::wait_one(long usec, asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
return do_wait_one(lock, this_thread, usec, ec);
}
std::size_t scheduler::poll(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_THREADS)
// We want to support nested calls to poll() and poll_one(), so any handlers
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)
std::size_t n = 0;
for (; do_poll_one(lock, this_thread, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
}
std::size_t scheduler::poll_one(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_THREADS)
// We want to support nested calls to poll() and poll_one(), so any handlers
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)
return do_poll_one(lock, this_thread, ec);
}
void scheduler::stop()
{
mutex::scoped_lock lock(mutex_);
stop_all_threads(lock);
}
bool scheduler::stopped() const
{
mutex::scoped_lock lock(mutex_);
return stopped_;
}
void scheduler::restart()
{
mutex::scoped_lock lock(mutex_);
stopped_ = false;
}
void scheduler::compensating_work_started()
{
thread_info_base* this_thread = thread_call_stack::contains(this);
++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}
void scheduler::capture_current_exception()
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
this_thread->capture_current_exception();
}
void scheduler::post_immediate_completion(
scheduler::operation* op, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_ || is_continuation)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
++static_cast<thread_info*>(this_thread)->private_outstanding_work;
static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
return;
}
}
#else // defined(ASIO_HAS_THREADS)
(void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)
work_started();
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_immediate_completions(std::size_t n,
op_queue<scheduler::operation>& ops, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_ || is_continuation)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_outstanding_work
+= static_cast<long>(n);
static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
return;
}
}
#else // defined(ASIO_HAS_THREADS)
(void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)
increment(outstanding_work_, static_cast<long>(n));
mutex::scoped_lock lock(mutex_);
op_queue_.push(ops);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
return;
}
}
#endif // defined(ASIO_HAS_THREADS)
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_deferred_completions(
op_queue<scheduler::operation>& ops)
{
if (!ops.empty())
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
return;
}
}
#endif // defined(ASIO_HAS_THREADS)
mutex::scoped_lock lock(mutex_);
op_queue_.push(ops);
wake_one_thread_and_unlock(lock);
}
}
void scheduler::do_dispatch(
scheduler::operation* op)
{
work_started();
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::abandon_operations(
op_queue<scheduler::operation>& ops)
{
op_queue<scheduler::operation> ops2;
ops2.push(ops);
}
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread,
const asio::error_code& ec)
{
while (!stopped_)
{
if (!op_queue_.empty())
{
// Prepare to execute first handler from queue.
operation* o = op_queue_.front();
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
if (o == &task_operation_)
{
task_interrupted_ = more_handlers;
if (more_handlers && !one_thread_)
wakeup_event_.unlock_and_signal_one(lock);
else
lock.unlock();
task_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
}
else
{
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
}
else
{
wakeup_event_.clear(lock);
wakeup_event_.wait(lock);
}
}
return 0;
}
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread, long usec,
const asio::error_code& ec)
{
if (stopped_)
return 0;
operation* o = op_queue_.front();
if (o == 0)
{
wakeup_event_.clear(lock);
wakeup_event_.wait_for_usec(lock, usec);
usec = 0; // Wait at most once.
o = op_queue_.front();
}
if (o == &task_operation_)
{
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
task_interrupted_ = more_handlers;
if (more_handlers && !one_thread_)
wakeup_event_.unlock_and_signal_one(lock);
else
lock.unlock();
{
task_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
}
o = op_queue_.front();
if (o == &task_operation_)
{
if (!one_thread_)
wakeup_event_.maybe_unlock_and_signal_one(lock);
return 0;
}
}
if (o == 0)
return 0;
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread,
const asio::error_code& ec)
{
if (stopped_)
return 0;
operation* o = op_queue_.front();
if (o == &task_operation_)
{
op_queue_.pop();
lock.unlock();
{
task_cleanup c = { this, &lock, &this_thread };
(void)c;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(0, this_thread.private_op_queue);
}
o = op_queue_.front();
if (o == &task_operation_)
{
wakeup_event_.maybe_unlock_and_signal_one(lock);
return 0;
}
}
if (o == 0)
return 0;
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
void scheduler::stop_all_threads(
mutex::scoped_lock& lock)
{
stopped_ = true;
wakeup_event_.signal_all(lock);
if (!task_interrupted_ && task_)
{
task_interrupted_ = true;
task_->interrupt();
}
}
void scheduler::wake_one_thread_and_unlock(
mutex::scoped_lock& lock)
{
if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
{
if (!task_interrupted_ && task_)
{
task_interrupted_ = true;
task_->interrupt();
}
lock.unlock();
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP
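The run()/poll()/restart() semantics implemented above are surfaced directly by asio::io_context. A small sketch, assuming standalone asio (scheduler_example is an illustrative name):

#include <asio.hpp>
#include <cassert>

int scheduler_example()
{
  asio::io_context ioc;
  int count = 0;
  asio::post(ioc, [&]{ ++count; });
  std::size_t n = ioc.run();   // Returns once outstanding work drops to zero.
  assert(n == 1 && count == 1);
  ioc.restart();               // Clears the stopped_ flag set by run().
  asio::post(ioc, [&]{ ++count; });
  n = ioc.poll();              // Executes ready handlers without blocking.
  assert(n == 1 && count == 2);
  return 0;
}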


@ -0,0 +1,100 @@
//
// detail/impl/select_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
#define ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) \
|| (!defined(ASIO_HAS_DEV_POLL) \
&& !defined(ASIO_HAS_EPOLL) \
&& !defined(ASIO_HAS_KQUEUE) \
&& !defined(ASIO_WINDOWS_RUNTIME))
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Time_Traits>
void select_reactor::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
// Remove a timer queue from the reactor.
template <typename Time_Traits>
void select_reactor::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void select_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
interrupter_.interrupt();
}
template <typename Time_Traits>
std::size_t select_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void select_reactor::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& target,
typename timer_queue<Time_Traits>::per_timer_data& source)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
// || (!defined(ASIO_HAS_DEV_POLL)
// && !defined(ASIO_HAS_EPOLL)
// && !defined(ASIO_HAS_KQUEUE)
// && !defined(ASIO_WINDOWS_RUNTIME))
#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
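The template hooks above (schedule_timer, cancel_timer, move_timer) are what the public timer classes ultimately call into. A sketch, assuming standalone asio (timer_example is an illustrative name):

#include <asio.hpp>
#include <chrono>
#include <iostream>
#include <string>

int timer_example()
{
  asio::io_context ioc;
  asio::steady_timer timer(ioc, std::chrono::milliseconds(50));
  timer.async_wait([](const asio::error_code& ec)
      {
        // schedule_timer() queued this handler; a cancel_timer() call would
        // have delivered asio::error::operation_aborted instead.
        std::cout << (ec ? ec.message() : std::string("timer fired")) << "\n";
      });
  ioc.run();
  return 0;
}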


@ -0,0 +1,338 @@
//
// detail/impl/select_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
#define ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) \
|| (!defined(ASIO_HAS_DEV_POLL) \
&& !defined(ASIO_HAS_EPOLL) \
&& !defined(ASIO_HAS_KQUEUE) \
&& !defined(ASIO_WINDOWS_RUNTIME))
#include "asio/detail/fd_set_adapter.hpp"
#include "asio/detail/select_reactor.hpp"
#include "asio/detail/signal_blocker.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
#if defined(ASIO_HAS_IOCP)
class select_reactor::thread_function
{
public:
explicit thread_function(select_reactor* r)
: this_(r)
{
}
void operator()()
{
this_->run_thread();
}
private:
select_reactor* this_;
};
#endif // defined(ASIO_HAS_IOCP)
select_reactor::select_reactor(asio::execution_context& ctx)
: execution_context_service_base<select_reactor>(ctx),
scheduler_(use_service<scheduler_type>(ctx)),
mutex_(),
interrupter_(),
#if defined(ASIO_HAS_IOCP)
stop_thread_(false),
thread_(0),
#endif // defined(ASIO_HAS_IOCP)
shutdown_(false)
{
#if defined(ASIO_HAS_IOCP)
asio::detail::signal_blocker sb;
thread_ = new asio::detail::thread(thread_function(this));
#endif // defined(ASIO_HAS_IOCP)
}
select_reactor::~select_reactor()
{
shutdown();
}
void select_reactor::shutdown()
{
asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
#if defined(ASIO_HAS_IOCP)
stop_thread_ = true;
if (thread_)
interrupter_.interrupt();
#endif // defined(ASIO_HAS_IOCP)
lock.unlock();
#if defined(ASIO_HAS_IOCP)
if (thread_)
{
thread_->join();
delete thread_;
thread_ = 0;
}
#endif // defined(ASIO_HAS_IOCP)
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].get_all_operations(ops);
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void select_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
interrupter_.recreate();
}
void select_reactor::init_task()
{
scheduler_.init_task();
}
int select_reactor::register_descriptor(socket_type,
select_reactor::per_descriptor_data&)
{
return 0;
}
int select_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
select_reactor::per_descriptor_data&, reactor_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue_[op_type].enqueue_operation(descriptor, op);
interrupter_.interrupt();
return 0;
}
void select_reactor::move_descriptor(socket_type,
select_reactor::per_descriptor_data&,
select_reactor::per_descriptor_data&)
{
}
void select_reactor::start_op(int op_type, socket_type descriptor,
select_reactor::per_descriptor_data&, reactor_op* op,
bool is_continuation, bool)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
post_immediate_completion(op, is_continuation);
return;
}
bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
scheduler_.work_started();
if (first)
interrupter_.interrupt();
}
void select_reactor::cancel_ops(socket_type descriptor,
select_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void select_reactor::deregister_descriptor(socket_type descriptor,
select_reactor::per_descriptor_data&, bool)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void select_reactor::deregister_internal_descriptor(
socket_type descriptor, select_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].cancel_operations(descriptor, ops);
}
void select_reactor::cleanup_descriptor_data(
select_reactor::per_descriptor_data&)
{
}
void select_reactor::run(long usec, op_queue<operation>& ops)
{
asio::detail::mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_IOCP)
// Check if the thread is supposed to stop.
if (stop_thread_)
return;
#endif // defined(ASIO_HAS_IOCP)
// Set up the descriptor sets.
for (int i = 0; i < max_select_ops; ++i)
fd_sets_[i].reset();
fd_sets_[read_op].set(interrupter_.read_descriptor());
socket_type max_fd = 0;
bool have_work_to_do = !timer_queues_.all_empty();
for (int i = 0; i < max_select_ops; ++i)
{
have_work_to_do = have_work_to_do || !op_queue_[i].empty();
fd_sets_[i].set(op_queue_[i], ops);
if (fd_sets_[i].max_descriptor() > max_fd)
max_fd = fd_sets_[i].max_descriptor();
}
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty();
fd_sets_[write_op].set(op_queue_[connect_op], ops);
if (fd_sets_[write_op].max_descriptor() > max_fd)
max_fd = fd_sets_[write_op].max_descriptor();
fd_sets_[except_op].set(op_queue_[connect_op], ops);
if (fd_sets_[except_op].max_descriptor() > max_fd)
max_fd = fd_sets_[except_op].max_descriptor();
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// We can return immediately if there's no work to do and the reactor is
// not supposed to block.
if (!usec && !have_work_to_do)
return;
// Determine how long to block while waiting for events.
timeval tv_buf = { 0, 0 };
timeval* tv = usec ? get_timeout(usec, tv_buf) : &tv_buf;
lock.unlock();
// Block on the select call until descriptors become ready.
asio::error_code ec;
int retval = socket_ops::select(static_cast<int>(max_fd + 1),
fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec);
// Reset the interrupter.
if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor()))
{
if (!interrupter_.reset())
{
lock.lock();
interrupter_.recreate();
}
--retval;
}
lock.lock();
// Dispatch all ready operations.
if (retval > 0)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
fd_sets_[except_op].perform(op_queue_[connect_op], ops);
fd_sets_[write_op].perform(op_queue_[connect_op], ops);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
for (int i = max_select_ops - 1; i >= 0; --i)
fd_sets_[i].perform(op_queue_[i], ops);
}
timer_queues_.get_ready_timers(ops);
}
void select_reactor::interrupt()
{
interrupter_.interrupt();
}
#if defined(ASIO_HAS_IOCP)
void select_reactor::run_thread()
{
asio::detail::mutex::scoped_lock lock(mutex_);
while (!stop_thread_)
{
lock.unlock();
op_queue<operation> ops;
run(true, ops);
scheduler_.post_deferred_completions(ops);
lock.lock();
}
}
#endif // defined(ASIO_HAS_IOCP)
void select_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void select_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
timeval* select_reactor::get_timeout(long usec, timeval& tv)
{
// By default we will wait no longer than 5 minutes. This will ensure that
// any changes to the system clock are detected after no longer than this.
const long max_usec = 5 * 60 * 1000 * 1000;
usec = timer_queues_.wait_duration_usec(
(usec < 0 || max_usec < usec) ? max_usec : usec);
tv.tv_sec = usec / 1000000;
tv.tv_usec = usec % 1000000;
return &tv;
}
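// Worked example of the clamping above: a timer due in 2.5 seconds yields
// usec = 2500000 (below the five-minute cap of 300000000), so the timeval
// becomes tv_sec = 2500000 / 1000000 = 2 and tv_usec = 2500000 % 1000000
// = 500000.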
void select_reactor::cancel_ops_unlocked(socket_type descriptor,
const asio::error_code& ec)
{
bool need_interrupt = false;
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
need_interrupt = op_queue_[i].cancel_operations(
descriptor, ops, ec) || need_interrupt;
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
// || (!defined(ASIO_HAS_DEV_POLL)
// && !defined(ASIO_HAS_EPOLL)
// && !defined(ASIO_HAS_KQUEUE)
// && !defined(ASIO_WINDOWS_RUNTIME))
#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP


@ -0,0 +1,94 @@
//
// detail/impl/service_registry.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Service>
Service& service_registry::use_service()
{
execution_context::service::key key;
init_key<Service>(key, 0);
factory_type factory = &service_registry::create<Service, execution_context>;
return *static_cast<Service*>(do_use_service(key, factory, &owner_));
}
template <typename Service>
Service& service_registry::use_service(io_context& owner)
{
execution_context::service::key key;
init_key<Service>(key, 0);
factory_type factory = &service_registry::create<Service, io_context>;
return *static_cast<Service*>(do_use_service(key, factory, &owner));
}
template <typename Service>
void service_registry::add_service(Service* new_service)
{
execution_context::service::key key;
init_key<Service>(key, 0);
return do_add_service(key, new_service);
}
template <typename Service>
bool service_registry::has_service() const
{
execution_context::service::key key;
init_key<Service>(key, 0);
return do_has_service(key);
}
template <typename Service>
inline void service_registry::init_key(
execution_context::service::key& key, ...)
{
init_key_from_id(key, Service::id);
}
#if !defined(ASIO_NO_TYPEID)
template <typename Service>
void service_registry::init_key(execution_context::service::key& key,
typename enable_if<
is_base_of<typename Service::key_type, Service>::value>::type*)
{
key.type_info_ = &typeid(typeid_wrapper<Service>);
key.id_ = 0;
}
template <typename Service>
void service_registry::init_key_from_id(execution_context::service::key& key,
const service_id<Service>& /*id*/)
{
key.type_info_ = &typeid(typeid_wrapper<Service>);
key.id_ = 0;
}
#endif // !defined(ASIO_NO_TYPEID)
template <typename Service, typename Owner>
execution_context::service* service_registry::create(void* owner)
{
return new Service(*static_cast<Owner*>(owner));
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
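The registry keys services by type, so use_service() hands back one lazily created instance per context. A hedged sketch of a minimal custom service (my_service and service_example are illustrative names; assumes standalone asio):

#include <asio.hpp>

class my_service : public asio::execution_context::service
{
public:
  typedef my_service key_type;
  static asio::execution_context::id id;
  explicit my_service(asio::execution_context& ctx)
    : asio::execution_context::service(ctx)
  {
  }
private:
  void shutdown() {}
};

asio::execution_context::id my_service::id;

int service_example()
{
  asio::io_context ioc;
  my_service& a = asio::use_service<my_service>(ioc);
  my_service& b = asio::use_service<my_service>(ioc);
  return &a == &b ? 0 : 1; // Same instance both times.
}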


@ -0,0 +1,197 @@
//
// detail/impl/service_registry.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <vector>
#include "asio/detail/service_registry.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
service_registry::service_registry(execution_context& owner)
: owner_(owner),
first_service_(0)
{
}
service_registry::~service_registry()
{
}
void service_registry::shutdown_services()
{
execution_context::service* service = first_service_;
while (service)
{
service->shutdown();
service = service->next_;
}
}
void service_registry::destroy_services()
{
while (first_service_)
{
execution_context::service* next_service = first_service_->next_;
destroy(first_service_);
first_service_ = next_service;
}
}
void service_registry::notify_fork(execution_context::fork_event fork_ev)
{
// Make a copy of all of the services while holding the lock. We don't want
// to hold the lock while calling into each service, as it may try to call
// back into this class.
std::vector<execution_context::service*> services;
{
asio::detail::mutex::scoped_lock lock(mutex_);
execution_context::service* service = first_service_;
while (service)
{
services.push_back(service);
service = service->next_;
}
}
// If processing the fork_prepare event, we want to go in reverse order of
// service registration, which happens to be the existing order of the
// services in the vector. For the other events we want to go in the other
// direction.
std::size_t num_services = services.size();
if (fork_ev == execution_context::fork_prepare)
for (std::size_t i = 0; i < num_services; ++i)
services[i]->notify_fork(fork_ev);
else
for (std::size_t i = num_services; i > 0; --i)
services[i - 1]->notify_fork(fork_ev);
}
void service_registry::init_key_from_id(execution_context::service::key& key,
const execution_context::id& id)
{
key.type_info_ = 0;
key.id_ = &id;
}
bool service_registry::keys_match(
const execution_context::service::key& key1,
const execution_context::service::key& key2)
{
if (key1.id_ && key2.id_)
if (key1.id_ == key2.id_)
return true;
if (key1.type_info_ && key2.type_info_)
if (*key1.type_info_ == *key2.type_info_)
return true;
return false;
}
void service_registry::destroy(execution_context::service* service)
{
delete service;
}
execution_context::service* service_registry::do_use_service(
const execution_context::service::key& key,
factory_type factory, void* owner)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// First see if there is an existing service object with the given key.
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return service;
service = service->next_;
}
// Create a new service object. The service registry's mutex is not locked
// at this time to allow for nested calls into this function from the new
// service's constructor.
lock.unlock();
auto_service_ptr new_service = { factory(owner) };
new_service.ptr_->key_ = key;
lock.lock();
// Check that nobody else created another service object of the same type
// while the lock was released.
service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return service;
service = service->next_;
}
// Service was successfully initialised, pass ownership to registry.
new_service.ptr_->next_ = first_service_;
first_service_ = new_service.ptr_;
new_service.ptr_ = 0;
return first_service_;
}
void service_registry::do_add_service(
const execution_context::service::key& key,
execution_context::service* new_service)
{
if (&owner_ != &new_service->context())
asio::detail::throw_exception(invalid_service_owner());
asio::detail::mutex::scoped_lock lock(mutex_);
// Check if there is an existing service object with the given key.
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
asio::detail::throw_exception(service_already_exists());
service = service->next_;
}
// Take ownership of the service object.
new_service->key_ = key;
new_service->next_ = first_service_;
first_service_ = new_service;
}
bool service_registry::do_has_service(
const execution_context::service::key& key) const
{
asio::detail::mutex::scoped_lock lock(mutex_);
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return true;
service = service->next_;
}
return false;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
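do_use_service() above is an unlock-create-recheck pattern: the lock is dropped while the factory runs (so a service constructor may recursively call use_service), then the list is re-scanned before the new object is published. The same shape in miniature, under the assumption of a std::mutex-protected singleton (Widget and get_widget are placeholder names):

#include <memory>
#include <mutex>

struct Widget { };

std::mutex m;
std::unique_ptr<Widget> instance;

Widget& get_widget()
{
  std::unique_lock<std::mutex> lock(m);
  if (instance)
    return *instance;
  lock.unlock();                 // Construct without holding the lock.
  std::unique_ptr<Widget> fresh(new Widget);
  lock.lock();
  if (!instance)                 // Recheck: another thread may have won.
    instance = std::move(fresh);
  return *instance;
}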


@ -0,0 +1,668 @@
//
// detail/impl/signal_set_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
#define ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstring>
#include <stdexcept>
#include "asio/detail/reactor.hpp"
#include "asio/detail/signal_blocker.hpp"
#include "asio/detail/signal_set_service.hpp"
#include "asio/detail/static_mutex.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct signal_state
{
// Mutex used for protecting global state.
static_mutex mutex_;
// The read end of the pipe used for signal notifications.
int read_descriptor_;
// The write end of the pipe used for signal notifications.
int write_descriptor_;
// Whether the signal state has been prepared for a fork.
bool fork_prepared_;
// The head of a linked list of all signal_set_service instances.
class signal_set_service* service_list_;
// A count of the number of objects that are registered for each signal.
std::size_t registration_count_[max_signal_number];
};
signal_state* get_signal_state()
{
static signal_state state = {
ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0, { 0 } };
return &state;
}
void asio_signal_handler(int signal_number)
{
#if defined(ASIO_WINDOWS) \
|| defined(ASIO_WINDOWS_RUNTIME) \
|| defined(__CYGWIN__)
signal_set_service::deliver_signal(signal_number);
#else // defined(ASIO_WINDOWS)
// || defined(ASIO_WINDOWS_RUNTIME)
// || defined(__CYGWIN__)
int saved_errno = errno;
signal_state* state = get_signal_state();
signed_size_type result = ::write(state->write_descriptor_,
&signal_number, sizeof(signal_number));
(void)result;
errno = saved_errno;
#endif // defined(ASIO_WINDOWS)
// || defined(ASIO_WINDOWS_RUNTIME)
// || defined(__CYGWIN__)
#if defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION)
::signal(signal_number, asio_signal_handler);
#endif // defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION)
}
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
class signal_set_service::pipe_read_op : public reactor_op
{
public:
pipe_read_op()
: reactor_op(asio::error_code(),
&pipe_read_op::do_perform, pipe_read_op::do_complete)
{
}
static status do_perform(reactor_op*)
{
signal_state* state = get_signal_state();
int fd = state->read_descriptor_;
int signal_number = 0;
while (::read(fd, &signal_number, sizeof(int)) == sizeof(int))
if (signal_number >= 0 && signal_number < max_signal_number)
signal_set_service::deliver_signal(signal_number);
return not_done;
}
static void do_complete(void* /*owner*/, operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
pipe_read_op* o(static_cast<pipe_read_op*>(base));
delete o;
}
};
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
signal_set_service::signal_set_service(execution_context& context)
: execution_context_service_base<signal_set_service>(context),
scheduler_(asio::use_service<scheduler_impl>(context)),
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
reactor_(asio::use_service<reactor>(context)),
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
next_(0),
prev_(0)
{
get_signal_state()->mutex_.init();
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
reactor_.init_task();
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
for (int i = 0; i < max_signal_number; ++i)
registrations_[i] = 0;
add_service(this);
}
signal_set_service::~signal_set_service()
{
remove_service(this);
}
void signal_set_service::shutdown()
{
remove_service(this);
op_queue<operation> ops;
for (int i = 0; i < max_signal_number; ++i)
{
registration* reg = registrations_[i];
while (reg)
{
ops.push(*reg->queue_);
reg = reg->next_in_table_;
}
}
scheduler_.abandon_operations(ops);
}
void signal_set_service::notify_fork(execution_context::fork_event fork_ev)
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
switch (fork_ev)
{
case execution_context::fork_prepare:
{
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = true;
lock.unlock();
reactor_.deregister_internal_descriptor(read_descriptor, reactor_data_);
reactor_.cleanup_descriptor_data(reactor_data_);
}
break;
case execution_context::fork_parent:
if (state->fork_prepared_)
{
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = false;
lock.unlock();
reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, reactor_data_, new pipe_read_op);
}
break;
case execution_context::fork_child:
if (state->fork_prepared_)
{
asio::detail::signal_blocker blocker;
close_descriptors();
open_descriptors();
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = false;
lock.unlock();
reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, reactor_data_, new pipe_read_op);
}
break;
default:
break;
}
#else // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
(void)fork_ev;
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::construct(
signal_set_service::implementation_type& impl)
{
impl.signals_ = 0;
}
void signal_set_service::destroy(
signal_set_service::implementation_type& impl)
{
asio::error_code ignored_ec;
clear(impl, ignored_ec);
cancel(impl, ignored_ec);
}
asio::error_code signal_set_service::add(
signal_set_service::implementation_type& impl,
int signal_number, asio::error_code& ec)
{
// Check that the signal number is valid.
if (signal_number < 0 || signal_number >= max_signal_number)
{
ec = asio::error::invalid_argument;
return ec;
}
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
// Find the appropriate place to insert the registration.
registration** insertion_point = &impl.signals_;
registration* next = impl.signals_;
while (next && next->signal_number_ < signal_number)
{
insertion_point = &next->next_in_set_;
next = next->next_in_set_;
}
// Only do something if the signal is not already registered.
if (next == 0 || next->signal_number_ != signal_number)
{
registration* new_registration = new registration;
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Register for the signal if we're the first.
if (state->registration_count_[signal_number] == 0)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = asio_signal_handler;
sigfillset(&sa.sa_mask);
if (::sigaction(signal_number, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(signal_number, asio_signal_handler) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
delete new_registration;
return ec;
}
}
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Record the new registration in the set.
new_registration->signal_number_ = signal_number;
new_registration->queue_ = &impl.queue_;
new_registration->next_in_set_ = next;
*insertion_point = new_registration;
// Insert registration into the registration table.
new_registration->next_in_table_ = registrations_[signal_number];
if (registrations_[signal_number])
registrations_[signal_number]->prev_in_table_ = new_registration;
registrations_[signal_number] = new_registration;
++state->registration_count_[signal_number];
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::remove(
signal_set_service::implementation_type& impl,
int signal_number, asio::error_code& ec)
{
// Check that the signal number is valid.
if (signal_number < 0 || signal_number >= max_signal_number)
{
ec = asio::error::invalid_argument;
return ec;
}
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
// Find the signal number in the list of registrations.
registration** deletion_point = &impl.signals_;
registration* reg = impl.signals_;
while (reg && reg->signal_number_ < signal_number)
{
deletion_point = &reg->next_in_set_;
reg = reg->next_in_set_;
}
if (reg != 0 && reg->signal_number_ == signal_number)
{
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Set signal handler back to the default if we're the last.
if (state->registration_count_[signal_number] == 1)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
if (::sigaction(signal_number, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(signal_number, SIG_DFL) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
return ec;
}
}
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Remove the registration from the set.
*deletion_point = reg->next_in_set_;
// Remove the registration from the registration table.
if (registrations_[signal_number] == reg)
registrations_[signal_number] = reg->next_in_table_;
if (reg->prev_in_table_)
reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
if (reg->next_in_table_)
reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
--state->registration_count_[signal_number];
delete reg;
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::clear(
signal_set_service::implementation_type& impl,
asio::error_code& ec)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
while (registration* reg = impl.signals_)
{
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Set signal handler back to the default if we're the last.
if (state->registration_count_[reg->signal_number_] == 1)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
if (::sigaction(reg->signal_number_, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
return ec;
}
}
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Remove the registration from the registration table.
if (registrations_[reg->signal_number_] == reg)
registrations_[reg->signal_number_] = reg->next_in_table_;
if (reg->prev_in_table_)
reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
if (reg->next_in_table_)
reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
--state->registration_count_[reg->signal_number_];
impl.signals_ = reg->next_in_set_;
delete reg;
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::cancel(
signal_set_service::implementation_type& impl,
asio::error_code& ec)
{
ASIO_HANDLER_OPERATION((scheduler_.context(),
"signal_set", &impl, 0, "cancel"));
op_queue<operation> ops;
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
while (signal_op* op = impl.queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.queue_.pop();
ops.push(op);
}
}
scheduler_.post_deferred_completions(ops);
ec = asio::error_code();
return ec;
}
void signal_set_service::deliver_signal(int signal_number)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
signal_set_service* service = state->service_list_;
while (service)
{
op_queue<operation> ops;
registration* reg = service->registrations_[signal_number];
while (reg)
{
if (reg->queue_->empty())
{
++reg->undelivered_;
}
else
{
while (signal_op* op = reg->queue_->front())
{
op->signal_number_ = signal_number;
reg->queue_->pop();
ops.push(op);
}
}
reg = reg->next_in_table_;
}
service->scheduler_.post_deferred_completions(ops);
service = service->next_;
}
}
void signal_set_service::add_service(signal_set_service* service)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If this is the first service to be created, open a new pipe.
if (state->service_list_ == 0)
open_descriptors();
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If a scheduler_ object is thread-unsafe then it must be the only
// scheduler used to create signal_set objects.
if (state->service_list_ != 0)
{
if (!ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
service->scheduler_.concurrency_hint())
|| !ASIO_CONCURRENCY_HINT_IS_LOCKING(SCHEDULER,
state->service_list_->scheduler_.concurrency_hint()))
{
std::logic_error ex(
"Thread-unsafe execution context objects require "
"exclusive access to signal handling.");
asio::detail::throw_exception(ex);
}
}
// Insert service into linked list of all services.
service->next_ = state->service_list_;
service->prev_ = 0;
if (state->service_list_)
state->service_list_->prev_ = service;
state->service_list_ = service;
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
// Register for pipe readiness notifications.
int read_descriptor = state->read_descriptor_;
lock.unlock();
service->reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, service->reactor_data_, new pipe_read_op);
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::remove_service(signal_set_service* service)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
if (service->next_ || service->prev_ || state->service_list_ == service)
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
// Disable the pipe readiness notifications.
int read_descriptor = state->read_descriptor_;
lock.unlock();
service->reactor_.deregister_internal_descriptor(
read_descriptor, service->reactor_data_);
service->reactor_.cleanup_descriptor_data(service->reactor_data_);
lock.lock();
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
// Remove service from linked list of all services.
if (state->service_list_ == service)
state->service_list_ = service->next_;
if (service->prev_)
service->prev_->next_ = service->next_;
if (service->next_)
service->next_->prev_ = service->prev_;
service->next_ = 0;
service->prev_ = 0;
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If this is the last service to be removed, close the pipe.
if (state->service_list_ == 0)
close_descriptors();
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
}
}
void signal_set_service::open_descriptors()
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
int pipe_fds[2];
if (::pipe(pipe_fds) == 0)
{
state->read_descriptor_ = pipe_fds[0];
::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK);
state->write_descriptor_ = pipe_fds[1];
::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK);
#if defined(FD_CLOEXEC)
::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC);
::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC);
#endif // defined(FD_CLOEXEC)
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "signal_set_service pipe");
}
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::close_descriptors()
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
if (state->read_descriptor_ != -1)
::close(state->read_descriptor_);
state->read_descriptor_ = -1;
if (state->write_descriptor_ != -1)
::close(state->write_descriptor_);
state->write_descriptor_ = -1;
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::start_wait_op(
signal_set_service::implementation_type& impl, signal_op* op)
{
scheduler_.work_started();
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
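  // deliver_signal() increments undelivered_ when a signal arrives with no
  // pending wait; consume one such notification now rather than queueing.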
registration* reg = impl.signals_;
while (reg)
{
if (reg->undelivered_ > 0)
{
--reg->undelivered_;
op->signal_number_ = reg->signal_number_;
scheduler_.post_deferred_completion(op);
return;
}
reg = reg->next_in_set_;
}
impl.queue_.push(op);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
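Caller-facing view of this machinery, assuming standalone asio (signal_example is an illustrative name). On POSIX builds asio_signal_handler() writes the signal number down the pipe opened by open_descriptors(), and the pipe_read_op above redelivers it on the event loop:

#include <asio.hpp>
#include <csignal>
#include <iostream>

int signal_example()
{
  asio::io_context ioc;
  asio::signal_set signals(ioc, SIGINT, SIGTERM);
  signals.async_wait([](const asio::error_code& ec, int signo)
      {
        if (!ec)
          std::cout << "got signal " << signo << "\n";
      });
  std::raise(SIGINT); // Routed through asio_signal_handler().
  ioc.run();
  return 0;
}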

File diff suppressed because it is too large


@ -0,0 +1,185 @@
//
// detail/impl/socket_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS_RUNTIME)
#if defined(ASIO_WINDOWS) \
|| defined(__CYGWIN__) \
|| defined(__SYMBIAN32__)
#include <cstdlib>
#include "asio/detail/socket_holder.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_select_interrupter.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
socket_select_interrupter::socket_select_interrupter()
{
open_descriptors();
}
void socket_select_interrupter::open_descriptors()
{
asio::error_code ec;
socket_holder acceptor(socket_ops::socket(
AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
if (acceptor.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
int opt = 1;
socket_ops::state_type acceptor_state = 0;
socket_ops::setsockopt(acceptor.get(), acceptor_state,
SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec);
using namespace std; // For memset.
sockaddr_in4_type addr;
std::size_t addr_len = sizeof(addr);
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK);
addr.sin_port = 0;
if (socket_ops::bind(acceptor.get(), (const socket_addr_type*)&addr,
addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
if (socket_ops::getsockname(acceptor.get(), (socket_addr_type*)&addr,
&addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
// Some broken firewalls on Windows will intermittently cause getsockname to
// return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We
// explicitly specify the target address here to work around this problem.
if (addr.sin_addr.s_addr == socket_ops::host_to_network_long(INADDR_ANY))
addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK);
if (socket_ops::listen(acceptor.get(),
SOMAXCONN, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
socket_holder client(socket_ops::socket(
AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
if (client.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
if (socket_ops::connect(client.get(), (const socket_addr_type*)&addr,
addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec));
if (server.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
ioctl_arg_type non_blocking = 1;
socket_ops::state_type client_state = 0;
if (socket_ops::ioctl(client.get(), client_state,
FIONBIO, &non_blocking, ec))
asio::detail::throw_error(ec, "socket_select_interrupter");
opt = 1;
socket_ops::setsockopt(client.get(), client_state,
IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
non_blocking = 1;
socket_ops::state_type server_state = 0;
if (socket_ops::ioctl(server.get(), server_state,
FIONBIO, &non_blocking, ec))
asio::detail::throw_error(ec, "socket_select_interrupter");
opt = 1;
socket_ops::setsockopt(server.get(), server_state,
IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
read_descriptor_ = server.release();
write_descriptor_ = client.release();
}
socket_select_interrupter::~socket_select_interrupter()
{
close_descriptors();
}
void socket_select_interrupter::close_descriptors()
{
asio::error_code ec;
socket_ops::state_type state = socket_ops::internal_non_blocking;
if (read_descriptor_ != invalid_socket)
socket_ops::close(read_descriptor_, state, true, ec);
if (write_descriptor_ != invalid_socket)
socket_ops::close(write_descriptor_, state, true, ec);
}
void socket_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = invalid_socket;
read_descriptor_ = invalid_socket;
open_descriptors();
}
void socket_select_interrupter::interrupt()
{
char byte = 0;
socket_ops::buf b;
socket_ops::init_buf(b, &byte, 1);
asio::error_code ec;
socket_ops::send(write_descriptor_, &b, 1, 0, ec);
}
bool socket_select_interrupter::reset()
{
char data[1024];
socket_ops::buf b;
socket_ops::init_buf(b, data, sizeof(data));
asio::error_code ec;
for (;;)
{
int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return true;
return false;
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
#endif // !defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
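This interrupter is built from a connected loopback socket pair because select() on Windows only accepts sockets; POSIX builds of asio use a pipe- or eventfd-based interrupter instead. The same wake-up idea with a plain POSIX pipe (a minimal sketch, POSIX-only, error handling elided; self_pipe_demo is an illustrative name):

#include <fcntl.h>
#include <unistd.h>
#include <sys/select.h>

void self_pipe_demo()
{
  int fds[2];
  ::pipe(fds);                          // fds[0] read end, fds[1] write end.
  ::fcntl(fds[0], F_SETFL, O_NONBLOCK); // So draining below cannot block.
  char byte = 0;
  ::write(fds[1], &byte, 1);            // interrupt()
  fd_set read_set;
  FD_ZERO(&read_set);
  FD_SET(fds[0], &read_set);
  ::select(fds[0] + 1, &read_set, 0, 0, 0); // Returns at once: data is ready.
  char buf[16];
  while (::read(fds[0], buf, sizeof(buf)) > 0) {} // reset(): drain the pipe.
  ::close(fds[0]);
  ::close(fds[1]);
}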


@ -0,0 +1,385 @@
//
// detail/impl/strand_executor_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/call_stack.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_invoke_helpers.hpp"
#include "asio/detail/recycling_allocator.hpp"
#include "asio/executor_work_guard.hpp"
#include "asio/defer.hpp"
#include "asio/dispatch.hpp"
#include "asio/post.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename F, typename Allocator>
class strand_executor_service::allocator_binder
{
public:
typedef Allocator allocator_type;
allocator_binder(ASIO_MOVE_ARG(F) f, const Allocator& a)
: f_(ASIO_MOVE_CAST(F)(f)),
allocator_(a)
{
}
allocator_binder(const allocator_binder& other)
: f_(other.f_),
allocator_(other.allocator_)
{
}
#if defined(ASIO_HAS_MOVE)
allocator_binder(allocator_binder&& other)
: f_(ASIO_MOVE_CAST(F)(other.f_)),
allocator_(ASIO_MOVE_CAST(allocator_type)(other.allocator_))
{
}
#endif // defined(ASIO_HAS_MOVE)
allocator_type get_allocator() const ASIO_NOEXCEPT
{
return allocator_;
}
void operator()()
{
f_();
}
private:
F f_;
allocator_type allocator_;
};
template <typename Executor>
class strand_executor_service::invoker<Executor,
typename enable_if<
execution::is_executor<Executor>::value
>::type>
{
public:
invoker(const implementation_type& impl, Executor& ex)
: impl_(impl),
executor_(asio::prefer(ex, execution::outstanding_work.tracked))
{
}
invoker(const invoker& other)
: impl_(other.impl_),
executor_(other.executor_)
{
}
#if defined(ASIO_HAS_MOVE)
invoker(invoker&& other)
: impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_)),
executor_(ASIO_MOVE_CAST(executor_type)(other.executor_))
{
}
#endif // defined(ASIO_HAS_MOVE)
struct on_invoker_exit
{
invoker* this_;
~on_invoker_exit()
{
this_->impl_->mutex_->lock();
this_->impl_->ready_queue_.push(this_->impl_->waiting_queue_);
bool more_handlers = this_->impl_->locked_ =
!this_->impl_->ready_queue_.empty();
this_->impl_->mutex_->unlock();
if (more_handlers)
{
recycling_allocator<void> allocator;
execution::execute(
asio::prefer(
asio::require(this_->executor_,
execution::blocking.never),
execution::allocator(allocator)),
ASIO_MOVE_CAST(invoker)(*this_));
}
}
};
void operator()()
{
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl_.get());
// Ensure the next handler, if any, is scheduled on block exit.
on_invoker_exit on_exit = { this };
(void)on_exit;
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
asio::error_code ec;
while (scheduler_operation* o = impl_->ready_queue_.front())
{
impl_->ready_queue_.pop();
o->complete(impl_.get(), ec, 0);
}
}
private:
typedef typename decay<
typename prefer_result<
Executor,
execution::outstanding_work_t::tracked_t
>::type
>::type executor_type;
implementation_type impl_;
executor_type executor_;
};
#if !defined(ASIO_NO_TS_EXECUTORS)
template <typename Executor>
class strand_executor_service::invoker<Executor,
typename enable_if<
!execution::is_executor<Executor>::value
>::type>
{
public:
invoker(const implementation_type& impl, Executor& ex)
: impl_(impl),
work_(ex)
{
}
invoker(const invoker& other)
: impl_(other.impl_),
work_(other.work_)
{
}
#if defined(ASIO_HAS_MOVE)
invoker(invoker&& other)
: impl_(ASIO_MOVE_CAST(implementation_type)(other.impl_)),
work_(ASIO_MOVE_CAST(executor_work_guard<Executor>)(other.work_))
{
}
#endif // defined(ASIO_HAS_MOVE)
struct on_invoker_exit
{
invoker* this_;
~on_invoker_exit()
{
this_->impl_->mutex_->lock();
this_->impl_->ready_queue_.push(this_->impl_->waiting_queue_);
bool more_handlers = this_->impl_->locked_ =
!this_->impl_->ready_queue_.empty();
this_->impl_->mutex_->unlock();
if (more_handlers)
{
Executor ex(this_->work_.get_executor());
recycling_allocator<void> allocator;
ex.post(ASIO_MOVE_CAST(invoker)(*this_), allocator);
}
}
};
void operator()()
{
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl_.get());
// Ensure the next handler, if any, is scheduled on block exit.
on_invoker_exit on_exit = { this };
(void)on_exit;
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
asio::error_code ec;
while (scheduler_operation* o = impl_->ready_queue_.front())
{
impl_->ready_queue_.pop();
o->complete(impl_.get(), ec, 0);
}
}
private:
implementation_type impl_;
executor_work_guard<Executor> work_;
};
#endif // !defined(ASIO_NO_TS_EXECUTORS)
template <typename Executor, typename Function>
inline void strand_executor_service::execute(const implementation_type& impl,
Executor& ex, ASIO_MOVE_ARG(Function) function,
typename enable_if<
can_query<Executor, execution::allocator_t<void> >::value
>::type*)
{
return strand_executor_service::do_execute(impl, ex,
ASIO_MOVE_CAST(Function)(function),
asio::query(ex, execution::allocator));
}
template <typename Executor, typename Function>
inline void strand_executor_service::execute(const implementation_type& impl,
Executor& ex, ASIO_MOVE_ARG(Function) function,
typename enable_if<
!can_query<Executor, execution::allocator_t<void> >::value
>::type*)
{
return strand_executor_service::do_execute(impl, ex,
ASIO_MOVE_CAST(Function)(function),
std::allocator<void>());
}
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::do_execute(const implementation_type& impl,
Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a)
{
typedef typename decay<Function>::type function_type;
// If the executor is not never-blocking, and we are already in the strand,
// then the function can run immediately.
if (asio::query(ex, execution::blocking) != execution::blocking.never
&& call_stack<strand_impl>::contains(impl.get()))
{
// Make a local, non-const copy of the function.
function_type tmp(ASIO_MOVE_CAST(Function)(function));
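// The full fence ensures that memory writes made before the function was
// submitted to the strand are visible to it as it runs inline here.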
fenced_block b(fenced_block::full);
asio_handler_invoke_helpers::invoke(tmp, tmp);
return;
}
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "execute"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
execution::execute(ex, invoker<Executor>(impl, ex));
}
}
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::dispatch(const implementation_type& impl,
Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a)
{
typedef typename decay<Function>::type function_type;
// If we are already in the strand then the function can run immediately.
if (call_stack<strand_impl>::contains(impl.get()))
{
// Make a local, non-const copy of the function.
function_type tmp(ASIO_MOVE_CAST(Function)(function));
fenced_block b(fenced_block::full);
asio_handler_invoke_helpers::invoke(tmp, tmp);
return;
}
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "dispatch"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::dispatch(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
// Request invocation of the given function and return immediately.
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::post(const implementation_type& impl,
Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a)
{
typedef typename decay<Function>::type function_type;
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "post"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::post(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
// Request invocation of the given function and return immediately.
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::defer(const implementation_type& impl,
Executor& ex, ASIO_MOVE_ARG(Function) function, const Allocator& a)
{
typedef typename decay<Function>::type function_type;
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(ASIO_MOVE_CAST(Function)(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "defer"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::defer(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP


@ -0,0 +1,134 @@
//
// detail/impl/strand_executor_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/strand_executor_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
strand_executor_service::strand_executor_service(execution_context& ctx)
: execution_context_service_base<strand_executor_service>(ctx),
mutex_(),
salt_(0),
impl_list_(0)
{
}
void strand_executor_service::shutdown()
{
op_queue<scheduler_operation> ops;
asio::detail::mutex::scoped_lock lock(mutex_);
strand_impl* impl = impl_list_;
while (impl)
{
impl->mutex_->lock();
impl->shutdown_ = true;
ops.push(impl->waiting_queue_);
ops.push(impl->ready_queue_);
impl->mutex_->unlock();
impl = impl->next_;
}
}
strand_executor_service::implementation_type
strand_executor_service::create_implementation()
{
implementation_type new_impl(new strand_impl);
new_impl->locked_ = false;
new_impl->shutdown_ = false;
asio::detail::mutex::scoped_lock lock(mutex_);
// Select a mutex from the pool of shared mutexes.
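// The new implementation's address is mixed with a per-service salt using
// the golden-ratio constant familiar from boost::hash_combine, spreading
// strands evenly across the shared mutex pool.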
std::size_t salt = salt_++;
std::size_t mutex_index = reinterpret_cast<std::size_t>(new_impl.get());
mutex_index += (reinterpret_cast<std::size_t>(new_impl.get()) >> 3);
mutex_index ^= salt + 0x9e3779b9 + (mutex_index << 6) + (mutex_index >> 2);
mutex_index = mutex_index % num_mutexes;
if (!mutexes_[mutex_index].get())
mutexes_[mutex_index].reset(new mutex);
new_impl->mutex_ = mutexes_[mutex_index].get();
// Insert implementation into linked list of all implementations.
new_impl->next_ = impl_list_;
new_impl->prev_ = 0;
if (impl_list_)
impl_list_->prev_ = new_impl.get();
impl_list_ = new_impl.get();
new_impl->service_ = this;
return new_impl;
}
strand_executor_service::strand_impl::~strand_impl()
{
asio::detail::mutex::scoped_lock lock(service_->mutex_);
// Remove implementation from linked list of all implementations.
if (service_->impl_list_ == this)
service_->impl_list_ = next_;
if (prev_)
prev_->next_ = next_;
if (next_)
next_->prev_= prev_;
}
bool strand_executor_service::enqueue(const implementation_type& impl,
scheduler_operation* op)
{
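// The return value indicates whether the caller has become the strand's
// owner; an owner is also responsible for scheduling execution of the
// invoker.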
impl->mutex_->lock();
if (impl->shutdown_)
{
impl->mutex_->unlock();
op->destroy();
return false;
}
else if (impl->locked_)
{
// Some other function already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_->unlock();
return false;
}
else
{
// The function is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_->unlock();
impl->ready_queue_.push(op);
return true;
}
}
bool strand_executor_service::running_in_this_thread(
const implementation_type& impl)
{
return !!call_stack<strand_impl>::contains(impl.get());
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
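
A minimal usage sketch (not part of this commit) of what the machinery above
provides through the public asio API: handlers that go through one strand are
never run concurrently, even when io_context::run() is called from several
threads. It assumes a standalone asio build with the headers on the include
path.

#include <asio.hpp>
#include <iostream>

int main()
{
  asio::io_context ctx;
  // All work posted through this strand is serialized by the
  // strand_executor_service implementation shown above.
  auto s = asio::make_strand(ctx);
  asio::post(s, []{ std::cout << "first\n"; });
  asio::post(s, []{ std::cout << "second\n"; });
  ctx.run();
  return 0;
}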


@ -0,0 +1,117 @@
//
// detail/impl/strand_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
#define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/call_stack.hpp"
#include "asio/detail/completion_handler.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/handler_invoke_helpers.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline strand_service::strand_impl::strand_impl()
: operation(&strand_service::do_complete),
locked_(false)
{
}
struct strand_service::on_dispatch_exit
{
io_context_impl* io_context_impl_;
strand_impl* impl_;
~on_dispatch_exit()
{
impl_->mutex_.lock();
impl_->ready_queue_.push(impl_->waiting_queue_);
bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
impl_->mutex_.unlock();
if (more_handlers)
io_context_impl_->post_immediate_completion(impl_, false);
}
};
template <typename Handler>
void strand_service::dispatch(strand_service::implementation_type& impl,
Handler& handler)
{
// If we are already in the strand then the handler can run immediately.
if (call_stack<strand_impl>::contains(impl))
{
fenced_block b(fenced_block::full);
asio_handler_invoke_helpers::invoke(handler, handler);
return;
}
// Allocate and construct an operation to wrap the handler.
typedef completion_handler<Handler, io_context::executor_type> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler, io_context_.get_executor());
ASIO_HANDLER_CREATION((this->context(),
*p.p, "strand", impl, 0, "dispatch"));
bool dispatch_immediately = do_dispatch(impl, p.p);
operation* o = p.p;
p.v = p.p = 0;
if (dispatch_immediately)
{
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl);
// Ensure the next handler, if any, is scheduled on block exit.
on_dispatch_exit on_exit = { &io_context_impl_, impl };
(void)on_exit;
op::do_complete(&io_context_impl_, o, asio::error_code(), 0);
}
}
// Request the io_context to invoke the given handler and return immediately.
template <typename Handler>
void strand_service::post(strand_service::implementation_type& impl,
Handler& handler)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
// Allocate and construct an operation to wrap the handler.
typedef completion_handler<Handler, io_context::executor_type> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler, io_context_.get_executor());
ASIO_HANDLER_CREATION((this->context(),
*p.p, "strand", impl, 0, "post"));
do_post(impl, p.p, is_continuation);
p.v = p.p = 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP


@ -0,0 +1,178 @@
//
// detail/impl/strand_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
#define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/call_stack.hpp"
#include "asio/detail/strand_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct strand_service::on_do_complete_exit
{
io_context_impl* owner_;
strand_impl* impl_;
~on_do_complete_exit()
{
impl_->mutex_.lock();
impl_->ready_queue_.push(impl_->waiting_queue_);
bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
impl_->mutex_.unlock();
if (more_handlers)
owner_->post_immediate_completion(impl_, true);
}
};
strand_service::strand_service(asio::io_context& io_context)
: asio::detail::service_base<strand_service>(io_context),
io_context_(io_context),
io_context_impl_(asio::use_service<io_context_impl>(io_context)),
mutex_(),
salt_(0)
{
}
void strand_service::shutdown()
{
op_queue<operation> ops;
asio::detail::mutex::scoped_lock lock(mutex_);
for (std::size_t i = 0; i < num_implementations; ++i)
{
if (strand_impl* impl = implementations_[i].get())
{
ops.push(impl->waiting_queue_);
ops.push(impl->ready_queue_);
}
}
}
void strand_service::construct(strand_service::implementation_type& impl)
{
asio::detail::mutex::scoped_lock lock(mutex_);
std::size_t salt = salt_++;
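// Unless sequential allocation is enabled below, the implementation's
// address is mixed with the salt (as in strand_executor_service) to pick
// one of a fixed pool of shared strand implementations.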
#if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
std::size_t index = salt;
#else // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
std::size_t index = reinterpret_cast<std::size_t>(&impl);
index += (reinterpret_cast<std::size_t>(&impl) >> 3);
index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2);
#endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
index = index % num_implementations;
if (!implementations_[index].get())
implementations_[index].reset(new strand_impl);
impl = implementations_[index].get();
}
bool strand_service::running_in_this_thread(
const implementation_type& impl) const
{
return call_stack<strand_impl>::contains(impl) != 0;
}
bool strand_service::do_dispatch(implementation_type& impl, operation* op)
{
// If we are running inside the io_context, and no other handler already
// holds the strand lock, then the handler can run immediately.
bool can_dispatch = io_context_impl_.can_dispatch();
impl->mutex_.lock();
if (can_dispatch && !impl->locked_)
{
// Immediate invocation is allowed.
impl->locked_ = true;
impl->mutex_.unlock();
return true;
}
if (impl->locked_)
{
// Some other handler already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_.unlock();
}
else
{
// The handler is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_.unlock();
impl->ready_queue_.push(op);
io_context_impl_.post_immediate_completion(impl, false);
}
return false;
}
void strand_service::do_post(implementation_type& impl,
operation* op, bool is_continuation)
{
impl->mutex_.lock();
if (impl->locked_)
{
// Some other handler already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_.unlock();
}
else
{
// The handler is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_.unlock();
impl->ready_queue_.push(op);
io_context_impl_.post_immediate_completion(impl, is_continuation);
}
}
void strand_service::do_complete(void* owner, operation* base,
const asio::error_code& ec, std::size_t /*bytes_transferred*/)
{
if (owner)
{
strand_impl* impl = static_cast<strand_impl*>(base);
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl);
// Ensure the next handler, if any, is scheduled on block exit.
on_do_complete_exit on_exit;
on_exit.owner_ = static_cast<io_context_impl*>(owner);
on_exit.impl_ = impl;
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
while (operation* o = impl->ready_queue_.front())
{
impl->ready_queue_.pop();
o->complete(owner, ec, 0);
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP


@ -0,0 +1,60 @@
//
// detail/impl/throw_error.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP
#define ASIO_DETAIL_IMPL_THROW_ERROR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/system_error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void do_throw_error(const asio::error_code& err)
{
asio::system_error e(err);
asio::detail::throw_exception(e);
}
void do_throw_error(const asio::error_code& err, const char* location)
{
// boostify: non-boost code starts here
#if defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)
// Microsoft's implementation of std::system_error is non-conformant in that
// it ignores the error code's message when a "what" string is supplied. We'll
// work around this by explicitly formatting the "what" string.
std::string what_msg = location;
what_msg += ": ";
what_msg += err.message();
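// The resulting what() string then has the form "<location>: <message>",
// e.g. "bind: Address already in use".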
asio::system_error e(err, what_msg);
asio::detail::throw_exception(e);
#else // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)
// boostify: non-boost code ends here
asio::system_error e(err, location);
asio::detail::throw_exception(e);
// boostify: non-boost code starts here
#endif // defined(ASIO_MSVC) && defined(ASIO_HAS_STD_SYSTEM_ERROR)
// boostify: non-boost code ends here
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP


@ -0,0 +1,91 @@
//
// detail/impl/timer_queue_ptime.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
#define ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_BOOST_DATE_TIME)
#include "asio/detail/timer_queue_ptime.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
timer_queue<time_traits<boost::posix_time::ptime> >::timer_queue()
{
}
timer_queue<time_traits<boost::posix_time::ptime> >::~timer_queue()
{
}
bool timer_queue<time_traits<boost::posix_time::ptime> >::enqueue_timer(
const time_type& time, per_timer_data& timer, wait_op* op)
{
return impl_.enqueue_timer(time, timer, op);
}
bool timer_queue<time_traits<boost::posix_time::ptime> >::empty() const
{
return impl_.empty();
}
long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_msec(
long max_duration) const
{
return impl_.wait_duration_msec(max_duration);
}
long timer_queue<time_traits<boost::posix_time::ptime> >::wait_duration_usec(
long max_duration) const
{
return impl_.wait_duration_usec(max_duration);
}
void timer_queue<time_traits<boost::posix_time::ptime> >::get_ready_timers(
op_queue<operation>& ops)
{
impl_.get_ready_timers(ops);
}
void timer_queue<time_traits<boost::posix_time::ptime> >::get_all_timers(
op_queue<operation>& ops)
{
impl_.get_all_timers(ops);
}
std::size_t timer_queue<time_traits<boost::posix_time::ptime> >::cancel_timer(
per_timer_data& timer, op_queue<operation>& ops, std::size_t max_cancelled)
{
return impl_.cancel_timer(timer, ops, max_cancelled);
}
void timer_queue<time_traits<boost::posix_time::ptime> >::move_timer(
per_timer_data& target, per_timer_data& source)
{
impl_.move_timer(target, source);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_BOOST_DATE_TIME)
#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_PTIME_IPP


@ -0,0 +1,101 @@
//
// detail/impl/timer_queue_set.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
#define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/timer_queue_set.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
timer_queue_set::timer_queue_set()
: first_(0)
{
}
void timer_queue_set::insert(timer_queue_base* q)
{
q->next_ = first_;
first_ = q;
}
void timer_queue_set::erase(timer_queue_base* q)
{
if (first_)
{
if (q == first_)
{
first_ = q->next_;
q->next_ = 0;
return;
}
for (timer_queue_base* p = first_; p->next_; p = p->next_)
{
if (p->next_ == q)
{
p->next_ = q->next_;
q->next_ = 0;
return;
}
}
}
}
bool timer_queue_set::all_empty() const
{
for (timer_queue_base* p = first_; p; p = p->next_)
if (!p->empty())
return false;
return true;
}
long timer_queue_set::wait_duration_msec(long max_duration) const
{
long min_duration = max_duration;
for (timer_queue_base* p = first_; p; p = p->next_)
min_duration = p->wait_duration_msec(min_duration);
return min_duration;
}
long timer_queue_set::wait_duration_usec(long max_duration) const
{
long min_duration = max_duration;
for (timer_queue_base* p = first_; p; p = p->next_)
min_duration = p->wait_duration_usec(min_duration);
return min_duration;
}
void timer_queue_set::get_ready_timers(op_queue<operation>& ops)
{
for (timer_queue_base* p = first_; p; p = p->next_)
p->get_ready_timers(ops);
}
void timer_queue_set::get_all_timers(op_queue<operation>& ops)
{
for (timer_queue_base* p = first_; p; p = p->next_)
p->get_all_timers(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP


@ -0,0 +1,76 @@
//
// detail/impl/win_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP
#define ASIO_DETAIL_IMPL_WIN_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_event.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_event::win_event()
: state_(0)
{
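// events_[0] is created manual-reset, so signalling it releases all waiting
// threads until it is reset; events_[1] is auto-reset, releasing a single
// waiter per signal.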
#if defined(ASIO_WINDOWS_APP)
events_[0] = ::CreateEventExW(0, 0,
CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
#else // defined(ASIO_WINDOWS_APP)
events_[0] = ::CreateEventW(0, true, false, 0);
#endif // defined(ASIO_WINDOWS_APP)
if (!events_[0])
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
#if defined(ASIO_WINDOWS_APP)
events_[1] = ::CreateEventExW(0, 0, 0, EVENT_ALL_ACCESS);
#else // defined(ASIO_WINDOWS_APP)
events_[1] = ::CreateEventW(0, false, false, 0);
#endif // defined(ASIO_WINDOWS_APP)
if (!events_[1])
{
DWORD last_error = ::GetLastError();
::CloseHandle(events_[0]);
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
}
win_event::~win_event()
{
::CloseHandle(events_[0]);
::CloseHandle(events_[1]);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP


@ -0,0 +1,525 @@
//
// detail/impl/win_iocp_handle_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/win_iocp_handle_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class win_iocp_handle_service::overlapped_wrapper
: public OVERLAPPED
{
public:
explicit overlapped_wrapper(asio::error_code& ec)
{
Internal = 0;
InternalHigh = 0;
Offset = 0;
OffsetHigh = 0;
// Create a non-signalled manual-reset event, for GetOverlappedResult.
hEvent = ::CreateEventW(0, TRUE, FALSE, 0);
if (hEvent)
{
// As documented in GetQueuedCompletionStatus, setting the low order
// bit of this event prevents our synchronous writes from being treated
// as completion port events.
DWORD_PTR tmp = reinterpret_cast<DWORD_PTR>(hEvent);
hEvent = reinterpret_cast<HANDLE>(tmp | 1);
}
else
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
~overlapped_wrapper()
{
if (hEvent)
{
::CloseHandle(hEvent);
}
}
};
win_iocp_handle_service::win_iocp_handle_service(execution_context& context)
: execution_context_service_base<win_iocp_handle_service>(context),
iocp_service_(asio::use_service<win_iocp_io_context>(context)),
mutex_(),
impl_list_(0)
{
}
void win_iocp_handle_service::shutdown()
{
// Close all implementations, causing all operations to complete.
asio::detail::mutex::scoped_lock lock(mutex_);
implementation_type* impl = impl_list_;
while (impl)
{
close_for_destruction(*impl);
impl = impl->next_;
}
}
void win_iocp_handle_service::construct(
win_iocp_handle_service::implementation_type& impl)
{
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_handle_service::move_construct(
win_iocp_handle_service::implementation_type& impl,
win_iocp_handle_service::implementation_type& other_impl)
{
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_handle_service::move_assign(
win_iocp_handle_service::implementation_type& impl,
win_iocp_handle_service& other_service,
win_iocp_handle_service::implementation_type& other_impl)
{
close_for_destruction(impl);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_= impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(other_service.mutex_);
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
}
void win_iocp_handle_service::destroy(
win_iocp_handle_service::implementation_type& impl)
{
close_for_destruction(impl);
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_= impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
asio::error_code win_iocp_handle_service::assign(
win_iocp_handle_service::implementation_type& impl,
const native_handle_type& handle, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
if (iocp_service_.register_handle(handle, ec))
return ec;
impl.handle_ = handle;
ec = asio::error_code();
return ec;
}
asio::error_code win_iocp_handle_service::close(
win_iocp_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "close"));
if (!::CloseHandle(impl.handle_))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
}
else
{
ec = asio::error_code();
}
return ec;
}
asio::error_code win_iocp_handle_service::cancel(
win_iocp_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "cancel"));
if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
{
// This version of Windows supports cancellation from any thread.
typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
cancel_io_ex_t cancel_io_ex = reinterpret_cast<cancel_io_ex_t>(
reinterpret_cast<void*>(cancel_io_ex_ptr));
if (!cancel_io_ex(impl.handle_, 0))
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_NOT_FOUND)
{
// ERROR_NOT_FOUND means that there were no operations to be
// cancelled. We swallow this error to match the behaviour on other
// platforms.
ec = asio::error_code();
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
else
{
ec = asio::error_code();
}
}
else if (impl.safe_cancellation_thread_id_ == 0)
{
// No operations have been started, so there's nothing to cancel.
ec = asio::error_code();
}
else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
{
// Asynchronous operations have been started from the current thread only,
// so it is safe to try to cancel them using CancelIo.
if (!::CancelIo(impl.handle_))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
}
else
{
// Asynchronous operations have been started from more than one thread,
// so cancellation is not safe.
ec = asio::error::operation_not_supported;
}
return ec;
}
size_t win_iocp_handle_service::do_write(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::const_buffer& buffer, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a handle is a no-op.
if (buffer.size() == 0)
{
ec = asio::error_code();
return 0;
}
overlapped_wrapper overlapped(ec);
if (ec)
{
return 0;
}
// Write the data.
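// Split the 64-bit offset across the OVERLAPPED structure's two 32-bit
// offset fields.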
overlapped.Offset = offset & 0xFFFFFFFF;
overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::WriteFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()), 0, &overlapped);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error != ERROR_IO_PENDING)
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
return 0;
}
}
// Wait for the operation to complete.
DWORD bytes_transferred = 0;
ok = ::GetOverlappedResult(impl.handle_,
&overlapped, &bytes_transferred, TRUE);
if (!ok)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
return 0;
}
ec = asio::error_code();
return bytes_transferred;
}
void win_iocp_handle_service::start_write_op(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::const_buffer& buffer, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
{
iocp_service_.on_completion(op, asio::error::bad_descriptor);
}
else if (buffer.size() == 0)
{
// A request to write 0 bytes on a handle is a no-op.
iocp_service_.on_completion(op);
}
else
{
DWORD bytes_transferred = 0;
op->Offset = offset & 0xFFFFFFFF;
op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::WriteFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()),
&bytes_transferred, op);
DWORD last_error = ::GetLastError();
if (!ok && last_error != ERROR_IO_PENDING
&& last_error != ERROR_MORE_DATA)
{
iocp_service_.on_completion(op, last_error, bytes_transferred);
}
else
{
iocp_service_.on_pending(op);
}
}
}
size_t win_iocp_handle_service::do_read(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::mutable_buffer& buffer, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream handle is a no-op.
if (buffer.size() == 0)
{
ec = asio::error_code();
return 0;
}
overlapped_wrapper overlapped(ec);
if (ec)
{
return 0;
}
// Read some data.
overlapped.Offset = offset & 0xFFFFFFFF;
overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::ReadFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()), 0, &overlapped);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA)
{
if (last_error == ERROR_HANDLE_EOF)
{
ec = asio::error::eof;
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
return 0;
}
}
// Wait for the operation to complete.
DWORD bytes_transferred = 0;
ok = ::GetOverlappedResult(impl.handle_,
&overlapped, &bytes_transferred, TRUE);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_HANDLE_EOF)
{
ec = asio::error::eof;
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
return (last_error == ERROR_MORE_DATA) ? bytes_transferred : 0;
}
ec = asio::error_code();
return bytes_transferred;
}
void win_iocp_handle_service::start_read_op(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::mutable_buffer& buffer, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
{
iocp_service_.on_completion(op, asio::error::bad_descriptor);
}
else if (buffer.size() == 0)
{
// A request to read 0 bytes on a handle is a no-op.
iocp_service_.on_completion(op);
}
else
{
DWORD bytes_transferred = 0;
op->Offset = offset & 0xFFFFFFFF;
op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::ReadFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()),
&bytes_transferred, op);
DWORD last_error = ::GetLastError();
if (!ok && last_error != ERROR_IO_PENDING
&& last_error != ERROR_MORE_DATA)
{
iocp_service_.on_completion(op, last_error, bytes_transferred);
}
else
{
iocp_service_.on_pending(op);
}
}
}
void win_iocp_handle_service::update_cancellation_thread_id(
win_iocp_handle_service::implementation_type& impl)
{
if (impl.safe_cancellation_thread_id_ == 0)
impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
impl.safe_cancellation_thread_id_ = ~DWORD(0);
}
void win_iocp_handle_service::close_for_destruction(implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "close"));
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP


@ -0,0 +1,104 @@
//
// detail/impl/win_iocp_io_context.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/completion_handler.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/handler_invoke_helpers.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Time_Traits>
void win_iocp_io_context::add_timer_queue(
timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}
template <typename Time_Traits>
void win_iocp_io_context::remove_timer_queue(
timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
template <typename Time_Traits>
void win_iocp_io_context::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
// If the service has been shut down we silently discard the timer.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
{
post_immediate_completion(op, false);
return;
}
mutex::scoped_lock lock(dispatch_mutex_);
bool earliest = queue.enqueue_timer(time, timer, op);
work_started();
if (earliest)
update_timeout();
}
template <typename Time_Traits>
std::size_t win_iocp_io_context::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
// If the service has been shut down we silently ignore the cancellation.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
return 0;
mutex::scoped_lock lock(dispatch_mutex_);
op_queue<win_iocp_operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
post_deferred_completions(ops);
return n;
}
template <typename Time_Traits>
void win_iocp_io_context::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& to,
typename timer_queue<Time_Traits>::per_timer_data& from)
{
asio::detail::mutex::scoped_lock lock(dispatch_mutex_);
op_queue<operation> ops;
queue.cancel_timer(to, ops);
queue.move_timer(to, from);
lock.unlock();
post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP


@ -0,0 +1,603 @@
//
// detail/impl/win_iocp_io_context.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/error.hpp"
#include "asio/detail/cstdint.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/handler_invoke_helpers.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/thread.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_iocp_io_context.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct win_iocp_io_context::thread_function
{
explicit thread_function(win_iocp_io_context* s)
: this_(s)
{
}
void operator()()
{
asio::error_code ec;
this_->run(ec);
}
win_iocp_io_context* this_;
};
struct win_iocp_io_context::work_finished_on_block_exit
{
~work_finished_on_block_exit()
{
io_context_->work_finished();
}
win_iocp_io_context* io_context_;
};
struct win_iocp_io_context::timer_thread_function
{
void operator()()
{
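// Each time the waitable timer fires, flag that a dispatch is required and
// wake an io_context thread through the completion port so that expired
// timers are processed. The loop exits once shutdown has been signalled.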
while (::InterlockedExchangeAdd(&io_context_->shutdown_, 0) == 0)
{
if (::WaitForSingleObject(io_context_->waitable_timer_.handle,
INFINITE) == WAIT_OBJECT_0)
{
::InterlockedExchange(&io_context_->dispatch_required_, 1);
::PostQueuedCompletionStatus(io_context_->iocp_.handle,
0, wake_for_dispatch, 0);
}
}
}
win_iocp_io_context* io_context_;
};
win_iocp_io_context::win_iocp_io_context(
asio::execution_context& ctx, int concurrency_hint, bool own_thread)
: execution_context_service_base<win_iocp_io_context>(ctx),
iocp_(),
outstanding_work_(0),
stopped_(0),
stop_event_posted_(0),
shutdown_(0),
gqcs_timeout_(get_gqcs_timeout()),
dispatch_required_(0),
concurrency_hint_(concurrency_hint)
{
ASIO_HANDLER_TRACKING_INIT;
iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
static_cast<DWORD>(concurrency_hint >= 0 ? concurrency_hint : DWORD(~0)));
if (!iocp_.handle)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "iocp");
}
if (own_thread)
{
::InterlockedIncrement(&outstanding_work_);
thread_.reset(new asio::detail::thread(thread_function(this)));
}
}
win_iocp_io_context::~win_iocp_io_context()
{
if (thread_.get())
{
stop();
thread_->join();
thread_.reset();
}
}
void win_iocp_io_context::shutdown()
{
::InterlockedExchange(&shutdown_, 1);
if (timer_thread_.get())
{
LARGE_INTEGER timeout;
timeout.QuadPart = 1;
::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE);
}
if (thread_.get())
{
stop();
thread_->join();
thread_.reset();
::InterlockedDecrement(&outstanding_work_);
}
while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0)
{
op_queue<win_iocp_operation> ops;
timer_queues_.get_all_timers(ops);
ops.push(completed_ops_);
if (!ops.empty())
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
::InterlockedDecrement(&outstanding_work_);
op->destroy();
}
}
else
{
DWORD bytes_transferred = 0;
dword_ptr_t completion_key = 0;
LPOVERLAPPED overlapped = 0;
::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
&completion_key, &overlapped, gqcs_timeout_);
if (overlapped)
{
::InterlockedDecrement(&outstanding_work_);
static_cast<win_iocp_operation*>(overlapped)->destroy();
}
}
}
if (timer_thread_.get())
timer_thread_->join();
}
asio::error_code win_iocp_io_context::register_handle(
HANDLE handle, asio::error_code& ec)
{
if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
return ec;
}
size_t win_iocp_io_context::run(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
size_t n = 0;
while (do_one(INFINITE, this_thread, ec))
if (n != (std::numeric_limits<size_t>::max)())
++n;
return n;
}
size_t win_iocp_io_context::run_one(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(INFINITE, this_thread, ec);
}
size_t win_iocp_io_context::wait_one(long usec, asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(usec < 0 ? INFINITE : ((usec - 1) / 1000 + 1), this_thread, ec);
}
size_t win_iocp_io_context::poll(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
size_t n = 0;
while (do_one(0, this_thread, ec))
if (n != (std::numeric_limits<size_t>::max)())
++n;
return n;
}
size_t win_iocp_io_context::poll_one(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(0, this_thread, ec);
}
void win_iocp_io_context::stop()
{
if (::InterlockedExchange(&stopped_, 1) == 0)
{
if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
{
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "pqcs");
}
}
}
}
void win_iocp_io_context::capture_current_exception()
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
this_thread->capture_current_exception();
}
void win_iocp_io_context::post_deferred_completion(win_iocp_operation* op)
{
// Flag the operation as ready.
op->ready_ = 1;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
void win_iocp_io_context::post_deferred_completions(
op_queue<win_iocp_operation>& ops)
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
// Flag the operation as ready.
op->ready_ = 1;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
completed_ops_.push(ops);
::InterlockedExchange(&dispatch_required_, 1);
}
}
}
void win_iocp_io_context::abandon_operations(
op_queue<win_iocp_operation>& ops)
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
::InterlockedDecrement(&outstanding_work_);
op->destroy();
}
}
void win_iocp_io_context::on_pending(win_iocp_operation* op)
{
if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
{
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
}
void win_iocp_io_context::on_completion(win_iocp_operation* op,
DWORD last_error, DWORD bytes_transferred)
{
// Flag that the operation is ready for invocation.
op->ready_ = 1;
// Store results in the OVERLAPPED structure.
op->Internal = reinterpret_cast<ulong_ptr_t>(
&asio::error::get_system_category());
op->Offset = last_error;
op->OffsetHigh = bytes_transferred;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
void win_iocp_io_context::on_completion(win_iocp_operation* op,
const asio::error_code& ec, DWORD bytes_transferred)
{
// Flag that the operation is ready for invocation.
op->ready_ = 1;
// Store results in the OVERLAPPED structure.
op->Internal = reinterpret_cast<ulong_ptr_t>(&ec.category());
op->Offset = ec.value();
op->OffsetHigh = bytes_transferred;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
size_t win_iocp_io_context::do_one(DWORD msec,
win_iocp_thread_info& this_thread, asio::error_code& ec)
{
for (;;)
{
// Try to acquire responsibility for dispatching timers and completed ops.
if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1)
{
mutex::scoped_lock lock(dispatch_mutex_);
// Dispatch pending timers and operations.
op_queue<win_iocp_operation> ops;
ops.push(completed_ops_);
timer_queues_.get_ready_timers(ops);
post_deferred_completions(ops);
update_timeout();
}
// Get the next operation from the queue.
DWORD bytes_transferred = 0;
dword_ptr_t completion_key = 0;
LPOVERLAPPED overlapped = 0;
::SetLastError(0);
BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle,
&bytes_transferred, &completion_key, &overlapped,
msec < gqcs_timeout_ ? msec : gqcs_timeout_);
DWORD last_error = ::GetLastError();
if (overlapped)
{
win_iocp_operation* op = static_cast<win_iocp_operation*>(overlapped);
asio::error_code result_ec(last_error,
asio::error::get_system_category());
// We may have been passed the last_error and bytes_transferred in the
// OVERLAPPED structure itself.
if (completion_key == overlapped_contains_result)
{
result_ec = asio::error_code(static_cast<int>(op->Offset),
*reinterpret_cast<asio::error_category*>(op->Internal));
bytes_transferred = op->OffsetHigh;
}
// Otherwise ensure any result has been saved into the OVERLAPPED
// structure.
else
{
op->Internal = reinterpret_cast<ulong_ptr_t>(&result_ec.category());
op->Offset = result_ec.value();
op->OffsetHigh = bytes_transferred;
}
// Dispatch the operation only if ready. The operation may not be ready
// if the initiating function (e.g. a call to WSARecv) has not yet
// returned. This is because the initiating function still wants access
// to the operation's OVERLAPPED structure.
if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
{
// Ensure the count of outstanding work is decremented on block exit.
work_finished_on_block_exit on_exit = { this };
(void)on_exit;
op->complete(this, result_ec, bytes_transferred);
this_thread.rethrow_pending_exception();
ec = asio::error_code();
return 1;
}
}
else if (!ok)
{
if (last_error != WAIT_TIMEOUT)
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
return 0;
}
// If we're waiting indefinitely we need to keep going until we get a
// real handler.
if (msec == INFINITE)
continue;
ec = asio::error_code();
return 0;
}
else if (completion_key == wake_for_dispatch)
{
// We have been woken up to try to acquire responsibility for dispatching
// timers and completed operations.
}
else
{
// Indicate that there is no longer an in-flight stop event.
::InterlockedExchange(&stop_event_posted_, 0);
// The stopped_ flag is always checked to ensure that any leftover
// stop events from a previous run invocation are ignored.
if (::InterlockedExchangeAdd(&stopped_, 0) != 0)
{
// Wake up next thread that is blocked on GetQueuedCompletionStatus.
if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
{
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
{
last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
return 0;
}
}
ec = asio::error_code();
return 0;
}
}
}
}
DWORD win_iocp_io_context::get_gqcs_timeout()
{
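// GetQueuedCompletionStatus may safely be given an INFINITE timeout on
// Windows Vista (major version 6) and later; on older versions a finite
// periodic timeout is used instead as a conservative workaround.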
OSVERSIONINFOEX osvi;
ZeroMemory(&osvi, sizeof(osvi));
osvi.dwOSVersionInfoSize = sizeof(osvi);
osvi.dwMajorVersion = 6ul;
const uint64_t condition_mask = ::VerSetConditionMask(
0, VER_MAJORVERSION, VER_GREATER_EQUAL);
if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask))
return INFINITE;
return default_gqcs_timeout;
}
void win_iocp_io_context::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(dispatch_mutex_);
timer_queues_.insert(&queue);
if (!waitable_timer_.handle)
{
waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0);
if (waitable_timer_.handle == 0)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "timer");
}
LARGE_INTEGER timeout;
timeout.QuadPart = -max_timeout_usec;
timeout.QuadPart *= 10;
::SetWaitableTimer(waitable_timer_.handle,
&timeout, max_timeout_msec, 0, 0, FALSE);
}
if (!timer_thread_.get())
{
timer_thread_function thread_function = { this };
timer_thread_.reset(new thread(thread_function, 65536));
}
}
void win_iocp_io_context::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(dispatch_mutex_);
timer_queues_.erase(&queue);
}
void win_iocp_io_context::update_timeout()
{
if (timer_thread_.get())
{
// There's no point updating the waitable timer if the new timeout period
// exceeds the maximum timeout. In that case, we might as well wait for the
// existing period of the timer to expire.
long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec);
if (timeout_usec < max_timeout_usec)
{
LARGE_INTEGER timeout;
timeout.QuadPart = -timeout_usec;
timeout.QuadPart *= 10;
::SetWaitableTimer(waitable_timer_.handle,
&timeout, max_timeout_msec, 0, 0, FALSE);
}
}
}
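// Unit conversion note for the SetWaitableTimer calls above: the due time
// is measured in 100-nanosecond intervals, and a negative value requests a
// relative rather than absolute due time. That is why the microsecond
// values are negated and then multiplied by 10. For example, a relative
// one-second due time on a hypothetical timer handle would be:
//
//   LARGE_INTEGER due;
//   due.QuadPart = -10000000LL; // 1s = 10,000,000 x 100ns, relative
//   ::SetWaitableTimer(timer, &due, 0, 0, 0, FALSE);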
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP

View File

@ -0,0 +1,192 @@
//
// detail/impl/win_iocp_serial_port_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)
#include <cstring>
#include "asio/detail/win_iocp_serial_port_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_iocp_serial_port_service::win_iocp_serial_port_service(
execution_context& context)
: execution_context_service_base<win_iocp_serial_port_service>(context),
handle_service_(context)
{
}
void win_iocp_serial_port_service::shutdown()
{
}
asio::error_code win_iocp_serial_port_service::open(
win_iocp_serial_port_service::implementation_type& impl,
const std::string& device, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
// For convenience, add a leading \\.\ sequence if not already present.
std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device;
// Open a handle to the serial port.
::HANDLE handle = ::CreateFileA(name.c_str(),
GENERIC_READ | GENERIC_WRITE, 0, 0,
OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0);
if (handle == INVALID_HANDLE_VALUE)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
return ec;
}
// Determine the initial serial port parameters.
using namespace std; // For memset.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle, &dcb))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
return ec;
}
// Set some default serial port parameters. This implementation does not
// support changing all of these, so they might as well be in a known state.
dcb.fBinary = TRUE; // Win32 only supports binary mode.
dcb.fNull = FALSE; // Do not ignore NULL characters.
dcb.fAbortOnError = FALSE; // Ignore serial framing errors.
dcb.BaudRate = CBR_9600; // 9600 baud by default
dcb.ByteSize = 8; // 8 bit bytes
dcb.fOutxCtsFlow = FALSE; // No flow control
dcb.fOutxDsrFlow = FALSE;
dcb.fDtrControl = DTR_CONTROL_DISABLE;
dcb.fDsrSensitivity = FALSE;
dcb.fOutX = FALSE;
dcb.fInX = FALSE;
dcb.fRtsControl = RTS_CONTROL_DISABLE;
dcb.fParity = FALSE; // No parity
dcb.Parity = NOPARITY;
dcb.StopBits = ONESTOPBIT; // One stop bit
if (!::SetCommState(handle, &dcb))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
return ec;
}
// Set up timeouts so that the serial port will behave similarly to a
// network socket. Reads wait for at least one byte, then return with
// whatever they have. Writes return once everything is out the door.
::COMMTIMEOUTS timeouts;
timeouts.ReadIntervalTimeout = 1;
timeouts.ReadTotalTimeoutMultiplier = 0;
timeouts.ReadTotalTimeoutConstant = 0;
timeouts.WriteTotalTimeoutMultiplier = 0;
timeouts.WriteTotalTimeoutConstant = 0;
if (!::SetCommTimeouts(handle, &timeouts))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
return ec;
}
// We're done. Take ownership of the serial port handle.
if (handle_service_.assign(impl, handle, ec))
::CloseHandle(handle);
return ec;
}
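// A minimal usage sketch for the open() function above (the caller, the
// impl object and the "COM3" device name are hypothetical):
//
//   asio::error_code ec;
//   service.open(impl, "COM3", ec); // handle is opened on "\\.\COM3"
//   if (ec)
//     ; // report the failure to the caller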
asio::error_code win_iocp_serial_port_service::do_set_option(
win_iocp_serial_port_service::implementation_type& impl,
win_iocp_serial_port_service::store_function_type store,
const void* option, asio::error_code& ec)
{
using namespace std; // For memcpy.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
return ec;
}
if (store(option, dcb, ec))
return ec;
if (!::SetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
return ec;
}
ec = asio::error_code();
return ec;
}
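// The store function passed to do_set_option() is expected to copy an
// option value into the DCB before it is written back with SetCommState. A
// sketch of one possible store function, assuming the store_function_type
// signature implied by the call above (the option layout is illustrative):
//
//   asio::error_code store_baud_rate(const void* option,
//       ::DCB& storage, asio::error_code& ec)
//   {
//     storage.BaudRate = *static_cast<const DWORD*>(option);
//     ec = asio::error_code();
//     return ec;
//   }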
asio::error_code win_iocp_serial_port_service::do_get_option(
const win_iocp_serial_port_service::implementation_type& impl,
win_iocp_serial_port_service::load_function_type load,
void* option, asio::error_code& ec) const
{
using namespace std; // For memset.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
return ec;
}
return load(option, dcb, ec);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP

View File

@ -0,0 +1,801 @@
//
// detail/impl/win_iocp_socket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/win_iocp_socket_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_iocp_socket_service_base::win_iocp_socket_service_base(
execution_context& context)
: context_(context),
iocp_service_(use_service<win_iocp_io_context>(context)),
reactor_(0),
connect_ex_(0),
nt_set_info_(0),
mutex_(),
impl_list_(0)
{
}
void win_iocp_socket_service_base::base_shutdown()
{
// Close all implementations, causing all operations to complete.
asio::detail::mutex::scoped_lock lock(mutex_);
base_implementation_type* impl = impl_list_;
while (impl)
{
close_for_destruction(*impl);
impl = impl->next_;
}
}
void win_iocp_socket_service_base::construct(
win_iocp_socket_service_base::base_implementation_type& impl)
{
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_socket_service_base::base_move_construct(
win_iocp_socket_service_base::base_implementation_type& impl,
win_iocp_socket_service_base::base_implementation_type& other_impl)
ASIO_NOEXCEPT
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.cancel_token_ = other_impl.cancel_token_;
other_impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_socket_service_base::base_move_assign(
win_iocp_socket_service_base::base_implementation_type& impl,
win_iocp_socket_service_base& other_service,
win_iocp_socket_service_base::base_implementation_type& other_impl)
{
close_for_destruction(impl);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
      impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.cancel_token_ = other_impl.cancel_token_;
other_impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(other_service.mutex_);
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
}
void win_iocp_socket_service_base::destroy(
win_iocp_socket_service_base::base_implementation_type& impl)
{
close_for_destruction(impl);
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
    impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
asio::error_code win_iocp_socket_service_base::close(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "close"));
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
socket_ops::close(impl.socket_, impl.state_, false, ec);
if (r)
r->cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
return ec;
}
socket_type win_iocp_socket_service_base::release(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
return invalid_socket;
cancel(impl, ec);
if (ec)
return invalid_socket;
nt_set_info_fn fn = get_nt_set_info();
if (fn == 0)
{
ec = asio::error::operation_not_supported;
return invalid_socket;
}
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(impl.socket_);
ULONG_PTR iosb[2] = { 0, 0 };
void* info[2] = { 0, 0 };
if (fn(sock_as_handle, iosb, &info, sizeof(info),
61 /* FileReplaceCompletionInformation */))
{
ec = asio::error::operation_not_supported;
return invalid_socket;
}
socket_type tmp = impl.socket_;
impl.socket_ = invalid_socket;
return tmp;
}
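// Note on the NtSetInformationFile call above: information class 61 is
// FileReplaceCompletionInformation, and the info array stands in for a
// FILE_COMPLETION_INFORMATION structure ({ Port, Key }). Passing nulls
// detaches the socket from the I/O completion port, which appears to be
// the only way to safely hand back a raw socket that was previously
// registered with the port.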
asio::error_code win_iocp_socket_service_base::cancel(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "cancel"));
if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
{
    // This version of Windows supports cancellation from any thread.
typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
cancel_io_ex_t cancel_io_ex = reinterpret_cast<cancel_io_ex_t>(
reinterpret_cast<void*>(cancel_io_ex_ptr));
socket_type sock = impl.socket_;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);
if (!cancel_io_ex(sock_as_handle, 0))
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_NOT_FOUND)
{
// ERROR_NOT_FOUND means that there were no operations to be
// cancelled. We swallow this error to match the behaviour on other
// platforms.
ec = asio::error_code();
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
else
{
ec = asio::error_code();
}
}
#if defined(ASIO_ENABLE_CANCELIO)
else if (impl.safe_cancellation_thread_id_ == 0)
{
// No operations have been started, so there's nothing to cancel.
ec = asio::error_code();
}
else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
{
// Asynchronous operations have been started from the current thread only,
// so it is safe to try to cancel them using CancelIo.
socket_type sock = impl.socket_;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);
if (!::CancelIo(sock_as_handle))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
}
else
{
// Asynchronous operations have been started from more than one thread,
// so cancellation is not safe.
ec = asio::error::operation_not_supported;
}
#else // defined(ASIO_ENABLE_CANCELIO)
else
{
// Cancellation is not supported as CancelIo may not be used.
ec = asio::error::operation_not_supported;
}
#endif // defined(ASIO_ENABLE_CANCELIO)
// Cancel any operations started via the reactor.
if (!ec)
{
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->cancel_ops(impl.socket_, impl.reactor_data_);
}
return ec;
}
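// Background for the CancelIoEx lookup above: the function exists only on
// Windows Vista and later, hence the runtime GetProcAddress rather than a
// direct call. The older CancelIo can only cancel I/O issued from the
// calling thread, which is why the ASIO_ENABLE_CANCELIO branches above
// track the thread that started the operations.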
asio::error_code win_iocp_socket_service_base::do_open(
win_iocp_socket_service_base::base_implementation_type& impl,
int family, int type, int protocol, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
socket_holder sock(socket_ops::socket(family, type, protocol, ec));
if (sock.get() == invalid_socket)
return ec;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock.get());
if (iocp_service_.register_handle(sock_as_handle, ec))
return ec;
impl.socket_ = sock.release();
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());
ec = asio::error_code();
return ec;
}
asio::error_code win_iocp_socket_service_base::do_assign(
win_iocp_socket_service_base::base_implementation_type& impl,
int type, socket_type native_socket, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(native_socket);
if (iocp_service_.register_handle(sock_as_handle, ec))
return ec;
impl.socket_ = native_socket;
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());
ec = asio::error_code();
return ec;
}
void win_iocp_socket_service_base::start_send_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count,
socket_base::message_flags flags, bool noop, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (noop)
iocp_service_.on_completion(op);
else if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
int result = ::WSASend(impl.socket_, buffers,
static_cast<DWORD>(buffer_count), &bytes_transferred, flags, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_send_to_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count,
const socket_addr_type* addr, int addrlen,
socket_base::message_flags flags, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
int result = ::WSASendTo(impl.socket_, buffers,
static_cast<DWORD>(buffer_count),
&bytes_transferred, flags, addr, addrlen, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_receive_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count,
socket_base::message_flags flags, bool noop, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (noop)
iocp_service_.on_completion(op);
else if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int result = ::WSARecv(impl.socket_, buffers,
static_cast<DWORD>(buffer_count),
&bytes_transferred, &recv_flags, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_NETNAME_DELETED)
last_error = WSAECONNRESET;
else if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_null_buffers_receive_op(
win_iocp_socket_service_base::base_implementation_type& impl,
socket_base::message_flags flags, reactor_op* op)
{
if ((impl.state_ & socket_ops::stream_oriented) != 0)
{
// For stream sockets on Windows, we may issue a 0-byte overlapped
// WSARecv to wait until there is data available on the socket.
::WSABUF buf = { 0, 0 };
start_receive_op(impl, &buf, 1, flags, false, op);
}
else
{
start_reactor_op(impl,
(flags & socket_base::message_out_of_band)
? select_reactor::except_op : select_reactor::read_op,
op);
}
}
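// Why the zero-byte receive works: for a stream socket, an overlapped
// WSARecv with a zero-length buffer completes once data becomes available
// without consuming any of it, which yields readiness semantics on top of
// IOCP. For a datagram socket the same trick would consume (and truncate)
// a waiting datagram, so those sockets fall back to the reactor instead.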
void win_iocp_socket_service_base::start_receive_from_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count, socket_addr_type* addr,
socket_base::message_flags flags, int* addrlen, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int result = ::WSARecvFrom(impl.socket_, buffers,
static_cast<DWORD>(buffer_count),
&bytes_transferred, &recv_flags, addr, addrlen, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_accept_op(
win_iocp_socket_service_base::base_implementation_type& impl,
bool peer_is_open, socket_holder& new_socket, int family, int type,
int protocol, void* output_buffer, DWORD address_length, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else if (peer_is_open)
iocp_service_.on_completion(op, asio::error::already_open);
else
{
asio::error_code ec;
new_socket.reset(socket_ops::socket(family, type, protocol, ec));
if (new_socket.get() == invalid_socket)
iocp_service_.on_completion(op, ec);
else
{
DWORD bytes_read = 0;
BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer,
0, address_length, address_length, &bytes_read, op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error);
else
iocp_service_.on_pending(op);
}
}
}
void win_iocp_socket_service_base::restart_accept_op(
socket_type s, socket_holder& new_socket, int family, int type,
int protocol, void* output_buffer, DWORD address_length, operation* op)
{
new_socket.reset();
iocp_service_.work_started();
asio::error_code ec;
new_socket.reset(socket_ops::socket(family, type, protocol, ec));
if (new_socket.get() == invalid_socket)
iocp_service_.on_completion(op, ec);
else
{
DWORD bytes_read = 0;
BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer,
0, address_length, address_length, &bytes_read, op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_reactor_op(
win_iocp_socket_service_base::base_implementation_type& impl,
int op_type, reactor_op* op)
{
select_reactor& r = get_reactor();
update_cancellation_thread_id(impl);
if (is_open(impl))
{
r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false, false);
return;
}
else
op->ec_ = asio::error::bad_descriptor;
iocp_service_.post_immediate_completion(op, false);
}
void win_iocp_socket_service_base::start_connect_op(
win_iocp_socket_service_base::base_implementation_type& impl,
int family, int type, const socket_addr_type* addr,
std::size_t addrlen, win_iocp_socket_connect_op_base* op)
{
// If ConnectEx is available, use that.
if (family == ASIO_OS_DEF(AF_INET)
|| family == ASIO_OS_DEF(AF_INET6))
{
if (connect_ex_fn connect_ex = get_connect_ex(impl, type))
{
union address_union
{
socket_addr_type base;
sockaddr_in4_type v4;
sockaddr_in6_type v6;
} a;
using namespace std; // For memset.
memset(&a, 0, sizeof(a));
a.base.sa_family = family;
socket_ops::bind(impl.socket_, &a.base,
family == ASIO_OS_DEF(AF_INET)
? sizeof(a.v4) : sizeof(a.v6), op->ec_);
if (op->ec_ && op->ec_ != asio::error::invalid_argument)
{
iocp_service_.post_immediate_completion(op, false);
return;
}
op->connect_ex_ = true;
update_cancellation_thread_id(impl);
iocp_service_.work_started();
BOOL result = connect_ex(impl.socket_,
addr, static_cast<int>(addrlen), 0, 0, 0, op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error);
else
iocp_service_.on_pending(op);
return;
}
}
// Otherwise, fall back to a reactor-based implementation.
select_reactor& r = get_reactor();
update_cancellation_thread_id(impl);
if ((impl.state_ & socket_ops::non_blocking) != 0
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
{
if (op->ec_ == asio::error::in_progress
|| op->ec_ == asio::error::would_block)
{
op->ec_ = asio::error_code();
r.start_op(select_reactor::connect_op, impl.socket_,
impl.reactor_data_, op, false, false);
return;
}
}
}
r.post_immediate_completion(op, false);
}
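// Note on the wildcard bind above: unlike a plain connect, ConnectEx
// requires the socket to be bound before it is called. Binding to the
// unspecified address satisfies that requirement, and an invalid_argument
// error (WSAEINVAL) is tolerated because it indicates the socket was
// already bound by the caller.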
void win_iocp_socket_service_base::close_for_destruction(
win_iocp_socket_service_base::base_implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "close"));
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
asio::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
if (r)
r->cleanup_descriptor_data(impl.reactor_data_);
}
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
}
void win_iocp_socket_service_base::update_cancellation_thread_id(
win_iocp_socket_service_base::base_implementation_type& impl)
{
#if defined(ASIO_ENABLE_CANCELIO)
if (impl.safe_cancellation_thread_id_ == 0)
impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
impl.safe_cancellation_thread_id_ = ~DWORD(0);
#else // defined(ASIO_ENABLE_CANCELIO)
(void)impl;
#endif // defined(ASIO_ENABLE_CANCELIO)
}
select_reactor& win_iocp_socket_service_base::get_reactor()
{
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (!r)
{
r = &(use_service<select_reactor>(context_));
interlocked_exchange_pointer(reinterpret_cast<void**>(&reactor_), r);
}
return *r;
}
win_iocp_socket_service_base::connect_ex_fn
win_iocp_socket_service_base::get_connect_ex(
win_iocp_socket_service_base::base_implementation_type& impl, int type)
{
#if defined(ASIO_DISABLE_CONNECTEX)
(void)impl;
(void)type;
return 0;
#else // defined(ASIO_DISABLE_CONNECTEX)
if (type != ASIO_OS_DEF(SOCK_STREAM)
&& type != ASIO_OS_DEF(SOCK_SEQPACKET))
return 0;
void* ptr = interlocked_compare_exchange_pointer(&connect_ex_, 0, 0);
if (!ptr)
{
GUID guid = { 0x25a207b9, 0xddf3, 0x4660,
{ 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e } };
DWORD bytes = 0;
if (::WSAIoctl(impl.socket_, SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid, sizeof(guid), &ptr, sizeof(ptr), &bytes, 0, 0) != 0)
{
// Set connect_ex_ to a special value to indicate that ConnectEx is
// unavailable. That way we won't bother trying to look it up again.
ptr = this;
}
interlocked_exchange_pointer(&connect_ex_, ptr);
}
return reinterpret_cast<connect_ex_fn>(ptr == this ? 0 : ptr);
#endif // defined(ASIO_DISABLE_CONNECTEX)
}
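// The GUID above is WSAID_CONNECTEX from <mswsock.h>. ConnectEx is a
// Winsock extension function that is not exported from any DLL, so the
// documented way to obtain it is a runtime query:
//
//   GUID guid = WSAID_CONNECTEX; // identical to the literal GUID above
//   connect_ex_fn fn = 0;
//   DWORD bytes = 0;
//   ::WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid),
//       &fn, sizeof(fn), &bytes, 0, 0); // s: an already-open socket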
win_iocp_socket_service_base::nt_set_info_fn
win_iocp_socket_service_base::get_nt_set_info()
{
void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0);
if (!ptr)
{
if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL"))
ptr = reinterpret_cast<void*>(GetProcAddress(h, "NtSetInformationFile"));
// On failure, set nt_set_info_ to a special value to indicate that the
// NtSetInformationFile function is unavailable. That way we won't bother
// trying to look it up again.
interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this);
}
return reinterpret_cast<nt_set_info_fn>(ptr == this ? 0 : ptr);
}
void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer(
void** dest, void* exch, void* cmp)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedCompareExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(exch),
reinterpret_cast<LONG>(cmp)));
#else
return InterlockedCompareExchangePointer(dest, exch, cmp);
#endif
}
void* win_iocp_socket_service_base::interlocked_exchange_pointer(
void** dest, void* val)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(val)));
#else
return InterlockedExchangePointer(dest, val);
#endif
}
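// A hedged note on the _M_IX86 special case above: on 32-bit x86 targets,
// some older SDK and compiler environments did not provide the ...Pointer
// interlocked variants. Since pointers and LONGs have the same size there,
// the LONG-based forms are a safe substitute.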
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP

View File

@ -0,0 +1,84 @@
//
// detail/impl/win_mutex.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
#define ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_mutex.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_mutex::win_mutex()
{
int error = do_init();
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "mutex");
}
int win_mutex::do_init()
{
#if defined(__MINGW32__)
// Not sure if MinGW supports structured exception handling, so for now
// we'll just call the Windows API and hope.
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# elif defined(ASIO_WINDOWS_APP)
if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))
return ::GetLastError();
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
return ::GetLastError();
# endif
return 0;
#else
__try
{
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# elif defined(ASIO_WINDOWS_APP)
if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))
return ::GetLastError();
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
return ::GetLastError();
# endif
}
__except(GetExceptionCode() == STATUS_NO_MEMORY
? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
{
return ERROR_OUTOFMEMORY;
}
return 0;
#endif
}
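// Why the structured exception handler above exists: on older versions of
// Windows, initialising a critical section may raise STATUS_NO_MEMORY
// under low-memory conditions rather than failing gracefully, so the
// exception is translated into a plain ERROR_OUTOFMEMORY return value.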
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_MUTEX_IPP

View File

@ -0,0 +1,448 @@
//
// detail/impl/win_object_handle_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2011 Boris Schaeling (boris@highscore.de)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)
#include "asio/detail/win_object_handle_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_object_handle_service::win_object_handle_service(execution_context& context)
: execution_context_service_base<win_object_handle_service>(context),
scheduler_(asio::use_service<scheduler_impl>(context)),
mutex_(),
impl_list_(0),
shutdown_(false)
{
}
void win_object_handle_service::shutdown()
{
mutex::scoped_lock lock(mutex_);
// Setting this flag to true prevents new objects from being registered, and
// new asynchronous wait operations from being started. We only need to worry
// about cleaning up the operations that are currently in progress.
shutdown_ = true;
op_queue<operation> ops;
for (implementation_type* impl = impl_list_; impl; impl = impl->next_)
ops.push(impl->op_queue_);
lock.unlock();
scheduler_.abandon_operations(ops);
}
void win_object_handle_service::construct(
win_object_handle_service::implementation_type& impl)
{
impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.owner_ = this;
// Insert implementation into linked list of all implementations.
mutex::scoped_lock lock(mutex_);
if (!shutdown_)
{
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
}
void win_object_handle_service::move_construct(
win_object_handle_service::implementation_type& impl,
win_object_handle_service::implementation_type& other_impl)
{
mutex::scoped_lock lock(mutex_);
// Insert implementation into linked list of all implementations.
if (!shutdown_)
{
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = other_impl.wait_handle_;
other_impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.op_queue_.push(other_impl.op_queue_);
impl.owner_ = this;
// We must not hold the lock while calling UnregisterWaitEx. This is because
// the registered callback function might be invoked while we are waiting for
// UnregisterWaitEx to complete.
lock.unlock();
if (impl.wait_handle_ != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);
if (!impl.op_queue_.empty())
register_wait_callback(impl, lock);
}
void win_object_handle_service::move_assign(
win_object_handle_service::implementation_type& impl,
win_object_handle_service& other_service,
win_object_handle_service::implementation_type& other_impl)
{
asio::error_code ignored_ec;
close(impl, ignored_ec);
mutex::scoped_lock lock(mutex_);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
      impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = other_impl.wait_handle_;
other_impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.op_queue_.push(other_impl.op_queue_);
impl.owner_ = this;
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
// We must not hold the lock while calling UnregisterWaitEx. This is because
// the registered callback function might be invoked while we are waiting for
// UnregisterWaitEx to complete.
lock.unlock();
if (impl.wait_handle_ != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);
if (!impl.op_queue_.empty())
register_wait_callback(impl, lock);
}
void win_object_handle_service::destroy(
win_object_handle_service::implementation_type& impl)
{
mutex::scoped_lock lock(mutex_);
// Remove implementation from linked list of all implementations.
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
    impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close"));
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.op_queue_.pop();
ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
scheduler_.post_deferred_completions(ops);
}
}
asio::error_code win_object_handle_service::assign(
win_object_handle_service::implementation_type& impl,
const native_handle_type& handle, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
impl.handle_ = handle;
ec = asio::error_code();
return ec;
}
asio::error_code win_object_handle_service::close(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close"));
mutex::scoped_lock lock(mutex_);
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
impl.op_queue_.pop();
op->ec_ = asio::error::operation_aborted;
completed_ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
if (::CloseHandle(impl.handle_))
{
impl.handle_ = INVALID_HANDLE_VALUE;
ec = asio::error_code();
}
else
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
scheduler_.post_deferred_completions(completed_ops);
}
else
{
ec = asio::error_code();
}
return ec;
}
asio::error_code win_object_handle_service::cancel(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "cancel"));
mutex::scoped_lock lock(mutex_);
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.op_queue_.pop();
completed_ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
ec = asio::error_code();
scheduler_.post_deferred_completions(completed_ops);
}
else
{
ec = asio::error::bad_descriptor;
}
return ec;
}
void win_object_handle_service::wait(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
switch (::WaitForSingleObject(impl.handle_, INFINITE))
{
case WAIT_FAILED:
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
break;
}
case WAIT_OBJECT_0:
case WAIT_ABANDONED:
default:
ec = asio::error_code();
break;
}
}
void win_object_handle_service::start_wait_op(
win_object_handle_service::implementation_type& impl, wait_op* op)
{
scheduler_.work_started();
if (is_open(impl))
{
mutex::scoped_lock lock(mutex_);
if (!shutdown_)
{
impl.op_queue_.push(op);
// Only the first operation to be queued gets to register a wait callback.
// Subsequent operations have to wait for the first to finish.
if (impl.op_queue_.front() == op)
register_wait_callback(impl, lock);
}
else
{
lock.unlock();
scheduler_.post_deferred_completion(op);
}
}
else
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_deferred_completion(op);
}
}
void win_object_handle_service::register_wait_callback(
win_object_handle_service::implementation_type& impl,
mutex::scoped_lock& lock)
{
lock.lock();
if (!RegisterWaitForSingleObject(&impl.wait_handle_,
impl.handle_, &win_object_handle_service::wait_callback,
&impl, INFINITE, WT_EXECUTEONLYONCE))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = ec;
impl.op_queue_.pop();
completed_ops.push(op);
}
lock.unlock();
scheduler_.post_deferred_completions(completed_ops);
}
}
void win_object_handle_service::wait_callback(PVOID param, BOOLEAN)
{
implementation_type* impl = static_cast<implementation_type*>(param);
mutex::scoped_lock lock(impl->owner_->mutex_);
if (impl->wait_handle_ != INVALID_HANDLE_VALUE)
{
::UnregisterWaitEx(impl->wait_handle_, NULL);
impl->wait_handle_ = INVALID_HANDLE_VALUE;
}
if (wait_op* op = impl->op_queue_.front())
{
op_queue<operation> completed_ops;
op->ec_ = asio::error_code();
impl->op_queue_.pop();
completed_ops.push(op);
if (!impl->op_queue_.empty())
{
if (!RegisterWaitForSingleObject(&impl->wait_handle_,
impl->handle_, &win_object_handle_service::wait_callback,
param, INFINITE, WT_EXECUTEONLYONCE))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
while ((op = impl->op_queue_.front()) != 0)
{
op->ec_ = ec;
impl->op_queue_.pop();
completed_ops.push(op);
}
}
}
scheduler_impl& sched = impl->owner_->scheduler_;
lock.unlock();
sched.post_deferred_completions(completed_ops);
}
}
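// Note on the re-registration in the callback above: the wait was
// registered with WT_EXECUTEONLYONCE, so each registration fires at most
// once. After completing the front operation, a fresh wait must be
// registered whenever further operations remain queued, otherwise they
// would never be dispatched.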
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)
#endif // ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP

View File

@ -0,0 +1,136 @@
//
// detail/impl/win_static_mutex.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
#define ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include <cstdio>
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_static_mutex.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void win_static_mutex::init()
{
int error = do_init();
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "static_mutex");
}
int win_static_mutex::do_init()
{
using namespace std; // For sprintf.
wchar_t mutex_name[128];
#if defined(ASIO_HAS_SECURE_RTL)
swprintf_s(
#else // defined(ASIO_HAS_SECURE_RTL)
_snwprintf(
#endif // defined(ASIO_HAS_SECURE_RTL)
mutex_name, 128, L"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p",
static_cast<unsigned int>(::GetCurrentProcessId()), this);
#if defined(ASIO_WINDOWS_APP)
HANDLE mutex = ::CreateMutexExW(0, mutex_name, CREATE_MUTEX_INITIAL_OWNER, 0);
#else // defined(ASIO_WINDOWS_APP)
HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name);
#endif // defined(ASIO_WINDOWS_APP)
DWORD last_error = ::GetLastError();
  if (mutex == 0)
    return last_error;
if (last_error == ERROR_ALREADY_EXISTS)
{
#if defined(ASIO_WINDOWS_APP)
::WaitForSingleObjectEx(mutex, INFINITE, false);
#else // defined(ASIO_WINDOWS_APP)
::WaitForSingleObject(mutex, INFINITE);
#endif // defined(ASIO_WINDOWS_APP)
}
if (initialised_)
{
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return 0;
}
#if defined(__MINGW32__)
// Not sure if MinGW supports structured exception handling, so for now
// we'll just call the Windows API and hope.
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
{
last_error = ::GetLastError();
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return last_error;
}
# endif
#else
__try
{
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# elif defined(ASIO_WINDOWS_APP)
if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))
{
last_error = ::GetLastError();
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return last_error;
}
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
{
last_error = ::GetLastError();
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return last_error;
}
# endif
}
__except(GetExceptionCode() == STATUS_NO_MEMORY
? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
{
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return ERROR_OUTOFMEMORY;
}
#endif
initialised_ = true;
::ReleaseMutex(mutex);
::CloseHandle(mutex);
return 0;
}
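// How the one-time initialisation above works: the mutex name embeds both
// the process id and the object's address, so the named mutex serialises
// only concurrent first-time initialisation of this particular static
// mutex. Threads that lose the race block in the wait call, then observe
// initialised_ already set and return without re-initialising the
// critical section.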
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP

View File

@ -0,0 +1,150 @@
//
// detail/impl/win_thread.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_THREAD_IPP
#define ASIO_DETAIL_IMPL_WIN_THREAD_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_APP) \
&& !defined(UNDER_CE)
#include <process.h>
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_thread.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_thread::~win_thread()
{
::CloseHandle(thread_);
// The exit_event_ handle is deliberately allowed to leak here since it
// is an error for the owner of an internal thread not to join() it.
}
void win_thread::join()
{
HANDLE handles[2] = { exit_event_, thread_ };
::WaitForMultipleObjects(2, handles, FALSE, INFINITE);
::CloseHandle(exit_event_);
if (terminate_threads())
{
::TerminateThread(thread_, 0);
}
else
{
::QueueUserAPC(apc_function, thread_, 0);
::WaitForSingleObject(thread_, INFINITE);
}
}
std::size_t win_thread::hardware_concurrency()
{
SYSTEM_INFO system_info;
::GetSystemInfo(&system_info);
return system_info.dwNumberOfProcessors;
}
void win_thread::start_thread(func_base* arg, unsigned int stack_size)
{
::HANDLE entry_event = 0;
arg->entry_event_ = entry_event = ::CreateEventW(0, true, false, 0);
if (!entry_event)
{
DWORD last_error = ::GetLastError();
delete arg;
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread.entry_event");
}
arg->exit_event_ = exit_event_ = ::CreateEventW(0, true, false, 0);
if (!exit_event_)
{
DWORD last_error = ::GetLastError();
delete arg;
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread.exit_event");
}
unsigned int thread_id = 0;
thread_ = reinterpret_cast<HANDLE>(::_beginthreadex(0,
stack_size, win_thread_function, arg, 0, &thread_id));
if (!thread_)
{
DWORD last_error = ::GetLastError();
delete arg;
if (entry_event)
::CloseHandle(entry_event);
if (exit_event_)
::CloseHandle(exit_event_);
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread");
}
if (entry_event)
{
::WaitForSingleObject(entry_event, INFINITE);
::CloseHandle(entry_event);
}
}
unsigned int __stdcall win_thread_function(void* arg)
{
win_thread::auto_func_base_ptr func = {
static_cast<win_thread::func_base*>(arg) };
::SetEvent(func.ptr->entry_event_);
func.ptr->run();
// Signal that the thread has finished its work, but rather than returning go
// to sleep to put the thread into a well known state. If the thread is being
// joined during global object destruction then it may be killed using
// TerminateThread (to avoid a deadlock in DllMain). Otherwise, the SleepEx
// call will be interrupted using QueueUserAPC and the thread will shut down
// cleanly.
HANDLE exit_event = func.ptr->exit_event_;
delete func.ptr;
func.ptr = 0;
::SetEvent(exit_event);
::SleepEx(INFINITE, TRUE);
return 0;
}
#if defined(WINVER) && (WINVER < 0x0500)
void __stdcall apc_function(ULONG) {}
#else
void __stdcall apc_function(ULONG_PTR) {}
#endif
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_APP)
// && !defined(UNDER_CE)
#endif // ASIO_DETAIL_IMPL_WIN_THREAD_IPP

View File

@ -0,0 +1,57 @@
//
// detail/impl/win_tss_ptr.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP
#define ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_tss_ptr.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
DWORD win_tss_ptr_create()
{
#if defined(UNDER_CE)
const DWORD out_of_indexes = 0xFFFFFFFF;
#else
const DWORD out_of_indexes = TLS_OUT_OF_INDEXES;
#endif
DWORD tss_key = ::TlsAlloc();
if (tss_key == out_of_indexes)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "tss");
}
return tss_key;
}
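// A minimal usage sketch for the slot allocated above (some_ptr is a
// hypothetical per-thread value; the real usage lives in win_tss_ptr):
//
//   DWORD key = win_tss_ptr_create();
//   ::TlsSetValue(key, some_ptr); // store a value visible to this thread
//   void* p = ::TlsGetValue(key); // read it back on the same thread
//   ::TlsFree(key);               // release the slot when finished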
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_TSS_PTR_IPP

View File

@ -0,0 +1,626 @@
//
// detail/impl/winrt_ssocket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
#include <cstring>
#include "asio/detail/winrt_ssocket_service_base.hpp"
#include "asio/detail/winrt_async_op.hpp"
#include "asio/detail/winrt_utils.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
winrt_ssocket_service_base::winrt_ssocket_service_base(
execution_context& context)
: scheduler_(use_service<scheduler_impl>(context)),
async_manager_(use_service<winrt_async_manager>(context)),
mutex_(),
impl_list_(0)
{
}
void winrt_ssocket_service_base::base_shutdown()
{
// Close all implementations, causing all operations to complete.
asio::detail::mutex::scoped_lock lock(mutex_);
base_implementation_type* impl = impl_list_;
while (impl)
{
asio::error_code ignored_ec;
close(*impl, ignored_ec);
impl = impl->next_;
}
}
void winrt_ssocket_service_base::construct(
winrt_ssocket_service_base::base_implementation_type& impl)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void winrt_ssocket_service_base::base_move_construct(
winrt_ssocket_service_base::base_implementation_type& impl,
winrt_ssocket_service_base::base_implementation_type& other_impl)
ASIO_NOEXCEPT
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = nullptr;
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void winrt_ssocket_service_base::base_move_assign(
winrt_ssocket_service_base::base_implementation_type& impl,
winrt_ssocket_service_base& other_service,
winrt_ssocket_service_base::base_implementation_type& other_impl)
{
asio::error_code ignored_ec;
close(impl, ignored_ec);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
      impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.socket_ = other_impl.socket_;
other_impl.socket_ = nullptr;
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(other_service.mutex_);
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
}
void winrt_ssocket_service_base::destroy(
winrt_ssocket_service_base::base_implementation_type& impl)
{
asio::error_code ignored_ec;
close(impl, ignored_ec);
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
    impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
asio::error_code winrt_ssocket_service_base::close(
winrt_ssocket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
delete impl.socket_;
impl.socket_ = nullptr;
ec = asio::error_code();
return ec;
}
winrt_ssocket_service_base::native_handle_type
winrt_ssocket_service_base::release(
winrt_ssocket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
return nullptr;
cancel(impl, ec);
if (ec)
return nullptr;
native_handle_type tmp = impl.socket_;
impl.socket_ = nullptr;
return tmp;
}
std::size_t winrt_ssocket_service_base::do_get_endpoint(
const base_implementation_type& impl, bool local,
void* addr, std::size_t addr_len, asio::error_code& ec) const
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return addr_len;
}
try
{
std::string addr_string = winrt_utils::string(local
? impl.socket_->Information->LocalAddress->CanonicalName
: impl.socket_->Information->RemoteAddress->CanonicalName);
unsigned short port = winrt_utils::integer(local
? impl.socket_->Information->LocalPort
: impl.socket_->Information->RemotePort);
unsigned long scope = 0;
switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family)
{
case ASIO_OS_DEF(AF_INET):
if (addr_len < sizeof(sockaddr_in4_type))
{
ec = asio::error::invalid_argument;
return addr_len;
}
else
{
socket_ops::inet_pton(ASIO_OS_DEF(AF_INET), addr_string.c_str(),
&reinterpret_cast<sockaddr_in4_type*>(addr)->sin_addr, &scope, ec);
reinterpret_cast<sockaddr_in4_type*>(addr)->sin_port
= socket_ops::host_to_network_short(port);
ec = asio::error_code();
return sizeof(sockaddr_in4_type);
}
case ASIO_OS_DEF(AF_INET6):
if (addr_len < sizeof(sockaddr_in6_type))
{
ec = asio::error::invalid_argument;
return addr_len;
}
else
{
socket_ops::inet_pton(ASIO_OS_DEF(AF_INET6), addr_string.c_str(),
&reinterpret_cast<sockaddr_in6_type*>(addr)->sin6_addr, &scope, ec);
reinterpret_cast<sockaddr_in6_type*>(addr)->sin6_port
= socket_ops::host_to_network_short(port);
ec = asio::error_code();
return sizeof(sockaddr_in6_type);
}
default:
ec = asio::error::address_family_not_supported;
return addr_len;
}
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
return addr_len;
}
}
asio::error_code winrt_ssocket_service_base::do_set_option(
winrt_ssocket_service_base::base_implementation_type& impl,
int level, int optname, const void* optval,
std::size_t optlen, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
try
{
if (level == ASIO_OS_DEF(SOL_SOCKET)
&& optname == ASIO_OS_DEF(SO_KEEPALIVE))
{
if (optlen == sizeof(int))
{
int value = 0;
std::memcpy(&value, optval, optlen);
impl.socket_->Control->KeepAlive = !!value;
ec = asio::error_code();
}
else
{
ec = asio::error::invalid_argument;
}
}
else if (level == ASIO_OS_DEF(IPPROTO_TCP)
&& optname == ASIO_OS_DEF(TCP_NODELAY))
{
if (optlen == sizeof(int))
{
int value = 0;
std::memcpy(&value, optval, optlen);
impl.socket_->Control->NoDelay = !!value;
ec = asio::error_code();
}
else
{
ec = asio::error::invalid_argument;
}
}
else
{
ec = asio::error::invalid_argument;
}
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
}
return ec;
}
void winrt_ssocket_service_base::do_get_option(
const winrt_ssocket_service_base::base_implementation_type& impl,
int level, int optname, void* optval,
std::size_t* optlen, asio::error_code& ec) const
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return;
}
try
{
if (level == ASIO_OS_DEF(SOL_SOCKET)
&& optname == ASIO_OS_DEF(SO_KEEPALIVE))
{
if (*optlen >= sizeof(int))
{
int value = impl.socket_->Control->KeepAlive ? 1 : 0;
std::memcpy(optval, &value, sizeof(int));
*optlen = sizeof(int);
ec = asio::error_code();
}
else
{
ec = asio::error::invalid_argument;
}
}
else if (level == ASIO_OS_DEF(IPPROTO_TCP)
&& optname == ASIO_OS_DEF(TCP_NODELAY))
{
if (*optlen >= sizeof(int))
{
int value = impl.socket_->Control->NoDelay ? 1 : 0;
std::memcpy(optval, &value, sizeof(int));
*optlen = sizeof(int);
ec = asio::error_code();
}
else
{
ec = asio::error::invalid_argument;
}
}
else
{
ec = asio::error::invalid_argument;
}
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
}
}
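
// Synchronously connect the socket to the peer address given in sockaddr
// form.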
asio::error_code winrt_ssocket_service_base::do_connect(
winrt_ssocket_service_base::base_implementation_type& impl,
const void* addr, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
char addr_string[max_addr_v6_str_len];
  unsigned short port = 0;
switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family)
{
case ASIO_OS_DEF(AF_INET):
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET),
&reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_addr,
addr_string, sizeof(addr_string), 0, ec);
port = socket_ops::network_to_host_short(
reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_port);
break;
case ASIO_OS_DEF(AF_INET6):
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6),
&reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_addr,
addr_string, sizeof(addr_string), 0, ec);
port = socket_ops::network_to_host_short(
reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_port);
break;
default:
ec = asio::error::address_family_not_supported;
return ec;
}
if (!ec) try
{
async_manager_.sync(impl.socket_->ConnectAsync(
ref new Windows::Networking::HostName(
winrt_utils::string(addr_string)),
winrt_utils::string(port)), ec);
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
}
return ec;
}
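
// Start an asynchronous connect. Errors detected up front are posted back
// through the scheduler for deferred completion.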
void winrt_ssocket_service_base::start_connect_op(
winrt_ssocket_service_base::base_implementation_type& impl,
const void* addr, winrt_async_op<void>* op, bool is_continuation)
{
if (!is_open(impl))
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
char addr_string[max_addr_v6_str_len];
unsigned short port = 0;
switch (reinterpret_cast<const socket_addr_type*>(addr)->sa_family)
{
case ASIO_OS_DEF(AF_INET):
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET),
&reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_addr,
addr_string, sizeof(addr_string), 0, op->ec_);
port = socket_ops::network_to_host_short(
reinterpret_cast<const sockaddr_in4_type*>(addr)->sin_port);
break;
case ASIO_OS_DEF(AF_INET6):
socket_ops::inet_ntop(ASIO_OS_DEF(AF_INET6),
&reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_addr,
addr_string, sizeof(addr_string), 0, op->ec_);
port = socket_ops::network_to_host_short(
reinterpret_cast<const sockaddr_in6_type*>(addr)->sin6_port);
break;
default:
op->ec_ = asio::error::address_family_not_supported;
break;
}
if (op->ec_)
{
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
try
{
async_manager_.async(impl.socket_->ConnectAsync(
ref new Windows::Networking::HostName(
winrt_utils::string(addr_string)),
winrt_utils::string(port)), op);
}
catch (Platform::Exception^ e)
{
op->ec_ = asio::error_code(
e->HResult, asio::system_category());
scheduler_.post_immediate_completion(op, is_continuation);
}
}
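
// Synchronously send a single buffer on the socket. Message flags are not
// supported.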
std::size_t winrt_ssocket_service_base::do_send(
winrt_ssocket_service_base::base_implementation_type& impl,
const asio::const_buffer& data,
socket_base::message_flags flags, asio::error_code& ec)
{
if (flags)
{
ec = asio::error::operation_not_supported;
return 0;
}
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return 0;
}
try
{
buffer_sequence_adapter<asio::const_buffer,
asio::const_buffer> bufs(asio::buffer(data));
if (bufs.all_empty())
{
ec = asio::error_code();
return 0;
}
return async_manager_.sync(
impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), ec);
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
return 0;
}
}
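
// Start an asynchronous send of a single buffer.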
void winrt_ssocket_service_base::start_send_op(
winrt_ssocket_service_base::base_implementation_type& impl,
const asio::const_buffer& data, socket_base::message_flags flags,
winrt_async_op<unsigned int>* op, bool is_continuation)
{
if (flags)
{
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
if (!is_open(impl))
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
try
{
buffer_sequence_adapter<asio::const_buffer,
asio::const_buffer> bufs(asio::buffer(data));
if (bufs.all_empty())
{
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
async_manager_.async(
impl.socket_->OutputStream->WriteAsync(bufs.buffers()[0]), op);
}
catch (Platform::Exception^ e)
{
op->ec_ = asio::error_code(e->HResult,
asio::system_category());
scheduler_.post_immediate_completion(op, is_continuation);
}
}
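
// Synchronously receive into a single buffer. A successful zero-byte read
// is reported as end-of-file.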
std::size_t winrt_ssocket_service_base::do_receive(
winrt_ssocket_service_base::base_implementation_type& impl,
const asio::mutable_buffer& data,
socket_base::message_flags flags, asio::error_code& ec)
{
if (flags)
{
ec = asio::error::operation_not_supported;
return 0;
}
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return 0;
}
try
{
buffer_sequence_adapter<asio::mutable_buffer,
asio::mutable_buffer> bufs(asio::buffer(data));
if (bufs.all_empty())
{
ec = asio::error_code();
return 0;
}
async_manager_.sync(
impl.socket_->InputStream->ReadAsync(
bufs.buffers()[0], bufs.buffers()[0]->Capacity,
Windows::Storage::Streams::InputStreamOptions::Partial), ec);
std::size_t bytes_transferred = bufs.buffers()[0]->Length;
if (bytes_transferred == 0 && !ec)
{
ec = asio::error::eof;
}
return bytes_transferred;
}
catch (Platform::Exception^ e)
{
ec = asio::error_code(e->HResult,
asio::system_category());
return 0;
}
}
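
// Start an asynchronous receive into a single buffer.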
void winrt_ssocket_service_base::start_receive_op(
winrt_ssocket_service_base::base_implementation_type& impl,
const asio::mutable_buffer& data, socket_base::message_flags flags,
winrt_async_op<Windows::Storage::Streams::IBuffer^>* op,
bool is_continuation)
{
if (flags)
{
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
if (!is_open(impl))
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
try
{
buffer_sequence_adapter<asio::mutable_buffer,
asio::mutable_buffer> bufs(asio::buffer(data));
if (bufs.all_empty())
{
scheduler_.post_immediate_completion(op, is_continuation);
return;
}
async_manager_.async(
impl.socket_->InputStream->ReadAsync(
bufs.buffers()[0], bufs.buffers()[0]->Capacity,
Windows::Storage::Streams::InputStreamOptions::Partial), op);
}
catch (Platform::Exception^ e)
{
op->ec_ = asio::error_code(e->HResult,
asio::system_category());
scheduler_.post_immediate_completion(op, is_continuation);
}
}

} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // defined(ASIO_WINDOWS_RUNTIME)

#endif // ASIO_DETAIL_IMPL_WINRT_SSOCKET_SERVICE_BASE_IPP

View File

@ -0,0 +1,92 @@
//
// detail/impl/winrt_timer_scheduler.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP
#define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include "asio/detail/config.hpp"

#if defined(ASIO_WINDOWS_RUNTIME)

#include "asio/detail/push_options.hpp"

namespace asio {
namespace detail {
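
// Add a timer queue to use for dispatching timers.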
template <typename Time_Traits>
void winrt_timer_scheduler::add_timer_queue(timer_queue<Time_Traits>& queue)
{
do_add_timer_queue(queue);
}

// Remove a timer queue from the reactor.
template <typename Time_Traits>
void winrt_timer_scheduler::remove_timer_queue(timer_queue<Time_Traits>& queue)
{
do_remove_timer_queue(queue);
}
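
// Schedule a new operation in the given timer queue to expire at the
// specified absolute time.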
template <typename Time_Traits>
void winrt_timer_scheduler::schedule_timer(timer_queue<Time_Traits>& queue,
const typename Time_Traits::time_type& time,
typename timer_queue<Time_Traits>::per_timer_data& timer, wait_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
event_.signal(lock);
}
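
// Cancel the timer operations associated with the given token. Returns the
// number of operations that have been cancelled.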
template <typename Time_Traits>
std::size_t winrt_timer_scheduler::cancel_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& timer,
std::size_t max_cancelled)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
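
// Move the timer operations associated with the given timer.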
template <typename Time_Traits>
void winrt_timer_scheduler::move_timer(timer_queue<Time_Traits>& queue,
typename timer_queue<Time_Traits>::per_timer_data& to,
typename timer_queue<Time_Traits>::per_timer_data& from)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(to, ops);
queue.move_timer(to, from);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}

} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // defined(ASIO_WINDOWS_RUNTIME)

#endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_HPP

View File

@ -0,0 +1,121 @@
//
// detail/impl/winrt_timer_scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP
#define ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include "asio/detail/config.hpp"

#if defined(ASIO_WINDOWS_RUNTIME)

#include "asio/detail/bind_handler.hpp"
#include "asio/detail/winrt_timer_scheduler.hpp"

#include "asio/detail/push_options.hpp"

namespace asio {
namespace detail {
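
// Construct the timer scheduler and launch the background thread on which
// timers are dispatched.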
winrt_timer_scheduler::winrt_timer_scheduler(execution_context& context)
: execution_context_service_base<winrt_timer_scheduler>(context),
scheduler_(use_service<scheduler_impl>(context)),
mutex_(),
event_(),
timer_queues_(),
thread_(0),
stop_thread_(false),
shutdown_(false)
{
thread_ = new asio::detail::thread(
bind_handler(&winrt_timer_scheduler::call_run_thread, this));
}
winrt_timer_scheduler::~winrt_timer_scheduler()
{
shutdown();
}
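
// Stop the background thread and abandon any outstanding timer operations.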
void winrt_timer_scheduler::shutdown()
{
asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
stop_thread_ = true;
event_.signal(lock);
lock.unlock();
if (thread_)
{
thread_->join();
delete thread_;
thread_ = 0;
}
op_queue<operation> ops;
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
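
// No special action is required on fork.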
void winrt_timer_scheduler::notify_fork(execution_context::fork_event)
{
}
void winrt_timer_scheduler::init_task()
{
}
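
// The background thread: wait for the next timer to expire, then post any
// ready timer handlers to the scheduler for execution.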
void winrt_timer_scheduler::run_thread()
{
asio::detail::mutex::scoped_lock lock(mutex_);
while (!stop_thread_)
{
const long max_wait_duration = 5 * 60 * 1000000;
long wait_duration = timer_queues_.wait_duration_usec(max_wait_duration);
event_.wait_for_usec(lock, wait_duration);
event_.clear(lock);
op_queue<operation> ops;
timer_queues_.get_ready_timers(ops);
if (!ops.empty())
{
lock.unlock();
scheduler_.post_deferred_completions(ops);
lock.lock();
}
}
}
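
// Entry point used to launch run_thread() on the background thread.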
void winrt_timer_scheduler::call_run_thread(winrt_timer_scheduler* scheduler)
{
scheduler->run_thread();
}
void winrt_timer_scheduler::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void winrt_timer_scheduler::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}

} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // defined(ASIO_WINDOWS_RUNTIME)

#endif // ASIO_DETAIL_IMPL_WINRT_TIMER_SCHEDULER_IPP

View File

@ -0,0 +1,82 @@
//
// detail/impl/winsock_init.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2021 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP
#define ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include "asio/detail/config.hpp"

#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)

#include "asio/detail/socket_types.hpp"
#include "asio/detail/winsock_init.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"

#include "asio/detail/push_options.hpp"

namespace asio {
namespace detail {
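
// Initialise Winsock the first time startup() is called; subsequent calls
// simply increment the reference count.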
void winsock_init_base::startup(data& d,
unsigned char major, unsigned char minor)
{
if (::InterlockedIncrement(&d.init_count_) == 1)
{
WSADATA wsa_data;
long result = ::WSAStartup(MAKEWORD(major, minor), &wsa_data);
::InterlockedExchange(&d.result_, result);
}
}
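
// Record that the program has initialised Winsock itself, maintaining the
// reference count without calling WSAStartup.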
void winsock_init_base::manual_startup(data& d)
{
if (::InterlockedIncrement(&d.init_count_) == 1)
{
::InterlockedExchange(&d.result_, 0);
}
}
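
// Decrement the reference count and call WSACleanup when it reaches zero.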
void winsock_init_base::cleanup(data& d)
{
if (::InterlockedDecrement(&d.init_count_) == 0)
{
::WSACleanup();
}
}
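
// Balance a prior manual_startup() without calling WSACleanup.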
void winsock_init_base::manual_cleanup(data& d)
{
::InterlockedDecrement(&d.init_count_);
}
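
// Throw an exception if the recorded WSAStartup result indicates failure.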
void winsock_init_base::throw_on_error(data& d)
{
long result = ::InterlockedExchangeAdd(&d.result_, 0);
if (result != 0)
{
asio::error_code ec(result,
asio::error::get_system_category());
asio::detail::throw_error(ec, "winsock");
}
}

} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)

#endif // ASIO_DETAIL_IMPL_WINSOCK_INIT_IPP