author    Kevin Smith <git@kismith.co.uk>  2012-08-02 20:41:55 (GMT)
committer Kevin Smith <git@kismith.co.uk>  2012-08-02 21:03:09 (GMT)
commit    d5ace22054203c7989691ae8b3fa4e4784d1b57e (patch)
tree      64d400cdb10644967df183d0f202fcbf8160a773 /3rdParty/Boost/src/boost/asio/detail/impl
parent    6f26d9aa86f0909af13b23b1a925b8d492e74154 (diff)
Add two extra Boost dependencies, upgrade to 1.47.0 (ks/boost1.47)
Diffstat (limited to '3rdParty/Boost/src/boost/asio/detail/impl')
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/descriptor_ops.ipp  85
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.hpp  5
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.ipp  121
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.hpp  5
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.ipp  180
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/eventfd_select_interrupter.ipp  47
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/handler_tracking.ipp  299
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.hpp  5
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp  139
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/pipe_select_interrupter.ipp  25
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/reactive_descriptor_service.ipp  85
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/reactive_serial_port_service.ipp  6
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/reactive_socket_service_base.ipp  69
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/resolver_service_base.ipp  32
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.hpp  5
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.ipp  39
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/service_registry.ipp  32
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/signal_set_service.ipp  592
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/socket_ops.ipp  241
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/socket_select_interrupter.ipp  20
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/strand_service.hpp  44
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/strand_service.ipp  41
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.hpp  16
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.ipp  14
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_handle_service.ipp  84
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.hpp  21
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.ipp  20
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_serial_port_service.ipp  6
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_socket_service_base.ipp  102
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/win_static_mutex.ipp  120
-rw-r--r--  3rdParty/Boost/src/boost/asio/detail/impl/win_thread.ipp  13
31 files changed, 2343 insertions, 170 deletions
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/descriptor_ops.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/descriptor_ops.ipp
index 9a2bb3b..ca2222c 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/descriptor_ops.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/descriptor_ops.ipp
@@ -43,8 +43,19 @@ int close(int d, state_type& state, boost::system::error_code& ec)
int result = 0;
if (d != -1)
{
- if (state & internal_non_blocking)
+ errno = 0;
+ result = error_wrapper(::close(d), ec);
+
+ if (result != 0
+ && (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again))
{
+ // According to UNIX Network Programming Vol. 1, it is possible for
+ // close() to fail with EWOULDBLOCK under certain circumstances. What
+ // isn't clear is the state of the descriptor after this error. The one
+ // current OS where this behaviour is seen, Windows, says that the socket
+ // remains open. Therefore we'll put the descriptor back into blocking
+ // mode and have another attempt at closing it.
#if defined(__SYMBIAN32__)
int flags = ::fcntl(d, F_GETFL, 0);
if (flags >= 0)
@@ -53,11 +64,11 @@ int close(int d, state_type& state, boost::system::error_code& ec)
ioctl_arg_type arg = 0;
::ioctl(d, FIONBIO, &arg);
#endif // defined(__SYMBIAN32__)
- state &= ~internal_non_blocking;
- }
+ state &= ~non_blocking;
- errno = 0;
- result = error_wrapper(::close(d), ec);
+ errno = 0;
+ result = error_wrapper(::close(d), ec);
+ }
}
if (result == 0)
@@ -65,8 +76,49 @@ int close(int d, state_type& state, boost::system::error_code& ec)
return result;
}
-bool set_internal_non_blocking(int d,
- state_type& state, boost::system::error_code& ec)
+bool set_user_non_blocking(int d, state_type& state,
+ bool value, boost::system::error_code& ec)
+{
+ if (d == -1)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return false;
+ }
+
+ errno = 0;
+#if defined(__SYMBIAN32__)
+ int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec);
+ if (result >= 0)
+ {
+ errno = 0;
+ int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
+ result = error_wrapper(::fcntl(d, F_SETFL, flag), ec);
+ }
+#else // defined(__SYMBIAN32__)
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec);
+#endif // defined(__SYMBIAN32__)
+
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+ if (value)
+ state |= user_set_non_blocking;
+ else
+ {
+ // Clearing the user-set non-blocking mode always overrides any
+ // internally-set non-blocking flag. Any subsequent asynchronous
+ // operations will need to re-enable non-blocking I/O.
+ state &= ~(user_set_non_blocking | internal_non_blocking);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+bool set_internal_non_blocking(int d, state_type& state,
+ bool value, boost::system::error_code& ec)
{
if (d == -1)
{
@@ -74,23 +126,36 @@ bool set_internal_non_blocking(int d,
return false;
}
+ if (!value && (state & user_set_non_blocking))
+ {
+ // It does not make sense to clear the internal non-blocking flag if the
+ // user still wants non-blocking behaviour. Return an error and let the
+ // caller figure out whether to update the user-set non-blocking flag.
+ ec = boost::asio::error::invalid_argument;
+ return false;
+ }
+
errno = 0;
#if defined(__SYMBIAN32__)
int result = error_wrapper(::fcntl(d, F_GETFL, 0), ec);
if (result >= 0)
{
errno = 0;
- result = error_wrapper(::fcntl(d, F_SETFL, result | O_NONBLOCK), ec);
+ int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
+ result = error_wrapper(::fcntl(d, F_SETFL, flag), ec);
}
#else // defined(__SYMBIAN32__)
- ioctl_arg_type arg = 1;
+ ioctl_arg_type arg = (value ? 1 : 0);
int result = error_wrapper(::ioctl(d, FIONBIO, &arg), ec);
#endif // defined(__SYMBIAN32__)
if (result >= 0)
{
ec = boost::system::error_code();
- state |= internal_non_blocking;
+ if (value)
+ state |= internal_non_blocking;
+ else
+ state &= ~internal_non_blocking;
return true;
}
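
Note on the change above: splitting set_internal_non_blocking() into user and internal variants appears to back the non_blocking()/native_non_blocking() members that Asio's public socket and descriptor classes gained in Boost 1.47. A minimal sketch, assuming that documented 1.47 API:

    // Sketch only: socket.non_blocking() maps onto the user_set_non_blocking
    // state bit patched in above; native_non_blocking() reflects the actual
    // O_NONBLOCK/FIONBIO setting that async operations also toggle internally.
    #include <boost/asio.hpp>

    int main()
    {
      boost::asio::io_service io_service;
      boost::asio::ip::tcp::socket socket(io_service, boost::asio::ip::tcp::v4());

      // User-requested non-blocking mode: synchronous operations may now
      // fail with boost::asio::error::would_block.
      socket.non_blocking(true);

      // Underlying descriptor mode as asio last set it.
      bool native_mode = socket.native_non_blocking();
      (void)native_mode;
      return 0;
    }

Clearing the user flag also clears the internal flag, as the comment in the patch above explains, so subsequent async operations re-enable non-blocking I/O themselves.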
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.hpp b/3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.hpp
index a6b7078..54b313f 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.hpp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.hpp
@@ -58,11 +58,12 @@ void dev_poll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
template <typename Time_Traits>
std::size_t dev_poll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
- typename timer_queue<Time_Traits>::per_timer_data& timer)
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
- std::size_t n = queue.cancel_timer(timer, ops);
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
io_service_.post_deferred_completions(ops);
return n;
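
The max_cancelled argument threaded through cancel_timer() here presumably exists to support cancelling a single pending wait (deadline_timer::cancel_one() in the 1.47 documentation) alongside the existing cancel-all behaviour. A hedged usage sketch:

    // Sketch assuming 1.47's deadline_timer::cancel_one(); plain cancel()
    // presumably reaches this reactor code with max_cancelled set to "all".
    #include <boost/asio.hpp>
    #include <iostream>
    #include <string>

    void on_timeout(const boost::system::error_code& ec)
    {
      // A cancelled wait completes with operation_aborted.
      std::cout << (ec ? ec.message() : std::string("expired")) << std::endl;
    }

    int main()
    {
      boost::asio::io_service io_service;
      boost::asio::deadline_timer timer(io_service,
          boost::posix_time::seconds(2));

      timer.async_wait(&on_timeout);
      timer.async_wait(&on_timeout);

      // Aborts at most one of the two pending waits; the other still runs
      // to expiry. cancel() would abort both.
      timer.cancel_one();

      io_service.run();
      return 0;
    }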
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.ipp
index b9d5e61..a098256 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/dev_poll_reactor.ipp
@@ -19,6 +19,7 @@
#if defined(BOOST_ASIO_HAS_DEV_POLL)
+#include <boost/assert.hpp>
#include <boost/asio/detail/dev_poll_reactor.hpp>
#include <boost/asio/detail/throw_error.hpp>
#include <boost/asio/error.hpp>
@@ -38,7 +39,7 @@ dev_poll_reactor::dev_poll_reactor(boost::asio::io_service& io_service)
shutdown_(false)
{
// Add the interrupter's descriptor to /dev/poll.
- ::pollfd ev = { 0 };
+ ::pollfd ev = { 0, 0, 0 };
ev.fd = interrupter_.read_descriptor();
ev.events = POLLIN | POLLERR;
ev.revents = 0;
@@ -63,8 +64,68 @@ void dev_poll_reactor::shutdown_service()
op_queue_[i].get_all_operations(ops);
timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
}
+// Helper class to re-register all descriptors with /dev/poll.
+class dev_poll_reactor::fork_helper
+{
+public:
+ fork_helper(dev_poll_reactor* reactor, short events)
+ : reactor_(reactor), events_(events)
+ {
+ }
+
+ bool set(int descriptor)
+ {
+ ::pollfd& ev = reactor_->add_pending_event_change(descriptor);
+ ev.events = events_;
+ return true;
+ }
+
+private:
+ dev_poll_reactor* reactor_;
+ short events_;
+};
+
+void dev_poll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ {
+ detail::mutex::scoped_lock lock(mutex_);
+
+ if (dev_poll_fd_ != -1)
+ ::close(dev_poll_fd_);
+ dev_poll_fd_ = -1;
+ dev_poll_fd_ = do_dev_poll_create();
+
+ interrupter_.recreate();
+
+ // Add the interrupter's descriptor to /dev/poll.
+ ::pollfd ev = { 0, 0, 0 };
+ ev.fd = interrupter_.read_descriptor();
+ ev.events = POLLIN | POLLERR;
+ ev.revents = 0;
+ ::write(dev_poll_fd_, &ev, sizeof(ev));
+
+ // Re-register all descriptors with /dev/poll. The changes will be written
+ // to the /dev/poll descriptor the next time the reactor is run.
+ op_queue<operation> ops;
+ fork_helper read_op_helper(this, POLLERR | POLLHUP | POLLIN);
+ op_queue_[read_op].get_descriptors(read_op_helper, ops);
+ fork_helper write_op_helper(this, POLLERR | POLLHUP | POLLOUT);
+ op_queue_[write_op].get_descriptors(write_op_helper, ops);
+ fork_helper except_op_helper(this, POLLERR | POLLHUP | POLLPRI);
+ op_queue_[except_op].get_descriptors(except_op_helper, ops);
+ interrupter_.interrupt();
+
+ // The ops op_queue will always be empty because the fork_helper's set()
+ // member function never returns false.
+ BOOST_ASSERT(ops.empty());
+ }
+}
+
void dev_poll_reactor::init_task()
{
io_service_.init_task();
@@ -75,6 +136,32 @@ int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&)
return 0;
}
+int dev_poll_reactor::register_internal_descriptor(int op_type,
+ socket_type descriptor, per_descriptor_data&, reactor_op* op)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ op_queue_[op_type].enqueue_operation(descriptor, op);
+ ::pollfd& ev = add_pending_event_change(descriptor);
+ ev.events = POLLERR | POLLHUP;
+ switch (op_type)
+ {
+ case read_op: ev.events |= POLLIN; break;
+ case write_op: ev.events |= POLLOUT; break;
+ case except_op: ev.events |= POLLPRI; break;
+ default: break;
+ }
+ interrupter_.interrupt();
+
+ return 0;
+}
+
+void dev_poll_reactor::move_descriptor(socket_type,
+ dev_poll_reactor::per_descriptor_data&,
+ dev_poll_reactor::per_descriptor_data&)
+{
+}
+
void dev_poll_reactor::start_op(int op_type, socket_type descriptor,
dev_poll_reactor::per_descriptor_data&,
reactor_op* op, bool allow_speculative)
@@ -129,8 +216,8 @@ void dev_poll_reactor::cancel_ops(socket_type descriptor,
cancel_ops_unlocked(descriptor, boost::asio::error::operation_aborted);
}
-void dev_poll_reactor::close_descriptor(socket_type descriptor,
- dev_poll_reactor::per_descriptor_data&)
+void dev_poll_reactor::deregister_descriptor(socket_type descriptor,
+ dev_poll_reactor::per_descriptor_data&, bool)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
@@ -143,6 +230,26 @@ void dev_poll_reactor::close_descriptor(socket_type descriptor,
cancel_ops_unlocked(descriptor, boost::asio::error::operation_aborted);
}
+void dev_poll_reactor::deregister_internal_descriptor(
+ socket_type descriptor, dev_poll_reactor::per_descriptor_data&)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ // Remove the descriptor from /dev/poll. Since this function is only called
+ // during a fork, we can apply the change immediately.
+ ::pollfd ev = { 0, 0, 0 };
+ ev.fd = descriptor;
+ ev.events = POLLREMOVE;
+ ev.revents = 0;
+ ::write(dev_poll_fd_, &ev, sizeof(ev));
+
+ // Destroy all operations associated with the descriptor.
+ op_queue<operation> ops;
+ boost::system::error_code ec;
+ for (int i = 0; i < max_ops; ++i)
+ op_queue_[i].cancel_operations(descriptor, ops, ec);
+}
+
void dev_poll_reactor::run(bool block, op_queue<operation>& ops)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
@@ -179,8 +286,8 @@ void dev_poll_reactor::run(bool block, op_queue<operation>& ops)
lock.unlock();
// Block on the /dev/poll descriptor.
- ::pollfd events[128] = { { 0 } };
- ::dvpoll dp = { 0 };
+ ::pollfd events[128] = { { 0, 0, 0 } };
+ ::dvpoll dp = { 0, 0, 0 };
dp.dp_fds = events;
dp.dp_nfds = 128;
dp.dp_timeout = timeout;
@@ -228,7 +335,7 @@ void dev_poll_reactor::run(bool block, op_queue<operation>& ops)
// The poll operation can produce POLLHUP or POLLERR events when there
// is no operation pending, so if we do not remove the descriptor we
// can end up in a tight polling loop.
- ::pollfd ev = { 0 };
+ ::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLREMOVE;
ev.revents = 0;
@@ -236,7 +343,7 @@ void dev_poll_reactor::run(bool block, op_queue<operation>& ops)
}
else
{
- ::pollfd ev = { 0 };
+ ::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLERR | POLLHUP;
if (more_reads)
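
fork_service(), interrupter_.recreate() and the descriptor re-registration above are the reactor half of the fork support introduced in this Boost version; applications drive it via io_service::notify_fork() using the fork_event values visible in this diff. A minimal sketch of the intended call sequence (error handling omitted):

    // Sketch of the notify_fork() protocol that ends up in the reactors'
    // fork_service() hooks shown in this patch.
    #include <boost/asio.hpp>
    #include <unistd.h>

    int main()
    {
      boost::asio::io_service io_service;
      // ... set up sockets, timers, resolvers ...

      io_service.notify_fork(boost::asio::io_service::fork_prepare);

      if (fork() == 0)
      {
        // Child: the reactor recreates its epoll, kqueue or /dev/poll state
        // and re-registers descriptors, as in fork_service() above.
        io_service.notify_fork(boost::asio::io_service::fork_child);
        io_service.run();
      }
      else
      {
        // Parent keeps its existing kernel objects.
        io_service.notify_fork(boost::asio::io_service::fork_parent);
        io_service.run();
      }
      return 0;
    }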
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.hpp b/3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.hpp
index 0339cfd..0e8ae40 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.hpp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.hpp
@@ -56,11 +56,12 @@ void epoll_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
template <typename Time_Traits>
std::size_t epoll_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
- typename timer_queue<Time_Traits>::per_timer_data& timer)
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
- std::size_t n = queue.cancel_timer(timer, ops);
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
io_service_.post_deferred_completions(ops);
return n;
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.ipp
index 5afb891..3be2426 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/epoll_reactor.ipp
@@ -40,11 +40,7 @@ epoll_reactor::epoll_reactor(boost::asio::io_service& io_service)
io_service_(use_service<io_service_impl>(io_service)),
mutex_(),
epoll_fd_(do_epoll_create()),
-#if defined(BOOST_ASIO_HAS_TIMERFD)
- timer_fd_(timerfd_create(CLOCK_MONOTONIC, 0)),
-#else // defined(BOOST_ASIO_HAS_TIMERFD)
- timer_fd_(-1),
-#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+ timer_fd_(do_timerfd_create()),
interrupter_(),
shutdown_(false)
{
@@ -66,7 +62,8 @@ epoll_reactor::epoll_reactor(boost::asio::io_service& io_service)
epoll_reactor::~epoll_reactor()
{
- close(epoll_fd_);
+ if (epoll_fd_ != -1)
+ close(epoll_fd_);
if (timer_fd_ != -1)
close(timer_fd_);
}
@@ -88,6 +85,59 @@ void epoll_reactor::shutdown_service()
}
timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
+}
+
+void epoll_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ {
+ if (epoll_fd_ != -1)
+ ::close(epoll_fd_);
+ epoll_fd_ = -1;
+ epoll_fd_ = do_epoll_create();
+
+ if (timer_fd_ != -1)
+ ::close(timer_fd_);
+ timer_fd_ = -1;
+ timer_fd_ = do_timerfd_create();
+
+ interrupter_.recreate();
+
+ // Add the interrupter's descriptor to epoll.
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLIN | EPOLLERR | EPOLLET;
+ ev.data.ptr = &interrupter_;
+ epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
+ interrupter_.interrupt();
+
+ // Add the timer descriptor to epoll.
+ if (timer_fd_ != -1)
+ {
+ ev.events = EPOLLIN | EPOLLERR;
+ ev.data.ptr = &timer_fd_;
+ epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
+ }
+
+ update_timeout();
+
+ // Re-register all descriptors with epoll.
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ for (descriptor_state* state = registered_descriptors_.first();
+ state != 0; state = state->next_)
+ {
+ ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
+ ev.data.ptr = state;
+ int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, state->descriptor_, &ev);
+ if (result != 0)
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "epoll re-registration");
+ }
+ }
+ }
}
void epoll_reactor::init_task()
@@ -101,6 +151,7 @@ int epoll_reactor::register_descriptor(socket_type descriptor,
mutex::scoped_lock lock(registered_descriptors_mutex_);
descriptor_data = registered_descriptors_.alloc();
+ descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
lock.unlock();
@@ -115,6 +166,37 @@ int epoll_reactor::register_descriptor(socket_type descriptor,
return 0;
}
+int epoll_reactor::register_internal_descriptor(
+ int op_type, socket_type descriptor,
+ epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
+{
+ mutex::scoped_lock lock(registered_descriptors_mutex_);
+
+ descriptor_data = registered_descriptors_.alloc();
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+ descriptor_data->op_queue_[op_type].push(op);
+
+ lock.unlock();
+
+ epoll_event ev = { 0, { 0 } };
+ ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLOUT | EPOLLPRI | EPOLLET;
+ ev.data.ptr = descriptor_data;
+ int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
+ if (result != 0)
+ return errno;
+
+ return 0;
+}
+
+void epoll_reactor::move_descriptor(socket_type,
+ epoll_reactor::per_descriptor_data& target_descriptor_data,
+ epoll_reactor::per_descriptor_data& source_descriptor_data)
+{
+ target_descriptor_data = source_descriptor_data;
+ source_descriptor_data = 0;
+}
+
void epoll_reactor::start_op(int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data,
reactor_op* op, bool allow_speculative)
@@ -185,8 +267,8 @@ void epoll_reactor::cancel_ops(socket_type,
io_service_.post_deferred_completions(ops);
}
-void epoll_reactor::close_descriptor(socket_type,
- epoll_reactor::per_descriptor_data& descriptor_data)
+void epoll_reactor::deregister_descriptor(socket_type descriptor,
+ epoll_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
@@ -196,8 +278,16 @@ void epoll_reactor::close_descriptor(socket_type,
if (!descriptor_data->shutdown_)
{
- // Remove the descriptor from the set of known descriptors. The descriptor
- // will be automatically removed from the epoll set when it is closed.
+ if (closing)
+ {
+ // The descriptor will be automatically removed from the epoll set when
+ // it is closed.
+ }
+ else
+ {
+ epoll_event ev = { 0, { 0 } };
+ epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
+ }
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
@@ -210,6 +300,7 @@ void epoll_reactor::close_descriptor(socket_type,
}
}
+ descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
@@ -223,6 +314,36 @@ void epoll_reactor::close_descriptor(socket_type,
}
}
+void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
+ epoll_reactor::per_descriptor_data& descriptor_data)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+
+ if (!descriptor_data->shutdown_)
+ {
+ epoll_event ev = { 0, { 0 } };
+ epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ ops.push(descriptor_data->op_queue_[i]);
+
+ descriptor_data->descriptor_ = -1;
+ descriptor_data->shutdown_ = true;
+
+ descriptor_lock.unlock();
+
+ registered_descriptors_.free(descriptor_data);
+ descriptor_data = 0;
+
+ descriptors_lock.unlock();
+ }
+}
+
void epoll_reactor::run(bool block, op_queue<operation>& ops)
{
// Calculate a timeout only if timerfd is not used.
@@ -323,16 +444,53 @@ void epoll_reactor::interrupt()
int epoll_reactor::do_epoll_create()
{
- int fd = epoll_create(epoll_size);
+#if defined(EPOLL_CLOEXEC)
+ int fd = epoll_create1(EPOLL_CLOEXEC);
+#else // defined(EPOLL_CLOEXEC)
+ int fd = -1;
+ errno = EINVAL;
+#endif // defined(EPOLL_CLOEXEC)
+
+ if (fd == -1 && errno == EINVAL)
+ {
+ fd = epoll_create(epoll_size);
+ if (fd != -1)
+ ::fcntl(fd, F_SETFD, FD_CLOEXEC);
+ }
+
if (fd == -1)
{
boost::system::error_code ec(errno,
boost::asio::error::get_system_category());
boost::asio::detail::throw_error(ec, "epoll");
}
+
return fd;
}
+int epoll_reactor::do_timerfd_create()
+{
+#if defined(BOOST_ASIO_HAS_TIMERFD)
+# if defined(TFD_CLOEXEC)
+ int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
+# else // defined(TFD_CLOEXEC)
+ int fd = -1;
+ errno = EINVAL;
+# endif // defined(TFD_CLOEXEC)
+
+ if (fd == -1 && errno == EINVAL)
+ {
+ fd = timerfd_create(CLOCK_MONOTONIC, 0);
+ if (fd != -1)
+ ::fcntl(fd, F_SETFD, FD_CLOEXEC);
+ }
+
+ return fd;
+#else // defined(BOOST_ASIO_HAS_TIMERFD)
+ return -1;
+#endif // defined(BOOST_ASIO_HAS_TIMERFD)
+}
+
void epoll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/eventfd_select_interrupter.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/eventfd_select_interrupter.ipp
index d270b31..e931eff 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/eventfd_select_interrupter.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/eventfd_select_interrupter.ipp
@@ -40,24 +40,48 @@ namespace detail {
eventfd_select_interrupter::eventfd_select_interrupter()
{
+ open_descriptors();
+}
+
+void eventfd_select_interrupter::open_descriptors()
+{
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0);
-#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
- write_descriptor_ = read_descriptor_ = ::eventfd(0, 0);
-#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
if (read_descriptor_ != -1)
{
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
+ ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
}
- else
+#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
+# if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
+ write_descriptor_ = read_descriptor_ =
+ ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+# else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
+ errno = EINVAL;
+ write_descriptor_ = read_descriptor_ = -1;
+# endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
+ if (read_descriptor_ == -1 && errno == EINVAL)
+ {
+ write_descriptor_ = read_descriptor_ = ::eventfd(0, 0);
+ if (read_descriptor_ != -1)
+ {
+ ::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
+ ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
+ }
+ }
+#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8
+
+ if (read_descriptor_ == -1)
{
int pipe_fds[2];
if (pipe(pipe_fds) == 0)
{
read_descriptor_ = pipe_fds[0];
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
+ ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
write_descriptor_ = pipe_fds[1];
::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
+ ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
}
else
{
@@ -70,12 +94,27 @@ eventfd_select_interrupter::eventfd_select_interrupter()
eventfd_select_interrupter::~eventfd_select_interrupter()
{
+ close_descriptors();
+}
+
+void eventfd_select_interrupter::close_descriptors()
+{
if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_)
::close(write_descriptor_);
if (read_descriptor_ != -1)
::close(read_descriptor_);
}
+void eventfd_select_interrupter::recreate()
+{
+ close_descriptors();
+
+ write_descriptor_ = -1;
+ read_descriptor_ = -1;
+
+ open_descriptors();
+}
+
void eventfd_select_interrupter::interrupt()
{
uint64_t counter(1UL);
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/handler_tracking.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/handler_tracking.ipp
new file mode 100644
index 0000000..ec58195
--- /dev/null
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/handler_tracking.ipp
@@ -0,0 +1,299 @@
+//
+// detail/impl/handler_tracking.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
+#define BOOST_ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+
+#include <cstdarg>
+#include <cstdio>
+#include <boost/asio/detail/handler_tracking.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+#include <boost/date_time/posix_time/posix_time_types.hpp>
+#include <boost/asio/detail/pop_options.hpp>
+
+#if !defined(BOOST_WINDOWS)
+# include <unistd.h>
+#endif // !defined(BOOST_WINDOWS)
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+struct handler_tracking::tracking_state
+{
+ static_mutex mutex_;
+ boost::uint64_t next_id_;
+ tss_ptr<completion>* current_completion_;
+};
+
+handler_tracking::tracking_state* handler_tracking::get_state()
+{
+ static tracking_state state = { BOOST_ASIO_STATIC_MUTEX_INIT, 1, 0 };
+ return &state;
+}
+
+void handler_tracking::init()
+{
+ static tracking_state* state = get_state();
+
+ state->mutex_.init();
+
+ static_mutex::scoped_lock lock(state->mutex_);
+ if (state->current_completion_ == 0)
+ state->current_completion_ = new tss_ptr<completion>;
+}
+
+void handler_tracking::creation(handler_tracking::tracked_handler* h,
+ const char* object_type, void* object, const char* op_name)
+{
+ static tracking_state* state = get_state();
+
+ static_mutex::scoped_lock lock(state->mutex_);
+ h->id_ = state->next_id_++;
+ lock.unlock();
+
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ boost::uint64_t current_id = 0;
+ if (completion* current_completion = *state->current_completion_)
+ current_id = current_completion->id_;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ current_id, h->id_, object_type, object, op_name);
+}
+
+handler_tracking::completion::completion(handler_tracking::tracked_handler* h)
+ : id_(h->id_),
+ invoked_(false),
+ next_(*get_state()->current_completion_)
+{
+ *get_state()->current_completion_ = this;
+}
+
+handler_tracking::completion::~completion()
+{
+ if (id_)
+ {
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|%c%I64u|\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|%c%llu|\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ invoked_ ? '!' : '~', id_);
+ }
+
+ *get_state()->current_completion_ = next_;
+}
+
+void handler_tracking::completion::invocation_begin()
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000), id_);
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_begin(
+ const boost::system::error_code& ec)
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ id_, ec.category().name(), ec.value());
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_begin(
+ const boost::system::error_code& ec, std::size_t bytes_transferred)
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ id_, ec.category().name(), ec.value(),
+ static_cast<boost::uint64_t>(bytes_transferred));
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_begin(
+ const boost::system::error_code& ec, int signal_number)
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ id_, ec.category().name(), ec.value(), signal_number);
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_begin(
+ const boost::system::error_code& ec, const char* arg)
+{
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ id_, ec.category().name(), ec.value(), arg);
+
+ invoked_ = true;
+}
+
+void handler_tracking::completion::invocation_end()
+{
+ if (id_)
+ {
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|<%I64u|\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|<%llu|\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000), id_);
+
+ id_ = 0;
+ }
+}
+
+void handler_tracking::operation(const char* object_type,
+ void* object, const char* op_name)
+{
+ static tracking_state* state = get_state();
+
+ boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
+ boost::posix_time::time_duration now =
+ boost::posix_time::microsec_clock::universal_time() - epoch;
+
+ unsigned long long current_id = 0;
+ if (completion* current_completion = *state->current_completion_)
+ current_id = current_completion->id_;
+
+ write_line(
+#if defined(BOOST_WINDOWS)
+ "@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n",
+#else // defined(BOOST_WINDOWS)
+ "@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n",
+#endif // defined(BOOST_WINDOWS)
+ static_cast<boost::uint64_t>(now.total_seconds()),
+ static_cast<boost::uint64_t>(now.total_microseconds() % 1000000),
+ current_id, object_type, object, op_name);
+}
+
+void handler_tracking::write_line(const char* format, ...)
+{
+ using namespace std; // For sprintf (or equivalent).
+
+ va_list args;
+ va_start(args, format);
+
+ char line[256] = "";
+#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ int length = vsprintf_s(line, sizeof(line), format, args);
+#else // BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ int length = vsprintf(line, format, args);
+#endif // BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+
+ va_end(args);
+
+#if defined(BOOST_WINDOWS)
+ HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE);
+ DWORD bytes_written = 0;
+ ::WriteFile(stderr_handle, line, length, &bytes_written, 0);
+#else // defined(BOOST_WINDOWS)
+ ::write(STDERR_FILENO, line, length);
+#endif // defined(BOOST_WINDOWS)
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_ASIO_ENABLE_HANDLER_TRACKING)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
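
handler_tracking.ipp is a new file and is only compiled when BOOST_ASIO_ENABLE_HANDLER_TRACKING is defined; write_line() then sends one "@asio|<seconds>.<microseconds>|..." record per handler event to stderr, in the formats shown above. A small program to exercise it (the build line in the comment is illustrative, not taken from this repository):

    // Compile with handler tracking enabled, e.g.:
    //   g++ -DBOOST_ASIO_ENABLE_HANDLER_TRACKING example.cpp -lboost_system -pthread
    // Running it prints "@asio|..." creation/invocation records to stderr.
    #include <boost/asio.hpp>

    void on_wait(const boost::system::error_code& /*ec*/)
    {
    }

    int main()
    {
      boost::asio::io_service io_service;
      boost::asio::deadline_timer timer(io_service,
          boost::posix_time::seconds(1));
      timer.async_wait(&on_wait);
      io_service.run();
      return 0;
    }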
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.hpp b/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.hpp
index 779f272..4116997 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.hpp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.hpp
@@ -60,11 +60,12 @@ void kqueue_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
template <typename Time_Traits>
std::size_t kqueue_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
- typename timer_queue<Time_Traits>::per_timer_data& timer)
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
- std::size_t n = queue.cancel_timer(timer, ops);
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
io_service_.post_deferred_completions(ops);
return n;
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp
index 3ac9eae..f56c4c7 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp
@@ -75,6 +75,47 @@ void kqueue_reactor::shutdown_service()
}
timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
+}
+
+void kqueue_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ {
+ // The kqueue descriptor is automatically closed in the child.
+ kqueue_fd_ = -1;
+ kqueue_fd_ = do_kqueue_create();
+
+ interrupter_.recreate();
+
+ // Re-register all descriptors with kqueue.
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ for (descriptor_state* state = registered_descriptors_.first();
+ state != 0; state = state->next_)
+ {
+ struct kevent events[2];
+ int num_events = 0;
+
+ if (!state->op_queue_[read_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
+ else if (!state->op_queue_[except_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_READ, EV_ADD | EV_CLEAR, EV_OOBAND, 0, state);
+
+ if (!state->op_queue_[write_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);
+
+ if (num_events && ::kevent(kqueue_fd_, events, num_events, 0, 0, 0) == -1)
+ {
+ boost::system::error_code error(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(error);
+ }
+ }
+ }
}
void kqueue_reactor::init_task()
@@ -82,17 +123,58 @@ void kqueue_reactor::init_task()
io_service_.init_task();
}
-int kqueue_reactor::register_descriptor(socket_type,
+int kqueue_reactor::register_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
mutex::scoped_lock lock(registered_descriptors_mutex_);
descriptor_data = registered_descriptors_.alloc();
+ descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
return 0;
}
+int kqueue_reactor::register_internal_descriptor(
+ int op_type, socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
+{
+ mutex::scoped_lock lock(registered_descriptors_mutex_);
+
+ descriptor_data = registered_descriptors_.alloc();
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+ descriptor_data->op_queue_[op_type].push(op);
+
+ struct kevent event;
+ switch (op_type)
+ {
+ case read_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ break;
+ case write_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_WRITE,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ break;
+ case except_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, EV_OOBAND, 0, descriptor_data);
+ break;
+ }
+ ::kevent(kqueue_fd_, &event, 1, 0, 0, 0);
+
+ return 0;
+}
+
+void kqueue_reactor::move_descriptor(socket_type,
+ kqueue_reactor::per_descriptor_data& target_descriptor_data,
+ kqueue_reactor::per_descriptor_data& source_descriptor_data)
+{
+ target_descriptor_data = source_descriptor_data;
+ source_descriptor_data = 0;
+}
+
void kqueue_reactor::start_op(int op_type, socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data,
reactor_op* op, bool allow_speculative)
@@ -187,8 +269,8 @@ void kqueue_reactor::cancel_ops(socket_type,
io_service_.post_deferred_completions(ops);
}
-void kqueue_reactor::close_descriptor(socket_type,
- kqueue_reactor::per_descriptor_data& descriptor_data)
+void kqueue_reactor::deregister_descriptor(socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
@@ -198,8 +280,20 @@ void kqueue_reactor::close_descriptor(socket_type,
if (!descriptor_data->shutdown_)
{
- // Remove the descriptor from the set of known descriptors. The descriptor
- // will be automatically removed from the kqueue set when it is closed.
+ if (closing)
+ {
+ // The descriptor will be automatically removed from the kqueue when it
+ // is closed.
+ }
+ else
+ {
+ struct kevent events[2];
+ BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
+ EVFILT_READ, EV_DELETE, 0, 0, 0);
+ BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
+ EVFILT_WRITE, EV_DELETE, 0, 0, 0);
+ ::kevent(kqueue_fd_, events, 2, 0, 0, 0);
+ }
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
@@ -212,6 +306,7 @@ void kqueue_reactor::close_descriptor(socket_type,
}
}
+ descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
@@ -225,6 +320,40 @@ void kqueue_reactor::close_descriptor(socket_type,
}
}
+void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+
+ if (!descriptor_data->shutdown_)
+ {
+ struct kevent events[2];
+ BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
+ EVFILT_READ, EV_DELETE, 0, 0, 0);
+ BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
+ EVFILT_WRITE, EV_DELETE, 0, 0, 0);
+ ::kevent(kqueue_fd_, events, 2, 0, 0, 0);
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ ops.push(descriptor_data->op_queue_[i]);
+
+ descriptor_data->descriptor_ = -1;
+ descriptor_data->shutdown_ = true;
+
+ descriptor_lock.unlock();
+
+ registered_descriptors_.free(descriptor_data);
+ descriptor_data = 0;
+
+ descriptors_lock.unlock();
+ }
+}
+
void kqueue_reactor::run(bool block, op_queue<operation>& ops)
{
mutex::scoped_lock lock(mutex_);
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/pipe_select_interrupter.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/pipe_select_interrupter.ipp
index 9a0a872..59aa053 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/pipe_select_interrupter.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/pipe_select_interrupter.ipp
@@ -38,6 +38,11 @@ namespace detail {
pipe_select_interrupter::pipe_select_interrupter()
{
+ open_descriptors();
+}
+
+void pipe_select_interrupter::open_descriptors()
+{
int pipe_fds[2];
if (pipe(pipe_fds) == 0)
{
@@ -45,6 +50,11 @@ pipe_select_interrupter::pipe_select_interrupter()
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
write_descriptor_ = pipe_fds[1];
::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
+
+#if defined(FD_CLOEXEC)
+ ::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
+ ::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
+#endif // defined(FD_CLOEXEC)
}
else
{
@@ -56,12 +66,27 @@ pipe_select_interrupter::pipe_select_interrupter()
pipe_select_interrupter::~pipe_select_interrupter()
{
+ close_descriptors();
+}
+
+void pipe_select_interrupter::close_descriptors()
+{
if (read_descriptor_ != -1)
::close(read_descriptor_);
if (write_descriptor_ != -1)
::close(write_descriptor_);
}
+void pipe_select_interrupter::recreate()
+{
+ close_descriptors();
+
+ write_descriptor_ = -1;
+ read_descriptor_ = -1;
+
+ open_descriptors();
+}
+
void pipe_select_interrupter::interrupt()
{
char byte = 0;
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/reactive_descriptor_service.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/reactive_descriptor_service.ipp
index a1ee09a..38d42be 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/reactive_descriptor_service.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/reactive_descriptor_service.ipp
@@ -46,11 +46,47 @@ void reactive_descriptor_service::construct(
impl.state_ = 0;
}
+void reactive_descriptor_service::move_construct(
+ reactive_descriptor_service::implementation_type& impl,
+ reactive_descriptor_service::implementation_type& other_impl)
+{
+ impl.descriptor_ = other_impl.descriptor_;
+ other_impl.descriptor_ = -1;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ reactor_.move_descriptor(impl.descriptor_,
+ impl.reactor_data_, other_impl.reactor_data_);
+}
+
+void reactive_descriptor_service::move_assign(
+ reactive_descriptor_service::implementation_type& impl,
+ reactive_descriptor_service& other_service,
+ reactive_descriptor_service::implementation_type& other_impl)
+{
+ destroy(impl);
+
+ impl.descriptor_ = other_impl.descriptor_;
+ other_impl.descriptor_ = -1;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ other_service.reactor_.move_descriptor(impl.descriptor_,
+ impl.reactor_data_, other_impl.reactor_data_);
+}
+
void reactive_descriptor_service::destroy(
reactive_descriptor_service::implementation_type& impl)
{
if (is_open(impl))
- reactor_.close_descriptor(impl.descriptor_, impl.reactor_data_);
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "close"));
+
+ reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
+ (impl.state_ & descriptor_ops::possible_dup) == 0);
+ }
boost::system::error_code ignored_ec;
descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec);
@@ -58,7 +94,7 @@ void reactive_descriptor_service::destroy(
boost::system::error_code reactive_descriptor_service::assign(
reactive_descriptor_service::implementation_type& impl,
- const native_type& native_descriptor, boost::system::error_code& ec)
+ const native_handle_type& native_descriptor, boost::system::error_code& ec)
{
if (is_open(impl))
{
@@ -75,7 +111,7 @@ boost::system::error_code reactive_descriptor_service::assign(
}
impl.descriptor_ = native_descriptor;
- impl.state_ = 0;
+ impl.state_ = descriptor_ops::possible_dup;
ec = boost::system::error_code();
return ec;
}
@@ -85,14 +121,43 @@ boost::system::error_code reactive_descriptor_service::close(
boost::system::error_code& ec)
{
if (is_open(impl))
- reactor_.close_descriptor(impl.descriptor_, impl.reactor_data_);
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "close"));
- if (descriptor_ops::close(impl.descriptor_, impl.state_, ec) == 0)
- construct(impl);
+ reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
+ (impl.state_ & descriptor_ops::possible_dup) == 0);
+ }
+
+ descriptor_ops::close(impl.descriptor_, impl.state_, ec);
+
+ // The descriptor is closed by the OS even if close() returns an error.
+ //
+ // (Actually, POSIX says the state of the descriptor is unspecified. On
+ // Linux the descriptor is apparently closed anyway; e.g. see
+ // http://lkml.org/lkml/2005/9/10/129
+ // We'll just have to assume that other OSes follow the same behaviour.)
+ construct(impl);
return ec;
}
+reactive_descriptor_service::native_handle_type
+reactive_descriptor_service::release(
+ reactive_descriptor_service::implementation_type& impl)
+{
+ native_handle_type descriptor = impl.descriptor_;
+
+ if (is_open(impl))
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "release"));
+
+ reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false);
+ construct(impl);
+ }
+
+ return descriptor;
+}
+
boost::system::error_code reactive_descriptor_service::cancel(
reactive_descriptor_service::implementation_type& impl,
boost::system::error_code& ec)
@@ -103,6 +168,8 @@ boost::system::error_code reactive_descriptor_service::cancel(
return ec;
}
+ BOOST_ASIO_HANDLER_OPERATION(("descriptor", &impl, "cancel"));
+
reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_);
ec = boost::system::error_code();
return ec;
@@ -110,16 +177,16 @@ boost::system::error_code reactive_descriptor_service::cancel(
void reactive_descriptor_service::start_op(
reactive_descriptor_service::implementation_type& impl,
- int op_type, reactor_op* op, bool non_blocking, bool noop)
+ int op_type, reactor_op* op, bool is_non_blocking, bool noop)
{
if (!noop)
{
if ((impl.state_ & descriptor_ops::non_blocking) ||
descriptor_ops::set_internal_non_blocking(
- impl.descriptor_, impl.state_, op->ec_))
+ impl.descriptor_, impl.state_, true, op->ec_))
{
reactor_.start_op(op_type, impl.descriptor_,
- impl.reactor_data_, op, non_blocking);
+ impl.reactor_data_, op, is_non_blocking);
return;
}
}
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/reactive_serial_port_service.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/reactive_serial_port_service.ipp
index ece61d3..f97946f 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/reactive_serial_port_service.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/reactive_serial_port_service.ipp
@@ -113,7 +113,7 @@ boost::system::error_code reactive_serial_port_service::do_set_option(
termios ios;
errno = 0;
descriptor_ops::error_wrapper(::tcgetattr(
- descriptor_service_.native(impl), &ios), ec);
+ descriptor_service_.native_handle(impl), &ios), ec);
if (ec)
return ec;
@@ -122,7 +122,7 @@ boost::system::error_code reactive_serial_port_service::do_set_option(
errno = 0;
descriptor_ops::error_wrapper(::tcsetattr(
- descriptor_service_.native(impl), TCSANOW, &ios), ec);
+ descriptor_service_.native_handle(impl), TCSANOW, &ios), ec);
return ec;
}
@@ -134,7 +134,7 @@ boost::system::error_code reactive_serial_port_service::do_get_option(
termios ios;
errno = 0;
descriptor_ops::error_wrapper(::tcgetattr(
- descriptor_service_.native(impl), &ios), ec);
+ descriptor_service_.native_handle(impl), &ios), ec);
if (ec)
return ec;
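
The native() -> native_handle() renames in this file follow the wider native_type -> native_handle_type change seen throughout the patch; at the public API level the old accessors are deprecated from 1.47 onwards. A brief sketch, assuming the documented 1.47 socket interface:

    #include <boost/asio.hpp>

    int main()
    {
      boost::asio::io_service io_service;
      boost::asio::ip::tcp::socket socket(io_service, boost::asio::ip::tcp::v4());

      // New spelling, as used by the service code above:
      boost::asio::ip::tcp::socket::native_handle_type fd = socket.native_handle();

      // Deprecated spelling kept for source compatibility:
      //   boost::asio::ip::tcp::socket::native_type old_fd = socket.native();

      (void)fd;
      return 0;
    }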
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/reactive_socket_service_base.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/reactive_socket_service_base.ipp
index 31f5bc4..0936e92 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/reactive_socket_service_base.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/reactive_socket_service_base.ipp
@@ -45,12 +45,46 @@ void reactive_socket_service_base::construct(
impl.state_ = 0;
}
+void reactive_socket_service_base::base_move_construct(
+ reactive_socket_service_base::base_implementation_type& impl,
+ reactive_socket_service_base::base_implementation_type& other_impl)
+{
+ impl.socket_ = other_impl.socket_;
+ other_impl.socket_ = invalid_socket;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ reactor_.move_descriptor(impl.socket_,
+ impl.reactor_data_, other_impl.reactor_data_);
+}
+
+void reactive_socket_service_base::base_move_assign(
+ reactive_socket_service_base::base_implementation_type& impl,
+ reactive_socket_service_base& other_service,
+ reactive_socket_service_base::base_implementation_type& other_impl)
+{
+ destroy(impl);
+
+ impl.socket_ = other_impl.socket_;
+ other_impl.socket_ = invalid_socket;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ other_service.reactor_.move_descriptor(impl.socket_,
+ impl.reactor_data_, other_impl.reactor_data_);
+}
+
void reactive_socket_service_base::destroy(
reactive_socket_service_base::base_implementation_type& impl)
{
if (impl.socket_ != invalid_socket)
{
- reactor_.close_descriptor(impl.socket_, impl.reactor_data_);
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+
+ reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
+ (impl.state_ & socket_ops::possible_dup) == 0);
boost::system::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
@@ -62,10 +96,24 @@ boost::system::error_code reactive_socket_service_base::close(
boost::system::error_code& ec)
{
if (is_open(impl))
- reactor_.close_descriptor(impl.socket_, impl.reactor_data_);
+ {
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
- if (socket_ops::close(impl.socket_, impl.state_, false, ec) == 0)
- construct(impl);
+ reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
+ (impl.state_ & socket_ops::possible_dup) == 0);
+ }
+
+ socket_ops::close(impl.socket_, impl.state_, false, ec);
+
+ // The descriptor is closed by the OS even if close() returns an error.
+ //
+ // (Actually, POSIX says the state of the descriptor is unspecified. On
+ // Linux the descriptor is apparently closed anyway; e.g. see
+ // http://lkml.org/lkml/2005/9/10/129
+ // We'll just have to assume that other OSes follow the same behaviour. The
+ // known exception is when Windows's closesocket() function fails with
+ // WSAEWOULDBLOCK, but this case is handled inside socket_ops::close().
+ construct(impl);
return ec;
}
@@ -80,6 +128,8 @@ boost::system::error_code reactive_socket_service_base::cancel(
return ec;
}
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "cancel"));
+
reactor_.cancel_ops(impl.socket_, impl.reactor_data_);
ec = boost::system::error_code();
return ec;
@@ -119,7 +169,7 @@ boost::system::error_code reactive_socket_service_base::do_open(
boost::system::error_code reactive_socket_service_base::do_assign(
reactive_socket_service_base::base_implementation_type& impl, int type,
- const reactive_socket_service_base::native_type& native_socket,
+ const reactive_socket_service_base::native_handle_type& native_socket,
boost::system::error_code& ec)
{
if (is_open(impl))
@@ -143,22 +193,23 @@ boost::system::error_code reactive_socket_service_base::do_assign(
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
+ impl.state_ |= socket_ops::possible_dup;
ec = boost::system::error_code();
return ec;
}
void reactive_socket_service_base::start_op(
reactive_socket_service_base::base_implementation_type& impl,
- int op_type, reactor_op* op, bool non_blocking, bool noop)
+ int op_type, reactor_op* op, bool is_non_blocking, bool noop)
{
if (!noop)
{
if ((impl.state_ & socket_ops::non_blocking)
|| socket_ops::set_internal_non_blocking(
- impl.socket_, impl.state_, op->ec_))
+ impl.socket_, impl.state_, true, op->ec_))
{
reactor_.start_op(op_type, impl.socket_,
- impl.reactor_data_, op, non_blocking);
+ impl.reactor_data_, op, is_non_blocking);
return;
}
}
@@ -185,7 +236,7 @@ void reactive_socket_service_base::start_connect_op(
{
if ((impl.state_ & socket_ops::non_blocking)
|| socket_ops::set_internal_non_blocking(
- impl.socket_, impl.state_, op->ec_))
+ impl.socket_, impl.state_, true, op->ec_))
{
if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
{
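
base_move_construct()/base_move_assign() and the reactors' move_descriptor() added above exist so that socket objects can be moved; with a C++0x/C++11 compiler a 1.47 socket can be transferred with std::move. A minimal sketch:

    // Sketch of socket move support (requires a C++11-capable compiler; the
    // move is routed through base_move_construct()/move_descriptor() above).
    #include <boost/asio.hpp>
    #include <utility>

    int main()
    {
      boost::asio::io_service io_service;
      boost::asio::ip::tcp::socket socket1(io_service, boost::asio::ip::tcp::v4());

      // socket1's descriptor and reactor registration are handed to socket2;
      // socket1 is left closed (invalid_socket).
      boost::asio::ip::tcp::socket socket2(std::move(socket1));

      // Move-assignment takes the base_move_assign() path.
      boost::asio::ip::tcp::socket socket3(io_service);
      socket3 = std::move(socket2);

      return 0;
    }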
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/resolver_service_base.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/resolver_service_base.ipp
index e456bb9..2418807 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/resolver_service_base.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/resolver_service_base.ipp
@@ -53,10 +53,10 @@ resolver_service_base::~resolver_service_base()
void resolver_service_base::shutdown_service()
{
work_.reset();
- if (work_io_service_)
+ if (work_io_service_.get())
{
work_io_service_->stop();
- if (work_thread_)
+ if (work_thread_.get())
{
work_thread_->join();
work_thread_.reset();
@@ -65,6 +65,25 @@ void resolver_service_base::shutdown_service()
}
}
+void resolver_service_base::fork_service(
+ boost::asio::io_service::fork_event fork_ev)
+{
+ if (work_thread_.get())
+ {
+ if (fork_ev == boost::asio::io_service::fork_prepare)
+ {
+ work_io_service_->stop();
+ work_thread_->join();
+ }
+ else
+ {
+ work_io_service_->reset();
+ work_thread_.reset(new boost::asio::detail::thread(
+ work_io_service_runner(*work_io_service_)));
+ }
+ }
+}
+
void resolver_service_base::construct(
resolver_service_base::implementation_type& impl)
{
@@ -72,13 +91,18 @@ void resolver_service_base::construct(
}
void resolver_service_base::destroy(
- resolver_service_base::implementation_type&)
+ resolver_service_base::implementation_type& impl)
{
+ BOOST_ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel"));
+
+ impl.reset();
}
void resolver_service_base::cancel(
resolver_service_base::implementation_type& impl)
{
+ BOOST_ASIO_HANDLER_OPERATION(("resolver", &impl, "cancel"));
+
impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
}
@@ -92,7 +116,7 @@ void resolver_service_base::start_resolve_op(operation* op)
void resolver_service_base::start_work_thread()
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
- if (!work_thread_)
+ if (!work_thread_.get())
{
work_thread_.reset(new boost::asio::detail::thread(
work_io_service_runner(*work_io_service_)));
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.hpp b/3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.hpp
index 3773bfb..5ba1806 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.hpp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.hpp
@@ -62,11 +62,12 @@ void select_reactor::schedule_timer(timer_queue<Time_Traits>& queue,
template <typename Time_Traits>
std::size_t select_reactor::cancel_timer(timer_queue<Time_Traits>& queue,
- typename timer_queue<Time_Traits>::per_timer_data& timer)
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
- std::size_t n = queue.cancel_timer(timer, ops);
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
io_service_.post_deferred_completions(ops);
return n;
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.ipp
index 8fcf68e..7117353 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/select_reactor.ipp
@@ -82,6 +82,14 @@ void select_reactor::shutdown_service()
op_queue_[i].get_all_operations(ops);
timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
+}
+
+void select_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ interrupter_.recreate();
}
void select_reactor::init_task()
@@ -95,6 +103,24 @@ int select_reactor::register_descriptor(socket_type,
return 0;
}
+int select_reactor::register_internal_descriptor(
+ int op_type, socket_type descriptor,
+ select_reactor::per_descriptor_data&, reactor_op* op)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+
+ op_queue_[op_type].enqueue_operation(descriptor, op);
+ interrupter_.interrupt();
+
+ return 0;
+}
+
+void select_reactor::move_descriptor(socket_type,
+ select_reactor::per_descriptor_data&,
+ select_reactor::per_descriptor_data&)
+{
+}
+
void select_reactor::start_op(int op_type, socket_type descriptor,
select_reactor::per_descriptor_data&, reactor_op* op, bool)
{
@@ -119,13 +145,22 @@ void select_reactor::cancel_ops(socket_type descriptor,
cancel_ops_unlocked(descriptor, boost::asio::error::operation_aborted);
}
-void select_reactor::close_descriptor(socket_type descriptor,
- select_reactor::per_descriptor_data&)
+void select_reactor::deregister_descriptor(socket_type descriptor,
+ select_reactor::per_descriptor_data&, bool)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, boost::asio::error::operation_aborted);
}
+void select_reactor::deregister_internal_descriptor(
+ socket_type descriptor, select_reactor::per_descriptor_data&)
+{
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ op_queue_[i].cancel_operations(descriptor, ops);
+}
+
void select_reactor::run(bool block, op_queue<operation>& ops)
{
boost::asio::detail::mutex::scoped_lock lock(mutex_);
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/service_registry.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/service_registry.ipp
index c2f07ec..8c80f6c 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/service_registry.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/service_registry.ipp
@@ -17,6 +17,7 @@
#include <boost/asio/detail/config.hpp>
#include <boost/throw_exception.hpp>
+#include <vector>
#include <boost/asio/detail/service_registry.hpp>
#include <boost/asio/detail/push_options.hpp>
@@ -52,6 +53,35 @@ service_registry::~service_registry()
}
}
+void service_registry::notify_fork(boost::asio::io_service::fork_event fork_ev)
+{
+ // Make a copy of all of the services while holding the lock. We don't want
+ // to hold the lock while calling into each service, as it may try to call
+ // back into this class.
+ std::vector<boost::asio::io_service::service*> services;
+ {
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ boost::asio::io_service::service* service = first_service_;
+ while (service)
+ {
+ services.push_back(service);
+ service = service->next_;
+ }
+ }
+
+ // If processing the fork_prepare event, we want to go in reverse order of
+ // service registration, which happens to be the existing order of the
+ // services in the vector. For the other events we want to go in the other
+ // direction.
+ std::size_t num_services = services.size();
+ if (fork_ev == boost::asio::io_service::fork_prepare)
+ for (std::size_t i = 0; i < num_services; ++i)
+ services[i]->fork_service(fork_ev);
+ else
+ for (std::size_t i = num_services; i > 0; --i)
+ services[i - 1]->fork_service(fork_ev);
+}
+
void service_registry::init_key(boost::asio::io_service::service::key& key,
const boost::asio::io_service::id& id)
{
@@ -121,7 +151,7 @@ void service_registry::do_add_service(
const boost::asio::io_service::service::key& key,
boost::asio::io_service::service* new_service)
{
- if (&owner_ != &new_service->io_service())
+ if (&owner_ != &new_service->get_io_service())
boost::throw_exception(invalid_service_owner());
boost::asio::detail::mutex::scoped_lock lock(mutex_);
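notify_fork() above is reached through io_service::notify_fork(), which applications call on each side of a fork so that every registered service (the reactors, the resolver's work thread, the signal pipe added below) can tear down and rebuild its internal state in the order described in the comments. A minimal POSIX sketch, assuming an io_service that already has work associated with it:

#include <boost/asio.hpp>
#include <unistd.h>

void run_in_child(boost::asio::io_service& io_service)
{
  // Services see fork_prepare in reverse registration order, as noted above.
  io_service.notify_fork(boost::asio::io_service::fork_prepare);

  if (fork() == 0)
  {
    // Child: internal descriptors such as the signal pipe are recreated.
    io_service.notify_fork(boost::asio::io_service::fork_child);
    io_service.run();
  }
  else
  {
    // Parent: services re-register their existing descriptors.
    io_service.notify_fork(boost::asio::io_service::fork_parent);
  }
}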
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/signal_set_service.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/signal_set_service.ipp
new file mode 100644
index 0000000..c1dedd4
--- /dev/null
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/signal_set_service.ipp
@@ -0,0 +1,592 @@
+//
+// detail/impl/signal_set_service.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
+#define BOOST_ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#include <cstring>
+#include <boost/asio/detail/reactor.hpp>
+#include <boost/asio/detail/signal_blocker.hpp>
+#include <boost/asio/detail/signal_set_service.hpp>
+#include <boost/asio/detail/static_mutex.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+struct signal_state
+{
+ // Mutex used for protecting global state.
+ static_mutex mutex_;
+
+ // The read end of the pipe used for signal notifications.
+ int read_descriptor_;
+
+ // The write end of the pipe used for signal notifications.
+ int write_descriptor_;
+
+ // Whether the signal state has been prepared for a fork.
+ bool fork_prepared_;
+
+ // The head of a linked list of all signal_set_service instances.
+ class signal_set_service* service_list_;
+
+ // A count of the number of objects that are registered for each signal.
+ std::size_t registration_count_[max_signal_number];
+};
+
+signal_state* get_signal_state()
+{
+ static signal_state state = {
+ BOOST_ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0, { 0 } };
+ return &state;
+}
+
+void asio_signal_handler(int signal_number)
+{
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ signal_set_service::deliver_signal(signal_number);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ int saved_errno = errno;
+ signal_state* state = get_signal_state();
+ int result = ::write(state->write_descriptor_,
+ &signal_number, sizeof(signal_number));
+ (void)result;
+ errno = saved_errno;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+#if defined(BOOST_ASIO_HAS_SIGNAL) && !defined(BOOST_ASIO_HAS_SIGACTION)
+ signal(signal_number, asio_signal_handler);
+#endif // defined(BOOST_ASIO_HAS_SIGNAL) && !defined(BOOST_ASIO_HAS_SIGACTION)
+}
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+class signal_set_service::pipe_read_op : public reactor_op
+{
+public:
+ pipe_read_op()
+ : reactor_op(&pipe_read_op::do_perform, pipe_read_op::do_complete)
+ {
+ }
+
+ static bool do_perform(reactor_op*)
+ {
+ signal_state* state = get_signal_state();
+
+ int fd = state->read_descriptor_;
+ int signal_number = 0;
+ while (::read(fd, &signal_number, sizeof(int)) == sizeof(int))
+ if (signal_number >= 0 && signal_number < max_signal_number)
+ signal_set_service::deliver_signal(signal_number);
+
+ return false;
+ }
+
+ static void do_complete(io_service_impl* /*owner*/, operation* base,
+ boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
+ {
+ pipe_read_op* o(static_cast<pipe_read_op*>(base));
+ delete o;
+ }
+};
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+signal_set_service::signal_set_service(
+ boost::asio::io_service& io_service)
+ : io_service_(boost::asio::use_service<io_service_impl>(io_service)),
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ reactor_(boost::asio::use_service<reactor>(io_service)),
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ next_(0),
+ prev_(0)
+{
+ get_signal_state()->mutex_.init();
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ reactor_.init_task();
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+ for (int i = 0; i < max_signal_number; ++i)
+ registrations_[i] = 0;
+
+ add_service(this);
+}
+
+signal_set_service::~signal_set_service()
+{
+ remove_service(this);
+}
+
+void signal_set_service::shutdown_service()
+{
+ remove_service(this);
+
+ op_queue<operation> ops;
+
+ for (int i = 0; i < max_signal_number; ++i)
+ {
+ registration* reg = registrations_[i];
+ while (reg)
+ {
+ ops.push(*reg->queue_);
+ reg = reg->next_in_table_;
+ }
+ }
+
+ io_service_.abandon_operations(ops);
+}
+
+void signal_set_service::fork_service(
+ boost::asio::io_service::fork_event fork_ev)
+{
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ switch (fork_ev)
+ {
+ case boost::asio::io_service::fork_prepare:
+ reactor_.deregister_internal_descriptor(
+ state->read_descriptor_, reactor_data_);
+ state->fork_prepared_ = true;
+ break;
+ case boost::asio::io_service::fork_parent:
+ state->fork_prepared_ = false;
+ reactor_.register_internal_descriptor(reactor::read_op,
+ state->read_descriptor_, reactor_data_, new pipe_read_op);
+ break;
+ case boost::asio::io_service::fork_child:
+ if (state->fork_prepared_)
+ {
+ boost::asio::detail::signal_blocker blocker;
+ close_descriptors();
+ open_descriptors();
+ state->fork_prepared_ = false;
+ }
+ reactor_.register_internal_descriptor(reactor::read_op,
+ state->read_descriptor_, reactor_data_, new pipe_read_op);
+ break;
+ default:
+ break;
+ }
+#else // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ (void)fork_ev;
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+}
+
+void signal_set_service::construct(
+ signal_set_service::implementation_type& impl)
+{
+ impl.signals_ = 0;
+}
+
+void signal_set_service::destroy(
+ signal_set_service::implementation_type& impl)
+{
+ boost::system::error_code ignored_ec;
+ clear(impl, ignored_ec);
+ cancel(impl, ignored_ec);
+}
+
+boost::system::error_code signal_set_service::add(
+ signal_set_service::implementation_type& impl,
+ int signal_number, boost::system::error_code& ec)
+{
+ // Check that the signal number is valid.
+ if (signal_number < 0 || signal_number > max_signal_number)
+ {
+ ec = boost::asio::error::invalid_argument;
+ return ec;
+ }
+
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ // Find the appropriate place to insert the registration.
+ registration** insertion_point = &impl.signals_;
+ registration* next = impl.signals_;
+ while (next && next->signal_number_ < signal_number)
+ {
+ insertion_point = &next->next_in_set_;
+ next = next->next_in_set_;
+ }
+
+ // Only do something if the signal is not already registered.
+ if (next == 0 || next->signal_number_ != signal_number)
+ {
+ registration* new_registration = new registration;
+
+#if defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+ // Register for the signal if we're the first.
+ if (state->registration_count_[signal_number] == 0)
+ {
+# if defined(BOOST_ASIO_HAS_SIGACTION)
+ using namespace std; // For memset.
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = asio_signal_handler;
+ sigfillset(&sa.sa_mask);
+ if (::sigaction(signal_number, &sa, 0) == -1)
+# else // defined(BOOST_ASIO_HAS_SIGACTION)
+ if (::signal(signal_number, asio_signal_handler) == SIG_ERR)
+# endif // defined(BOOST_ASIO_HAS_SIGACTION)
+ {
+# if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::asio::error::invalid_argument;
+# else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::system::error_code(errno,
+ boost::asio::error::get_system_category());
+# endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ delete new_registration;
+ return ec;
+ }
+ }
+#endif // defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+
+ // Record the new registration in the set.
+ new_registration->signal_number_ = signal_number;
+ new_registration->queue_ = &impl.queue_;
+ new_registration->next_in_set_ = next;
+ *insertion_point = new_registration;
+
+ // Insert registration into the registration table.
+ new_registration->next_in_table_ = registrations_[signal_number];
+ if (registrations_[signal_number])
+ registrations_[signal_number]->prev_in_table_ = new_registration;
+ registrations_[signal_number] = new_registration;
+
+ ++state->registration_count_[signal_number];
+ }
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code signal_set_service::remove(
+ signal_set_service::implementation_type& impl,
+ int signal_number, boost::system::error_code& ec)
+{
+ // Check that the signal number is valid.
+ if (signal_number < 0 || signal_number > max_signal_number)
+ {
+ ec = boost::asio::error::invalid_argument;
+ return ec;
+ }
+
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ // Find the signal number in the list of registrations.
+ registration** deletion_point = &impl.signals_;
+ registration* reg = impl.signals_;
+ while (reg && reg->signal_number_ < signal_number)
+ {
+ deletion_point = &reg->next_in_set_;
+ reg = reg->next_in_set_;
+ }
+
+ if (reg != 0 && reg->signal_number_ == signal_number)
+ {
+#if defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+ // Set signal handler back to the default if we're the last.
+ if (state->registration_count_[signal_number] == 1)
+ {
+# if defined(BOOST_ASIO_HAS_SIGACTION)
+ using namespace std; // For memset.
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ if (::sigaction(signal_number, &sa, 0) == -1)
+# else // defined(BOOST_ASIO_HAS_SIGACTION)
+ if (::signal(signal_number, SIG_DFL) == SIG_ERR)
+# endif // defined(BOOST_ASIO_HAS_SIGACTION)
+ {
+# if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::asio::error::invalid_argument;
+# else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::system::error_code(errno,
+ boost::asio::error::get_system_category());
+# endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ return ec;
+ }
+ }
+#endif // defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+
+ // Remove the registration from the set.
+ *deletion_point = reg->next_in_set_;
+
+ // Remove the registration from the registration table.
+ if (registrations_[signal_number] == reg)
+ registrations_[signal_number] = reg->next_in_table_;
+ if (reg->prev_in_table_)
+ reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
+ if (reg->next_in_table_)
+ reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
+
+ --state->registration_count_[signal_number];
+
+ delete reg;
+ }
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code signal_set_service::clear(
+ signal_set_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ while (registration* reg = impl.signals_)
+ {
+#if defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+ // Set signal handler back to the default if we're the last.
+ if (state->registration_count_[reg->signal_number_] == 1)
+ {
+# if defined(BOOST_ASIO_HAS_SIGACTION)
+ using namespace std; // For memset.
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+ if (::sigaction(reg->signal_number_, &sa, 0) == -1)
+# else // defined(BOOST_ASIO_HAS_SIGACTION)
+ if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR)
+# endif // defined(BOOST_ASIO_HAS_SIGACTION)
+ {
+# if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::asio::error::invalid_argument;
+# else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ec = boost::system::error_code(errno,
+ boost::asio::error::get_system_category());
+# endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ return ec;
+ }
+ }
+#endif // defined(BOOST_ASIO_HAS_SIGNAL) || defined(BOOST_ASIO_HAS_SIGACTION)
+
+ // Remove the registration from the registration table.
+ if (registrations_[reg->signal_number_] == reg)
+ registrations_[reg->signal_number_] = reg->next_in_table_;
+ if (reg->prev_in_table_)
+ reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
+ if (reg->next_in_table_)
+ reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
+
+ --state->registration_count_[reg->signal_number_];
+
+ impl.signals_ = reg->next_in_set_;
+ delete reg;
+ }
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+boost::system::error_code signal_set_service::cancel(
+ signal_set_service::implementation_type& impl,
+ boost::system::error_code& ec)
+{
+ BOOST_ASIO_HANDLER_OPERATION(("signal_set", &impl, "cancel"));
+
+ op_queue<operation> ops;
+ {
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ while (signal_op* op = impl.queue_.front())
+ {
+ op->ec_ = boost::asio::error::operation_aborted;
+ impl.queue_.pop();
+ ops.push(op);
+ }
+ }
+
+ io_service_.post_deferred_completions(ops);
+
+ ec = boost::system::error_code();
+ return ec;
+}
+
+void signal_set_service::deliver_signal(int signal_number)
+{
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ signal_set_service* service = state->service_list_;
+ while (service)
+ {
+ op_queue<operation> ops;
+
+ registration* reg = service->registrations_[signal_number];
+ while (reg)
+ {
+ if (reg->queue_->empty())
+ {
+ ++reg->undelivered_;
+ }
+ else
+ {
+ while (signal_op* op = reg->queue_->front())
+ {
+ op->signal_number_ = signal_number;
+ reg->queue_->pop();
+ ops.push(op);
+ }
+ }
+
+ reg = reg->next_in_table_;
+ }
+
+ service->io_service_.post_deferred_completions(ops);
+
+ service = service->next_;
+ }
+}
+
+void signal_set_service::add_service(signal_set_service* service)
+{
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ // If this is the first service to be created, open a new pipe.
+ if (state->service_list_ == 0)
+ open_descriptors();
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+ // Insert service into linked list of all services.
+ service->next_ = state->service_list_;
+ service->prev_ = 0;
+ if (state->service_list_)
+ state->service_list_->prev_ = service;
+ state->service_list_ = service;
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ // Register for pipe readiness notifications.
+ service->reactor_.register_internal_descriptor(reactor::read_op,
+ state->read_descriptor_, service->reactor_data_, new pipe_read_op);
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+}
+
+void signal_set_service::remove_service(signal_set_service* service)
+{
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ if (service->next_ || service->prev_ || state->service_list_ == service)
+ {
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ // Disable the pipe readiness notifications.
+ service->reactor_.deregister_descriptor(
+ state->read_descriptor_, service->reactor_data_, false);
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+
+ // Remove service from linked list of all services.
+ if (state->service_list_ == service)
+ state->service_list_ = service->next_;
+ if (service->prev_)
+ service->prev_->next_ = service->next_;
+ if (service->next_)
+ service->next_->prev_= service->prev_;
+ service->next_ = 0;
+ service->prev_ = 0;
+
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ // If this is the last service to be removed, close the pipe.
+ if (state->service_list_ == 0)
+ close_descriptors();
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ }
+}
+
+void signal_set_service::open_descriptors()
+{
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ signal_state* state = get_signal_state();
+
+ int pipe_fds[2];
+ if (::pipe(pipe_fds) == 0)
+ {
+ state->read_descriptor_ = pipe_fds[0];
+ ::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK);
+
+ state->write_descriptor_ = pipe_fds[1];
+ ::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK);
+
+#if defined(FD_CLOEXEC)
+ ::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC);
+ ::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC);
+#endif // defined(FD_CLOEXEC)
+ }
+ else
+ {
+ boost::system::error_code ec(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "signal_set_service pipe");
+ }
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+}
+
+void signal_set_service::close_descriptors()
+{
+#if !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+ signal_state* state = get_signal_state();
+
+ if (state->read_descriptor_ != -1)
+ ::close(state->read_descriptor_);
+ state->read_descriptor_ = -1;
+
+ if (state->write_descriptor_ != -1)
+ ::close(state->write_descriptor_);
+ state->write_descriptor_ = -1;
+#endif // !defined(BOOST_WINDOWS) && !defined(__CYGWIN__)
+}
+
+void signal_set_service::start_wait_op(
+ signal_set_service::implementation_type& impl, signal_op* op)
+{
+ io_service_.work_started();
+
+ signal_state* state = get_signal_state();
+ static_mutex::scoped_lock lock(state->mutex_);
+
+ registration* reg = impl.signals_;
+ while (reg)
+ {
+ if (reg->undelivered_ > 0)
+ {
+ --reg->undelivered_;
+ io_service_.post_deferred_completion(op);
+ return;
+ }
+
+ reg = reg->next_in_set_;
+ }
+
+ impl.queue_.push(op);
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // BOOST_ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
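This new service backs the boost::asio::signal_set class introduced in 1.47: on POSIX systems asio_signal_handler() writes the signal number into the self-pipe opened by open_descriptors(), and pipe_read_op drains it on the reactor. A minimal usage sketch:

#include <boost/asio.hpp>
#include <csignal>
#include <iostream>

void on_signal(const boost::system::error_code& ec, int signal_number)
{
  if (!ec)
    std::cout << "caught signal " << signal_number << std::endl;
}

int main()
{
  boost::asio::io_service io_service;

  // Delivery is funnelled through the pipe set up by open_descriptors().
  boost::asio::signal_set signals(io_service, SIGINT, SIGTERM);
  signals.async_wait(&on_signal);

  io_service.run();   // returns after the first signal completes the wait
}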
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/socket_ops.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/socket_ops.ipp
index e240acd..55b6348 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/socket_ops.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/socket_ops.ipp
@@ -278,28 +278,9 @@ int close(socket_type s, state_type& state,
int result = 0;
if (s != invalid_socket)
{
-#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
- if ((state & non_blocking) && (state & user_set_linger))
- {
- ioctl_arg_type arg = 0;
- ::ioctlsocket(s, FIONBIO, &arg);
- state &= ~non_blocking;
- }
-#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
- if (state & non_blocking)
- {
-#if defined(__SYMBIAN32__)
- int flags = ::fcntl(s, F_GETFL, 0);
- if (flags >= 0)
- ::fcntl(s, F_SETFL, flags & ~O_NONBLOCK);
-#else // defined(__SYMBIAN32__)
- ioctl_arg_type arg = 0;
- ::ioctl(s, FIONBIO, &arg);
-#endif // defined(__SYMBIAN32__)
- state &= ~non_blocking;
- }
-#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
-
+ // We don't want the destructor to block, so set the socket to linger in
+ // the background. If the user doesn't like this behaviour then they need
+ // to explicitly close the socket.
if (destruction && (state & user_set_linger))
{
::linger opt;
@@ -316,6 +297,39 @@ int close(socket_type s, state_type& state,
#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
result = error_wrapper(::close(s), ec);
#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+
+ if (result != 0
+ && (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again))
+ {
+ // According to UNIX Network Programming Vol. 1, it is possible for
+ // close() to fail with EWOULDBLOCK under certain circumstances. What
+ // isn't clear is the state of the descriptor after this error. The one
+ // current OS where this behaviour is seen, Windows, says that the socket
+ // remains open. Therefore we'll put the descriptor back into blocking
+ // mode and have another attempt at closing it.
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ioctl_arg_type arg = 0;
+ ::ioctlsocket(s, FIONBIO, &arg);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+# if defined(__SYMBIAN32__)
+ int flags = ::fcntl(s, F_GETFL, 0);
+ if (flags >= 0)
+ ::fcntl(s, F_SETFL, flags & ~O_NONBLOCK);
+# else // defined(__SYMBIAN32__)
+ ioctl_arg_type arg = 0;
+ ::ioctl(s, FIONBIO, &arg);
+# endif // defined(__SYMBIAN32__)
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ state &= ~non_blocking;
+
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ result = error_wrapper(::closesocket(s), ec);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ result = error_wrapper(::close(s), ec);
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ }
}
if (result == 0)
@@ -323,8 +337,52 @@ int close(socket_type s, state_type& state,
return result;
}
+bool set_user_non_blocking(socket_type s,
+ state_type& state, bool value, boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return false;
+ }
+
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec);
+#elif defined(__SYMBIAN32__)
+ int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec);
+ if (result >= 0)
+ {
+ clear_last_error();
+ int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
+ result = error_wrapper(::fcntl(s, F_SETFL, flag), ec);
+ }
+#else
+ ioctl_arg_type arg = (value ? 1 : 0);
+ int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec);
+#endif
+
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+ if (value)
+ state |= user_set_non_blocking;
+ else
+ {
+ // Clearing the user-set non-blocking mode always overrides any
+ // internally-set non-blocking flag. Any subsequent asynchronous
+ // operations will need to re-enable non-blocking I/O.
+ state &= ~(user_set_non_blocking | internal_non_blocking);
+ }
+ return true;
+ }
+
+ return false;
+}
+
bool set_internal_non_blocking(socket_type s,
- state_type& state, boost::system::error_code& ec)
+ state_type& state, bool value, boost::system::error_code& ec)
{
if (s == invalid_socket)
{
@@ -332,26 +390,39 @@ bool set_internal_non_blocking(socket_type s,
return false;
}
+ if (!value && (state & user_set_non_blocking))
+ {
+ // It does not make sense to clear the internal non-blocking flag if the
+ // user still wants non-blocking behaviour. Return an error and let the
+ // caller figure out whether to update the user-set non-blocking flag.
+ ec = boost::asio::error::invalid_argument;
+ return false;
+ }
+
clear_last_error();
#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
- ioctl_arg_type arg = 1;
+ ioctl_arg_type arg = (value ? 1 : 0);
int result = error_wrapper(::ioctlsocket(s, FIONBIO, &arg), ec);
#elif defined(__SYMBIAN32__)
int result = error_wrapper(::fcntl(s, F_GETFL, 0), ec);
if (result >= 0)
{
clear_last_error();
- result = error_wrapper(::fcntl(s, F_SETFL, result | O_NONBLOCK), ec);
+ int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
+ result = error_wrapper(::fcntl(s, F_SETFL, flag), ec);
}
#else
- ioctl_arg_type arg = 1;
+ ioctl_arg_type arg = (value ? 1 : 0);
int result = error_wrapper(::ioctl(s, FIONBIO, &arg), ec);
#endif
if (result >= 0)
{
ec = boost::system::error_code();
- state |= internal_non_blocking;
+ if (value)
+ state |= internal_non_blocking;
+ else
+ state &= ~internal_non_blocking;
return true;
}
@@ -863,6 +934,116 @@ bool non_blocking_recvfrom(socket_type s,
#endif // defined(BOOST_ASIO_HAS_IOCP)
+int recvmsg(socket_type s, buf* bufs, size_t count,
+ int in_flags, int& out_flags, boost::system::error_code& ec)
+{
+ clear_last_error();
+#if defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ out_flags = 0;
+ return socket_ops::recv(s, bufs, count, in_flags, ec);
+#else // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+ msghdr msg = msghdr();
+ msg.msg_iov = bufs;
+ msg.msg_iovlen = count;
+ int result = error_wrapper(::recvmsg(s, &msg, in_flags), ec);
+ if (result >= 0)
+ {
+ ec = boost::system::error_code();
+ out_flags = msg.msg_flags;
+ }
+ else
+ out_flags = 0;
+ return result;
+#endif // defined(BOOST_WINDOWS) || defined(__CYGWIN__)
+}
+
+size_t sync_recvmsg(socket_type s, state_type state,
+ buf* bufs, size_t count, int in_flags, int& out_flags,
+ boost::system::error_code& ec)
+{
+ if (s == invalid_socket)
+ {
+ ec = boost::asio::error::bad_descriptor;
+ return 0;
+ }
+
+ // Read some data.
+ for (;;)
+ {
+ // Try to complete the operation without blocking.
+ int bytes = socket_ops::recvmsg(s, bufs, count, in_flags, out_flags, ec);
+
+ // Check if operation succeeded.
+ if (bytes >= 0)
+ return bytes;
+
+ // Operation failed.
+ if ((state & user_set_non_blocking)
+ || (ec != boost::asio::error::would_block
+ && ec != boost::asio::error::try_again))
+ return 0;
+
+ // Wait for socket to become ready.
+ if (socket_ops::poll_read(s, ec) < 0)
+ return 0;
+ }
+}
+
+#if defined(BOOST_ASIO_HAS_IOCP)
+
+void complete_iocp_recvmsg(
+ const weak_cancel_token_type& cancel_token,
+ boost::system::error_code& ec)
+{
+ // Map non-portable errors to their portable counterparts.
+ if (ec.value() == ERROR_NETNAME_DELETED)
+ {
+ if (cancel_token.expired())
+ ec = boost::asio::error::operation_aborted;
+ else
+ ec = boost::asio::error::connection_reset;
+ }
+ else if (ec.value() == ERROR_PORT_UNREACHABLE)
+ {
+ ec = boost::asio::error::connection_refused;
+ }
+}
+
+#else // defined(BOOST_ASIO_HAS_IOCP)
+
+bool non_blocking_recvmsg(socket_type s,
+ buf* bufs, size_t count, int in_flags, int& out_flags,
+ boost::system::error_code& ec, size_t& bytes_transferred)
+{
+ for (;;)
+ {
+ // Read some data.
+ int bytes = socket_ops::recvmsg(s, bufs, count, in_flags, out_flags, ec);
+
+ // Retry operation if interrupted by signal.
+ if (ec == boost::asio::error::interrupted)
+ continue;
+
+ // Check if we need to run the operation again.
+ if (ec == boost::asio::error::would_block
+ || ec == boost::asio::error::try_again)
+ return false;
+
+ // Operation is complete.
+ if (bytes >= 0)
+ {
+ ec = boost::system::error_code();
+ bytes_transferred = bytes;
+ }
+ else
+ bytes_transferred = 0;
+
+ return true;
+ }
+}
+
+#endif // defined(BOOST_ASIO_HAS_IOCP)
+
int send(socket_type s, const buf* bufs, size_t count, int flags,
boost::system::error_code& ec)
{
@@ -1680,7 +1861,8 @@ const char* inet_ntop(int af, const void* src, char* dest, size_t length,
using namespace std; // For strcat and sprintf.
char if_name[IF_NAMESIZE + 1] = "%";
const in6_addr_type* ipv6_address = static_cast<const in6_addr_type*>(src);
- bool is_link_local = IN6_IS_ADDR_LINKLOCAL(ipv6_address);
+ bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe)
+ && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80));
if (!is_link_local || if_indextoname(scope_id, if_name + 1) == 0)
sprintf(if_name + 1, "%lu", scope_id);
strcat(dest, if_name);
@@ -1764,7 +1946,8 @@ int inet_pton(int af, const char* src, void* dest,
if (const char* if_name = strchr(src, '%'))
{
in6_addr_type* ipv6_address = static_cast<in6_addr_type*>(dest);
- bool is_link_local = IN6_IS_ADDR_LINKLOCAL(ipv6_address);
+ bool is_link_local = ((ipv6_address->s6_addr[0] == 0xfe)
+ && ((ipv6_address->s6_addr[1] & 0xc0) == 0x80));
if (is_link_local)
*scope_id = if_nametoindex(if_name + 1);
if (*scope_id == 0)
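The new value parameter and the user_set_non_blocking/internal_non_blocking split are what let 1.47 expose user-controlled non-blocking mode separately from the non-blocking state asio enables for its own reactive operations. A hedged sketch of the corresponding socket members (assumes an already-open tcp::socket; see the 1.47 reference for the exact overloads):

#include <boost/asio.hpp>

void show_non_blocking_modes(boost::asio::ip::tcp::socket& socket)
{
  boost::system::error_code ec;

  // Request user-visible non-blocking behaviour: synchronous operations may
  // now fail with boost::asio::error::would_block instead of blocking.
  socket.non_blocking(true, ec);

  // The OS-level flag may already be on, because asio enables it internally
  // for asynchronous operations (set_internal_non_blocking above).
  bool os_level = socket.native_non_blocking();
  (void)os_level;
}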
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/socket_select_interrupter.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/socket_select_interrupter.ipp
index 3b64771..533bafd 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/socket_select_interrupter.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/socket_select_interrupter.ipp
@@ -36,6 +36,11 @@ namespace detail {
socket_select_interrupter::socket_select_interrupter()
{
+ open_descriptors();
+}
+
+void socket_select_interrupter::open_descriptors()
+{
boost::system::error_code ec;
socket_holder acceptor(socket_ops::socket(
AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
@@ -110,6 +115,11 @@ socket_select_interrupter::socket_select_interrupter()
socket_select_interrupter::~socket_select_interrupter()
{
+ close_descriptors();
+}
+
+void socket_select_interrupter::close_descriptors()
+{
boost::system::error_code ec;
socket_ops::state_type state = socket_ops::internal_non_blocking;
if (read_descriptor_ != invalid_socket)
@@ -118,6 +128,16 @@ socket_select_interrupter::~socket_select_interrupter()
socket_ops::close(write_descriptor_, state, true, ec);
}
+void socket_select_interrupter::recreate()
+{
+ close_descriptors();
+
+ write_descriptor_ = invalid_socket;
+ read_descriptor_ = invalid_socket;
+
+ open_descriptors();
+}
+
void socket_select_interrupter::interrupt()
{
char byte = 0;
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/strand_service.hpp b/3rdParty/Boost/src/boost/asio/detail/impl/strand_service.hpp
index 5cb320d..bb3698a 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/strand_service.hpp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/strand_service.hpp
@@ -73,19 +73,14 @@ void strand_service::dispatch(strand_service::implementation_type& impl,
sizeof(op), handler), 0 };
p.p = new (p.v) op(handler);
- // If we are running inside the io_service, and no other handler is queued
- // or running, then the handler can run immediately.
- bool can_dispatch = call_stack<io_service_impl>::contains(&io_service_);
- impl->mutex_.lock();
- bool first = (++impl->count_ == 1);
- if (can_dispatch && first)
- {
- // Immediate invocation is allowed.
- impl->mutex_.unlock();
+ BOOST_ASIO_HANDLER_CREATION((p.p, "strand", impl, "dispatch"));
- // Memory must be releaesed before any upcall is made.
- p.reset();
+ bool dispatch_immediately = do_dispatch(impl, p.p);
+ operation* o = p.p;
+ p.v = p.p = 0;
+ if (dispatch_immediately)
+ {
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl);
@@ -93,20 +88,9 @@ void strand_service::dispatch(strand_service::implementation_type& impl,
on_dispatch_exit on_exit = { &io_service_, impl };
(void)on_exit;
- boost::asio::detail::fenced_block b;
- boost_asio_handler_invoke_helpers::invoke(handler, handler);
- return;
+ completion_handler<Handler>::do_complete(
+ &io_service_, o, boost::system::error_code(), 0);
}
-
- // Immediate invocation is not allowed, so enqueue for later.
- impl->queue_.push(p.p);
- impl->mutex_.unlock();
- p.v = p.p = 0;
-
- // The first handler to be enqueued is responsible for scheduling the
- // strand.
- if (first)
- io_service_.post_immediate_completion(impl);
}
// Request the io_service to invoke the given handler and return immediately.
@@ -121,16 +105,10 @@ void strand_service::post(strand_service::implementation_type& impl,
sizeof(op), handler), 0 };
p.p = new (p.v) op(handler);
- // Add the handler to the queue.
- impl->mutex_.lock();
- bool first = (++impl->count_ == 1);
- impl->queue_.push(p.p);
- impl->mutex_.unlock();
- p.v = p.p = 0;
+ BOOST_ASIO_HANDLER_CREATION((p.p, "strand", impl, "post"));
- // The first handler to be enqueue is responsible for scheduling the strand.
- if (first)
- io_service_.post_immediate_completion(impl);
+ do_post(impl, p.p);
+ p.v = p.p = 0;
}
} // namespace detail
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/strand_service.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/strand_service.ipp
index 6a42146..62a8d5c 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/strand_service.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/strand_service.ipp
@@ -70,11 +70,50 @@ void strand_service::construct(strand_service::implementation_type& impl)
boost::asio::detail::mutex::scoped_lock lock(mutex_);
- if (!implementations_[index])
+ if (!implementations_[index].get())
implementations_[index].reset(new strand_impl);
impl = implementations_[index].get();
}
+bool strand_service::do_dispatch(implementation_type& impl, operation* op)
+{
+ // If we are running inside the io_service, and no other handler is queued
+ // or running, then the handler can run immediately.
+ bool can_dispatch = call_stack<io_service_impl>::contains(&io_service_);
+ impl->mutex_.lock();
+ bool first = (++impl->count_ == 1);
+ if (can_dispatch && first)
+ {
+ // Immediate invocation is allowed.
+ impl->mutex_.unlock();
+ return true;
+ }
+
+ // Immediate invocation is not allowed, so enqueue for later.
+ impl->queue_.push(op);
+ impl->mutex_.unlock();
+
+ // The first handler to be enqueued is responsible for scheduling the
+ // strand.
+ if (first)
+ io_service_.post_immediate_completion(impl);
+
+ return false;
+}
+
+void strand_service::do_post(implementation_type& impl, operation* op)
+{
+ // Add the handler to the queue.
+ impl->mutex_.lock();
+ bool first = (++impl->count_ == 1);
+ impl->queue_.push(op);
+ impl->mutex_.unlock();
+
+ // The first handler to be enqueued is responsible for scheduling the strand.
+ if (first)
+ io_service_.post_immediate_completion(impl);
+}
+
void strand_service::do_complete(io_service_impl* owner, operation* base,
boost::system::error_code /*ec*/, std::size_t /*bytes_transferred*/)
{
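do_dispatch() and do_post() above implement the strand guarantee that handlers never run concurrently: the first queued handler schedules the strand, and dispatch() may invoke a handler immediately only when it is already on an io_service thread with nothing else queued or running. A small usage sketch:

#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <iostream>

void say(const char* what) { std::cout << what << std::endl; }

int main()
{
  boost::asio::io_service io_service;
  boost::asio::io_service::strand strand(io_service);

  // post() always queues, via do_post().
  strand.post(boost::bind(&say, "posted"));

  // dispatch() goes through do_dispatch(): called here, outside run(), it
  // queues as well; called from a handler already running on this strand it
  // would invoke immediately.
  strand.dispatch(boost::bind(&say, "dispatched"));

  io_service.run();
}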
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.hpp b/3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.hpp
index a002189..ee23cb9 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.hpp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.hpp
@@ -36,7 +36,19 @@ void task_io_service::dispatch(Handler handler)
boost_asio_handler_invoke_helpers::invoke(handler, handler);
}
else
- post(handler);
+ {
+ // Allocate and construct an operation to wrap the handler.
+ typedef completion_handler<Handler> op;
+ typename op::ptr p = { boost::addressof(handler),
+ boost_asio_handler_alloc_helpers::allocate(
+ sizeof(op), handler), 0 };
+ p.p = new (p.v) op(handler);
+
+ BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));
+
+ post_immediate_completion(p.p);
+ p.v = p.p = 0;
+ }
}
template <typename Handler>
@@ -49,6 +61,8 @@ void task_io_service::post(Handler handler)
sizeof(op), handler), 0 };
p.p = new (p.v) op(handler);
+ BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "post"));
+
post_immediate_completion(p.p);
p.v = p.p = 0;
}
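The BOOST_ASIO_HANDLER_CREATION and BOOST_ASIO_HANDLER_TRACKING_INIT macros appearing throughout these hunks are no-ops by default; they become active when handler tracking is enabled at compile time, e.g.:

// Define before any asio header (or pass -DBOOST_ASIO_ENABLE_HANDLER_TRACKING
// on the compiler command line) to have the macros above emit a trace of
// handler creation, invocation and intervening operations to stderr.
#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
#include <boost/asio.hpp>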
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.ipp
index babfa7b..5b1d069 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/task_io_service.ipp
@@ -74,6 +74,7 @@ task_io_service::task_io_service(boost::asio::io_service& io_service)
shutdown_(false),
first_idle_thread_(0)
{
+ BOOST_ASIO_HANDLER_TRACKING_INIT;
}
void task_io_service::init(std::size_t /*concurrency_hint*/)
@@ -194,6 +195,12 @@ void task_io_service::stop()
stop_all_threads(lock);
}
+bool task_io_service::stopped() const
+{
+ mutex::scoped_lock lock(mutex_);
+ return stopped_;
+}
+
void task_io_service::reset()
{
mutex::scoped_lock lock(mutex_);
@@ -224,6 +231,13 @@ void task_io_service::post_deferred_completions(
}
}
+void task_io_service::abandon_operations(
+ op_queue<task_io_service::operation>& ops)
+{
+ op_queue<task_io_service::operation> ops2;
+ ops2.push(ops);
+}
+
std::size_t task_io_service::do_one(mutex::scoped_lock& lock,
task_io_service::idle_thread_info* this_idle_thread)
{
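stopped() and abandon_operations() are new here: the former lets callers ask whether run() would return immediately, without the side effects of calling it. A short sketch of the restart pattern it supports (tick() is illustrative):

#include <boost/asio.hpp>
#include <iostream>

void tick() { std::cout << "tick" << std::endl; }

int main()
{
  boost::asio::io_service io_service;

  io_service.post(&tick);
  io_service.run();            // returns once it runs out of work

  if (io_service.stopped())    // queries the flag guarded by mutex_ above
    io_service.reset();        // required before run() may be called again

  io_service.post(&tick);
  io_service.run();
}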
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_handle_service.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_handle_service.ipp
index eb6643a..b525e06 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_handle_service.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_handle_service.ipp
@@ -100,6 +100,64 @@ void win_iocp_handle_service::construct(
impl_list_ = &impl;
}
+void win_iocp_handle_service::move_construct(
+ win_iocp_handle_service::implementation_type& impl,
+ win_iocp_handle_service::implementation_type& other_impl)
+{
+ impl.handle_ = other_impl.handle_;
+ other_impl.handle_ = INVALID_HANDLE_VALUE;
+
+ impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+ other_impl.safe_cancellation_thread_id_ = 0;
+
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ impl.next_ = impl_list_;
+ impl.prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = &impl;
+ impl_list_ = &impl;
+}
+
+void win_iocp_handle_service::move_assign(
+ win_iocp_handle_service::implementation_type& impl,
+ win_iocp_handle_service& other_service,
+ win_iocp_handle_service::implementation_type& other_impl)
+{
+ close_for_destruction(impl);
+
+ if (this != &other_service)
+ {
+ // Remove implementation from linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ if (impl_list_ == &impl)
+ impl_list_ = impl.next_;
+ if (impl.prev_)
+ impl.prev_->next_ = impl.next_;
+ if (impl.next_)
+ impl.next_->prev_= impl.prev_;
+ impl.next_ = 0;
+ impl.prev_ = 0;
+ }
+
+ impl.handle_ = other_impl.handle_;
+ other_impl.handle_ = INVALID_HANDLE_VALUE;
+
+ impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+ other_impl.safe_cancellation_thread_id_ = 0;
+
+ if (this != &other_service)
+ {
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(other_service.mutex_);
+ impl.next_ = other_service.impl_list_;
+ impl.prev_ = 0;
+ if (other_service.impl_list_)
+ other_service.impl_list_->prev_ = &impl;
+ other_service.impl_list_ = &impl;
+ }
+}
+
void win_iocp_handle_service::destroy(
win_iocp_handle_service::implementation_type& impl)
{
@@ -119,7 +177,7 @@ void win_iocp_handle_service::destroy(
boost::system::error_code win_iocp_handle_service::assign(
win_iocp_handle_service::implementation_type& impl,
- const native_type& native_handle, boost::system::error_code& ec)
+ const native_handle_type& handle, boost::system::error_code& ec)
{
if (is_open(impl))
{
@@ -127,10 +185,10 @@ boost::system::error_code win_iocp_handle_service::assign(
return ec;
}
- if (iocp_service_.register_handle(native_handle, ec))
+ if (iocp_service_.register_handle(handle, ec))
return ec;
- impl.handle_ = native_handle;
+ impl.handle_ = handle;
ec = boost::system::error_code();
return ec;
}
@@ -141,19 +199,27 @@ boost::system::error_code win_iocp_handle_service::close(
{
if (is_open(impl))
{
+ BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "close"));
+
if (!::CloseHandle(impl.handle_))
{
DWORD last_error = ::GetLastError();
ec = boost::system::error_code(last_error,
boost::asio::error::get_system_category());
- return ec;
+ }
+ else
+ {
+ ec = boost::system::error_code();
}
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
}
+ else
+ {
+ ec = boost::system::error_code();
+ }
- ec = boost::system::error_code();
return ec;
}
@@ -164,8 +230,12 @@ boost::system::error_code win_iocp_handle_service::cancel(
if (!is_open(impl))
{
ec = boost::asio::error::bad_descriptor;
+ return ec;
}
- else if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
+
+ BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "cancel"));
+
+ if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
{
// The version of Windows supports cancellation from any thread.
@@ -437,6 +507,8 @@ void win_iocp_handle_service::close_for_destruction(implementation_type& impl)
{
if (is_open(impl))
{
+ BOOST_ASIO_HANDLER_OPERATION(("handle", &impl, "close"));
+
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.hpp b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.hpp
index 18b9413..f174dc7 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.hpp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.hpp
@@ -40,7 +40,19 @@ void win_iocp_io_service::dispatch(Handler handler)
boost_asio_handler_invoke_helpers::invoke(handler, handler);
}
else
- post(handler);
+ {
+ // Allocate and construct an operation to wrap the handler.
+ typedef completion_handler<Handler> op;
+ typename op::ptr p = { boost::addressof(handler),
+ boost_asio_handler_alloc_helpers::allocate(
+ sizeof(op), handler), 0 };
+ p.p = new (p.v) op(handler);
+
+ BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "dispatch"));
+
+ post_immediate_completion(p.p);
+ p.v = p.p = 0;
+ }
}
template <typename Handler>
@@ -53,6 +65,8 @@ void win_iocp_io_service::post(Handler handler)
sizeof(op), handler), 0 };
p.p = new (p.v) op(handler);
+ BOOST_ASIO_HANDLER_CREATION((p.p, "io_service", this, "post"));
+
post_immediate_completion(p.p);
p.v = p.p = 0;
}
@@ -93,7 +107,8 @@ void win_iocp_io_service::schedule_timer(timer_queue<Time_Traits>& queue,
template <typename Time_Traits>
std::size_t win_iocp_io_service::cancel_timer(timer_queue<Time_Traits>& queue,
- typename timer_queue<Time_Traits>::per_timer_data& timer)
+ typename timer_queue<Time_Traits>::per_timer_data& timer,
+ std::size_t max_cancelled)
{
// If the service has been shut down we silently ignore the cancellation.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
@@ -101,7 +116,7 @@ std::size_t win_iocp_io_service::cancel_timer(timer_queue<Time_Traits>& queue,
mutex::scoped_lock lock(dispatch_mutex_);
op_queue<win_iocp_operation> ops;
- std::size_t n = queue.cancel_timer(timer, ops);
+ std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
post_deferred_completions(ops);
return n;
}
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.ipp
index 9711702..4607669 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_io_service.ipp
@@ -70,6 +70,7 @@ win_iocp_io_service::win_iocp_io_service(boost::asio::io_service& io_service)
shutdown_(0),
dispatch_required_(0)
{
+ BOOST_ASIO_HANDLER_TRACKING_INIT;
}
void win_iocp_io_service::init(size_t concurrency_hint)
@@ -89,7 +90,7 @@ void win_iocp_io_service::shutdown_service()
{
::InterlockedExchange(&shutdown_, 1);
- if (timer_thread_)
+ if (timer_thread_.get())
{
LARGE_INTEGER timeout;
timeout.QuadPart = 1;
@@ -125,7 +126,7 @@ void win_iocp_io_service::shutdown_service()
}
}
- if (timer_thread_)
+ if (timer_thread_.get())
timer_thread_->join();
}
@@ -262,6 +263,17 @@ void win_iocp_io_service::post_deferred_completions(
}
}
+void win_iocp_io_service::abandon_operations(
+ op_queue<win_iocp_operation>& ops)
+{
+ while (win_iocp_operation* op = ops.front())
+ {
+ ops.pop();
+ ::InterlockedDecrement(&outstanding_work_);
+ op->destroy();
+ }
+}
+
void win_iocp_io_service::on_pending(win_iocp_operation* op)
{
if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
@@ -455,7 +467,7 @@ void win_iocp_io_service::do_add_timer_queue(timer_queue_base& queue)
&timeout, max_timeout_msec, 0, 0, FALSE);
}
- if (!timer_thread_)
+ if (!timer_thread_.get())
{
timer_thread_function thread_function = { this };
timer_thread_.reset(new thread(thread_function, 65536));
@@ -471,7 +483,7 @@ void win_iocp_io_service::do_remove_timer_queue(timer_queue_base& queue)
void win_iocp_io_service::update_timeout()
{
- if (timer_thread_)
+ if (timer_thread_.get())
{
// There's no point updating the waitable timer if the new timeout period
// exceeds the maximum timeout. In that case, we might as well wait for the
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_serial_port_service.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_serial_port_service.ipp
index 32ab6d1..0c641fc 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_serial_port_service.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_serial_port_service.ipp
@@ -127,7 +127,7 @@ boost::system::error_code win_iocp_serial_port_service::do_set_option(
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
- if (!::GetCommState(handle_service_.native(impl), &dcb))
+ if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = boost::system::error_code(last_error,
@@ -138,7 +138,7 @@ boost::system::error_code win_iocp_serial_port_service::do_set_option(
if (store(option, dcb, ec))
return ec;
- if (!::SetCommState(handle_service_.native(impl), &dcb))
+ if (!::SetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = boost::system::error_code(last_error,
@@ -160,7 +160,7 @@ boost::system::error_code win_iocp_serial_port_service::do_get_option(
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
- if (!::GetCommState(handle_service_.native(impl), &dcb))
+ if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = boost::system::error_code(last_error,
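handle_service_.native() becoming native_handle() mirrors a public rename in 1.47: I/O objects gained native_handle_type/native_handle() alongside the older native_type/native(). A hedged one-liner, assuming an open tcp::socket:

#include <boost/asio.hpp>

boost::asio::ip::tcp::socket::native_handle_type
get_native(boost::asio::ip::tcp::socket& socket)
{
  return socket.native_handle();   // replaces the older native() accessor
}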
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_socket_service_base.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_socket_service_base.ipp
index 0a2825b..2b8d0cb 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_socket_service_base.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/win_iocp_socket_service_base.ipp
@@ -69,6 +69,80 @@ void win_iocp_socket_service_base::construct(
impl_list_ = &impl;
}
+void win_iocp_socket_service_base::base_move_construct(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ win_iocp_socket_service_base::base_implementation_type& other_impl)
+{
+ impl.socket_ = other_impl.socket_;
+ other_impl.socket_ = invalid_socket;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ impl.cancel_token_ = other_impl.cancel_token_;
+ other_impl.cancel_token_.reset();
+
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+ other_impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ impl.next_ = impl_list_;
+ impl.prev_ = 0;
+ if (impl_list_)
+ impl_list_->prev_ = &impl;
+ impl_list_ = &impl;
+}
+
+void win_iocp_socket_service_base::base_move_assign(
+ win_iocp_socket_service_base::base_implementation_type& impl,
+ win_iocp_socket_service_base& other_service,
+ win_iocp_socket_service_base::base_implementation_type& other_impl)
+{
+ close_for_destruction(impl);
+
+ if (this != &other_service)
+ {
+ // Remove implementation from linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(mutex_);
+ if (impl_list_ == &impl)
+ impl_list_ = impl.next_;
+ if (impl.prev_)
+ impl.prev_->next_ = impl.next_;
+ if (impl.next_)
+ impl.next_->prev_= impl.prev_;
+ impl.next_ = 0;
+ impl.prev_ = 0;
+ }
+
+ impl.socket_ = other_impl.socket_;
+ other_impl.socket_ = invalid_socket;
+
+ impl.state_ = other_impl.state_;
+ other_impl.state_ = 0;
+
+ impl.cancel_token_ = other_impl.cancel_token_;
+ other_impl.cancel_token_.reset();
+
+#if defined(BOOST_ASIO_ENABLE_CANCELIO)
+ impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
+ other_impl.safe_cancellation_thread_id_ = 0;
+#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
+
+ if (this != &other_service)
+ {
+ // Insert implementation into linked list of all implementations.
+ boost::asio::detail::mutex::scoped_lock lock(other_service.mutex_);
+ impl.next_ = other_service.impl_list_;
+ impl.prev_ = 0;
+ if (other_service.impl_list_)
+ other_service.impl_list_->prev_ = &impl;
+ other_service.impl_list_ = &impl;
+ }
+}
+
void win_iocp_socket_service_base::destroy(
win_iocp_socket_service_base::base_implementation_type& impl)
{
@@ -92,6 +166,8 @@ boost::system::error_code win_iocp_socket_service_base::close(
{
if (is_open(impl))
{
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
@@ -99,18 +175,17 @@ boost::system::error_code win_iocp_socket_service_base::close(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
- r->close_descriptor(impl.socket_, impl.reactor_data_);
+ r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
}
- if (socket_ops::close(impl.socket_, impl.state_, false, ec) == 0)
- {
- impl.socket_ = invalid_socket;
- impl.state_ = 0;
- impl.cancel_token_.reset();
+ socket_ops::close(impl.socket_, impl.state_, false, ec);
+
+ impl.socket_ = invalid_socket;
+ impl.state_ = 0;
+ impl.cancel_token_.reset();
#if defined(BOOST_ASIO_ENABLE_CANCELIO)
- impl.safe_cancellation_thread_id_ = 0;
+ impl.safe_cancellation_thread_id_ = 0;
#endif // defined(BOOST_ASIO_ENABLE_CANCELIO)
- }
return ec;
}
@@ -124,7 +199,10 @@ boost::system::error_code win_iocp_socket_service_base::cancel(
ec = boost::asio::error::bad_descriptor;
return ec;
}
- else if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
+
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "cancel"));
+
+ if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
{
// The version of Windows supports cancellation from any thread.
@@ -474,7 +552,7 @@ void win_iocp_socket_service_base::start_connect_op(
if ((impl.state_ & socket_ops::non_blocking) != 0
|| socket_ops::set_internal_non_blocking(
- impl.socket_, impl.state_, op->ec_))
+ impl.socket_, impl.state_, true, op->ec_))
{
if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
{
@@ -497,6 +575,8 @@ void win_iocp_socket_service_base::close_for_destruction(
{
if (is_open(impl))
{
+ BOOST_ASIO_HANDLER_OPERATION(("socket", &impl, "close"));
+
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
@@ -504,7 +584,7 @@ void win_iocp_socket_service_base::close_for_destruction(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
- r->close_descriptor(impl.socket_, impl.reactor_data_);
+ r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
}
boost::system::error_code ignored_ec;
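base_move_construct() and base_move_assign() above are the IOCP-side plumbing for the C++0x move support added to sockets in 1.47 (compiled only when the compiler provides rvalue references). A minimal sketch:

#include <boost/asio.hpp>
#include <utility>

boost::asio::ip::tcp::socket make_socket(boost::asio::io_service& io_service)
{
  boost::asio::ip::tcp::socket s(io_service, boost::asio::ip::tcp::v4());
  return std::move(s);   // ownership of the native socket transfers out;
                         // the moved-from object is left in a not-open state
}

int main()
{
  boost::asio::io_service io_service;
  boost::asio::ip::tcp::socket s = make_socket(io_service);
  s.close();
}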
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/win_static_mutex.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/win_static_mutex.ipp
new file mode 100644
index 0000000..bf3193a
--- /dev/null
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/win_static_mutex.ipp
@@ -0,0 +1,120 @@
+//
+// detail/impl/win_static_mutex.ipp
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+//
+// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+
+#ifndef BOOST_ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
+#define BOOST_ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1200)
+# pragma once
+#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
+
+#include <boost/asio/detail/config.hpp>
+
+#if defined(BOOST_WINDOWS)
+
+#include <cstdio>
+#include <boost/asio/detail/throw_error.hpp>
+#include <boost/asio/detail/win_static_mutex.hpp>
+#include <boost/asio/error.hpp>
+
+#include <boost/asio/detail/push_options.hpp>
+
+namespace boost {
+namespace asio {
+namespace detail {
+
+void win_static_mutex::init()
+{
+ int error = do_init();
+ boost::system::error_code ec(error,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(ec, "static_mutex");
+}
+
+int win_static_mutex::do_init()
+{
+ using namespace std; // For sprintf.
+ wchar_t mutex_name[128];
+#if BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ swprintf_s(mutex_name, 128,
+#else // BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ swprintf(mutex_name,
+#endif // BOOST_WORKAROUND(BOOST_MSVC, >= 1400) && !defined(UNDER_CE)
+ L"asio-58CCDC44-6264-4842-90C2-F3C545CB8AA7-%u-%p",
+ static_cast<unsigned int>(::GetCurrentProcessId()), this);
+
+ HANDLE mutex = ::CreateMutexW(0, TRUE, mutex_name);
+ DWORD last_error = ::GetLastError();
+ if (mutex == 0)
+ return ::GetLastError();
+
+ if (last_error == ERROR_ALREADY_EXISTS)
+ ::WaitForSingleObject(mutex, INFINITE);
+
+ if (initialised_)
+ {
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return 0;
+ }
+
+#if defined(__MINGW32__)
+ // Not sure if MinGW supports structured exception handling, so for now
+ // we'll just call the Windows API and hope.
+# if defined(UNDER_CE)
+ ::InitializeCriticalSection(&crit_section_);
+# else
+ if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
+ {
+ last_error = ::GetLastError();
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return last_error;
+ }
+# endif
+#else
+ __try
+ {
+# if defined(UNDER_CE)
+ ::InitializeCriticalSection(&crit_section_);
+# else
+ if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
+ {
+ last_error = ::GetLastError();
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return last_error;
+ }
+# endif
+ }
+ __except(GetExceptionCode() == STATUS_NO_MEMORY
+ ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
+ {
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return ERROR_OUTOFMEMORY;
+ }
+#endif
+
+ initialised_ = true;
+ ::ReleaseMutex(mutex);
+ ::CloseHandle(mutex);
+ return 0;
+}
+
+} // namespace detail
+} // namespace asio
+} // namespace boost
+
+#include <boost/asio/detail/pop_options.hpp>
+
+#endif // defined(BOOST_WINDOWS)
+
+#endif // BOOST_ASIO_DETAIL_IMPL_WIN_STATIC_MUTEX_IPP
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/win_thread.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/win_thread.ipp
index 07cc5c2..c9a3fa7 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/win_thread.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/win_thread.ipp
@@ -102,12 +102,12 @@ void win_thread::start_thread(func_base* arg, unsigned int stack_size)
unsigned int __stdcall win_thread_function(void* arg)
{
- std::auto_ptr<win_thread::func_base> func(
- static_cast<win_thread::func_base*>(arg));
+ win_thread::auto_func_base_ptr func = {
+ static_cast<win_thread::func_base*>(arg) };
- ::SetEvent(func->entry_event_);
+ ::SetEvent(func.ptr->entry_event_);
- func->run();
+ func.ptr->run();
// Signal that the thread has finished its work, but rather than returning go
// to sleep to put the thread into a well known state. If the thread is being
@@ -115,8 +115,9 @@ unsigned int __stdcall win_thread_function(void* arg)
// TerminateThread (to avoid a deadlock in DllMain). Otherwise, the SleepEx
// call will be interrupted using QueueUserAPC and the thread will shut down
// cleanly.
- HANDLE exit_event = func->exit_event_;
- func.reset();
+ HANDLE exit_event = func.ptr->exit_event_;
+ delete func.ptr;
+ func.ptr = 0;
::SetEvent(exit_event);
::SleepEx(INFINITE, TRUE);