Diffstat (limited to '3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp')
-rw-r--r--   3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp   165
1 file changed, 151 insertions(+), 14 deletions(-)
diff --git a/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp b/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp
index 3ac9eae..a819eb9 100644
--- a/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp
+++ b/3rdParty/Boost/src/boost/asio/detail/impl/kqueue_reactor.ipp
@@ -2,7 +2,7 @@
// detail/impl/kqueue_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
-// Copyright (c) 2003-2011 Christopher M. Kohlhoff (chris at kohlhoff dot com)
+// Copyright (c) 2003-2012 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
@@ -28,8 +28,8 @@
#if defined(__NetBSD__)
# define BOOST_ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
- EV_SET(ev, ident, filt, flags, fflags, \
- data, reinterpret_cast<intptr_t>(udata))
+ EV_SET(ev, ident, filt, flags, fflags, data, \
+ reinterpret_cast<intptr_t>(static_cast<void*>(udata)))
#else
# define BOOST_ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
EV_SET(ev, ident, filt, flags, fflags, data, udata)
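
On NetBSD, struct kevent declares its udata member as intptr_t rather than void*, which is why this wrapper macro exists at all. The extra static_cast<void*> added here matters because the deregistration hunks later in this diff pass a literal 0 as udata, and reinterpret_cast cannot convert an integer literal to intptr_t directly. A standalone sketch of the distinction (illustrative only, not asio code):

    #include <cstdint>

    struct descriptor_state {};  // stand-in for asio's per-descriptor data

    int main()
    {
      descriptor_state s;

      // Fine for any object pointer:
      std::intptr_t a =
          reinterpret_cast<std::intptr_t>(static_cast<void*>(&s));

      // Also fine for the literal 0 used by the EV_DELETE calls below:
      std::intptr_t b =
          reinterpret_cast<std::intptr_t>(static_cast<void*>(0));

      // Ill-formed on typical targets: reinterpret_cast cannot convert
      // between distinct integer types.
      //   std::intptr_t c = reinterpret_cast<std::intptr_t>(0);

      (void)a; (void)b;
      return 0;
    }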
@@ -75,6 +75,47 @@ void kqueue_reactor::shutdown_service()
}
timer_queues_.get_all_timers(ops);
+
+ io_service_.abandon_operations(ops);
+}
+
+void kqueue_reactor::fork_service(boost::asio::io_service::fork_event fork_ev)
+{
+ if (fork_ev == boost::asio::io_service::fork_child)
+ {
+ // The kqueue descriptor is automatically closed in the child.
+ kqueue_fd_ = -1;
+ kqueue_fd_ = do_kqueue_create();
+
+ interrupter_.recreate();
+
+ // Re-register all descriptors with kqueue.
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ for (descriptor_state* state = registered_descriptors_.first();
+ state != 0; state = state->next_)
+ {
+ struct kevent events[2];
+ int num_events = 0;
+
+ if (!state->op_queue_[read_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
+ else if (!state->op_queue_[except_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_READ, EV_ADD | EV_CLEAR, EV_OOBAND, 0, state);
+
+ if (!state->op_queue_[write_op].empty())
+ BOOST_ASIO_KQUEUE_EV_SET(&events[num_events++], state->descriptor_,
+ EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);
+
+ if (num_events && ::kevent(kqueue_fd_, events, num_events, 0, 0, 0) == -1)
+ {
+ boost::system::error_code error(errno,
+ boost::asio::error::get_system_category());
+ boost::asio::detail::throw_error(error);
+ }
+ }
+ }
}
void kqueue_reactor::init_task()
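
The new fork_service() is not called by user code directly; it runs when the application notifies the io_service around a fork(). A minimal usage sketch, assuming the Boost 1.49-era notify_fork() API and a POSIX platform:

    #include <boost/asio.hpp>
    #include <unistd.h>

    int main()
    {
      boost::asio::io_service io_service;
      // ... set up sockets, timers, etc. ...

      // Must be called before fork(), with no threads running the service.
      io_service.notify_fork(boost::asio::io_service::fork_prepare);
      if (fork() == 0)
      {
        // Child: triggers fork_service(fork_child) above, which recreates
        // the kqueue (not inherited across fork) and the interrupter, then
        // re-registers every live descriptor.
        io_service.notify_fork(boost::asio::io_service::fork_child);
        io_service.run();
      }
      else
      {
        // Parent: its kqueue is still valid, so nothing is rebuilt.
        io_service.notify_fork(boost::asio::io_service::fork_parent);
        io_service.run();
      }
      return 0;
    }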
@@ -82,17 +123,60 @@ void kqueue_reactor::init_task()
io_service_.init_task();
}
-int kqueue_reactor::register_descriptor(socket_type,
+int kqueue_reactor::register_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
- mutex::scoped_lock lock(registered_descriptors_mutex_);
+ descriptor_data = allocate_descriptor_state();
+
+ mutex::scoped_lock lock(descriptor_data->mutex_);
+
+ descriptor_data->descriptor_ = descriptor;
+ descriptor_data->shutdown_ = false;
+
+ return 0;
+}
+
+int kqueue_reactor::register_internal_descriptor(
+ int op_type, socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
+{
+  descriptor_data = allocate_descriptor_state();

-  descriptor_data = registered_descriptors_.alloc();
+ mutex::scoped_lock lock(descriptor_data->mutex_);
+
+ descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
+ descriptor_data->op_queue_[op_type].push(op);
+
+ struct kevent event;
+ switch (op_type)
+ {
+ case read_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ break;
+ case write_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_WRITE,
+ EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
+ break;
+ case except_op:
+ BOOST_ASIO_KQUEUE_EV_SET(&event, descriptor, EVFILT_READ,
+ EV_ADD | EV_CLEAR, EV_OOBAND, 0, descriptor_data);
+ break;
+ }
+ ::kevent(kqueue_fd_, &event, 1, 0, 0, 0);
return 0;
}
+void kqueue_reactor::move_descriptor(socket_type,
+ kqueue_reactor::per_descriptor_data& target_descriptor_data,
+ kqueue_reactor::per_descriptor_data& source_descriptor_data)
+{
+ target_descriptor_data = source_descriptor_data;
+ source_descriptor_data = 0;
+}
+
void kqueue_reactor::start_op(int op_type, socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data,
reactor_op* op, bool allow_speculative)
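
Both registration paths arm their filters with EV_ADD | EV_CLEAR: a persistent, edge-triggered registration whose udata points back at the owning descriptor_state. Stripped of the reactor plumbing, the underlying call looks roughly like this (a sketch with illustrative names, assuming a platform such as FreeBSD or Mac OS X where udata is a void*):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>

    // Add an edge-triggered read filter for fd; each delivered event
    // will carry state back in its udata field.
    int register_read(int kq, int fd, void* state)
    {
      struct kevent ev;
      EV_SET(&ev, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
      // nevents == 0: apply the change list only, harvest no events.
      return ::kevent(kq, &ev, 1, 0, 0, 0);
    }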
@@ -187,19 +271,30 @@ void kqueue_reactor::cancel_ops(socket_type,
io_service_.post_deferred_completions(ops);
}
-void kqueue_reactor::close_descriptor(socket_type,
- kqueue_reactor::per_descriptor_data& descriptor_data)
+void kqueue_reactor::deregister_descriptor(socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
- mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
if (!descriptor_data->shutdown_)
{
- // Remove the descriptor from the set of known descriptors. The descriptor
- // will be automatically removed from the kqueue set when it is closed.
+ if (closing)
+ {
+ // The descriptor will be automatically removed from the kqueue when it
+ // is closed.
+ }
+ else
+ {
+ struct kevent events[2];
+ BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
+ EVFILT_READ, EV_DELETE, 0, 0, 0);
+ BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
+ EVFILT_WRITE, EV_DELETE, 0, 0, 0);
+ ::kevent(kqueue_fd_, events, 2, 0, 0, 0);
+ }
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
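
The closing flag is the point of the rename from close_descriptor(): when the socket is about to be closed, the kernel drops its kevents automatically, so EV_DELETE would be redundant; when asio releases a descriptor that stays open, the filters must be removed explicitly. Note that the kevent() result is ignored above; EV_DELETE fails with ENOENT if a filter was never added, which is harmless here. A sketch of that explicit-removal case (illustrative names, not asio API):

    #include <cerrno>
    #include <sys/types.h>
    #include <sys/event.h>

    // Remove both filters for a descriptor that remains open.
    void remove_filters(int kq, int fd)
    {
      struct kevent evs[2];
      EV_SET(&evs[0], fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
      EV_SET(&evs[1], fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
      if (::kevent(kq, evs, 2, 0, 0, 0) == -1 && errno != ENOENT)
      {
        // A real error; the reactor itself simply ignores it.
      }
    }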
@@ -212,19 +307,49 @@ void kqueue_reactor::close_descriptor(socket_type,
}
}
+ descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
- registered_descriptors_.free(descriptor_data);
+ free_descriptor_state(descriptor_data);
descriptor_data = 0;
- descriptors_lock.unlock();
-
io_service_.post_deferred_completions(ops);
}
}
+void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
+ kqueue_reactor::per_descriptor_data& descriptor_data)
+{
+ if (!descriptor_data)
+ return;
+
+ mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
+
+ if (!descriptor_data->shutdown_)
+ {
+ struct kevent events[2];
+ BOOST_ASIO_KQUEUE_EV_SET(&events[0], descriptor,
+ EVFILT_READ, EV_DELETE, 0, 0, 0);
+ BOOST_ASIO_KQUEUE_EV_SET(&events[1], descriptor,
+ EVFILT_WRITE, EV_DELETE, 0, 0, 0);
+ ::kevent(kqueue_fd_, events, 2, 0, 0, 0);
+
+ op_queue<operation> ops;
+ for (int i = 0; i < max_ops; ++i)
+ ops.push(descriptor_data->op_queue_[i]);
+
+ descriptor_data->descriptor_ = -1;
+ descriptor_data->shutdown_ = true;
+
+ descriptor_lock.unlock();
+
+ free_descriptor_state(descriptor_data);
+ descriptor_data = 0;
+ }
+}
+
void kqueue_reactor::run(bool block, op_queue<operation>& ops)
{
mutex::scoped_lock lock(mutex_);
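
One caveat that applies to every EV_CLEAR registration in this file: the filter becomes edge-triggered, so kqueue reports a readiness transition once and then resets it; a new EVFILT_READ event typically arrives only when more data does. Consumers therefore have to keep reading until the socket would block, or they can stall. A generic sketch of that drain pattern (not asio code; assumes a non-blocking socket):

    #include <cerrno>
    #include <sys/types.h>
    #include <sys/socket.h>

    // Drain a non-blocking socket after an edge-triggered read event.
    void drain(int fd)
    {
      char buf[4096];
      for (;;)
      {
        ssize_t n = ::recv(fd, buf, sizeof(buf), 0);
        if (n > 0)
          continue;  // consumed a chunk; keep reading
        if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
          break;     // fully drained; wait for the next event
        break;       // n == 0 (EOF) or a real error
      }
    }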
@@ -354,6 +479,18 @@ int kqueue_reactor::do_kqueue_create()
return fd;
}
+kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()
+{
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ return registered_descriptors_.alloc();
+}
+
+void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)
+{
+ mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
+ registered_descriptors_.free(s);
+}
+
void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
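
This last hunk shows the point of the locking refactor: registered_descriptors_mutex_ now guards the descriptor_state pool only while allocating and recycling, and everything else synchronizes on the per-descriptor mutex_. Keeping the pool as an iterable intrusive list is also what makes the fork_service() re-registration walk possible. A condensed sketch of that lock-split pattern (std::mutex and the type names are stand-ins, not asio's own):

    #include <mutex>

    struct state
    {
      std::mutex mutex_;  // per-descriptor lock: op queues, shutdown flag
      state* next_;       // intrusive link so the live list can be walked
    };

    class state_pool
    {
    public:
      state_pool() : live_(0) {}

      state* alloc()
      {
        std::lock_guard<std::mutex> lock(mutex_);
        state* s = new state;  // a real pool would recycle freed nodes
        s->next_ = live_;
        live_ = s;
        return s;
      }

      void free(state* s)
      {
        std::lock_guard<std::mutex> lock(mutex_);
        state** p = &live_;
        while (*p != s)
          p = &(*p)->next_;    // find s in the live list
        *p = s->next_;         // unlink, then release
        delete s;
      }

      state* first() { return live_; }  // iterate while holding the pool lock

    private:
      std::mutex mutex_;  // guards only the intrusive live list
      state* live_;
    };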