//
// epoll_reactor.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2008 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef BOOST_ASIO_DETAIL_EPOLL_REACTOR_HPP
#define BOOST_ASIO_DETAIL_EPOLL_REACTOR_HPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include <boost/asio/detail/push_options.hpp>

#include <boost/asio/detail/epoll_reactor_fwd.hpp>

#if defined(BOOST_ASIO_HAS_EPOLL)

#include <boost/asio/detail/push_options.hpp>
#include <cstddef>
#include <vector>
#include <sys/epoll.h>
#include <boost/config.hpp>
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/throw_exception.hpp>
#include <boost/system/system_error.hpp>
#include <boost/asio/detail/pop_options.hpp>

#include <boost/asio/error.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/detail/bind_handler.hpp>
#include <boost/asio/detail/hash_map.hpp>
#include <boost/asio/detail/mutex.hpp>
#include <boost/asio/detail/task_io_service.hpp>
#include <boost/asio/detail/thread.hpp>
#include <boost/asio/detail/reactor_op_queue.hpp>
#include <boost/asio/detail/select_interrupter.hpp>
#include <boost/asio/detail/service_base.hpp>
#include <boost/asio/detail/signal_blocker.hpp>
#include <boost/asio/detail/socket_types.hpp>
#include <boost/asio/detail/timer_queue.hpp>

namespace boost {
namespace asio {
namespace detail {

template <bool Own_Thread>
class epoll_reactor
  : public boost::asio::detail::service_base<epoll_reactor<Own_Thread> >
{
public:
  // Per-descriptor data.
  struct per_descriptor_data
  {
    bool allow_speculative_read;
    bool allow_speculative_write;
  };
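
  // Note: these flags record whether an immediate ("speculative") attempt at
  // an operation may be made on the calling thread before the operation is
  // queued with epoll. start_read_op() and start_write_op() below only make
  // such an attempt while no operation of the same kind is already queued for
  // the descriptor, so that new operations cannot overtake queued ones, and
  // they clear the corresponding flag whenever an operation is enqueued.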

  // Constructor.
  epoll_reactor(boost::asio::io_service& io_service)
    : boost::asio::detail::service_base<epoll_reactor<Own_Thread> >(io_service),
      mutex_(),
      epoll_fd_(do_epoll_create()),
      wait_in_progress_(false),
      interrupter_(),
      read_op_queue_(),
      write_op_queue_(),
      except_op_queue_(),
      pending_cancellations_(),
      stop_thread_(false),
      thread_(0),
      shutdown_(false),
      need_epoll_wait_(true)
  {
    // Start the reactor's internal thread only if needed.
    if (Own_Thread)
    {
      boost::asio::detail::signal_blocker sb;
      thread_ = new boost::asio::detail::thread(
          bind_handler(&epoll_reactor::call_run_thread, this));
    }

    // Add the interrupter's descriptor to epoll.
    epoll_event ev = { 0, { 0 } };
    ev.events = EPOLLIN | EPOLLERR;
    ev.data.fd = interrupter_.read_descriptor();
    epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
  }
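
  // Note: the interrupter's read descriptor registered above is how a blocked
  // epoll_wait() call gets woken: interrupt() and the timer functions signal
  // the select_interrupter (typically by writing to a self-pipe), epoll then
  // reports EPOLLIN on the read descriptor, and run() simply calls
  // interrupter_.reset() when it sees that descriptor among the ready events.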

  // Destructor.
  ~epoll_reactor()
  {
    shutdown_service();
    close(epoll_fd_);
  }

  // Destroy all user-defined handler objects owned by the service.
  void shutdown_service()
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    shutdown_ = true;
    stop_thread_ = true;
    lock.unlock();

    if (thread_)
    {
      interrupter_.interrupt();
      thread_->join();
      delete thread_;
      thread_ = 0;
    }

    read_op_queue_.destroy_operations();
    write_op_queue_.destroy_operations();
    except_op_queue_.destroy_operations();

    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
      timer_queues_[i]->destroy_timers();
    timer_queues_.clear();
  }

  // Initialise the task, but only if the reactor is not in its own thread.
  void init_task()
  {
    if (!Own_Thread)
    {
      typedef task_io_service<epoll_reactor<Own_Thread> > task_io_service_type;
      use_service<task_io_service_type>(this->get_io_service()).init_task();
    }
  }

  // Register a socket with the reactor. Returns 0 on success, system error
  // code on failure.
  int register_descriptor(socket_type descriptor,
      per_descriptor_data& descriptor_data)
  {
    // No need to lock according to epoll documentation.

    descriptor_data.allow_speculative_read = true;
    descriptor_data.allow_speculative_write = true;

    epoll_event ev = { 0, { 0 } };
    ev.events = 0;
    ev.data.fd = descriptor;
    int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
    if (result != 0)
      return errno;
    return 0;
  }
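
  // Illustrative usage from a higher-level service (sketch only; the real
  // call sites live in the socket service implementations, and "reactor",
  // "fd" and "ec" are placeholders here):
  //
  //   epoll_reactor<false>::per_descriptor_data data;
  //   if (int err = reactor.register_descriptor(fd, data))
  //     ec = boost::system::error_code(err,
  //         boost::asio::error::get_system_category());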

  // Start a new read operation. The handler object will be invoked when the
  // given descriptor is ready to be read, or an error has occurred.
  template <typename Handler>
  void start_read_op(socket_type descriptor,
      per_descriptor_data& descriptor_data,
      Handler handler, bool allow_speculative_read = true)
  {
    if (allow_speculative_read && descriptor_data.allow_speculative_read)
    {
      boost::system::error_code ec;
      std::size_t bytes_transferred = 0;
      if (handler.perform(ec, bytes_transferred))
      {
        handler.complete(ec, bytes_transferred);
        return;
      }

      // We only get one shot at a speculative read in this function.
      allow_speculative_read = false;
    }

    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    if (shutdown_)
      return;

    if (!allow_speculative_read)
      need_epoll_wait_ = true;
    else if (!read_op_queue_.has_operation(descriptor))
    {
      // Speculative reads are ok as there are no queued read operations.
      descriptor_data.allow_speculative_read = true;

      boost::system::error_code ec;
      std::size_t bytes_transferred = 0;
      if (handler.perform(ec, bytes_transferred))
      {
        handler.complete(ec, bytes_transferred);
        return;
      }
    }

    // Speculative reads are not ok as there will be queued read operations.
    descriptor_data.allow_speculative_read = false;

    if (read_op_queue_.enqueue_operation(descriptor, handler))
    {
      epoll_event ev = { 0, { 0 } };
      ev.events = EPOLLIN | EPOLLERR | EPOLLHUP;
      if (write_op_queue_.has_operation(descriptor))
        ev.events |= EPOLLOUT;
      if (except_op_queue_.has_operation(descriptor))
        ev.events |= EPOLLPRI;
      ev.data.fd = descriptor;

      int result = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
      if (result != 0 && errno == ENOENT)
        result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
      if (result != 0)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        read_op_queue_.perform_all_operations(descriptor, ec);
      }
    }
  }
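
  // The Handler objects passed to start_read_op(), start_write_op() and
  // start_except_op() are expected to expose an interface along these lines
  // (illustrative sketch only; the concrete handler types are defined by the
  // higher-level services):
  //
  //   struct example_op_handler
  //   {
  //     // Attempt the operation. Returns true if it has finished
  //     // (successfully or with an error) and so need not be queued.
  //     bool perform(boost::system::error_code& ec,
  //         std::size_t& bytes_transferred);
  //
  //     // Deliver the result to the user's completion handler.
  //     void complete(const boost::system::error_code& ec,
  //         std::size_t bytes_transferred);
  //   };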

  // Start a new write operation. The handler object will be invoked when the
  // given descriptor is ready to be written, or an error has occurred.
  template <typename Handler>
  void start_write_op(socket_type descriptor,
      per_descriptor_data& descriptor_data,
      Handler handler, bool allow_speculative_write = true)
  {
    if (allow_speculative_write && descriptor_data.allow_speculative_write)
    {
      boost::system::error_code ec;
      std::size_t bytes_transferred = 0;
      if (handler.perform(ec, bytes_transferred))
      {
        handler.complete(ec, bytes_transferred);
        return;
      }

      // We only get one shot at a speculative write in this function.
      allow_speculative_write = false;
    }

    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    if (shutdown_)
      return;

    if (!allow_speculative_write)
      need_epoll_wait_ = true;
    else if (!write_op_queue_.has_operation(descriptor))
    {
      // Speculative writes are ok as there are no queued write operations.
      descriptor_data.allow_speculative_write = true;

      boost::system::error_code ec;
      std::size_t bytes_transferred = 0;
      if (handler.perform(ec, bytes_transferred))
      {
        handler.complete(ec, bytes_transferred);
        return;
      }
    }

    // Speculative writes are not ok as there will be queued write operations.
    descriptor_data.allow_speculative_write = false;

    if (write_op_queue_.enqueue_operation(descriptor, handler))
    {
      epoll_event ev = { 0, { 0 } };
      ev.events = EPOLLOUT | EPOLLERR | EPOLLHUP;
      if (read_op_queue_.has_operation(descriptor))
        ev.events |= EPOLLIN;
      if (except_op_queue_.has_operation(descriptor))
        ev.events |= EPOLLPRI;
      ev.data.fd = descriptor;

      int result = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
      if (result != 0 && errno == ENOENT)
        result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
      if (result != 0)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        write_op_queue_.perform_all_operations(descriptor, ec);
      }
    }
  }

  // Start a new exception operation. The handler object will be invoked when
  // the given descriptor has exception information, or an error has occurred.
  template <typename Handler>
  void start_except_op(socket_type descriptor,
      per_descriptor_data&, Handler handler)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    if (shutdown_)
      return;

    if (except_op_queue_.enqueue_operation(descriptor, handler))
    {
      epoll_event ev = { 0, { 0 } };
      ev.events = EPOLLPRI | EPOLLERR | EPOLLHUP;
      if (read_op_queue_.has_operation(descriptor))
        ev.events |= EPOLLIN;
      if (write_op_queue_.has_operation(descriptor))
        ev.events |= EPOLLOUT;
      ev.data.fd = descriptor;

      int result = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
      if (result != 0 && errno == ENOENT)
        result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
      if (result != 0)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        except_op_queue_.perform_all_operations(descriptor, ec);
      }
    }
  }

  // Start a new write operation to detect completion of a connect. The
  // handler object will be invoked when the given descriptor is ready for
  // writing or an error has occurred. Speculative writes are not allowed.
  template <typename Handler>
  void start_connect_op(socket_type descriptor,
      per_descriptor_data& descriptor_data, Handler handler)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    if (shutdown_)
      return;

    // Speculative writes are not ok as there will be queued write operations.
    descriptor_data.allow_speculative_write = false;

    if (write_op_queue_.enqueue_operation(descriptor, handler))
    {
      epoll_event ev = { 0, { 0 } };
      ev.events = EPOLLOUT | EPOLLERR | EPOLLHUP;
      if (read_op_queue_.has_operation(descriptor))
        ev.events |= EPOLLIN;
      if (except_op_queue_.has_operation(descriptor))
        ev.events |= EPOLLPRI;
      ev.data.fd = descriptor;

      int result = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
      if (result != 0 && errno == ENOENT)
        result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
      if (result != 0)
      {
        boost::system::error_code ec(errno,
            boost::asio::error::get_system_category());
        write_op_queue_.perform_all_operations(descriptor, ec);
      }
    }
  }
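
  // Note: completion of a non-blocking connect() is signalled by the socket
  // becoming writable (or by an error), which is why connect operations are
  // queued on write_op_queue_ and why no speculative write attempt is made
  // here.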

  // Cancel all operations associated with the given descriptor. The
  // handlers associated with the descriptor will be invoked with the
  // operation_aborted error.
  void cancel_ops(socket_type descriptor, per_descriptor_data&)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    cancel_ops_unlocked(descriptor);
  }

  // Cancel any operations that are running against the descriptor and remove
  // its registration from the reactor.
  void close_descriptor(socket_type descriptor, per_descriptor_data&)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    // Remove the descriptor from epoll.
    epoll_event ev = { 0, { 0 } };
    epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);

    // Cancel any outstanding operations associated with the descriptor.
    cancel_ops_unlocked(descriptor);
  }

  // Add a new timer queue to the reactor.
  template <typename Time_Traits>
  void add_timer_queue(timer_queue<Time_Traits>& timer_queue)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    timer_queues_.push_back(&timer_queue);
  }

  // Remove a timer queue from the reactor.
  template <typename Time_Traits>
  void remove_timer_queue(timer_queue<Time_Traits>& timer_queue)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
    {
      if (timer_queues_[i] == &timer_queue)
      {
        timer_queues_.erase(timer_queues_.begin() + i);
        return;
      }
    }
  }

  // Schedule a timer in the given timer queue to expire at the specified
  // absolute time. The handler object will be invoked when the timer expires.
  template <typename Time_Traits, typename Handler>
  void schedule_timer(timer_queue<Time_Traits>& timer_queue,
      const typename Time_Traits::time_type& time, Handler handler, void* token)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    if (!shutdown_)
      if (timer_queue.enqueue_timer(time, handler, token))
        interrupter_.interrupt();
  }

  // Cancel the timer associated with the given token. Returns the number of
  // handlers that have been posted or dispatched.
  template <typename Time_Traits>
  std::size_t cancel_timer(timer_queue<Time_Traits>& timer_queue, void* token)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    std::size_t n = timer_queue.cancel_timer(token);
    if (n > 0)
      interrupter_.interrupt();
    return n;
  }
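
  // Note: schedule_timer() and cancel_timer() both interrupt the reactor so
  // that a blocked epoll_wait() call wakes up promptly; run() then recomputes
  // its timeout via get_timeout() and dispatches any timers that are now
  // ready or cancelled.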

private:
  friend class task_io_service<epoll_reactor<Own_Thread> >;

  // Run epoll once until interrupted or events are ready to be dispatched.
  void run(bool block)
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);

    // Dispatch any operation cancellations that were made while the reactor
    // loop was not running.
    read_op_queue_.perform_cancellations();
    write_op_queue_.perform_cancellations();
    except_op_queue_.perform_cancellations();
    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
      timer_queues_[i]->dispatch_cancellations();

    // Check if the thread is supposed to stop.
    if (stop_thread_)
    {
      complete_operations_and_timers(lock);
      return;
    }

    // We can return immediately if there's no work to do and the reactor is
    // not supposed to block.
    if (!block && read_op_queue_.empty() && write_op_queue_.empty()
        && except_op_queue_.empty() && all_timer_queues_are_empty())
    {
      complete_operations_and_timers(lock);
      return;
    }

    int timeout = block ? get_timeout() : 0;
    wait_in_progress_ = true;
    lock.unlock();

    // Block on the epoll descriptor.
    epoll_event events[128];
    int num_events = (block || need_epoll_wait_)
      ? epoll_wait(epoll_fd_, events, 128, timeout)
      : 0;

    lock.lock();
    wait_in_progress_ = false;

    // Block signals while performing operations.
    boost::asio::detail::signal_blocker sb;

    // Dispatch the waiting events.
    for (int i = 0; i < num_events; ++i)
    {
      int descriptor = events[i].data.fd;
      if (descriptor == interrupter_.read_descriptor())
      {
        interrupter_.reset();
      }
      else
      {
        bool more_reads = false;
        bool more_writes = false;
        bool more_except = false;
        boost::system::error_code ec;

        // Exception operations must be processed first to ensure that any
        // out-of-band data is read before normal data.
        if (events[i].events & (EPOLLPRI | EPOLLERR | EPOLLHUP))
          more_except = except_op_queue_.perform_operation(descriptor, ec);
        else
          more_except = except_op_queue_.has_operation(descriptor);

        if (events[i].events & (EPOLLIN | EPOLLERR | EPOLLHUP))
          more_reads = read_op_queue_.perform_operation(descriptor, ec);
        else
          more_reads = read_op_queue_.has_operation(descriptor);

        if (events[i].events & (EPOLLOUT | EPOLLERR | EPOLLHUP))
          more_writes = write_op_queue_.perform_operation(descriptor, ec);
        else
          more_writes = write_op_queue_.has_operation(descriptor);

        if ((events[i].events & (EPOLLERR | EPOLLHUP)) != 0
            && (events[i].events & ~(EPOLLERR | EPOLLHUP)) == 0
            && !more_except && !more_reads && !more_writes)
        {
          // If we have an event and no operations associated with the
          // descriptor then we need to delete the descriptor from epoll. The
          // epoll_wait system call can produce EPOLLHUP or EPOLLERR events
          // when there is no operation pending, so if we do not remove the
          // descriptor we can end up in a tight loop of repeated
          // calls to epoll_wait.
          epoll_event ev = { 0, { 0 } };
          epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
        }
        else
        {
          epoll_event ev = { 0, { 0 } };
          ev.events = EPOLLERR | EPOLLHUP;
          if (more_reads)
            ev.events |= EPOLLIN;
          if (more_writes)
            ev.events |= EPOLLOUT;
          if (more_except)
            ev.events |= EPOLLPRI;
          ev.data.fd = descriptor;
          int result = epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
          if (result != 0 && errno == ENOENT)
            result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
          if (result != 0)
          {
            ec = boost::system::error_code(errno,
                boost::asio::error::get_system_category());
            read_op_queue_.perform_all_operations(descriptor, ec);
            write_op_queue_.perform_all_operations(descriptor, ec);
            except_op_queue_.perform_all_operations(descriptor, ec);
          }
        }
      }
    }

    read_op_queue_.perform_cancellations();
    write_op_queue_.perform_cancellations();
    except_op_queue_.perform_cancellations();
    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
    {
      timer_queues_[i]->dispatch_timers();
      timer_queues_[i]->dispatch_cancellations();
    }

    // Issue any pending cancellations.
    for (std::size_t i = 0; i < pending_cancellations_.size(); ++i)
      cancel_ops_unlocked(pending_cancellations_[i]);
    pending_cancellations_.clear();

    // Determine whether epoll_wait should be called when the reactor next runs.
    need_epoll_wait_ = !read_op_queue_.empty()
      || !write_op_queue_.empty() || !except_op_queue_.empty();

    complete_operations_and_timers(lock);
  }
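
  // Note: when Own_Thread is false the reactor is not driven by a thread of
  // its own; instead init_task() registers it with the task_io_service
  // (declared a friend above), which drives it by calling run() with an
  // appropriate blocking flag from within io_service::run().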

  // Run the reactor loop in the internal thread.
  void run_thread()
  {
    boost::asio::detail::mutex::scoped_lock lock(mutex_);
    while (!stop_thread_)
    {
      lock.unlock();
      run(true);
      lock.lock();
    }
  }

  // Entry point for the reactor loop thread.
  static void call_run_thread(epoll_reactor* reactor)
  {
    reactor->run_thread();
  }

  // Interrupt the reactor loop.
  void interrupt()
  {
    interrupter_.interrupt();
  }

  // The hint to pass to epoll_create to size its data structures.
  enum { epoll_size = 20000 };

  // Create the epoll file descriptor. Throws an exception if the descriptor
  // cannot be created.
  static int do_epoll_create()
  {
    int fd = epoll_create(epoll_size);
    if (fd == -1)
    {
      boost::throw_exception(
          boost::system::system_error(
            boost::system::error_code(errno,
              boost::asio::error::get_system_category()),
            "epoll"));
    }
    return fd;
  }
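
  // Note: on Linux kernels 2.6.8 and later the size hint passed to
  // epoll_create() is ignored (it need only be greater than zero); the
  // epoll_size value above matters only on older kernels that still use it
  // as a sizing hint.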

  // Check if all timer queues are empty.
  bool all_timer_queues_are_empty() const
  {
    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
      if (!timer_queues_[i]->empty())
        return false;
    return true;
  }

  // Get the timeout value for the epoll_wait call. The timeout value is
  // returned as a number of milliseconds. A return value of -1 indicates
  // that epoll_wait should block indefinitely.
  int get_timeout()
  {
    if (all_timer_queues_are_empty())
      return -1;

    // By default we will wait no longer than 5 minutes. This ensures that any
    // changes to the system clock are detected after no more than this period
    // of time.
    boost::posix_time::time_duration minimum_wait_duration
      = boost::posix_time::minutes(5);

    for (std::size_t i = 0; i < timer_queues_.size(); ++i)
    {
      boost::posix_time::time_duration wait_duration
        = timer_queues_[i]->wait_duration();
      if (wait_duration < minimum_wait_duration)
        minimum_wait_duration = wait_duration;
    }

    if (minimum_wait_duration > boost::posix_time::time_duration())
    {
      int milliseconds = minimum_wait_duration.total_milliseconds();
      return milliseconds > 0 ? milliseconds : 1;
    }
    else
    {
      return 0;
    }
  }
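
  // Worked example: with the nearest timer 2500ms away this returns 2500;
  // with the nearest timer less than 1ms away, total_milliseconds() truncates
  // to 0 and 1 is returned so that epoll_wait does not busy-loop; with a
  // timer already due (non-positive wait duration) 0 is returned so that
  // epoll_wait only polls.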

  // Cancel all operations associated with the given descriptor. The do_cancel
  // function of the handler objects will be invoked. This function does not
  // acquire the epoll_reactor's mutex.
  void cancel_ops_unlocked(socket_type descriptor)
  {
    bool interrupt = read_op_queue_.cancel_operations(descriptor);
    interrupt = write_op_queue_.cancel_operations(descriptor) || interrupt;
    interrupt = except_op_queue_.cancel_operations(descriptor) || interrupt;
    if (interrupt)
      interrupter_.interrupt();
  }
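
  // Note: cancel_operations() only marks the queued handlers for
  // cancellation; the interrupt wakes any blocked epoll_wait() call so that
  // run() can call perform_cancellations() and deliver the operation_aborted
  // error to those handlers.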

  // Clean up operations and timers. We must not hold the lock since the
  // destructors may make calls back into this reactor. We make a copy of the
  // vector of timer queues since the original may be modified while the lock
  // is not held.
  void complete_operations_and_timers(
      boost::asio::detail::mutex::scoped_lock& lock)
  {
    timer_queues_for_cleanup_ = timer_queues_;
    lock.unlock();
    read_op_queue_.complete_operations();
    write_op_queue_.complete_operations();
    except_op_queue_.complete_operations();
    for (std::size_t i = 0; i < timer_queues_for_cleanup_.size(); ++i)
      timer_queues_for_cleanup_[i]->complete_timers();
  }

  // Mutex to protect access to internal data.
  boost::asio::detail::mutex mutex_;

  // The epoll file descriptor.
  int epoll_fd_;

  // Whether an epoll_wait call is currently in progress.
  bool wait_in_progress_;

  // The interrupter is used to break a blocking epoll_wait call.
  select_interrupter interrupter_;

  // The queue of read operations.
  reactor_op_queue<socket_type> read_op_queue_;

  // The queue of write operations.
  reactor_op_queue<socket_type> write_op_queue_;

  // The queue of except operations.
  reactor_op_queue<socket_type> except_op_queue_;

  // The timer queues.
  std::vector<timer_queue_base*> timer_queues_;

  // A copy of the timer queues, used when cleaning up timers. The copy is
  // stored as a class data member to avoid unnecessary memory allocation.
  std::vector<timer_queue_base*> timer_queues_for_cleanup_;

  // The descriptors that are pending cancellation.
  std::vector<socket_type> pending_cancellations_;

  // Whether the reactor loop thread should stop.
  bool stop_thread_;

  // The thread that is running the reactor loop.
  boost::asio::detail::thread* thread_;

  // Whether the service has been shut down.
  bool shutdown_;

  // Whether we need to call epoll_wait the next time the reactor is run.
  bool need_epoll_wait_;
};

} // namespace detail
} // namespace asio
} // namespace boost

#endif // defined(BOOST_ASIO_HAS_EPOLL)

#include <boost/asio/detail/pop_options.hpp>

#endif // BOOST_ASIO_DETAIL_EPOLL_REACTOR_HPP