memory_segment_mapped_unittest.cc

// Copyright (C) 2013 Internet Systems Consortium, Inc. ("ISC")
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.

#include <util/tests/memory_segment_common_unittest.h>
#include <util/unittests/check_valgrind.h>
#include <util/tests/interprocess_util.h>

#include <util/memory_segment_mapped.h>
#include <exceptions/exceptions.h>

#include <gtest/gtest.h>

#include <boost/interprocess/file_mapping.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <boost/scoped_ptr.hpp>
#include <boost/foreach.hpp>

#include <stdint.h>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <stdexcept>
#include <fstream>
#include <string>
#include <vector>
#include <map>

#include <sys/stat.h>
#include <unistd.h>
#include <stdlib.h>

using namespace isc::util;
using boost::scoped_ptr;
using isc::util::test::parentReadState;

namespace {

// Shortcut to keep code shorter
const MemorySegmentMapped::OpenMode OPEN_FOR_WRITE =
    MemorySegmentMapped::OPEN_FOR_WRITE;
const MemorySegmentMapped::OpenMode OPEN_OR_CREATE =
    MemorySegmentMapped::OPEN_OR_CREATE;
const MemorySegmentMapped::OpenMode CREATE_ONLY =
    MemorySegmentMapped::CREATE_ONLY;

const char* const mapped_file = TEST_DATA_BUILDDIR "/test.mapped";
const size_t DEFAULT_INITIAL_SIZE = 32 * 1024; // intentionally hardcoded
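// (The hardcoded value is assumed to mirror the default initial size used by
// MemorySegmentMapped; duplicating it here on purpose lets the test notice if
// the library default ever changes unintentionally.)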

// A simple RAII-style wrapper for a pipe. Several tests in this file use
// pipes, so this helper will be useful.
class PipeHolder {
public:
    PipeHolder() {
        if (pipe(fds_) == -1) {
            isc_throw(isc::Unexpected, "pipe failed");
        }
    }
    ~PipeHolder() {
        close(fds_[0]);
        close(fds_[1]);
    }
    int getReadFD() const { return (fds_[0]); }
    int getWriteFD() const { return (fds_[1]); }
private:
    int fds_[2];
};

class MemorySegmentMappedTest : public ::testing::Test {
protected:
    MemorySegmentMappedTest() {
        resetSegment();
    }

    ~MemorySegmentMappedTest() {
        segment_.reset();
        boost::interprocess::file_mapping::remove(mapped_file);
    }

    // For initialization and for tests after the segment possibly becomes
    // broken.
    void resetSegment() {
        segment_.reset();
        boost::interprocess::file_mapping::remove(mapped_file);
        segment_.reset(new MemorySegmentMapped(mapped_file, OPEN_OR_CREATE));
    }

    scoped_ptr<MemorySegmentMapped> segment_;
};

TEST_F(MemorySegmentMappedTest, createAndModify) {
    // We are going to do the same set of basic tests twice; once right after
    // creating the mapped file, and once after re-opening the existing file
    // in read-write mode.
    for (int i = 0; i < 2; ++i) {
        // It should have the default size (intentionally hardcoded).
        EXPECT_EQ(DEFAULT_INITIAL_SIZE, segment_->getSize());

        // By default, nothing is allocated.
        EXPECT_TRUE(segment_->allMemoryDeallocated());

        void* ptr = segment_->allocate(1024);
        EXPECT_NE(static_cast<void*>(NULL), ptr);

        // Now we have an allocation:
        EXPECT_FALSE(segment_->allMemoryDeallocated());

        // Deallocate it; it shouldn't cause disruption.
        segment_->deallocate(ptr, 1024);
        EXPECT_TRUE(segment_->allMemoryDeallocated());

        // Re-open it in read-write mode, but don't try to create it
        // this time.
        segment_.reset();       // make sure close happens first.
        segment_.reset(new MemorySegmentMapped(mapped_file, OPEN_FOR_WRITE));
    }
}

TEST_F(MemorySegmentMappedTest, createWithSize) {
    boost::interprocess::file_mapping::remove(mapped_file);

    // Re-create the mapped file with a non-default initial size, and confirm
    // the size is actually the specified one.
    const size_t new_size = 64 * 1024;
    EXPECT_NE(new_size, segment_->getSize());
    segment_.reset();
    segment_.reset(new MemorySegmentMapped(mapped_file, OPEN_OR_CREATE,
                                           new_size));
    EXPECT_EQ(new_size, segment_->getSize());
}

TEST_F(MemorySegmentMappedTest, createOnly) {
    // First, allocate some data in the existing segment.
    EXPECT_TRUE(segment_->allocate(16));
    // Close it, and then open it again in the create-only mode. The existing
    // file should be internally removed, and so the resulting segment
    // should be "empty" (all deallocated).
    segment_.reset();
    segment_.reset(new MemorySegmentMapped(mapped_file, CREATE_ONLY));
    EXPECT_TRUE(segment_->allMemoryDeallocated());
}

TEST_F(MemorySegmentMappedTest, openFail) {
    // The given file is a directory.
    EXPECT_THROW(MemorySegmentMapped("/", OPEN_OR_CREATE),
                 MemorySegmentOpenError);

    // The file doesn't exist and the directory isn't writable (we assume the
    // following path is not writable for the user running the test).
    EXPECT_THROW(MemorySegmentMapped("/random-glkwjer098/test.mapped",
                                     OPEN_OR_CREATE), MemorySegmentOpenError);

    // It should fail when the file doesn't exist and the mode is read-only
    // (so open-only).
    EXPECT_THROW(MemorySegmentMapped(TEST_DATA_BUILDDIR "/nosuchfile.mapped"),
                 MemorySegmentOpenError);
    // Likewise, it should fail in read-write mode when creation is
    // suppressed.
    EXPECT_THROW(MemorySegmentMapped(TEST_DATA_BUILDDIR "/nosuchfile.mapped",
                                     OPEN_FOR_WRITE), MemorySegmentOpenError);

    // Creating with a very small size fails (certainly for 0; other small
    // values should also make it fail, but that's an internal restriction
    // of Boost and isn't predictable).
    EXPECT_THROW(MemorySegmentMapped(mapped_file, OPEN_OR_CREATE, 0),
                 MemorySegmentOpenError);

    // Invalid open mode.
    EXPECT_THROW(MemorySegmentMapped(
                     mapped_file,
                     static_cast<MemorySegmentMapped::OpenMode>(
                         static_cast<int>(CREATE_ONLY) + 1)),
                 isc::InvalidParameter);

    // Close the existing segment, break its file with bogus data, and
    // try to reopen. It should fail with an exception whether opened in
    // read-only, read-write, or "create if not exist" mode.
    segment_.reset();
    std::ofstream ofs(mapped_file, std::ios::trunc);
    ofs << std::string(1024, 'x');
    ofs.close();
    EXPECT_THROW(MemorySegmentMapped sgmt(mapped_file), MemorySegmentOpenError);
    EXPECT_THROW(MemorySegmentMapped sgmt(mapped_file, OPEN_FOR_WRITE),
                 MemorySegmentOpenError);
    EXPECT_THROW(MemorySegmentMapped sgmt(mapped_file, OPEN_OR_CREATE),
                 MemorySegmentOpenError);
}

TEST_F(MemorySegmentMappedTest, allocate) {
    // Various cases of allocation. The simplest cases are covered above.

    // Initially, nothing is allocated.
    EXPECT_TRUE(segment_->allMemoryDeallocated());

    // (Clearly) exceeding the available size, which should cause growing
    // the segment.
    const size_t prev_size = segment_->getSize();
    EXPECT_THROW(segment_->allocate(prev_size + 1), MemorySegmentGrown);
    // The size should have been doubled.
    EXPECT_EQ(prev_size * 2, segment_->getSize());
    // But nothing should have been allocated.
    EXPECT_TRUE(segment_->allMemoryDeallocated());

    // Now the allocation should succeed.
    void* ptr = segment_->allocate(prev_size + 1);
    EXPECT_NE(static_cast<void*>(NULL), ptr);
    EXPECT_FALSE(segment_->allMemoryDeallocated());

    // Same set of checks, but for a larger size.
    EXPECT_THROW(segment_->allocate(prev_size * 10), MemorySegmentGrown);
    // The segment should have grown to the minimum power-of-2 size that
    // could hold the given size of memory.
    EXPECT_EQ(prev_size * 16, segment_->getSize());
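    // (As a concrete example: with the 32KB default initial size, prev_size
    // is 32KB, so a request of prev_size * 10 = 320KB needs the segment to
    // keep doubling up to 512KB = prev_size * 16; this assumes, as the check
    // above implies, that the segment always grows by doubling.)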
    // And allocate() should now succeed.
    ptr = segment_->allocate(prev_size * 10);
    EXPECT_NE(static_cast<void*>(NULL), ptr);

    // (We'll leave the regions created in the file there; the entire file
    // will be removed at the end of the test.)
}

TEST_F(MemorySegmentMappedTest, badAllocate) {
    // Make the mapped file non-writable; managed_mapped_file::grow() will
    // fail, resulting in std::bad_alloc.
    const int ret = chmod(mapped_file, 0444);
    ASSERT_EQ(0, ret);

    EXPECT_THROW(segment_->allocate(DEFAULT_INITIAL_SIZE * 2), std::bad_alloc);
}

// XXX: this test can cause too strong a side effect (creating a very large
// file), so we disable it by default.
TEST_F(MemorySegmentMappedTest, DISABLED_allocateHuge) {
    EXPECT_THROW(segment_->allocate(std::numeric_limits<size_t>::max()),
                 std::bad_alloc);
}

TEST_F(MemorySegmentMappedTest, badDeallocate) {
    void* ptr = segment_->allocate(4);
    EXPECT_NE(static_cast<void*>(NULL), ptr);

    segment_->deallocate(ptr, 4); // this is okay
    // This is a duplicate deallocation; it should trigger an assertion
    // failure.
    if (!isc::util::unittests::runningOnValgrind()) {
        EXPECT_DEATH_IF_SUPPORTED({segment_->deallocate(ptr, 4);}, "");
        resetSegment();   // the segment is possibly broken; reset it.
    }

    // Deallocating at an invalid address; this would result in a crash (the
    // behavior may not be portable enough; if so we should disable it by
    // default).
    if (!isc::util::unittests::runningOnValgrind()) {
        ptr = segment_->allocate(4);
        EXPECT_NE(static_cast<void*>(NULL), ptr);
        EXPECT_DEATH_IF_SUPPORTED({
                segment_->deallocate(static_cast<char*>(ptr) + 1, 3);
            }, "");
        resetSegment();
    }

    // Invalid size; this implementation doesn't detect such errors.
    ptr = segment_->allocate(4);
    EXPECT_NE(static_cast<void*>(NULL), ptr);
    segment_->deallocate(ptr, 8);
    EXPECT_TRUE(segment_->allMemoryDeallocated());
}

// A helper of namedAddress.
void
checkNamedData(const std::string& name, const std::vector<uint8_t>& data,
               MemorySegment& sgmt, bool delete_after_check = false)
{
    void* dp = sgmt.getNamedAddress(name.c_str());
    ASSERT_TRUE(dp);
    EXPECT_EQ(0, std::memcmp(dp, &data[0], data.size()));

    if (delete_after_check) {
        sgmt.deallocate(dp, data.size());
        sgmt.clearNamedAddress(name.c_str());
    }
}

TEST_F(MemorySegmentMappedTest, namedAddress) {
    // common test cases
    isc::util::test::checkSegmentNamedAddress(*segment_, false);

    // Set it again and read it in the read-only mode.
    void* ptr16 = segment_->allocate(sizeof(uint16_t));
    const uint16_t test_val16 = 42000;
    *static_cast<uint16_t*>(ptr16) = test_val16;
    EXPECT_FALSE(segment_->setNamedAddress("test address", ptr16));
    segment_.reset();           // close it before opening another one

    segment_.reset(new MemorySegmentMapped(mapped_file));
    EXPECT_NE(static_cast<void*>(NULL),
              segment_->getNamedAddress("test address"));
    EXPECT_EQ(test_val16, *static_cast<const uint16_t*>(
                  segment_->getNamedAddress("test address")));

    // Try to set an unusually long name. We re-create the file so that
    // storing the name would cause an allocation failure and trigger
    // internal segment extension.
    segment_.reset();
    boost::interprocess::file_mapping::remove(mapped_file);
    segment_.reset(new MemorySegmentMapped(mapped_file, OPEN_OR_CREATE, 1024));
    const std::string long_name(1025, 'x'); // definitely larger than segment
    // setNamedAddress should return true, indicating the segment has grown.
    EXPECT_TRUE(segment_->setNamedAddress(long_name.c_str(), NULL));
    EXPECT_EQ(static_cast<void*>(NULL),
              segment_->getNamedAddress(long_name.c_str()));

    // Check that the contents pointed to by named addresses survive growing
    // and shrinking of the segment.
    segment_.reset();
    boost::interprocess::file_mapping::remove(mapped_file);
    segment_.reset(new MemorySegmentMapped(mapped_file, OPEN_OR_CREATE));

    typedef std::map<std::string, std::vector<uint8_t> > TestData;
    TestData data_list;
    data_list["data1"] =
        std::vector<uint8_t>(80);    // arbitrarily chosen small data
    data_list["data2"] =
        std::vector<uint8_t>(5000);  // larger than usual segment size
    data_list["data3"] =
        std::vector<uint8_t>(65535); // bigger than most usual data
    bool grown = false;

    // Allocate memory and store data
    for (TestData::iterator it = data_list.begin(); it != data_list.end();
         ++it)
    {
        std::vector<uint8_t>& data = it->second;
        for (size_t i = 0; i < data.size(); ++i) {
            data[i] = i;
        }
        void* dp = NULL;
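        // allocate() may throw MemorySegmentGrown while the segment is being
        // extended; in that case the allocation itself did not take place
        // (see the allocate test above), so we simply retry until it
        // succeeds without growing.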
        while (!dp) {
            try {
                dp = segment_->allocate(data.size());
                std::memcpy(dp, &data[0], data.size());
                segment_->setNamedAddress(it->first.c_str(), dp);
            } catch (const MemorySegmentGrown&) {
                grown = true;
            }
        }
    }

    // Confirm there's been at least one segment extension.
    EXPECT_TRUE(grown);

    // Check the named data are still valid.
    for (TestData::iterator it = data_list.begin(); it != data_list.end();
         ++it)
    {
        checkNamedData(it->first, it->second, *segment_);
    }

    // Confirm they are still valid while we shrink the segment. We'll
    // intentionally delete the bigger data first so it's more likely that
    // shrinking has some real effect.
    const char* const names[] = { "data3", "data2", "data1", NULL };
    for (int i = 0; names[i]; ++i) {
        checkNamedData(names[i], data_list[names[i]], *segment_, true);
        segment_->shrinkToFit();
    }
}

TEST_F(MemorySegmentMappedTest, multiProcess) {
    // Tests using fork() don't work well on valgrind.
    if (isc::util::unittests::runningOnValgrind()) {
        return;
    }

    // Allocate some data and name its address.
    void* ptr = segment_->allocate(sizeof(uint32_t));
    *static_cast<uint32_t*>(ptr) = 424242;
    segment_->setNamedAddress("test address", ptr);

    // Close the read-write segment at this point. Our intended use case is
    // to have one or more reader processes or at most one exclusive writer
    // process, so we don't mix readers and writers.
    segment_.reset();

    // Spawn another process and have it open and read the same data.
    PipeHolder pipe_to_child;
    PipeHolder pipe_to_parent;
    const pid_t child_pid = fork();
    ASSERT_NE(-1, child_pid);
    if (child_pid == 0) {
        // child: wait until the parent has opened the read-only segment.
        char from_parent;
        EXPECT_EQ(1, read(pipe_to_child.getReadFD(), &from_parent,
                          sizeof(from_parent)));
        EXPECT_EQ(0, from_parent);

        MemorySegmentMapped sgmt(mapped_file);
        void* ptr_child = sgmt.getNamedAddress("test address");
        EXPECT_TRUE(ptr_child);
        if (ptr_child) {
            const uint32_t val = *static_cast<const uint32_t*>(ptr_child);
            EXPECT_EQ(424242, val);
            // Tell the parent whether it succeeded: 0 means it did,
            // 0xff means it failed.
            const char ok = (val == 424242) ? 0 : 0xff;
            EXPECT_EQ(1, write(pipe_to_parent.getWriteFD(), &ok, sizeof(ok)));
        }
        exit(0);
    }

    // parent: open another read-only segment, then tell the child to open
    // its own segment.
    segment_.reset(new MemorySegmentMapped(mapped_file));
    ptr = segment_->getNamedAddress("test address");
    ASSERT_TRUE(ptr);
    EXPECT_EQ(424242, *static_cast<const uint32_t*>(ptr));
    const char some_data = 0;
    EXPECT_EQ(1, write(pipe_to_child.getWriteFD(), &some_data,
                       sizeof(some_data)));

    // Wait for the completion of the child and check the result.
    EXPECT_EQ(0, parentReadState(pipe_to_parent.getReadFD()));
}

TEST_F(MemorySegmentMappedTest, nullDeallocate) {
    // NULL deallocation is a no-op.
    EXPECT_NO_THROW(segment_->deallocate(0, 1024));
    EXPECT_TRUE(segment_->allMemoryDeallocated());
}

TEST_F(MemorySegmentMappedTest, shrink) {
    segment_->shrinkToFit();
    // Normally we should be able to expect that the resulting size is
    // smaller than the initial default size. But it's not really
    // guaranteed by the API, so we may have to disable this check (or
    // use EXPECT_GE).
    const size_t shrinked_size = segment_->getSize();
    EXPECT_GT(DEFAULT_INITIAL_SIZE, shrinked_size);

    // Another shrink shouldn't cause disruption. We expect the size to stay
    // the same, so we confirm it. The underlying library doesn't guarantee
    // that, so we may have to change it to EXPECT_GE if the test fails
    // on that (the MemorySegmentMapped class doesn't rely on this
    // expectation, so it's okay even if it does not always hold).
    segment_->shrinkToFit();
    EXPECT_EQ(shrinked_size, segment_->getSize());

    // Check that the segment is still usable after the shrink.
    void* p = segment_->allocate(sizeof(uint32_t));
    segment_->deallocate(p, sizeof(uint32_t));
}

TEST_F(MemorySegmentMappedTest, violateReadOnly) {
    // Create a named address for the tests below, then reset the writer
    // segment so the checks below won't fail for a different reason (i.e.,
    // a read-write conflict).
    void* ptr = segment_->allocate(sizeof(uint32_t));
    segment_->setNamedAddress("test address", ptr);
    segment_.reset();

    // Attempts to modify memory from the read-only segment directly
    // will result in a crash.
    if (!isc::util::unittests::runningOnValgrind()) {
        EXPECT_DEATH_IF_SUPPORTED({
                MemorySegmentMapped segment_ro(mapped_file);
                EXPECT_TRUE(segment_ro.getNamedAddress("test address"));
                *static_cast<uint32_t*>(
                    segment_ro.getNamedAddress("test address")) = 0;
            }, "");
    }

    // If the segment is opened in the read-only mode, modification
    // attempts are prohibited. When detectable it must result in an
    // exception.
    MemorySegmentMapped segment_ro(mapped_file);
    ptr = segment_ro.getNamedAddress("test address");
    EXPECT_NE(static_cast<void*>(NULL), ptr);

    EXPECT_THROW(segment_ro.deallocate(ptr, 4), MemorySegmentError);
    EXPECT_THROW(segment_ro.allocate(16), MemorySegmentError);
    // An allocation that would otherwise require growing the segment; the
    // permission check should be performed before that.
    EXPECT_THROW(segment_ro.allocate(DEFAULT_INITIAL_SIZE * 2),
                 MemorySegmentError);
    EXPECT_THROW(segment_ro.setNamedAddress("test", NULL), MemorySegmentError);
    EXPECT_THROW(segment_ro.clearNamedAddress("test"), MemorySegmentError);
    EXPECT_THROW(segment_ro.shrinkToFit(), MemorySegmentError);
}

TEST_F(MemorySegmentMappedTest, getCheckSum) {
    const size_t old_cksum = segment_->getCheckSum();
    // We assume the initial segment size is sufficiently larger than
    // the page size. We'll allocate memory of the page size, and
    // increment all bytes in that page by one. It will increase our
    // simple checksum value (which just uses the first byte of each
    // page) by one, too.
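    // (Illustration only, not necessarily how getCheckSum() is implemented:
    // a checksum of that form could be computed roughly as
    //     size_t sum = 0;
    //     for (const uint8_t* p = base; p < base + size; p += page_size) {
    //         sum += *p;
    //     }
    // which is why incrementing every byte of a page-sized allocation is
    // expected to raise the checksum by exactly one below.)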
    const size_t page_sz = boost::interprocess::mapped_region::get_page_size();
    uint8_t* cp0 = static_cast<uint8_t*>(segment_->allocate(page_sz));
    for (uint8_t* cp = cp0; cp < cp0 + page_sz; ++cp) {
        ++*cp;
    }
    EXPECT_EQ(old_cksum + 1, segment_->getCheckSum());
}

// Mode of opening segments in the tests below.
enum TestOpenMode {
    READER = 0,
    WRITER_FOR_WRITE,
    WRITER_OPEN_OR_CREATE,
    WRITER_CREATE_ONLY
};

// A shortcut to attempt to open a specified type of segment (generally
// expecting it to fail).
void
setSegment(TestOpenMode mode, scoped_ptr<MemorySegmentMapped>& sgmt_ptr) {
    switch (mode) {
    case READER:
        sgmt_ptr.reset(new MemorySegmentMapped(mapped_file));
        break;
    case WRITER_FOR_WRITE:
        sgmt_ptr.reset(new MemorySegmentMapped(mapped_file, OPEN_FOR_WRITE));
        break;
    case WRITER_OPEN_OR_CREATE:
        sgmt_ptr.reset(new MemorySegmentMapped(mapped_file, OPEN_OR_CREATE));
        break;
    case WRITER_CREATE_ONLY:
        sgmt_ptr.reset(new MemorySegmentMapped(mapped_file, CREATE_ONLY));
        break;
    }
}

// Common logic for the conflictReaderWriter test. The segment opened in the
// parent process will prevent the segment in the child from being used.
void
conflictCheck(TestOpenMode parent_mode, TestOpenMode child_mode) {
    PipeHolder pipe_to_child;
    PipeHolder pipe_to_parent;
    const pid_t child_pid = fork();
    ASSERT_NE(-1, child_pid);

    if (child_pid == 0) {
        char ch;
        EXPECT_EQ(1, read(pipe_to_child.getReadFD(), &ch, sizeof(ch)));

        ch = 0;                 // 0 = open success, 1 = fail
        try {
            scoped_ptr<MemorySegmentMapped> sgmt;
            setSegment(child_mode, sgmt);
            EXPECT_EQ(1, write(pipe_to_parent.getWriteFD(), &ch, sizeof(ch)));
        } catch (const MemorySegmentOpenError&) {
            ch = 1;
            EXPECT_EQ(1, write(pipe_to_parent.getWriteFD(), &ch, sizeof(ch)));
        }
        exit(0);
    }

    // parent: open a segment, then tell the child to open its own segment of
    // the specified type.
    scoped_ptr<MemorySegmentMapped> sgmt;
    setSegment(parent_mode, sgmt);
    const char some_data = 0;
    EXPECT_EQ(1, write(pipe_to_child.getWriteFD(), &some_data,
                       sizeof(some_data)));

    // Wait for the completion of the child and check the result. The open at
    // the child side should fail, so the parent should get the value of 1.
    EXPECT_EQ(1, parentReadState(pipe_to_parent.getReadFD()));
}

TEST_F(MemorySegmentMappedTest, conflictReaderWriter) {
    // Tests using fork() don't work well on valgrind.
    if (isc::util::unittests::runningOnValgrind()) {
        return;
    }

    // Below, we check that all combinations of conflicts between a reader and
    // a writer fail. We first make sure there's no other reader or writer.
    segment_.reset();

    // Reader opens the segment, then a writer (OPEN_FOR_WRITE) tries to open.
    conflictCheck(READER, WRITER_FOR_WRITE);
    // Reader opens the segment, then a writer (OPEN_OR_CREATE) tries to open.
    conflictCheck(READER, WRITER_OPEN_OR_CREATE);
    // Reader opens the segment, then a writer (CREATE_ONLY) tries to open.
    conflictCheck(READER, WRITER_CREATE_ONLY);

    // Writer (OPEN_FOR_WRITE) opens a segment, then a reader tries to open.
    conflictCheck(WRITER_FOR_WRITE, READER);
    // Writer (OPEN_OR_CREATE) opens a segment, then a reader tries to open.
    conflictCheck(WRITER_OPEN_OR_CREATE, READER);
    // Writer (CREATE_ONLY) opens a segment, then a reader tries to open.
    conflictCheck(WRITER_CREATE_ONLY, READER);

    // Writer opens the segment, then another writer (OPEN_FOR_WRITE) tries
    // to open.
    conflictCheck(WRITER_FOR_WRITE, WRITER_FOR_WRITE);
    // Writer opens the segment, then another writer (OPEN_OR_CREATE) tries
    // to open.
    conflictCheck(WRITER_FOR_WRITE, WRITER_OPEN_OR_CREATE);
    // Writer opens the segment, then another writer (CREATE_ONLY) tries
    // to open.
    conflictCheck(WRITER_FOR_WRITE, WRITER_CREATE_ONLY);
}

}