Skip to content

Commit

Permalink
Transactional hash map hash_map_tm test
Browse files Browse the repository at this point in the history
  • Loading branch information
polytypic committed Sep 24, 2019
1 parent e0a87f8 commit 58b70b2
Show file tree
Hide file tree
Showing 6 changed files with 328 additions and 4 deletions.
4 changes: 3 additions & 1 deletion internals/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,4 @@
add_conventional_library(testing PUBLIC trade_v1)
add_conventional_library(testing)
target_link_libraries(testing INTERFACE dumpster_v1 trade_v1)

add_conventional_executable_tests(PRIVATE testing trade_v1 testing_v1 std_thread)
4 changes: 3 additions & 1 deletion internals/include/testing/config.hpp
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
#pragma once

#include "dumpster_v1/config.hpp"
#include "trade_v1/config.hpp"

namespace testing {

namespace dumpster = dumpster_v1;
namespace trade = trade_v1;

}
} // namespace testing
231 changes: 231 additions & 0 deletions internals/include/testing/hash_map_tm.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,231 @@
#pragma once

#include "testing/config.hpp"

#include "trade_v1/trade.hpp"

#include "dumpster_v1/primes.hpp"

#include "polyfill_v1/memory.hpp"
#include <functional>
#include <optional>
#include <utility>

namespace testing {

/// A transactional hash map for testing purposes.
///
/// Keys are hashed with `Hash` and compared with `Equal`; all mutable
/// state lives in `trade::atom`s so every operation runs as a
/// `trade_v1` transaction.
template <class Key,
          class Mapped,
          class Hash = std::hash<Key>,
          class Equal = std::equal_to<Key>>
class hash_map_tm;

/// Implementation details shared by all `hash_map_tm` instantiations.
class hash_map_tm_private {
  template <class, class, class, class> friend class hash_map_tm;

  // This hack is a workaround for not having std::shared_ptr<T[]> support
  // in AppleClang.  No `array_hack` object is ever constructed: a pointer
  // obtained from `new T[n]` is reinterpret_cast to `array_hack<T> *`, and
  // the custom operator delete routes destruction back through `delete[]`
  // so the shared_ptr's default deleter frees the whole array.
  template <class T> struct array_hack {
    void operator delete(void *self) { delete[] reinterpret_cast<T *>(self); }
    T &at(size_t i) { return reinterpret_cast<T *>(this)[i]; }
  };
};

/// A transactional hash map implemented with separate chaining:
/// `m_buckets` points at an array of `m_buckets_count` bucket heads, each
/// a transactional link to a singly-linked chain of nodes.
template <class Key, class Mapped, class Hash, class Equal>
class hash_map_tm : hash_map_tm_private {
  template <class T> using ptr_t = std::shared_ptr<T>;

  struct node_t;

  // Transactional pointer to a node (bucket head or in-chain next link).
  using link = trade::atom<std::shared_ptr<node_t>>;

  trade::atom<size_t> m_item_count;    // number of key-value pairs stored
  trade::atom<size_t> m_buckets_count; // capacity of the bucket array
  trade::atom<ptr_t<array_hack<link>>> m_buckets; // bucket array; null until
                                                  // the first insertion

public:
  using size_type = size_t;

  using key_type = Key;
  using mapped_type = Mapped;

  /// Constructs an empty map; no buckets are allocated yet.
  hash_map_tm();

  hash_map_tm(const hash_map_tm &) = delete;
  hash_map_tm &operator=(const hash_map_tm &) = delete;

  /// Returns the number of key-value pairs in the map.
  size_t size() const;

  /// Returns whether the map holds no key-value pairs.
  bool empty() const;

  /// Removes all entries and releases the bucket array.
  void clear();

  /// Exchanges the entire contents of this map and `that` atomically.
  void swap(hash_map_tm &that);

  /// Inserts `key` mapping to `mapped`, or overwrites the mapped value if
  /// `key` is already present.  Returns true iff a new entry was inserted.
  /// `config` selects the transaction configuration (e.g. stack size).
  template <class ForwardableMapped, class Config = trade::stack_t<1024>>
  bool add_or_set(const Key &key,
                  ForwardableMapped &&mapped,
                  Config config = trade::stack<1024>);

  /// Returns the value mapped to `key`, or `std::nullopt` when absent.
  std::optional<Mapped> try_get(const Key &key) const;

  /// Removes the entry for `key`.  Returns true iff an entry was removed.
  bool remove(const Key &key);

#ifndef NDEBUG
  static std::atomic<size_t> s_live_nodes; // Only for testing purposes
#endif
};

// -----------------------------------------------------------------------------

// A bucket-chain node holding one key-value pair.  The key is immutable;
// the mapped value and the next-link are transactional.
template <class Key, class Mapped, class Hash, class Equal>
struct hash_map_tm<Key, Mapped, Hash, Equal>::node_t {
#ifndef NDEBUG
  ~node_t() { --s_live_nodes; } // balances the ++ in the constructor
#endif
  template <class ForwardableKey, class ForwardableMapped>
  node_t(ForwardableKey &&key, ForwardableMapped &&value)
      : m_next(nullptr), m_key(std::forward<ForwardableKey>(key)),
        m_mapped(std::forward<ForwardableMapped>(value)) {
#ifndef NDEBUG
    ++s_live_nodes; // debug-only leak accounting, checked by the tests
#endif
  }
  link m_next;                  // next node in this bucket's chain
  const Key m_key;              // immutable after construction
  trade::atom<Mapped> m_mapped; // transactionally updatable value
};

//

// Constructs an empty map: zero items and no bucket array allocated yet;
// add_or_set allocates buckets on the first insertion.
template <class Key, class Mapped, class Hash, class Equal>
hash_map_tm<Key, Mapped, Hash, Equal>::hash_map_tm()
    : m_item_count(0), m_buckets_count(0), m_buckets(nullptr) {}

template <class Key, class Mapped, class Hash, class Equal>
size_t hash_map_tm<Key, Mapped, Hash, Equal>::size() const {
  // Read-only transaction: only loads the item counter.
  return trade::atomically(trade::assume_readonly,
                           [&]() { return m_item_count.load(); });
}

// Returns whether the map currently holds no items.  A read-only
// transaction suffices because only the item counter is inspected.
template <class Key, class Mapped, class Hash, class Equal>
bool hash_map_tm<Key, Mapped, Hash, Equal>::empty() const {
  return trade::atomically(trade::assume_readonly, [&]() {
    return m_item_count.load() == 0;
  });
}

template <class Key, class Mapped, class Hash, class Equal>
void hash_map_tm<Key, Mapped, Hash, Equal>::clear() {
  trade::atomically([&]() {
    m_item_count = 0;
    m_buckets_count = 0;
    // Dropping the bucket array releases every node through the
    // shared_ptr chains.
    m_buckets = nullptr;
  });
}

template <class Key, class Mapped, class Hash, class Equal>
void hash_map_tm<Key, Mapped, Hash, Equal>::swap(hash_map_tm &that) {
  trade::atomically([&]() {
    // `ref()` exposes each atom's underlying storage so all three fields
    // can be exchanged within a single transaction.
    std::swap(m_item_count.ref(), that.m_item_count.ref());
    std::swap(m_buckets_count.ref(), that.m_buckets_count.ref());
    std::swap(m_buckets.ref(), that.m_buckets.ref());
  });
}

template <class Key, class Mapped, class Hash, class Equal>
template <class ForwardableMapped, class Config>
bool hash_map_tm<Key, Mapped, Hash, Equal>::add_or_set(
    const Key &key, ForwardableMapped &&mapped, Config config) {
  // Hash outside the transaction so it is not recomputed on retries.
  auto key_hash = Hash()(key);

  return trade::atomically(config, [&]() {
    auto item_count = m_item_count.load();
    auto buckets_count = m_buckets_count.load();
    auto buckets = m_buckets.load();

    // Grow (or initially create) the bucket array once the load factor
    // reaches 1.  The empty map (0 buckets, 0 items) also takes this
    // path, so `buckets_count` is non-zero below.
    if (buckets_count <= item_count) {
      auto old_buckets = std::move(buckets);
      auto old_buckets_count = buckets_count;

      m_buckets_count = buckets_count =
          dumpster::prime_less_than_next_pow_2_or_1(old_buckets_count * 2 + 1);
      m_buckets = buckets = ptr_t<array_hack<link>>(
          reinterpret_cast<array_hack<link> *>(new link[buckets_count]));

      // Rehash: splice every node from the old chains onto the front of
      // its new bucket, moving shared_ptrs instead of copying them.
      for (size_t i = 0; i < old_buckets_count; ++i) {
        auto work = old_buckets->at(i).load();
        while (work) {
          auto &ref_next = work->m_next.ref();
          auto &ref_bucket =
              buckets->at(Hash()(work->m_key) % buckets_count).ref();
          auto next = std::move(ref_next);
          ref_next = std::move(ref_bucket);
          ref_bucket = std::move(work);
          work = std::move(next);
        }
      }
    }

    // Walk the chain for this key's bucket: overwrite on match, append a
    // new node at the end otherwise.
    auto prev = &buckets->at(key_hash % buckets_count);
    while (true) {
      if (auto node = prev->load()) {
        if (Equal()(node->m_key, key)) {
          node->m_mapped = std::forward<ForwardableMapped>(mapped);
          return false; // existing entry updated
        } else {
          prev = &node->m_next;
        }
      } else {
        prev->ref().reset(
            new node_t(key, std::forward<ForwardableMapped>(mapped)));
        m_item_count = item_count + 1;
        return true; // new entry inserted
      }
    }
  });
}

template <class Key, class Mapped, class Hash, class Equal>
std::optional<Mapped>
hash_map_tm<Key, Mapped, Hash, Equal>::try_get(const Key &key) const {
  // Hash outside the transaction so it is not recomputed on retries.
  auto key_hash = Hash()(key);
  return trade::atomically(
      trade::assume_readonly, [&]() -> std::optional<Mapped> {
        // A zero bucket count means nothing was ever inserted (or the map
        // was cleared), so the bucket array may be null and is not touched.
        if (auto buckets_count = m_buckets_count.load())
          for (auto node =
                   m_buckets.load()->at(key_hash % buckets_count).load();
               node;
               node = node->m_next)
            if (Equal()(node->m_key, key))
              return node->m_mapped.load();
        return std::nullopt;
      });
}

/// Removes the entry for `key`, if present, and keeps the item counter in
/// sync so `size()` stays accurate.  Returns true iff an entry was removed.
template <class Key, class Mapped, class Hash, class Equal>
bool hash_map_tm<Key, Mapped, Hash, Equal>::remove(const Key &key) {
  // Hash outside the transaction so it is not recomputed on retries.
  auto key_hash = Hash()(key);
  return trade::atomically([&]() {
    // A zero bucket count means the map is empty; the bucket array may be
    // null and must not be dereferenced.
    if (auto buckets_count = m_buckets_count.load()) {
      auto prev = &m_buckets.load()->at(key_hash % buckets_count);
      while (true) {
        auto node = prev->load();
        if (!node)
          break; // end of chain: key not present
        if (Equal()(node->m_key, key)) {
          // Unlink the node and account for the removal.  BUG FIX: the
          // original never decremented m_item_count, so size() over-
          // reported after removals and the resize heuristic in
          // add_or_set was skewed.
          *prev = node->m_next;
          m_item_count = m_item_count.load() - 1;
          return true;
        }
        prev = &node->m_next;
      }
    }
    return false;
  });
}

#ifndef NDEBUG
// Debug-only count of live node_t instances; tests verify it returns to
// zero to prove no nodes leak.
template <class Key, class Mapped, class Hash, class Equal>
std::atomic<size_t> hash_map_tm<Key, Mapped, Hash, Equal>::s_live_nodes = 0;
#endif

} // namespace testing
22 changes: 22 additions & 0 deletions internals/include/testing/memory.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
#pragma once

#include "trade_v1/trade.hpp"

namespace testing {

// NOTE(review): both classes below are empty sketches — presumably
// placeholders for unique/shared ownership wrappers to be fleshed out
// later; confirm intent before relying on them.
template <class Object> class unique {};

template <class Object> class shared {};

} // namespace testing

namespace trade_v1 {

using namespace testing;

// Sketch of an `atom` specialization for uniquely owned objects where
// loading yields shared ownership.  `load()` is declared but not defined
// here — NOTE(review): looks like an API experiment; confirm before use.
template <class Object> class atom<unique<Object>> {

  shared<Object> load() const;
};

} // namespace trade_v1
4 changes: 2 additions & 2 deletions internals/testing/contention_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,11 @@ using namespace trade_v1;

auto contention_test = test([]() {
const size_t n_threads = std::thread::hardware_concurrency();
const size_t n_ops = 100000;
const size_t n_ops = 1000000;

atom<size_t> n_threads_started = 0, n_threads_stopped = 0;

constexpr size_t n_atoms = 7;
constexpr size_t n_atoms = 7000;

std::unique_ptr<atom<int>[]> atoms(new atom<int>[n_atoms]);
for (size_t i = 0; i < n_atoms; ++i)
Expand Down
67 changes: 67 additions & 0 deletions internals/testing/hash_map_tm_test.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
#include "testing/hash_map_tm.hpp"

#include "testing_v1/test.hpp"

#include "dumpster_v1/ranqd1.hpp"

#include "polyfill_v1/memory.hpp"
#include <thread>

using namespace testing_v1;

using namespace testing;
using namespace trade;

// Stress test: hardware_concurrency() worker threads hammer one shared
// hash_map_tm with add_or_set over a small key range, then the final
// contents, swap/clear behavior, and node accounting are verified.
auto hash_map_test = test([]() {
  const size_t n_threads = std::thread::hardware_concurrency();
  const size_t n_ops = 100000;
  const uint32_t max_keys = 31; // small range => heavy per-key contention

  using hash_map_tm_type = hash_map_tm<uint32_t, size_t>;

  hash_map_tm_type map;

  // Transactional count of workers that have finished all their ops.
  atom<size_t> done(0);

  auto start = std::chrono::high_resolution_clock::now();

  for (size_t t = 0; t < n_threads; ++t)
    std::thread([&, t]() {
      // Per-thread PRNG state seeded with the thread index.
      auto s = static_cast<uint32_t>(t);

      for (size_t i = 0; i < n_ops; ++i) {
        uint32_t key = (s = dumpster::ranqd1(s)) % max_keys;
        map.add_or_set(key, t, trade::stack<8192>);
      }

      atomically([&]() { done.ref() += 1; });
    }).detach(); // detached: completion is signaled via `done`, not join

  // Block — transactionally retrying — until every worker has finished.
  atomically(assume_readonly, [&]() {
    if (done != n_threads)
      retry();
  });

  std::chrono::duration<double> elapsed =
      std::chrono::high_resolution_clock::now() - start;
  auto n_total = n_ops * n_threads;
  fprintf(stderr,
          "%f Mops in %f s = %f Mops/s\n",
          n_total / 1000000.0,
          elapsed.count(),
          n_total / elapsed.count() / 1000000.0);

  // Every key in [0, max_keys) should have been hit at least once.
  verify(map.size() == max_keys);

  {
    // swap moves all contents into `other`, leaving `map` empty.
    hash_map_tm_type other;
    map.swap(other);
    verify(other.size() == max_keys);
    verify(map.size() == 0);
    other.clear();
  }

#ifndef NDEBUG
  // Both maps are now empty, so no node_t instances may remain alive.
  verify(!hash_map_tm_type::s_live_nodes);
#endif
});

0 comments on commit 58b70b2

Please sign in to comment.