This repository was archived by the owner on Mar 22, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 76
/
Copy path: ebr.hpp
285 lines (246 loc) · 8.04 KB
/
ebr.hpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
/*-
* Copyright (c) 2015-2018 Mindaugas Rasiukevicius <rmind at noxt eu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2021-2022, Intel Corporation */
/**
* @file
* C++ EBR API.
*/
#ifndef LIBPMEMOBJ_EBR_HPP
#define LIBPMEMOBJ_EBR_HPP
#include <atomic>
#include <cassert>
#include <functional>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <libpmemobj++/detail/common.hpp>
namespace pmem
{
namespace detail
{
/**
* Epoch-based reclamation (EBR). Reference:
*
* K. Fraser, Practical lock-freedom,
* Technical Report UCAM-CL-TR-579, February 2004
* https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-579.pdf
*
* Summary:
*
* Any workers (threads or processes) actively referencing (accessing)
* the globally visible objects must do that in the critical path covered
* using the dedicated function. The grace period is determined using "epochs"
* implemented as a global counter (and, for example, a dedicated G/C list for
* each epoch). Objects in the current global epoch can be staged for
* reclamation (garbage collection). Then, the objects in the target epoch can
* be reclaimed after two successful increments of the global epoch. Only three
* epochs are needed (e, e-1 and e-2), therefore we use clock arithmetics.
*/
/*
 * EBR manager: tracks a global epoch counter plus one local-epoch slot per
 * registered worker thread. See the algorithm summary in the comment above.
 */
class ebr {
	/* Epoch slots are size_t atomics; the MSB marks "in critical path". */
	using atomic = std::atomic<size_t>;
	using reference = std::reference_wrapper<atomic>;

public:
	class worker;

	inline ebr();

	/* Registers the calling thread; throws if it already has a worker. */
	inline worker register_worker();
	/* Attempts to advance the global epoch (see definition for contract). */
	inline bool sync();
	/* Repeats sync() until a full grace period has elapsed. */
	inline void full_sync();
	/* Epoch into which new garbage should currently be staged. */
	inline size_t staging_epoch();
	/* Epoch whose garbage is now safe to reclaim. */
	inline size_t gc_epoch();

	/*
	 * Per-thread handle: movable, non-copyable; unregisters itself from
	 * the owning ebr instance on destruction.
	 */
	class worker {
	public:
		inline worker(const worker &w) = delete;
		inline worker(worker &&w) = default;
		inline ~worker();

		inline worker &operator=(worker &w) = delete;
		inline worker &operator=(worker &&w) = default;

		/* Runs f() inside the epoch-protected critical path. */
		template <typename F>
		void critical(F &&f);

	private:
		/* Private: constructed only via ebr::register_worker(). */
		worker(ebr *e_, reference ref);

		/* This thread's epoch slot inside ebr::workers. */
		reference local_epoch;
		/* Owning ebr instance (used to unregister in the destructor). */
		ebr *e;

		friend ebr;
	};

private:
	/* MSB of size_t: set while the owning worker executes critical(). */
	static const size_t ACTIVE_FLAG = static_cast<size_t>(1)
		<< (sizeof(size_t) * 8 - 1);
	/* Only three epochs (e, e-1, e-2) are needed; arithmetic is mod 3. */
	static const size_t EPOCHS_NUMBER = 3;

	atomic global_epoch;
	/* One epoch slot per registered thread; guarded by mtx. */
	std::unordered_map<std::thread::id, atomic> workers;
	/* Protects the workers map (registration, unregistration, sync scan). */
	std::mutex mtx;
};
/**
 * Default and only ebr constructor. Starts in epoch 0 with no registered
 * workers.
 */
inline ebr::ebr() : global_epoch(0)
{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	/* Epoch accesses go through atomics; tell Helgrind not to report
	 * races on this variable. */
	VALGRIND_HG_DISABLE_CHECKING(&global_epoch, sizeof(global_epoch));
#endif
}
/**
 * Creates a worker bound to the calling thread and inserts its epoch slot
 * into the internal worker table. At most one worker may exist per thread;
 * the worker removes itself from the table when destroyed.
 *
 * @throw runtime_error if the calling thread already owns a worker.
 *
 * @return a freshly registered worker handle.
 */
inline ebr::worker
ebr::register_worker()
{
	std::lock_guard<std::mutex> guard(mtx);

	auto inserted = workers.emplace(std::this_thread::get_id(), 0);
	if (!inserted.second) {
		throw std::runtime_error(
			"There can be only one worker per thread");
	}

	return worker{this, reference{inserted.first->second}};
}
/**
 * Attempts to announce a new global epoch. The attempt succeeds only if no
 * worker is still active inside an epoch older than the current one.
 *
 * Synchronisation points must be serialized (e.g. with multiple G/C workers
 * or other writers): calls to staging_epoch()/gc_epoch() belong to the same
 * serialized path, and running them concurrently with sync() in another
 * thread is undefined behavior.
 *
 * @return true if a new epoch was announced, false if some worker is still
 * active in an older epoch.
 */
inline bool
ebr::sync()
{
	auto observed = global_epoch.load();

	std::lock_guard<std::mutex> guard(mtx);
	for (auto &entry : workers) {
		LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(
			std::memory_order_seq_cst, &entry.second);
		auto thread_epoch = entry.second.load();
		if (!(thread_epoch & ACTIVE_FLAG))
			continue; /* idle worker cannot hold the epoch back */
		if (thread_epoch != (observed | ACTIVE_FLAG))
			return false; /* still running in an older epoch */
	}

	LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(std::memory_order_seq_cst,
					       &global_epoch);
	global_epoch.store((observed + 1) % EPOCHS_NUMBER);

	return true;
}
/**
 * Performs a full synchronisation: any object no longer globally visible
 * (and potentially staged for reclamation) at the time of this call is safe
 * to reclaim/destroy once this routine returns. Note: the synchronisation
 * may span multiple epochs.
 */
inline void
ebr::full_sync()
{
	/* A full grace period requires EPOCHS_NUMBER successful epoch
	 * advances; failed attempts are simply retried. */
	size_t advanced = 0;
	do {
		if (sync())
			++advanced;
	} while (advanced < EPOCHS_NUMBER);
}
/**
 * Returns the epoch in which objects can be staged for reclamation. Use it
 * as the reference value for a pending queue/tag, postponing reclamation
 * until this epoch becomes available for G/C. Normally serialized together
 * with the ebr::sync() calls.
 *
 * @return the epoch where objects can be staged for reclamation.
 */
inline size_t
ebr::staging_epoch()
{
	auto epoch = global_epoch.load();
	LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
					      &global_epoch);
	return epoch;
}
/**
 * Returns the epoch available for reclamation, i.e. the epoch whose objects
 * are guaranteed safe to reclaim/destroy. Normally serialized together with
 * the ebr::sync() calls.
 *
 * @return the epoch available for reclamation.
 */
inline size_t
ebr::gc_epoch()
{
	/* With three epochs, (current + 1) mod 3 is the oldest one. */
	auto reclaimable = (global_epoch.load() + 1) % EPOCHS_NUMBER;
	LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
					      &global_epoch);
	return reclaimable;
}
/*
 * Binds the worker to its ebr instance and to this thread's epoch slot.
 * Called only from ebr::register_worker() (the constructor is private).
 */
inline ebr::worker::worker(ebr *e_, reference ref) : local_epoch(ref), e(e_)
{
#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
	/* The epoch slot is accessed via atomics; suppress Helgrind reports
	 * on it. */
	VALGRIND_HG_DISABLE_CHECKING(&ref.get(), sizeof(ref.get()));
#endif
}
/**
 * Unregisters this worker from the owning ebr's worker table. All workers
 * must be destroyed before the ebr object itself is destroyed.
 */
inline ebr::worker::~worker()
{
	const auto tid = std::this_thread::get_id();

	std::lock_guard<std::mutex> guard(e->mtx);
	e->workers.erase(tid);
}
/**
 * Performs critical operations. Typically, this would be used by the readers
 * when accessing some shared data. Reclamation of objects is guaranteed not to
 * occur in the critical path.
 *
 * @param[in] f the function which will be executed as a critical operation.
 * This function's signature should be void().
 */
template <typename F>
void
ebr::worker::critical(F &&f)
{
	/* Publish "active in the current global epoch" BEFORE running f;
	 * sync() refuses to advance past an active worker in an old epoch. */
	auto new_epoch = e->global_epoch.load() | ACTIVE_FLAG;
	LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
					      &(e->global_epoch));
	local_epoch.get().store(new_epoch);
	LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(std::memory_order_seq_cst,
					      &local_epoch);
	f();
	/* Clear ACTIVE_FLAG (and the stored epoch) so sync() can proceed. */
	local_epoch.get().store(0);
}
} /* namespace detail */
} /* namespace pmem */
#endif /* LIBPMEMOBJ_EBR_HPP */