Skip to content

Commit 1ee92d0

Browse files
committed
Encapsulate the pair of pointers with atomic switch into a class
1 parent c57c5a6 commit 1ee92d0

File tree

2 files changed

+40
-27
lines changed

2 files changed

+40
-27
lines changed

bindings/profilers/wall.cc

Lines changed: 4 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -560,7 +560,6 @@ WallProfiler::WallProfiler(std::chrono::microseconds samplingPeriod,
560560
contexts_.reserve(duration * 2 / samplingPeriod);
561561
}
562562

563-
curContext_.store(&context1_, std::memory_order_relaxed);
564563
collectionMode_.store(CollectionMode::kNoCollect, std::memory_order_relaxed);
565564
gcCount.store(0, std::memory_order_relaxed);
566565

@@ -1007,28 +1006,13 @@ v8::CpuProfiler* WallProfiler::CreateV8CpuProfiler() {
10071006
}
10081007

10091008
v8::Local<v8::Value> WallProfiler::GetContext(Isolate* isolate) {
1010-
auto context = *curContext_.load(std::memory_order_relaxed);
1009+
auto context = curContext_.Get();
10111010
if (!context) return v8::Undefined(isolate);
10121011
return context->Get(isolate);
10131012
}
10141013

10151014
void WallProfiler::SetContext(Isolate* isolate, Local<Value> value) {
1016-
// Need to be careful here, because we might be interrupted by a
1017-
// signal handler that will make use of curContext_.
1018-
// Update of shared_ptr is not atomic, so instead we use a pointer
1019-
// (curContext_) that points on two shared_ptr (context1_ and context2_),
1020-
// update the shared_ptr that is not currently in use and then atomically
1021-
// update curContext_.
1022-
auto newCurContext = curContext_.load(std::memory_order_relaxed) == &context1_
1023-
? &context2_
1024-
: &context1_;
1025-
if (!value->IsNullOrUndefined()) {
1026-
*newCurContext = std::make_shared<Global<Value>>(isolate, value);
1027-
} else {
1028-
newCurContext->reset();
1029-
}
1030-
std::atomic_signal_fence(std::memory_order_release);
1031-
curContext_.store(newCurContext, std::memory_order_relaxed);
1015+
curContext_.Set(isolate, value);
10321016
}
10331017

10341018
NAN_GETTER(WallProfiler::GetContext) {
@@ -1109,10 +1093,10 @@ void WallProfiler::PushContext(int64_t time_from,
11091093
// Be careful this is called in a signal handler context therefore all
11101094
// operations must be async signal safe (in particular no allocations).
11111095
// Our ring buffer avoids allocations.
1112-
auto context = curContext_.load(std::memory_order_relaxed);
1096+
auto context = curContext_.Get();
11131097
std::atomic_signal_fence(std::memory_order_acquire);
11141098
if (contexts_.size() < contexts_.capacity()) {
1115-
contexts_.push_back({*context, time_from, time_to, cpu_time, async_id});
1099+
contexts_.push_back({context, time_from, time_to, cpu_time, async_id});
11161100
std::atomic_fetch_add_explicit(
11171101
reinterpret_cast<std::atomic<uint32_t>*>(&fields_[kSampleCount]),
11181102
1U,

bindings/profilers/wall.hh

Lines changed: 36 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -36,26 +36,55 @@ struct Result {
3636
std::string msg;
3737
};
3838

39+
using ContextPtr = std::shared_ptr<v8::Global<v8::Value>>;
40+
41+
/**
42+
* Class that allows atomic updates to a ContextPtr. Since update of shared_ptr
43+
* is not atomic, we use a pointer that alternates between pointing to one of
44+
* two shared_ptrs instead, and we use atomic operations to update the pointer.
45+
*/
46+
class AtomicContextPtr {
47+
ContextPtr ptr1;
48+
ContextPtr ptr2;
49+
std::atomic<ContextPtr*> currentPtr = &ptr1;
50+
51+
void Set(v8::Isolate* isolate, v8::Local<v8::Value> value) {
52+
auto oldPtr = currentPtr.load(std::memory_order_relaxed);
53+
std::atomic_signal_fence(std::memory_order_acquire);
54+
auto newPtr = oldPtr == &ptr1 ? &ptr2 : &ptr1;
55+
if (!value->IsNullOrUndefined()) {
56+
*newPtr = std::make_shared<v8::Global<v8::Value>>(isolate, value);
57+
} else {
58+
newPtr->reset();
59+
}
60+
std::atomic_signal_fence(std::memory_order_release);
61+
currentPtr.store(newPtr, std::memory_order_relaxed);
62+
std::atomic_signal_fence(std::memory_order_release);
63+
}
64+
65+
ContextPtr Get() {
66+
auto ptr = currentPtr.load(std::memory_order_relaxed);
67+
std::atomic_signal_fence(std::memory_order_acquire);
68+
return ptr ? *ptr : ContextPtr();
69+
}
70+
71+
friend class WallProfiler;
72+
};
73+
3974
class WallProfiler : public Nan::ObjectWrap {
4075
public:
4176
enum class CollectionMode { kNoCollect, kPassThrough, kCollectContexts };
4277

4378
private:
4479
enum Fields { kSampleCount, kFieldCount };
4580

46-
using ContextPtr = std::shared_ptr<v8::Global<v8::Value>>;
47-
4881
std::chrono::microseconds samplingPeriod_{0};
4982
v8::CpuProfiler* cpuProfiler_ = nullptr;
5083
// TODO: Investigate use of v8::Persistent instead of shared_ptr<Global> to
5184
// avoid heap allocation. Need to figure out the right move/copy semantics in
5285
// and out of the ring buffer.
5386

54-
// We're using a pair of shared pointers and an atomic pointer-to-current as
55-
// a way to ensure signal safety on update.
56-
ContextPtr context1_;
57-
ContextPtr context2_;
58-
std::atomic<ContextPtr*> curContext_;
87+
AtomicContextPtr curContext_;
5988

6089
std::atomic<int> gcCount = 0;
6190
double gcAsyncId;

0 commit comments

Comments (0)