Skip to content

Commit 916d921

Browse files
committed
Encapsulate the pair of pointers with an atomic switch into a class
1 parent d0537a9 commit 916d921

File tree

2 files changed

+40
-27
lines changed

2 files changed

+40
-27
lines changed

bindings/profilers/wall.cc

Lines changed: 4 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -560,7 +560,6 @@ WallProfiler::WallProfiler(std::chrono::microseconds samplingPeriod,
560560
contexts_.reserve(duration * 2 / samplingPeriod);
561561
}
562562

563-
curContext_.store(&context1_, std::memory_order_relaxed);
564563
collectionMode_.store(CollectionMode::kNoCollect, std::memory_order_relaxed);
565564
gcCount.store(0, std::memory_order_relaxed);
566565

@@ -1006,28 +1005,13 @@ v8::CpuProfiler* WallProfiler::CreateV8CpuProfiler() {
10061005
}
10071006

10081007
// Returns the context value last stored via SetContext for this profiler,
// or v8::Undefined when no context is currently set.
v8::Local<v8::Value> WallProfiler::GetContext(Isolate* isolate) {
  // Get() hands back a copy of the currently published shared_ptr.
  auto context = curContext_.Get();
  if (!context) return v8::Undefined(isolate);
  return context->Get(isolate);
}
10131012

10141013
// Stores |value| as the current profiling context. The double-buffered
// atomic switch inside AtomicContextPtr makes the update safe against the
// signal handler that concurrently reads the context (shared_ptr assignment
// itself is not atomic).
void WallProfiler::SetContext(Isolate* isolate, Local<Value> value) {
  curContext_.Set(isolate, value);
}
10321016

10331017
NAN_GETTER(WallProfiler::GetContext) {
@@ -1102,10 +1086,10 @@ void WallProfiler::PushContext(int64_t time_from,
11021086
// Be careful this is called in a signal handler context therefore all
11031087
// operations must be async signal safe (in particular no allocations).
11041088
// Our ring buffer avoids allocations.
1105-
auto context = curContext_.load(std::memory_order_relaxed);
1089+
auto context = curContext_.Get();
11061090
std::atomic_signal_fence(std::memory_order_acquire);
11071091
if (contexts_.size() < contexts_.capacity()) {
1108-
contexts_.push_back({*context, time_from, time_to, cpu_time, async_id});
1092+
contexts_.push_back({context, time_from, time_to, cpu_time, async_id});
11091093
std::atomic_fetch_add_explicit(
11101094
reinterpret_cast<std::atomic<uint32_t>*>(&fields_[kSampleCount]),
11111095
1U,

bindings/profilers/wall.hh

Lines changed: 36 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -36,26 +36,55 @@ struct Result {
3636
std::string msg;
3737
};
3838

39+
// Shared ownership of a V8 value kept alive across GC via a Global handle.
using ContextPtr = std::shared_ptr<v8::Global<v8::Value>>;

/**
 * Class that allows atomic updates to a ContextPtr. Since update of shared_ptr
 * is not atomic, we use a pointer that alternates between pointing to one of
 * two shared_ptrs instead, and we use atomic operations to update the pointer.
 *
 * Readers (notably the profiler's signal handler, which may interrupt Set on
 * the same thread) only ever observe a fully-constructed shared_ptr: Set
 * writes into the slot that is NOT currently published and only then flips
 * currentPtr.
 */
class AtomicContextPtr {
  // Double buffer: exactly one of these is "published" at any time; the
  // other is the scratch slot the next Set writes into.
  ContextPtr ptr1;
  ContextPtr ptr2;
  // Always points at ptr1 or ptr2; this pointer is the only thing that is
  // read/written atomically.
  std::atomic<ContextPtr*> currentPtr = &ptr1;

  // Publishes |value| (or clears the context if |value| is null/undefined).
  // Not reentrant: assumes a single writer thread; concurrency with readers
  // is handled by the fence + pointer flip below.
  void Set(v8::Isolate* isolate, v8::Local<v8::Value> value) {
    auto oldPtr = currentPtr.load(std::memory_order_relaxed);
    std::atomic_signal_fence(std::memory_order_acquire);
    // Pick whichever slot is not currently visible to readers.
    auto newPtr = oldPtr == &ptr1 ? &ptr2 : &ptr1;
    if (!value->IsNullOrUndefined()) {
      *newPtr = std::make_shared<v8::Global<v8::Value>>(isolate, value);
    } else {
      // Null/undefined means "no context": publish an empty shared_ptr.
      newPtr->reset();
    }
    // Signal fences constrain compiler reordering only (sufficient for a
    // same-thread signal handler): the shared_ptr assignment above must not
    // be moved past the pointer switch below.
    std::atomic_signal_fence(std::memory_order_release);
    currentPtr.store(newPtr, std::memory_order_relaxed);
    std::atomic_signal_fence(std::memory_order_release);
  }

  // Returns a copy of the currently published context, or an empty
  // ContextPtr if none. Called from the signal handler; performs no
  // allocation (copying a shared_ptr only bumps its refcount).
  ContextPtr Get() {
    auto ptr = currentPtr.load(std::memory_order_relaxed);
    std::atomic_signal_fence(std::memory_order_acquire);
    return ptr ? *ptr : ContextPtr();
  }

  // Set/Get are private; WallProfiler is the sole intended client.
  friend class WallProfiler;
};
73+
3974
class WallProfiler : public Nan::ObjectWrap {
4075
public:
4176
enum class CollectionMode { kNoCollect, kPassThrough, kCollectContexts };
4277

4378
private:
4479
enum Fields { kSampleCount, kFieldCount };
4580

46-
using ContextPtr = std::shared_ptr<v8::Global<v8::Value>>;
47-
4881
std::chrono::microseconds samplingPeriod_{0};
4982
v8::CpuProfiler* cpuProfiler_ = nullptr;
5083
// TODO: Investigate use of v8::Persistent instead of shared_ptr<Global> to
5184
// avoid heap allocation. Need to figure out the right move/copy semantics in
5285
// and out of the ring buffer.
5386

54-
// We're using a pair of shared pointers and an atomic pointer-to-current as
55-
// a way to ensure signal safety on update.
56-
ContextPtr context1_;
57-
ContextPtr context2_;
58-
std::atomic<ContextPtr*> curContext_;
87+
AtomicContextPtr curContext_;
5988

6089
std::atomic<int> gcCount = 0;
6190
double gcAsyncId;

0 commit comments

Comments
 (0)