From dcf455c231a37ce190cb67992603c38d7f7a9b5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Jos=C3=A9=20Arboleda?= Date: Mon, 18 Mar 2024 17:54:03 -0500 Subject: [PATCH] nsolid: support Heap Sampling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for the "sampled" heapsnapshot. These new API methods in N|Solid allow users to sample allocations in the V8 heap; internally it does the calls to the V8 Profiler for start sampling and stop sampling. This patch requires a new mutex and a new thread map to manage which thread is currently sampling, same as CPU profiler and HeapSnapshot. Signed-off-by: Juan José Arboleda PR-URL: https://github.com/nodesource/nsolid/pull/103 --- src/nsolid.cc | 36 +- src/nsolid.h | 134 ++++++- src/nsolid/nsolid_heap_snapshot.cc | 332 +++++++++++++++++- src/nsolid/nsolid_heap_snapshot.h | 46 ++- test/addons/nsolid-custom-command/binding.cc | 4 +- test/addons/nsolid-heap-sampler/binding.cc | 152 ++++++++ test/addons/nsolid-heap-sampler/binding.gyp | 16 + .../nsolid-heap-sampler.js | 91 +++++ .../nsolid-track-heap-objects/binding.cc | 3 +- 9 files changed, 805 insertions(+), 9 deletions(-) create mode 100644 test/addons/nsolid-heap-sampler/binding.cc create mode 100644 test/addons/nsolid-heap-sampler/binding.gyp create mode 100644 test/addons/nsolid-heap-sampler/nsolid-heap-sampler.js diff --git a/src/nsolid.cc b/src/nsolid.cc index d0be80eaa32..ccf92a42d27 100644 --- a/src/nsolid.cc +++ b/src/nsolid.cc @@ -521,14 +521,46 @@ int CpuProfiler::get_cpu_profile_(SharedEnvInst envinst, deleter); } +int Snapshot::start_allocation_sampling_(SharedEnvInst envinst, + uint64_t sample_interval, + int stack_depth, + v8::HeapProfiler::SamplingFlags flags, + uint64_t duration, + internal::user_data data, + snapshot_proxy_sig proxy) { + return EnvList::Inst()->HeapSnapshot()->StartSamplingProfiler( + envinst, + sample_interval, + stack_depth, + flags, + duration, + std::move(data), + proxy); 
+} + +int Snapshot::StopSampling(SharedEnvInst envinst) { + if (envinst == nullptr) + return UV_ESRCH; + + return EnvList::Inst()->HeapSnapshot()->StopSamplingProfiler(envinst); +} + +int Snapshot::StopSamplingSync(SharedEnvInst envinst) { + if (envinst == nullptr) + return UV_ESRCH; + + return EnvList::Inst()->HeapSnapshot()->StopSamplingProfilerSync(envinst); +} + + int Snapshot::start_tracking_heap_objects_(SharedEnvInst envinst, bool redacted, - bool trackAllocations, + bool track_allocations, uint64_t duration, internal::user_data data, snapshot_proxy_sig proxy) { return EnvList::Inst()->HeapSnapshot()->StartTrackingHeapObjects( - envinst, redacted, trackAllocations, duration, std::move(data), proxy); + envinst, redacted, track_allocations, duration, std::move(data), proxy); } int Snapshot::StopTrackingHeapObjects(SharedEnvInst envinst) { diff --git a/src/nsolid.h b/src/nsolid.h index c45e12ecc42..7cbf5403dc0 100644 --- a/src/nsolid.h +++ b/src/nsolid.h @@ -3,6 +3,7 @@ #include "node.h" #include "uv.h" +#include "v8-profiler.h" #include #include @@ -1015,14 +1016,89 @@ class NODE_EXTERN Snapshot { */ static int StopTrackingHeapObjectsSync(SharedEnvInst envinst); + /** + * @brief Will start sampling heap allocations. + * + * @param envinst SharedEnvInst of thread to take a sampled snapshot from. + * @param duration duration in milliseconds of the heap profiler after which + * the sampled heap snapshot will be returned in the callback. + * @param cb callback function with the following signature: + * `cb(int status, std::string snapshot, ...Data)`. It will be called from the + * NSolid thread. + * @param data variable number of arguments to be propagated to the callback. + * @return NSOLID_E_SUCCESS in case of success or a different NSOLID_E_ + * error value otherwise. + */ + template + static int StartSampling(SharedEnvInst envinst, + uint64_t duration, + Cb&& cb, + Data&&... data); + + /** + * @brief Will start sampling heap allocations. 
+ * + * @param envinst SharedEnvInst of thread to take the snapshot from. + * @param sample_interval frequency (in bytes) at which the heap profiler + * samples are taken. Each allocation is sampled every sample_interval bytes + * allocated. + * @param stack_depth refers to the depth of the call stack that will be + * captured during heap profiling + * @param flags additional flags or options that can be used when + * starting heap profiling. See + * \link v8::HeapProfiler::SamplingFlags SamplingFlags\endlink + * for available flags. + * @param duration duration in milliseconds of the heap profiler after which + * the heap snapshot will be returned in the callback. + * @param cb callback function with the following signature: + * `cb(int status, std::string snapshot, ...Data)`. It will be called from the + * NSolid thread. + * @param data variable number of arguments to be propagated to the callback. + * @return NSOLID_E_SUCCESS in case of success or a different NSOLID_E_ + * error value otherwise. + */ + template + static int StartSampling(SharedEnvInst envinst, + uint64_t sample_interval, + int stack_depth, + v8::HeapProfiler::SamplingFlags flags, + uint64_t duration, + Cb&& cb, + Data&&... data); + + /** + * @brief Stops the HeapProfiler allocation sampler + * @param envinst SharedEnvInst of thread to take the snapshot from. + * @return NSOLID_E_SUCCESS in case of success or a different NSOLID_E_ + * error value otherwise. + */ + static int StopSampling(SharedEnvInst envinst); + + /** + * @brief Stops the HeapProfiler allocation sampler synchronously + * @param envinst SharedEnvInst of thread to take the snapshot from. + * @return NSOLID_E_SUCCESS in case of success or a different NSOLID_E_ + * error value otherwise. 
+ */ + static int StopSamplingSync(SharedEnvInst envinst); + private: static int start_tracking_heap_objects_(SharedEnvInst envinst, bool redacted, - bool trackAllocations, + bool track_allocations, uint64_t duration, internal::user_data data, snapshot_proxy_sig proxy); + static int start_allocation_sampling_(SharedEnvInst envinst, + uint64_t sample_interval, + int stack_depth, + v8::HeapProfiler::SamplingFlags flags, + uint64_t duration, + internal::user_data data, + snapshot_proxy_sig proxy); + + static int get_snapshot_(SharedEnvInst envinst, bool redacted, void* data, @@ -1219,6 +1295,62 @@ int Snapshot::StartTrackingHeapObjects(SharedEnvInst envinst, snapshot_proxy_); } + +template +int Snapshot::StartSampling(SharedEnvInst envinst, + uint64_t duration, + Cb&& cb, + Data&&... data) { + if (envinst == nullptr) { + return UV_ESRCH; + } + + // Use default profiler values + uint64_t sample_interval = 512 * 1024; + int stack_depth = 16; + return Snapshot::StartSampling(envinst, + sample_interval, + stack_depth, + v8::HeapProfiler::kSamplingNoFlags, + duration, + cb, + data...); +} + +template +int Snapshot::StartSampling(SharedEnvInst envinst, + uint64_t sample_interval, + int stack_depth, + v8::HeapProfiler::SamplingFlags flags, + uint64_t duration, + Cb&& cb, + Data&&... 
data) { + if (envinst == nullptr) { + return UV_ESRCH; + } + + // NOLINTNEXTLINE(build/namespaces) + using namespace std::placeholders; + using UserData = decltype(std::bind( + std::forward(cb), _1, _2, std::forward(data)...)); + + auto user_data = internal::user_data(new (std::nothrow) UserData( + std::bind(std::forward(cb), _1, _2, std::forward(data)...)), + internal::delete_proxy_); + if (user_data == nullptr) { + return UV_ENOMEM; + } + + return start_allocation_sampling_(envinst, + sample_interval, + stack_depth, + flags, + duration, + std::move(user_data), + snapshot_proxy_); +} + + template int Snapshot::TakeSnapshot(SharedEnvInst envinst, bool redacted, diff --git a/src/nsolid/nsolid_heap_snapshot.cc b/src/nsolid/nsolid_heap_snapshot.cc index 5f6c2381b28..7287ec92d60 100644 --- a/src/nsolid/nsolid_heap_snapshot.cc +++ b/src/nsolid/nsolid_heap_snapshot.cc @@ -8,11 +8,203 @@ namespace nsolid { NSolidHeapSnapshot::NSolidHeapSnapshot() { ASSERT_EQ(0, in_progress_heap_snapshots_.init(true)); + ASSERT_EQ(0, in_progress_heap_sampling_.init(true)); +} + +int NSolidHeapSnapshot::StartSamplingProfiler( + SharedEnvInst envinst, + uint64_t sample_interval, + int stack_depth, + v8::HeapProfiler::SamplingFlags samplingFlag, + uint64_t duration, + internal::user_data data, + Snapshot::snapshot_proxy_sig proxy) { + uint64_t thread_id = envinst->thread_id(); + uint64_t snaphot_id = + in_progress_timers_.fetch_add(1, std::memory_order_relaxed); + + nsuv::ns_mutex::scoped_lock lock(&in_progress_heap_sampling_); + auto it = threads_running_heap_sampling_.emplace( + thread_id, + HeapSamplerStor{sample_interval, + stack_depth, + samplingFlag, + duration, + kFlagIsSamplingHeap, + snaphot_id, + proxy, + std::move(data)}); + + if (it.second == false) { + return UV_EEXIST; + } + + int status = RunCommand(envinst, + CommandType::Interrupt, + start_sampling_profiler, + duration, + snaphot_id, + this); + + if (status != 0) { + threads_running_heap_sampling_.erase(it.first); + } + + 
return status; +} + +int NSolidHeapSnapshot::StopSamplingProfiler(SharedEnvInst envinst) { + uint64_t thread_id = envinst->thread_id(); + nsuv::ns_mutex::scoped_lock lock(&in_progress_heap_sampling_); + auto it = threads_running_heap_sampling_.find(thread_id); + // Make sure there is a snapshot running + if (it == threads_running_heap_sampling_.end() || + !(it->second.flags & kFlagIsSamplingHeap)) { + return UV_ENOENT; + } + + HeapSamplerStor& stor = it->second; + int er = RunCommand(envinst, + CommandType::Interrupt, + stop_sampling_profiler, + stor.snapshot_id, + this); + return er; +} + +namespace { +nlohmann::ordered_json build_sampling_heap_profile_node( + v8::Isolate* isolate, const v8::AllocationProfile::Node* node) { + nlohmann::ordered_json result; + + // Construct callFrame + json callFrame = { + {"functionName", *v8::String::Utf8Value(isolate, node->name)}, + {"scriptId", node->script_id}, + {"url", *v8::String::Utf8Value(isolate, node->script_name)}, + {"lineNumber", node->line_number - 1}, + {"columnNumber", node->column_number - 1} + }; + + // Calculate selfSize + size_t selfSize = 0; + for (const auto& allocation : node->allocations) + selfSize += allocation.size * allocation.count; + + // Add properties to result + result["callFrame"] = callFrame; + result["selfSize"] = selfSize; + result["id"] = node->node_id; + + // Add children if there are any + if (node->children.empty()) + return result; + + // Recursively build children + nlohmann::ordered_json children; + for (const auto* child : node->children) + children.push_back(build_sampling_heap_profile_node(isolate, child)); + result["children"] = children; + + return result; +} + +nlohmann::ordered_json build_v8_allocation_profile( + v8::Isolate* isolate, + v8::AllocationProfile* profile) { + // We need to parse the profile and convert it to a JSON string with the next + // schema, this does not support Serializing the profile to a string. 
+ // { head: SamplingHeapProfileNode, samples: [SamplingHeapProfileSample] } + nlohmann::ordered_json json_profile; + + v8::AllocationProfile::Node* root = profile->GetRootNode(); + nlohmann::ordered_json head = build_sampling_heap_profile_node(isolate, root); + json_profile["head"] = head; + + const std::vector& samples = + profile->GetSamples(); + + if (samples.empty()) { + json_profile["samples"] = nlohmann::ordered_json::array(); + return json_profile; + } + + for (const auto& sample : samples) { + nlohmann::ordered_json sample_json; + sample_json["size"] = sample.size * sample.count; + sample_json["nodeId"] = sample.node_id; + sample_json["ordinal"] = sample.sample_id; + json_profile["samples"].push_back(sample_json); + } + + return json_profile; +} +} // namespace + +int NSolidHeapSnapshot::StopSamplingProfilerSync(SharedEnvInst envinst) { + uint64_t thread_id = envinst->thread_id(); + nsuv::ns_mutex::scoped_lock lock(&in_progress_heap_sampling_); + auto it = threads_running_heap_sampling_.find(thread_id); + // Make sure there is a snapshot running + if (it == threads_running_heap_sampling_.end()) + return UV_ENOENT; + + HeapSamplerStor& stor = it->second; + // If this condition is reached. This was called by EnvList::RemoveEnv. + // It wants to stop any pending snapshot w/ tracking heap object. 
+ if (!(stor.flags & kFlagIsSamplingHeap) || stor.flags & kFlagIsDone) { + // If no pending trackers, just do nothing + // There are not pending snapshots with trackers + return UV_ENOENT; + } + + v8::Isolate* isolate = envinst->isolate(); + v8::HeapProfiler* profiler = isolate->GetHeapProfiler(); + ASSERT_NOT_NULL(profiler); + + v8::HandleScope scope(isolate); + + // Get the sampled heap profile + v8::AllocationProfile* sampled = profiler->GetAllocationProfile(); + + if (sampled == nullptr) { + QueueCallback(sampling_cb, + thread_id, + // TODO(juan) - this is not a heap snapshot failure + heap_profiler::HEAP_SNAPSHOT_FAILURE, + std::string(), + this); + } else { + nlohmann::ordered_json profile_json = + build_v8_allocation_profile(isolate, sampled); + + stor.cb(0, profile_json.dump(), stor.data.get()); + // Send the empty string to signal the end of the profile + stor.cb(0, std::string(), stor.data.get()); + + // V8 uses new, so we need to delete it + // This will make ASAN happy + delete sampled; + } + // Stop the profiler no matter what + profiler->StopSamplingHeapProfiler(); + + // Set the flag to done + stor.flags |= kFlagIsDone; + // Delete the snapshot from the map + threads_running_heap_sampling_.erase(it); + return 0; } NSolidHeapSnapshot::~NSolidHeapSnapshot() { - nsuv::ns_mutex::scoped_lock lock(&in_progress_heap_snapshots_); - threads_running_snapshots_.clear(); + { + nsuv::ns_mutex::scoped_lock lock(&in_progress_heap_snapshots_); + threads_running_snapshots_.clear(); + } + { + nsuv::ns_mutex::scoped_lock lock2(&in_progress_heap_sampling_); + threads_running_heap_sampling_.clear(); + } } int NSolidHeapSnapshot::StartTrackingHeapObjects( @@ -350,5 +542,141 @@ void NSolidHeapSnapshot::snapshot_cb(uint64_t thread_id, } } +void NSolidHeapSnapshot::sampling_cb(uint64_t thread_id, + int status, + const std::string snapshot, + NSolidHeapSnapshot* snapshotter) { + nsuv::ns_mutex::scoped_lock lock(&snapshotter->in_progress_heap_sampling_); + auto it = 
snapshotter->threads_running_heap_sampling_.find(thread_id); + ASSERT(it != snapshotter->threads_running_heap_sampling_.end()); + HeapSamplerStor& stor = it->second; + stor.cb(status, snapshot, stor.data.get()); + if (snapshot.empty()) { + snapshotter->threads_running_heap_sampling_.erase(it); + } +} + +void NSolidHeapSnapshot::start_sampling_profiler( + SharedEnvInst envinst, + uint64_t duration, + uint64_t snapshot_id, + NSolidHeapSnapshot* snapshotter) { + uint64_t thread_id = envinst->thread_id(); + nsuv::ns_mutex::scoped_lock lock(&snapshotter->in_progress_heap_sampling_); + auto it = snapshotter->threads_running_heap_sampling_.find(thread_id); + ASSERT(it != snapshotter->threads_running_heap_sampling_.end()); + + HeapSamplerStor& stor = it->second; + ASSERT(stor.flags & kFlagIsSamplingHeap); + + v8::Isolate* isolate = envinst->isolate(); + v8::HeapProfiler* profiler = isolate->GetHeapProfiler(); + ASSERT_NOT_NULL(profiler); + + profiler->StartSamplingHeapProfiler( + stor.sample_interval, + stor.stack_depth, + stor.sampling_flags); + + // Schedule a timer to take the snapshot + int er = QueueCallback(duration, + stop_sampling_profiler, + envinst, + snapshot_id, + snapshotter); + + if (er) { + // In case the thread is already gone, the heap sampling will be stopped + // on RemoveEnv, so do nothing here. 
+ } +} + +void NSolidHeapSnapshot::stop_sampling_profiler( + SharedEnvInst envinst, + uint64_t snapshot_id, + NSolidHeapSnapshot* snapshotter) { + uint64_t thread_id = envinst->thread_id(); + nsuv::ns_mutex::scoped_lock lock(&snapshotter->in_progress_heap_sampling_); + auto it = snapshotter->threads_running_heap_sampling_.find(thread_id); + if (it == snapshotter->threads_running_heap_sampling_.end()) { + // The profiler was stopped before the timer was triggered + return; + } + + HeapSamplerStor& stor = it->second; + if (stor.snapshot_id != snapshot_id) { + // The snapshot was stopped before the timer was triggered + return; + } + + ASSERT(stor.flags & kFlagIsSamplingHeap); + + // Give control back to the V8 thread + int er = RunCommand(envinst, + CommandType::Interrupt, + take_sampled_snapshot, + snapshotter); + if (er) { + // In case the thread is already gone, the heap sampling will be stopped + // on RemoveEnv, so do nothing here. + } +} + +void NSolidHeapSnapshot::take_sampled_snapshot( + SharedEnvInst envinst, + NSolidHeapSnapshot* snapshotter) { + v8::Isolate* isolate = envinst->isolate(); + v8::HeapProfiler* profiler = isolate->GetHeapProfiler(); + ASSERT_NOT_NULL(profiler); + + v8::HandleScope scope(isolate); + + uint64_t thread_id = envinst->thread_id(); + nsuv::ns_mutex::scoped_lock lock(&snapshotter->in_progress_heap_sampling_); + auto it = snapshotter->threads_running_heap_sampling_.find(thread_id); + ASSERT(it != snapshotter->threads_running_heap_sampling_.end()); + + HeapSamplerStor& stor = it->second; + // If this condition is reached. This was called by EnvList::RemoveEnv. + // It wants to stop any pending snapshot w/ tracking heap object. 
+ if (!(stor.flags & kFlagIsSamplingHeap) || stor.flags & kFlagIsDone) { + // If no pending trackers, just do nothing + // There are not pending snapshots with trackers + return; + } + + // Get the sampled heap profile + v8::AllocationProfile* sampled = profiler->GetAllocationProfile(); + if (sampled == nullptr) { + QueueCallback(sampling_cb, + thread_id, + // TODO(juan) - this is not a heap snapshot failure + heap_profiler::HEAP_SNAPSHOT_FAILURE, + std::string(), + snapshotter); + } else { + nlohmann::ordered_json profile_json = + build_v8_allocation_profile(isolate, sampled); + + QueueCallback(sampling_cb, + thread_id, + 0, + profile_json.dump(), + snapshotter); + QueueCallback(sampling_cb, + thread_id, + 0, + std::string(), + snapshotter); + + // V8 uses new, so we need to delete it + // This will make ASAN happy + delete sampled; + } + // Stop the profiler no matter what + profiler->StopSamplingHeapProfiler(); + stor.flags |= kFlagIsDone; +} + } // namespace nsolid } // namespace node diff --git a/src/nsolid/nsolid_heap_snapshot.h b/src/nsolid/nsolid_heap_snapshot.h index 416d065bdb9..34894638276 100644 --- a/src/nsolid/nsolid_heap_snapshot.h +++ b/src/nsolid/nsolid_heap_snapshot.h @@ -20,7 +20,8 @@ class NSolidHeapSnapshot { enum HeapSnapshotFlags { kFlagNone = 0, kFlagIsTrackingHeapObjects = 1 << 0, - kFlagIsDone = 1 << 1 + kFlagIsSamplingHeap = 1 << 1, + kFlagIsDone = 1 << 2 }; struct HeapSnapshotStor { @@ -31,6 +32,17 @@ class NSolidHeapSnapshot { internal::user_data data; }; + struct HeapSamplerStor { + uint64_t sample_interval; + int stack_depth; + v8::HeapProfiler::SamplingFlags sampling_flags; + uint64_t duration; + uint32_t flags; + uint64_t snapshot_id; + Snapshot::snapshot_proxy_sig cb; + internal::user_data data; + }; + NSOLID_DELETE_UNUSED_CONSTRUCTORS(NSolidHeapSnapshot) NSolidHeapSnapshot(); @@ -53,6 +65,18 @@ class NSolidHeapSnapshot { int StopTrackingHeapObjects(SharedEnvInst envinst); int StopTrackingHeapObjectsSync(SharedEnvInst envinst); + 
int StartSamplingProfiler(SharedEnvInst envinst, + uint64_t sampleInterval, + int stackDepth, + // TODO(juan) - this is a bitfield ask to reviewers + // if it is ok to use the v8 enum for this one. + v8::HeapProfiler::SamplingFlags samplingFlags, + uint64_t duration, + internal::user_data data, + Snapshot::snapshot_proxy_sig proxy); + + int StopSamplingProfiler(SharedEnvInst envinst); + int StopSamplingProfilerSync(SharedEnvInst envinst); private: static void start_tracking_heapobjects(SharedEnvInst envinst, @@ -64,6 +88,18 @@ class NSolidHeapSnapshot { static void stop_tracking_heap_objects(SharedEnvInst envinst_sp, NSolidHeapSnapshot*); + static void start_sampling_profiler(SharedEnvInst envinst, + uint64_t duration, + uint64_t snapshot_id, + NSolidHeapSnapshot*); + + static void stop_sampling_profiler(SharedEnvInst envinst_sp, + uint64_t snapshot_id, + NSolidHeapSnapshot*); + + static void take_sampled_snapshot(SharedEnvInst envinst_sp, + NSolidHeapSnapshot*); + static void take_snapshot(SharedEnvInst envinst_sp, NSolidHeapSnapshot*); static void take_snapshot_timer(SharedEnvInst envinst_sp, @@ -75,9 +111,17 @@ class NSolidHeapSnapshot { const std::string& snapshot, NSolidHeapSnapshot* snapshotter); + static void sampling_cb(uint64_t thread_id, + int status, + const std::string snapshot, + NSolidHeapSnapshot* snapshotter); + std::map threads_running_snapshots_; nsuv::ns_mutex in_progress_heap_snapshots_; std::atomic in_progress_timers_{0}; + + std::map threads_running_heap_sampling_; + nsuv::ns_mutex in_progress_heap_sampling_; }; } // namespace nsolid diff --git a/test/addons/nsolid-custom-command/binding.cc b/test/addons/nsolid-custom-command/binding.cc index 36ab48ae7d3..89945e7fb0f 100644 --- a/test/addons/nsolid-custom-command/binding.cc +++ b/test/addons/nsolid-custom-command/binding.cc @@ -66,11 +66,11 @@ static void custom_command_cb(std::string req_id, NewStringType::kNormal).ToLocalChecked(); } - fn->Call(context, + v8::MaybeLocal ret = 
fn->Call(context, Undefined(isolate), 5, argv); - + assert(!ret.IsEmpty()); cb_map_.erase(iter); } diff --git a/test/addons/nsolid-heap-sampler/binding.cc b/test/addons/nsolid-heap-sampler/binding.cc new file mode 100644 index 00000000000..d14bbbe5cee --- /dev/null +++ b/test/addons/nsolid-heap-sampler/binding.cc @@ -0,0 +1,152 @@ +#include +#include +#include +#include +#include + +#include +#include + +using v8::Context; +using v8::Function; +using v8::FunctionCallbackInfo; +using v8::Global; +using v8::HandleScope; +using v8::Integer; +using v8::Isolate; +using v8::Local; +using v8::NewStringType; +using v8::Number; +using v8::String; +using v8::Value; + +struct Stor { + std::string profile; + Global cb; +}; + +std::map profiles; +uv_mutex_t profiles_map_lock; + +static void sample_cb(node::nsolid::SharedEnvInst envinst, int status) { + uint64_t thread_id = node::nsolid::GetThreadId(envinst); + uv_mutex_lock(&profiles_map_lock); + auto it = profiles.find(thread_id); + assert(it != profiles.end()); + Isolate* isolate = Isolate::GetCurrent(); + HandleScope scope(isolate); + Local context = isolate->GetCurrentContext(); + Context::Scope context_scope(context); + Local cb = it->second.cb.Get(isolate); + Local argv[] = { + Integer::New(isolate, status), + String::NewFromUtf8( + isolate, + it->second.profile.c_str(), + NewStringType::kNormal).ToLocalChecked() + }; + + profiles.erase(it); + uv_mutex_unlock(&profiles_map_lock); + v8::MaybeLocal ret = cb->Call(context, Undefined(isolate), 2, argv); + assert(!ret.IsEmpty()); +} + +static void got_sampled(int status, + std::string profile, + uint64_t thread_id) { + assert(status == 0); + uv_mutex_lock(&profiles_map_lock); + auto it = profiles.find(thread_id); + assert(it != profiles.end()); + if (profile.empty()) { + if (it->second.cb.IsEmpty()) { + profiles.erase(it); + } else { + node::nsolid::SharedEnvInst envinst = node::nsolid::GetEnvInst(thread_id); + assert(0 == node::nsolid::RunCommand(envinst, + 
node::nsolid::CommandType::EventLoop, + sample_cb, + status)); + } + } else { + it->second.profile += profile; + } + uv_mutex_unlock(&profiles_map_lock); +} + +static void at_exit_cb() { + uv_mutex_destroy(&profiles_map_lock); +} + + +static void StartHeapSampler( + const FunctionCallbackInfo& args) { + v8::HandleScope handle_scope(args.GetIsolate()); + // thread_id + assert(args[0]->IsUint32()); + // Stop after this many milliseconds + assert(args[1]->IsNumber()); + + uint64_t thread_id = args[0].As()->Value(); + uint64_t duration = static_cast( + args[1]->NumberValue(args.GetIsolate()->GetCurrentContext()).FromJust()); + + uv_mutex_lock(&profiles_map_lock); + auto pair = profiles.emplace(thread_id, Stor()); + if (args.Length() > 2) { + pair.first->second.cb.Reset(args.GetIsolate(), args[2].As()); + } + uv_mutex_unlock(&profiles_map_lock); + int ret = node::nsolid::Snapshot::StartSampling( + node::nsolid::GetEnvInst(thread_id), + duration, + got_sampled, + thread_id); + if (ret != 0) { + if (pair.second) { + uv_mutex_lock(&profiles_map_lock); + profiles.erase(thread_id); + uv_mutex_unlock(&profiles_map_lock); + } + } else { + assert(pair.second); + } + args.GetReturnValue().Set(Integer::New(args.GetIsolate(), ret)); +} + +static void StopHeapSampler(const FunctionCallbackInfo& args) { + v8::HandleScope handle_scope(args.GetIsolate()); + // thread_id + assert(args[0]->IsUint32()); + + uint64_t thread_id = args[0].As()->Value(); + + int ret = node::nsolid::Snapshot::StopSampling( + node::nsolid::GetEnvInst(thread_id)); + args.GetReturnValue().Set(Integer::New(args.GetIsolate(), ret)); +} + +static void StopHeapSamplerSync(const FunctionCallbackInfo& args) { + v8::HandleScope handle_scope(args.GetIsolate()); + // thread_id + assert(args[0]->IsUint32()); + + uint64_t thread_id = args[0].As()->Value(); + + int ret = node::nsolid::Snapshot::StopSamplingSync( + node::nsolid::GetEnvInst(thread_id)); + args.GetReturnValue().Set(Integer::New(args.GetIsolate(), ret)); +} + 
+NODE_MODULE_INIT(/* exports, module, context */) { + NODE_SET_METHOD( + exports, "startSampling", StartHeapSampler); + NODE_SET_METHOD(exports, "stopSampling", StopHeapSampler); + NODE_SET_METHOD(exports, "stopSamplingSync", StopHeapSamplerSync); + node::nsolid::SharedEnvInst envinst = node::nsolid::GetLocalEnvInst(context); + if (node::nsolid::IsMainThread(envinst)) { + assert(0 == uv_mutex_init(&profiles_map_lock)); + atexit(at_exit_cb); + } +} diff --git a/test/addons/nsolid-heap-sampler/binding.gyp b/test/addons/nsolid-heap-sampler/binding.gyp new file mode 100644 index 00000000000..a0b2d6e7496 --- /dev/null +++ b/test/addons/nsolid-heap-sampler/binding.gyp @@ -0,0 +1,16 @@ +{ + 'targets': [{ + 'target_name': 'binding', + 'sources': [ 'binding.cc' ], + 'includes': ['../common.gypi'], + 'target_defaults': { + 'default_configuration': 'Release', + 'configurations': { + 'Debug': { + 'defines': [ 'DEBUG', '_DEBUG' ], + 'cflags': [ '-g', '-O0', '-fstandalone-debug' ], + } + }, + }, + }], +} diff --git a/test/addons/nsolid-heap-sampler/nsolid-heap-sampler.js b/test/addons/nsolid-heap-sampler/nsolid-heap-sampler.js new file mode 100644 index 00000000000..75646b336bb --- /dev/null +++ b/test/addons/nsolid-heap-sampler/nsolid-heap-sampler.js @@ -0,0 +1,91 @@ +'use strict'; +// Flags: --expose-internals + +const { buildType, mustCall, skip } = require('../../common'); +const assert = require('assert'); +const bindingPath = require.resolve(`./build/${buildType}/binding`); +const binding = require(bindingPath); +const { Worker, isMainThread, parentPort, threadId } = require('worker_threads'); +const { internalBinding } = require('internal/test/binding'); +const { UV_EEXIST, UV_ENOENT } = internalBinding('uv'); + +let er; + +if (process.env.NSOLID_COMMAND) + skip('required to run without the Console'); + +if (!isMainThread && +process.argv[2] !== process.pid) + skip('Test must first run as the main thread'); + +if (!isMainThread) { + // Starting the profile from this 
thread. + er = binding.startSampling(threadId, 5000); + assert.strictEqual(er, 0); + parentPort.postMessage('hi'); + setTimeout(() => {}, 2000); + return; +} + +const keepAlive = setInterval(() => {}, 1000); + +process.on('beforeExit', mustCall(() => { + er = binding.stopSampling(0); + // It could be pending or not. + assert.ok(er === 0 || er === UV_ENOENT); +})); + +// Normal usage check. +er = binding.startSampling(threadId, 10000, mustCall((status, profile) => { + assert.strictEqual(status, 0); + assert(JSON.parse(profile)); + + // // Check error codes for invalid calls. + er = binding.startSampling(threadId, 10000, mustCall((status, profile) => { + assert.strictEqual(status, 0); + assert(JSON.parse(profile)); + + er = binding.stopSampling(threadId); + assert.strictEqual(er, UV_ENOENT); + // Test getting profile + er = binding.startSampling(threadId, 10, mustCall((status, profile) => { + assert.strictEqual(status, 0); + assert(JSON.parse(profile)); + + er = binding.stopSampling(threadId); + assert.strictEqual(er, UV_ENOENT); + testWorker(); + clearInterval(keepAlive); + })); + assert.strictEqual(er, 0); + })); + + assert.strictEqual(er, 0); + er = binding.startSampling(threadId, 10); + assert.strictEqual(er, UV_EEXIST); + er = binding.stopSamplingSync(threadId); + assert.strictEqual(er, 0); +})); + +assert.strictEqual(er, 0); +er = binding.stopSampling(threadId); +assert.strictEqual(er, 0); + +function testWorker() { + const worker = new Worker(__filename, { argv: [process.pid] }); + worker.on('exit', mustCall((code) => { + assert.strictEqual(code, 0); + })); + worker.once('message', mustCall((msg) => { + assert.strictEqual(msg, 'hi'); + + er = binding.startSampling(worker.threadId, 500); + assert.strictEqual(er, UV_EEXIST); + + er = binding.stopSampling(worker.threadId); + assert.strictEqual(er, 0); + setTimeout(() => { + er = binding.startSampling(threadId, 2000); + assert.strictEqual(er, 0); + }, 2000); + })); +} diff --git 
a/test/addons/nsolid-track-heap-objects/binding.cc b/test/addons/nsolid-track-heap-objects/binding.cc index d812e4550fd..059ec098c5c 100644 --- a/test/addons/nsolid-track-heap-objects/binding.cc +++ b/test/addons/nsolid-track-heap-objects/binding.cc @@ -47,7 +47,8 @@ static void profile_cb(node::nsolid::SharedEnvInst envinst, int status) { profiles.erase(it); uv_mutex_unlock(&profiles_map_lock); - cb->Call(context, Undefined(isolate), 2, argv); + v8::MaybeLocal ret = cb->Call(context, Undefined(isolate), 2, argv); + assert(!ret.IsEmpty()); } static void got_profile(int status,