mirror of
https://github.com/zebrajr/node.git
synced 2025-12-06 00:20:08 +01:00
worker: add eventLoopUtilization()
Allow calling eventLoopUtilization() directly on a worker thread:
const worker = new Worker('./foo.js');
const elu = worker.performance.eventLoopUtilization();
setTimeout(() => {
worker.performance.eventLoopUtilization(elu);
}, 10);
Add a new performance object on the Worker instance that will hopefully
one day hold all the other performance metrics, such as nodeTiming.
Include benchmarks and tests.
PR-URL: https://github.com/nodejs/node/pull/35664
Reviewed-By: Juan José Arboleda <soyjuanarbol@gmail.com>
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Gerhard Stöbich <deb2001-github@yahoo.de>
Reviewed-By: James M Snell <jasnell@gmail.com>
Backport-PR-URL: https://github.com/nodejs/node/pull/37165
This commit is contained in:
parent
0f6d44500c
commit
eec7542781
61
benchmark/worker/bench-eventlooputil.js
Normal file
61
benchmark/worker/bench-eventlooputil.js
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
'use strict';
|
||||
|
||||
const common = require('../common.js');
|
||||
const { Worker, parentPort } = require('worker_threads');
|
||||
|
||||
if (process.argv[2] === 'idle cats') {
|
||||
return parentPort.once('message', () => {});
|
||||
}
|
||||
|
||||
const bench = common.createBenchmark(main, {
|
||||
n: [1e6],
|
||||
method: [
|
||||
'ELU_simple',
|
||||
'ELU_passed',
|
||||
],
|
||||
});
|
||||
|
||||
function main({ method, n }) {
|
||||
switch (method) {
|
||||
case 'ELU_simple':
|
||||
benchELUSimple(n);
|
||||
break;
|
||||
case 'ELU_passed':
|
||||
benchELUPassed(n);
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unsupported method ${method}`);
|
||||
}
|
||||
}
|
||||
|
||||
function benchELUSimple(n) {
|
||||
const worker = new Worker(__filename, { argv: ['idle cats'] });
|
||||
|
||||
spinUntilIdle(worker, () => {
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++)
|
||||
worker.performance.eventLoopUtilization();
|
||||
bench.end(n);
|
||||
worker.postMessage('bye');
|
||||
});
|
||||
}
|
||||
|
||||
function benchELUPassed(n) {
|
||||
const worker = new Worker(__filename, { argv: ['idle cats'] });
|
||||
|
||||
spinUntilIdle(worker, () => {
|
||||
let elu = worker.performance.eventLoopUtilization();
|
||||
bench.start();
|
||||
for (let i = 0; i < n; i++)
|
||||
elu = worker.performance.eventLoopUtilization(elu);
|
||||
bench.end(n);
|
||||
worker.postMessage('bye');
|
||||
});
|
||||
}
|
||||
|
||||
function spinUntilIdle(w, cb) {
|
||||
const t = w.performance.eventLoopUtilization();
|
||||
if (t.idle + t.active > 0)
|
||||
return process.nextTick(cb);
|
||||
setTimeout(() => spinUntilIdle(w, cb), 1);
|
||||
}
|
||||
|
|
@ -53,6 +53,68 @@ added: v8.5.0
|
|||
If `name` is not provided, removes all `PerformanceMark` objects from the
|
||||
Performance Timeline. If `name` is provided, removes only the named mark.
|
||||
|
||||
### `performance.eventLoopUtilization([utilization1[, utilization2]])`
|
||||
<!-- YAML
|
||||
added:
|
||||
- v14.10.0
|
||||
- v12.19.0
|
||||
-->
|
||||
|
||||
* `utilization1` {Object} The result of a previous call to
|
||||
`eventLoopUtilization()`.
|
||||
* `utilization2` {Object} The result of a previous call to
|
||||
`eventLoopUtilization()` prior to `utilization1`.
|
||||
* Returns {Object}
|
||||
* `idle` {number}
|
||||
* `active` {number}
|
||||
* `utilization` {number}
|
||||
|
||||
The `eventLoopUtilization()` method returns an object that contains the
|
||||
cumulative duration of time the event loop has been both idle and active as a
|
||||
high resolution milliseconds timer. The `utilization` value is the calculated
|
||||
Event Loop Utilization (ELU).
|
||||
|
||||
If bootstrapping has not yet finished on the main thread the properties have
|
||||
the value of `0`. The ELU is immediately available on [Worker threads][] since
|
||||
bootstrap happens within the event loop.
|
||||
|
||||
Both `utilization1` and `utilization2` are optional parameters.
|
||||
|
||||
If `utilization1` is passed, then the delta between the current call's `active`
|
||||
and `idle` times, as well as the corresponding `utilization` value are
|
||||
calculated and returned (similar to [`process.hrtime()`][]).
|
||||
|
||||
If `utilization1` and `utilization2` are both passed, then the delta is
|
||||
calculated between the two arguments. This is a convenience option because,
|
||||
unlike [`process.hrtime()`][], calculating the ELU is more complex than a
|
||||
single subtraction.
|
||||
|
||||
ELU is similar to CPU utilization, except that it only measures event loop
|
||||
statistics and not CPU usage. It represents the percentage of time the event
|
||||
loop has spent outside the event loop's event provider (e.g. `epoll_wait`).
|
||||
No other CPU idle time is taken into consideration. The following is an example
|
||||
of how a mostly idle process will have a high ELU.
|
||||
|
||||
```js
|
||||
'use strict';
|
||||
const { eventLoopUtilization } = require('perf_hooks').performance;
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
setImmediate(() => {
|
||||
const elu = eventLoopUtilization();
|
||||
spawnSync('sleep', ['5']);
|
||||
console.log(eventLoopUtilization(elu).utilization);
|
||||
});
|
||||
```
|
||||
|
||||
Although the CPU is mostly idle while running this script, the value of
|
||||
`utilization` is `1`. This is because the call to
|
||||
[`child_process.spawnSync()`][] blocks the event loop from proceeding.
|
||||
|
||||
Passing in a user-defined object instead of the result of a previous call to
|
||||
`eventLoopUtilization()` will lead to undefined behavior. The return values
|
||||
are not guaranteed to reflect any correct state of the event loop.
|
||||
|
||||
### `performance.mark([name])`
|
||||
<!-- YAML
|
||||
added: v8.5.0
|
||||
|
|
@ -760,3 +822,4 @@ require('some-module');
|
|||
[Performance Timeline]: https://w3c.github.io/performance-timeline/
|
||||
[Web Performance APIs]: https://w3c.github.io/perf-timing-primer/
|
||||
[User Timing]: https://www.w3.org/TR/user-timing/
|
||||
[Worker threads]: worker_threads.md#worker_threads_worker_threads
|
||||
|
|
|
|||
|
|
@ -767,6 +767,65 @@ If the Worker thread is no longer running, which may occur before the
|
|||
[`'exit'` event][] is emitted, the returned `Promise` will be rejected
|
||||
immediately with an [`ERR_WORKER_NOT_RUNNING`][] error.
|
||||
|
||||
### `worker.performance`
|
||||
<!-- YAML
|
||||
added: REPLACEME
|
||||
-->
|
||||
|
||||
An object that can be used to query performance information from a worker
|
||||
instance. Similar to [`perf_hooks.performance`][].
|
||||
|
||||
#### `performance.eventLoopUtilization([utilization1[, utilization2]])`
|
||||
<!-- YAML
|
||||
added: REPLACEME
|
||||
-->
|
||||
|
||||
* `utilization1` {Object} The result of a previous call to
|
||||
`eventLoopUtilization()`.
|
||||
* `utilization2` {Object} The result of a previous call to
|
||||
`eventLoopUtilization()` prior to `utilization1`.
|
||||
* Returns {Object}
|
||||
* `idle` {number}
|
||||
* `active` {number}
|
||||
* `utilization` {number}
|
||||
|
||||
The same call as [`perf_hooks` `eventLoopUtilization()`][], except the values
|
||||
of the worker instance are returned.
|
||||
|
||||
One difference is that, unlike the main thread, bootstrapping within a worker
|
||||
is done within the event loop. So the event loop utilization will be
|
||||
immediately available once the worker's script begins execution.
|
||||
|
||||
An `idle` time that does not increase does not indicate that the worker is
|
||||
stuck in bootstrap. The following example shows how the worker's entire
|
||||
lifetime will never accumulate any `idle` time, but is still able to process
|
||||
messages.
|
||||
|
||||
```js
|
||||
const { Worker, isMainThread, parentPort } = require('worker_threads');
|
||||
|
||||
if (isMainThread) {
|
||||
const worker = new Worker(__filename);
|
||||
setInterval(() => {
|
||||
worker.postMessage('hi');
|
||||
console.log(worker.performance.eventLoopUtilization());
|
||||
}, 100).unref();
|
||||
return;
|
||||
}
|
||||
|
||||
parentPort.on('message', () => console.log('msg')).unref();
|
||||
(function r(n) {
|
||||
if (--n < 0) return;
|
||||
const t = Date.now();
|
||||
while (Date.now() - t < 300);
|
||||
setImmediate(r, n);
|
||||
})(10);
|
||||
```
|
||||
|
||||
The event loop utilization of a worker is available only after the [`'online'`
|
||||
event][] has been emitted, and if called before this, or after the [`'exit'`
|
||||
event][], then all properties have the value of `0`.
|
||||
|
||||
### `worker.postMessage(value[, transferList])`
|
||||
<!-- YAML
|
||||
added: v10.5.0
|
||||
|
|
@ -880,6 +939,7 @@ active handle in the event system. If the worker is already `unref()`ed calling
|
|||
|
||||
[`'close'` event]: #worker_threads_event_close
|
||||
[`'exit'` event]: #worker_threads_event_exit
|
||||
[`'online'` event]: #worker_threads_event_online
|
||||
[`ArrayBuffer`]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/ArrayBuffer
|
||||
[`AsyncResource`]: async_hooks.html#async_hooks_class_asyncresource
|
||||
[`Buffer`]: buffer.html
|
||||
|
|
@ -899,6 +959,8 @@ active handle in the event system. If the worker is already `unref()`ed calling
|
|||
[`fs.open()`]: fs.html#fs_fs_open_path_flags_mode_callback
|
||||
[`fs.close()`]: fs.html#fs_fs_close_fd_callback
|
||||
[`markAsUntransferable()`]: #worker_threads_worker_markasuntransferable_object
|
||||
[`perf_hooks.performance`]: perf_hooks.md#perf_hooks_perf_hooks_performance
|
||||
[`perf_hooks` `eventLoopUtilization()`]: perf_hooks.md#perf_hooks_performance_eventlooputilization_utilization1_utilization2
|
||||
[`port.on('message')`]: #worker_threads_event_message
|
||||
[`port.onmessage()`]: https://developer.mozilla.org/en-US/docs/Web/API/MessagePort/onmessage
|
||||
[`port.postMessage()`]: #worker_threads_port_postmessage_value_transferlist
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ const {
|
|||
const EventEmitter = require('events');
|
||||
const assert = require('internal/assert');
|
||||
const path = require('path');
|
||||
const { timeOrigin } = internalBinding('performance');
|
||||
|
||||
const errorCodes = require('internal/errors').codes;
|
||||
const {
|
||||
|
|
@ -67,6 +68,8 @@ const kOnMessage = Symbol('kOnMessage');
|
|||
const kOnCouldNotSerializeErr = Symbol('kOnCouldNotSerializeErr');
|
||||
const kOnErrorMessage = Symbol('kOnErrorMessage');
|
||||
const kParentSideStdio = Symbol('kParentSideStdio');
|
||||
const kLoopStartTime = Symbol('kLoopStartTime');
|
||||
const kIsOnline = Symbol('kIsOnline');
|
||||
|
||||
const SHARE_ENV = SymbolFor('nodejs.worker_threads.SHARE_ENV');
|
||||
let debug = require('internal/util/debuglog').debuglog('worker', (fn) => {
|
||||
|
|
@ -214,6 +217,12 @@ class Worker extends EventEmitter {
|
|||
null,
|
||||
hasStdin: !!options.stdin
|
||||
}, transferList);
|
||||
// Use this to cache the Worker's loopStart value once available.
|
||||
this[kLoopStartTime] = -1;
|
||||
this[kIsOnline] = false;
|
||||
this.performance = {
|
||||
eventLoopUtilization: eventLoopUtilization.bind(this),
|
||||
};
|
||||
// Actually start the new thread now that everything is in place.
|
||||
this[kHandle].startThread();
|
||||
}
|
||||
|
|
@ -245,6 +254,7 @@ class Worker extends EventEmitter {
|
|||
[kOnMessage](message) {
|
||||
switch (message.type) {
|
||||
case messageTypes.UP_AND_RUNNING:
|
||||
this[kIsOnline] = true;
|
||||
return this.emit('online');
|
||||
case messageTypes.COULD_NOT_SERIALIZE_ERROR:
|
||||
return this[kOnCouldNotSerializeErr]();
|
||||
|
|
@ -406,6 +416,52 @@ function makeResourceLimits(float64arr) {
|
|||
};
|
||||
}
|
||||
|
||||
function eventLoopUtilization(util1, util2) {
|
||||
// TODO(trevnorris): Works to solve the thread-safe read/write issue of
|
||||
// loopTime, but has the drawback that it can't be set until the event loop
|
||||
// has had a chance to turn. So it will be impossible to read the ELU of
|
||||
// a worker thread immediately after it's been created.
|
||||
if (!this[kIsOnline] || !this[kHandle]) {
|
||||
return { idle: 0, active: 0, utilization: 0 };
|
||||
}
|
||||
|
||||
// Cache loopStart, since it's only written to once.
|
||||
if (this[kLoopStartTime] === -1) {
|
||||
this[kLoopStartTime] = this[kHandle].loopStartTime();
|
||||
if (this[kLoopStartTime] === -1)
|
||||
return { idle: 0, active: 0, utilization: 0 };
|
||||
}
|
||||
|
||||
if (util2) {
|
||||
const idle = util1.idle - util2.idle;
|
||||
const active = util1.active - util2.active;
|
||||
return { idle, active, utilization: active / (idle + active) };
|
||||
}
|
||||
|
||||
const idle = this[kHandle].loopIdleTime();
|
||||
|
||||
// Using performance.now() here is fine since it's always the time from
|
||||
// the beginning of the process, and is why it needs to be offset by the
|
||||
// loopStart time (which is also calculated from the beginning of the
|
||||
// process).
|
||||
const active = now() - this[kLoopStartTime] - idle;
|
||||
|
||||
if (!util1) {
|
||||
return { idle, active, utilization: active / (idle + active) };
|
||||
}
|
||||
|
||||
const idle_delta = idle - util1.idle;
|
||||
const active_delta = active - util1.active;
|
||||
const utilization = active_delta / (idle_delta + active_delta);
|
||||
return { idle: idle_delta, active: active_delta, utilization };
|
||||
}
|
||||
|
||||
// Duplicate code from performance.now() so don't need to require perf_hooks.
|
||||
function now() {
|
||||
const hr = process.hrtime();
|
||||
return (hr[0] * 1000 + hr[1] / 1e6) - timeOrigin;
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
ownsProcessState,
|
||||
isMainThread,
|
||||
|
|
|
|||
|
|
@ -765,6 +765,39 @@ void Worker::TakeHeapSnapshot(const FunctionCallbackInfo<Value>& args) {
|
|||
args.GetReturnValue().Set(scheduled ? taker->object() : Local<Object>());
|
||||
}
|
||||
|
||||
void Worker::LoopIdleTime(const FunctionCallbackInfo<Value>& args) {
|
||||
Worker* w;
|
||||
ASSIGN_OR_RETURN_UNWRAP(&w, args.This());
|
||||
|
||||
Mutex::ScopedLock lock(w->mutex_);
|
||||
// Using w->is_stopped() here leads to a deadlock, and checking is_stopped()
|
||||
// before locking the mutex is a race condition. So manually do the same
|
||||
// check.
|
||||
if (w->stopped_ || w->env_ == nullptr)
|
||||
return args.GetReturnValue().Set(-1);
|
||||
|
||||
uint64_t idle_time = uv_metrics_idle_time(w->env_->event_loop());
|
||||
args.GetReturnValue().Set(1.0 * idle_time / 1e6);
|
||||
}
|
||||
|
||||
void Worker::LoopStartTime(const FunctionCallbackInfo<Value>& args) {
|
||||
Worker* w;
|
||||
ASSIGN_OR_RETURN_UNWRAP(&w, args.This());
|
||||
|
||||
Mutex::ScopedLock lock(w->mutex_);
|
||||
// Using w->is_stopped() here leads to a deadlock, and checking is_stopped()
|
||||
// before locking the mutex is a race condition. So manually do the same
|
||||
// check.
|
||||
if (w->stopped_ || w->env_ == nullptr)
|
||||
return args.GetReturnValue().Set(-1);
|
||||
|
||||
double loop_start_time = w->env_->performance_state()->milestones[
|
||||
node::performance::NODE_PERFORMANCE_MILESTONE_LOOP_START];
|
||||
CHECK_GE(loop_start_time, 0);
|
||||
args.GetReturnValue().Set(
|
||||
(loop_start_time - node::performance::timeOrigin) / 1e6);
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
// Return the MessagePort that is global for this Environment and communicates
|
||||
|
|
@ -798,6 +831,8 @@ void InitWorker(Local<Object> target,
|
|||
env->SetProtoMethod(w, "unref", Worker::Unref);
|
||||
env->SetProtoMethod(w, "getResourceLimits", Worker::GetResourceLimits);
|
||||
env->SetProtoMethod(w, "takeHeapSnapshot", Worker::TakeHeapSnapshot);
|
||||
env->SetProtoMethod(w, "loopIdleTime", Worker::LoopIdleTime);
|
||||
env->SetProtoMethod(w, "loopStartTime", Worker::LoopStartTime);
|
||||
|
||||
Local<String> workerString =
|
||||
FIXED_ONE_BYTE_STRING(env->isolate(), "Worker");
|
||||
|
|
|
|||
|
|
@ -66,6 +66,8 @@ class Worker : public AsyncWrap {
|
|||
const v8::FunctionCallbackInfo<v8::Value>& args);
|
||||
v8::Local<v8::Float64Array> GetResourceLimits(v8::Isolate* isolate) const;
|
||||
static void TakeHeapSnapshot(const v8::FunctionCallbackInfo<v8::Value>& args);
|
||||
static void LoopIdleTime(const v8::FunctionCallbackInfo<v8::Value>& args);
|
||||
static void LoopStartTime(const v8::FunctionCallbackInfo<v8::Value>& args);
|
||||
|
||||
private:
|
||||
void CreateEnvMessagePort(Environment* env);
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
'use strict';
|
||||
|
||||
require('../common');
|
||||
const { mustCall } = require('../common');
|
||||
|
||||
const TIMEOUT = 50;
|
||||
const TIMEOUT = 10;
|
||||
const SPIN_DUR = 50;
|
||||
|
||||
const assert = require('assert');
|
||||
const { performance } = require('perf_hooks');
|
||||
|
|
@ -21,13 +22,15 @@ if (nodeTiming.loopStart === -1) {
|
|||
{ idle: 0, active: 0, utilization: 0 });
|
||||
}
|
||||
|
||||
// Place in setTimeout() to make sure there is some idle time, but not going to
|
||||
// assert this since it could make the test flaky.
|
||||
setTimeout(() => {
|
||||
setTimeout(mustCall(function r() {
|
||||
const t = Date.now();
|
||||
const elu1 = eventLoopUtilization();
|
||||
|
||||
while (Date.now() - t < 50) { }
|
||||
// Force idle time to accumulate before allowing test to continue.
|
||||
if (elu1.idle <= 0)
|
||||
return setTimeout(mustCall(r), 5);
|
||||
|
||||
while (Date.now() - t < SPIN_DUR) { }
|
||||
|
||||
const elu2 = eventLoopUtilization();
|
||||
const elu3 = eventLoopUtilization(elu1);
|
||||
|
|
@ -38,12 +41,13 @@ setTimeout(() => {
|
|||
assert.strictEqual(elu3.utilization, 1);
|
||||
assert.strictEqual(elu4.utilization, 1);
|
||||
assert.strictEqual(elu2.active - elu1.active, elu4.active);
|
||||
assert.ok(elu2.active > elu3.active);
|
||||
assert.ok(elu2.active > elu4.active);
|
||||
assert.ok(elu3.active > elu4.active);
|
||||
assert.ok(elu3.active > SPIN_DUR - 10, `${elu3.active} <= ${SPIN_DUR - 10}`);
|
||||
assert.ok(elu3.active > elu4.active, `${elu3.active} <= ${elu4.active}`);
|
||||
assert.ok(elu2.active > elu3.active, `${elu2.active} <= ${elu3.active}`);
|
||||
assert.ok(elu2.active > elu4.active, `${elu2.active} <= ${elu4.active}`);
|
||||
|
||||
setTimeout(runIdleTimeTest, TIMEOUT);
|
||||
}, 5);
|
||||
setTimeout(mustCall(runIdleTimeTest), TIMEOUT);
|
||||
}), 5);
|
||||
|
||||
function runIdleTimeTest() {
|
||||
const idleTime = nodeTiming.idleTime;
|
||||
|
|
@ -55,7 +59,7 @@ function runIdleTimeTest() {
|
|||
assert.strictEqual(elu1.idle, idleTime);
|
||||
assert.strictEqual(elu1.utilization, elu1.active / sum);
|
||||
|
||||
setTimeout(runCalcTest, TIMEOUT, elu1);
|
||||
setTimeout(mustCall(runCalcTest), TIMEOUT, elu1);
|
||||
}
|
||||
|
||||
function runCalcTest(elu1) {
|
||||
|
|
@ -65,18 +69,20 @@ function runCalcTest(elu1) {
|
|||
const active_delta = elu2.active - elu1.active;
|
||||
const idle_delta = elu2.idle - elu1.idle;
|
||||
|
||||
assert.ok(elu2.idle >= 0);
|
||||
assert.ok(elu2.active >= 0);
|
||||
assert.ok(elu3.idle >= 0);
|
||||
assert.ok(elu3.active >= 0);
|
||||
assert.ok(elu2.idle + elu2.active > elu1.idle + elu2.active);
|
||||
assert.ok(elu2.idle + elu2.active >= now - nodeTiming.loopStart);
|
||||
assert.ok(elu2.idle >= 0, `${elu2.idle} < 0`);
|
||||
assert.ok(elu2.active >= 0, `${elu2.active} < 0`);
|
||||
assert.ok(elu3.idle >= 0, `${elu3.idle} < 0`);
|
||||
assert.ok(elu3.active >= 0, `${elu3.active} < 0`);
|
||||
assert.ok(elu2.idle + elu2.active > elu1.idle + elu1.active,
|
||||
`${elu2.idle + elu2.active} <= ${elu1.idle + elu1.active}`);
|
||||
assert.ok(elu2.idle + elu2.active >= now - nodeTiming.loopStart,
|
||||
`${elu2.idle + elu2.active} < ${now - nodeTiming.loopStart}`);
|
||||
assert.strictEqual(elu3.active, elu2.active - elu1.active);
|
||||
assert.strictEqual(elu3.idle, elu2.idle - elu1.idle);
|
||||
assert.strictEqual(elu3.utilization,
|
||||
active_delta / (idle_delta + active_delta));
|
||||
|
||||
setImmediate(runWorkerTest);
|
||||
setImmediate(mustCall(runWorkerTest));
|
||||
}
|
||||
|
||||
function runWorkerTest() {
|
||||
|
|
@ -90,10 +96,11 @@ function runWorkerTest() {
|
|||
const elu1 = eventLoopUtilization();
|
||||
const worker = new Worker(__filename, { argv: [ 'iamalive' ] });
|
||||
|
||||
worker.on('message', (msg) => {
|
||||
worker.on('message', mustCall((msg) => {
|
||||
const elu2 = eventLoopUtilization(elu1);
|
||||
const data = JSON.parse(msg);
|
||||
|
||||
assert.ok(elu2.active + elu2.idle > data.active + data.idle);
|
||||
});
|
||||
assert.ok(elu2.active + elu2.idle > data.active + data.idle,
|
||||
`${elu2.active + elu2.idle} <= ${data.active + data.idle}`);
|
||||
}));
|
||||
}
|
||||
|
|
|
|||
116
test/parallel/test-worker-eventlooputil.js
Normal file
116
test/parallel/test-worker-eventlooputil.js
Normal file
|
|
@ -0,0 +1,116 @@
|
|||
'use strict';
|
||||
|
||||
const { mustCall, mustCallAtLeast } = require('../common');
|
||||
|
||||
const assert = require('assert');
|
||||
const {
|
||||
Worker,
|
||||
MessageChannel,
|
||||
MessagePort,
|
||||
parentPort,
|
||||
} = require('worker_threads');
|
||||
const { eventLoopUtilization, now } = require('perf_hooks').performance;
|
||||
|
||||
// Use argv to detect whether we're running as a Worker called by this test vs.
|
||||
// this test also being called as a Worker.
|
||||
if (process.argv[2] === 'iamalive') {
|
||||
const iaElu = idleActive(eventLoopUtilization());
|
||||
// Checks that the worker bootstrap is running after the event loop started.
|
||||
assert.ok(iaElu > 0, `${iaElu} <= 0`);
|
||||
parentPort.once('message', mustCall((msg) => {
|
||||
assert.ok(msg.metricsCh instanceof MessagePort);
|
||||
msg.metricsCh.on('message', mustCallAtLeast(workerOnMetricsMsg, 1));
|
||||
}));
|
||||
return;
|
||||
}
|
||||
|
||||
function workerOnMetricsMsg(msg) {
|
||||
if (msg.cmd === 'close') {
|
||||
return this.close();
|
||||
}
|
||||
|
||||
if (msg.cmd === 'elu') {
|
||||
return this.postMessage(eventLoopUtilization());
|
||||
}
|
||||
|
||||
if (msg.cmd === 'spin') {
|
||||
const t = now();
|
||||
while (now() - t < msg.dur);
|
||||
return this.postMessage(eventLoopUtilization());
|
||||
}
|
||||
}
|
||||
|
||||
let worker;
|
||||
let metricsCh;
|
||||
let mainElu;
|
||||
let workerELU;
|
||||
|
||||
(function r() {
|
||||
// Force some idle time to accumulate before proceeding with test.
|
||||
if (eventLoopUtilization().idle <= 0)
|
||||
return setTimeout(mustCall(r), 5);
|
||||
|
||||
worker = new Worker(__filename, { argv: [ 'iamalive' ] });
|
||||
metricsCh = new MessageChannel();
|
||||
worker.postMessage({ metricsCh: metricsCh.port1 }, [ metricsCh.port1 ]);
|
||||
|
||||
workerELU = worker.performance.eventLoopUtilization;
|
||||
mainElu = eventLoopUtilization();
|
||||
metricsCh.port2.once('message', mustCall(checkWorkerIdle));
|
||||
metricsCh.port2.postMessage({ cmd: 'elu' });
|
||||
// Make sure it's still safe to call eventLoopUtilization() after the worker
|
||||
// hass been closed.
|
||||
worker.on('exit', mustCall(() => {
|
||||
assert.deepStrictEqual(worker.performance.eventLoopUtilization(),
|
||||
{ idle: 0, active: 0, utilization: 0 });
|
||||
}));
|
||||
})();
|
||||
|
||||
|
||||
function checkWorkerIdle(wElu) {
|
||||
const tmpMainElu = eventLoopUtilization(mainElu);
|
||||
const perfWorkerElu = workerELU();
|
||||
const eluDiff = eventLoopUtilization(perfWorkerElu, mainElu);
|
||||
|
||||
assert.strictEqual(idleActive(eluDiff),
|
||||
(perfWorkerElu.active - mainElu.active) +
|
||||
(perfWorkerElu.idle - mainElu.idle));
|
||||
assert.ok(idleActive(wElu) > 0, `${idleActive(wElu)} <= 0`);
|
||||
assert.ok(idleActive(workerELU(wElu)) > 0,
|
||||
`${idleActive(workerELU(wElu))} <= 0`);
|
||||
assert.ok(idleActive(perfWorkerElu) > idleActive(wElu),
|
||||
`${idleActive(perfWorkerElu)} <= ${idleActive(wElu)}`);
|
||||
assert.ok(idleActive(tmpMainElu) > idleActive(perfWorkerElu),
|
||||
`${idleActive(tmpMainElu)} <= ${idleActive(perfWorkerElu)}`);
|
||||
|
||||
wElu = workerELU();
|
||||
setTimeout(mustCall(() => {
|
||||
wElu = workerELU(wElu);
|
||||
// Some clocks fire early. Removing a few milliseconds to cover that.
|
||||
assert.ok(idleActive(wElu) >= 45, `${idleActive(wElu)} < 45`);
|
||||
// Cutting the idle time in half since it's possible that the call took a
|
||||
// lot of resources to process?
|
||||
assert.ok(wElu.idle >= 25, `${wElu.idle} < 25`);
|
||||
|
||||
checkWorkerActive();
|
||||
}), 50);
|
||||
}
|
||||
|
||||
function checkWorkerActive() {
|
||||
const w = workerELU();
|
||||
|
||||
metricsCh.port2.postMessage({ cmd: 'spin', dur: 50 });
|
||||
metricsCh.port2.once('message', (wElu) => {
|
||||
const w2 = workerELU(w);
|
||||
|
||||
assert.ok(w2.active >= 50, `${w2.active} < 50`);
|
||||
assert.ok(idleActive(wElu) > idleActive(w2),
|
||||
`${idleActive(wElu)} <= ${idleActive(w2)}`);
|
||||
|
||||
metricsCh.port2.postMessage({ cmd: 'close' });
|
||||
});
|
||||
}
|
||||
|
||||
function idleActive(elu) {
|
||||
return elu.idle + elu.active;
|
||||
}
|
||||
Loading…
Reference in New Issue
Block a user