Commit
test: Increased timeout for integration tests to avoid random failures. (#1827)
bizob2828 authored Oct 23, 2023
1 parent 1ed0c5c commit 5202048
Showing 4 changed files with 120 additions and 101 deletions.
2 changes: 1 addition & 1 deletion package.json
@@ -146,7 +146,7 @@
"bench": "node ./bin/run-bench.js",
"docker-env": "./bin/docker-env-vars.sh",
"docs": "npm ci && jsdoc -c ./jsdoc-conf.json --private -r .",
"integration": "npm run prepare-test && npm run sub-install && time c8 -o ./coverage/integration tap --test-regex='(\\/|^test\\/integration\\/.*\\.tap\\.js)$' --timeout=300 --no-coverage --reporter classic",
"integration": "npm run prepare-test && npm run sub-install && time c8 -o ./coverage/integration tap --test-regex='(\\/|^test\\/integration\\/.*\\.tap\\.js)$' --timeout=600 --no-coverage --reporter classic",
"prepare-test": "npm run ssl && npm run docker-env",
"lint": "eslint ./*.{js,mjs} lib test bin examples",
"lint:fix": "eslint --fix, ./*.{js,mjs} lib test bin examples",
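For context on the script change above: tap's --timeout flag caps each test file's runtime in seconds (here raised from 300 to 600 so flaky integration runs aren't killed early). An individual test can also widen its own budget, in milliseconds, with t.setTimeout(). A minimal sketch of that per-test knob, not taken from this repo:

'use strict'

const tap = require('tap')

tap.test('slow integration test', (t) => {
  // Extend this test's budget to match the new 600-second file-level cap.
  t.setTimeout(600 * 1000)

  // Stand-in for slow integration work (e.g. waiting on docker services).
  setTimeout(() => {
    t.pass('finished before the deadline')
    t.end()
  }, 1000)
})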
116 changes: 116 additions & 0 deletions test/integration/newrelic-harvest-limits.tap.js
@@ -0,0 +1,116 @@
/*
* Copyright 2020 New Relic Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/

'use strict'

const tap = require('tap')
const nock = require('nock')
const sinon = require('sinon')
const helper = require('../lib/agent_helper')
const TEST_DOMAIN = 'test-collector.newrelic.com'
const TEST_COLLECTOR_URL = `https://${TEST_DOMAIN}`
const RUN_ID = 'runId'

function nockRequest(endpointMethod, runId) {
  const relativePath = helper.generateCollectorPath(endpointMethod, runId)
  return nock(TEST_COLLECTOR_URL).post(relativePath)
}

/**
 * This test asserts that when the agent re-connects, it pulls the harvest limits from the
 * original config's max_samples_stored values for every piece of data. This is done so that
 * restart loops don't re-send the down-sampled harvest limit values returned by the server,
 * which could drive the limits to 0 if enough restarts occur (a sketch of this failure mode
 * follows the diff below).
 */
tap.test('Connect calls re-generate harvest limits from original config values', (t) => {
  let agent
  let serverHarvest

  t.before(() => {
    serverHarvest = {
      event_harvest_config: {
        report_period_ms: 100,
        harvest_limits: {
          analytic_event_data: 10,
          custom_event_data: 10,
          error_event_data: 1,
          span_event_data: 10,
          log_event_data: 10
        }
      }
    }
    nock.disableNetConnect()
    nockRequest('preconnect').reply(200, { return_value: TEST_DOMAIN })
    nockRequest('connect').reply(200, { return_value: { agent_run_id: RUN_ID, ...serverHarvest } })
    nockRequest('agent_settings', RUN_ID).reply(200, { return_value: [] })
    nockRequest('metric_data', RUN_ID).reply(409, { return_value: [] })
    nockRequest('preconnect').reply(200, { return_value: TEST_DOMAIN })
    nockRequest('connect').reply(200, { return_value: { agent_run_id: RUN_ID, ...serverHarvest } })
    nockRequest('agent_settings', RUN_ID).reply(200, { return_value: [] })
    agent = helper.loadMockedAgent({
      license_key: 'license key here',
      host: TEST_DOMAIN,
      application_logging: {
        enabled: true
      }
    })
  })

  t.teardown(() => {
    helper.unloadAgent(agent)
    if (!nock.isDone()) {
      // eslint-disable-next-line no-console
      console.error('Cleaning pending mocks: %j', nock.pendingMocks())
      nock.cleanAll()
    }

    nock.enableNetConnect()
  })

  const originalConfig = Object.assign({}, agent.config)
  agent.config.no_immediate_harvest = true
  sinon.spy(agent.collector, '_connect')

  /**
   * This flow starts the agent, which pre-connects, connects, and gets agent settings.
   * Then we send metrics; since the mocked metric_data endpoint responds with a 409,
   * the agent issues a restart and makes another pre-connect, connect, and
   * agent_settings call.
   */
  agent.start((err) => {
    t.error(err)
    const config = agent.config
    t.same(
      config.event_harvest_config,
      serverHarvest.event_harvest_config,
      'config should have been updated from server'
    )
    agent.metrics.once('finished metric_data data send.', function onMetricsFinished() {
      const connectCalls = agent.collector._connect.args
      t.same(
        config.event_harvest_config,
        serverHarvest.event_harvest_config,
        'config should have been updated from server after reconnect'
      )
      t.equal(connectCalls.length, 2, 'should have reconnected once')
      connectCalls.forEach((call) => {
        const factsConfig = call[0][0]
        t.not(
          factsConfig.event_harvest_config.harvest_limits,
          config.event_harvest_config.harvest_limits,
          'facts harvest config should not be same as new harvest config'
        )
        t.same(
          factsConfig.event_harvest_config.harvest_limits,
          originalConfig.event_harvest_config.harvest_limits,
          'connect should send up original harvest limits'
        )
      })
      t.end()
    })

    agent.metrics.send()
  })
})
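To make the failure mode from the test's doc comment concrete, here is a minimal sketch with hypothetical numbers and a hypothetical serverGrant() scaling rule (not the agent's actual API): if each reconnect re-sends the limits the server last granted, the limits decay toward 0, whereas rebuilding the connect payload from the original config keeps them stable.

'use strict'

// Hypothetical server behavior: requested limits are scaled down in
// proportion to a faster report period (e.g. a 5s server harvest cycle
// vs. the 60s default).
function serverGrant(requestedLimit) {
  return Math.floor(requestedLimit * (5 / 60))
}

const originalLimit = 10000 // the agent's configured max_samples_stored

// Buggy restart loop: each connect re-sends whatever was last granted.
let granted = serverGrant(originalLimit) // 833
granted = serverGrant(granted) // 69
granted = serverGrant(granted) // 5
granted = serverGrant(granted) // 0 -- events would stop being sampled

// Correct behavior (what the test above asserts): every connect payload is
// rebuilt from the original config, so the grant stays stable at 833.
console.log(serverGrant(originalLimit))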
101 changes: 2 additions & 99 deletions test/integration/newrelic-response-handling.tap.js
@@ -35,7 +35,7 @@ const endpointDataChecks = {
    return agent.spanEventAggregator.length > 0
  },
  custom_event_data: function hasCustomEventData(agent) {
-   // TODO... prob don't ned to grrab events
+   // TODO... prob don't need to grab events
    return agent.customEventAggregator.length > 0
  },
  sql_trace_data: function hasSqlTraceData(agent) {
@@ -52,103 +52,6 @@ tap.test('New Relic response code handling', (t) => {
  })
})

/**
 * This test asserts that when the agent re-connects, it pulls the harvest limits from the
 * original config's max_samples_stored values for every piece of data. This is done so that
 * restart loops don't re-send the down-sampled harvest limit values returned by the server,
 * which could drive the limits to 0 if enough restarts occur.
 */
tap.test('Connect calls re-generate harvest limits from original config values', (t) => {
  let agent
  let serverHarvest

  t.before(() => {
    serverHarvest = {
      event_harvest_config: {
        report_period_ms: 100,
        harvest_limits: {
          analytic_event_data: 10,
          custom_event_data: 10,
          error_event_data: 1,
          span_event_data: 10,
          log_event_data: 10
        }
      }
    }
    nock.disableNetConnect()
    nockRequest('preconnect').reply(200, { return_value: TEST_DOMAIN })
    nockRequest('connect').reply(200, { return_value: { agent_run_id: RUN_ID, ...serverHarvest } })
    nockRequest('agent_settings', RUN_ID).reply(200, { return_value: [] })
    nockRequest('metric_data', RUN_ID).reply(409, { return_value: [] })
    nockRequest('preconnect').reply(200, { return_value: TEST_DOMAIN })
    nockRequest('connect').reply(200, { return_value: { agent_run_id: RUN_ID, ...serverHarvest } })
    nockRequest('agent_settings', RUN_ID).reply(200, { return_value: [] })
    agent = helper.loadMockedAgent({
      license_key: 'license key here',
      host: TEST_DOMAIN,
      application_logging: {
        enabled: true
      }
    })
  })

  t.teardown(() => {
    helper.unloadAgent(agent)
    if (!nock.isDone()) {
      // eslint-disable-next-line no-console
      console.error('Cleaning pending mocks: %j', nock.pendingMocks())
      nock.cleanAll()
    }

    nock.enableNetConnect()
  })

  const originalConfig = Object.assign({}, agent.config)
  agent.config.no_immediate_harvest = true
  sinon.spy(agent.collector, '_connect')

  /**
   * This flow starts the agent, which pre-connects, connects, and gets agent settings.
   * Then we send metrics; since the mocked metric_data endpoint responds with a 409,
   * the agent issues a restart and makes another pre-connect, connect, and
   * agent_settings call.
   */
  agent.start((err) => {
    t.error(err)
    const config = agent.config
    t.same(
      config.event_harvest_config,
      serverHarvest.event_harvest_config,
      'config should have been updated from server'
    )
    agent.metrics.once('finished metric_data data send.', function onMetricsFinished() {
      const connectCalls = agent.collector._connect.args
      t.same(
        config.event_harvest_config,
        serverHarvest.event_harvest_config,
        'config should have been updated from server after reconnect'
      )
      t.equal(connectCalls.length, 2, 'should have reconnected once')
      connectCalls.forEach((call) => {
        const factsConfig = call[0][0]
        t.not(
          factsConfig.event_harvest_config.harvest_limits,
          config.event_harvest_config.harvest_limits,
          'facts harvest config should not be same as new harvest config'
        )
        t.same(
          factsConfig.event_harvest_config.harvest_limits,
          originalConfig.event_harvest_config.harvest_limits,
          'connect should send up original harvest limits'
        )
      })
      t.end()
    })

    agent.metrics.send()
  })
})

function createStatusCodeTest(testCase) {
return (statusCodeTest) => {
let startEndpoints = null
@@ -404,7 +307,7 @@ function whenAllAggregatorsSend(agent) {
 * Each type is added every test, even though not all endpoints are mocked.
 * This allows for verifying that response handling for the endpoint under test
 * still behaves correctly when other endpoints fail.
- * @param {*} agent The agent intance to add data to
+ * @param {*} agent The agent instance to add data to
 * @param {*} callback
 */
function createTestData(agent, callback) {
2 changes: 1 addition & 1 deletion third_party_manifest.json
@@ -1,5 +1,5 @@
{
- "lastUpdated": "Fri Oct 06 2023 17:25:04 GMT-0400 (Eastern Daylight Time)",
+ "lastUpdated": "Mon Oct 23 2023 12:15:25 GMT-0400 (Eastern Daylight Time)",
  "projectName": "New Relic Node Agent",
  "projectUrl": "https://github.com/newrelic/node-newrelic",
  "includeOptDeps": true,
