Commit 5ef1b55
feat: add data update bulk/resume commands (#1098)
* chore: refactor bulk ingest utils
* feat: add `data update bulk/resume`
* fix: update `data import bulk` help
* test: add bulk update NUT
* test: break up NUTs (#1099)
* chore: unify bulk ingest logic
* test: add bulk update NUTs to test matrix
* fix: insert operation
* fix: command-specific resume instructions
* fix: command-specific stage title
* fix: pass operation opt
* test: fix update resume NUT on win
* test: refactor/doc
* chore: moar refactor/doc
* chore: clean up msgs
* feat: add column-delimiter flag to import/update bulk
* chore: update command snapshot
* chore: eslint rule inline
* test: validate async command's cache files
* chore: update msg [skip ci]
* fix: edit help for new "data update bulk|resume" commands (#1106)
* fix: remove `as string`
* chore: use proper stop status
* chore: share column-delimiter flag def
* test: remove type assertions
* feat: detect column delimiter
* test: nut should detect column delimiter

Co-authored-by: Juliet Shackell <63259011+jshackell-sfdc@users.noreply.github.com>
1 parent 0a5f8d8 · commit 5ef1b55

40 files changed: +1,200 additions, −386 deletions

.github/workflows/test.yml

Lines changed: 11 additions & 0 deletions
```diff
@@ -21,6 +21,17 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest, windows-latest]
+        command:
+          - 'yarn test:nuts:bulk:export'
+          - 'yarn test:nuts:bulk:import'
+          - 'yarn test:nuts:bulk:update'
+          - 'yarn test:nuts:data:bulk-upsert-delete'
+          - 'yarn test:nuts:data:create'
+          - 'yarn test:nuts:data:query'
+          - 'yarn test:nuts:data:record'
+          - 'yarn test:nuts:data:search'
+          - 'yarn test:nuts:data:tree'
       fail-fast: false
     with:
       os: ${{ matrix.os }}
+      command: ${{ matrix.command }}
```

command-snapshot.json

Lines changed: 39 additions & 1 deletion
```diff
@@ -142,7 +142,18 @@
     "command": "data:import:bulk",
     "flagAliases": [],
     "flagChars": ["a", "f", "o", "s", "w"],
-    "flags": ["api-version", "async", "file", "flags-dir", "json", "line-ending", "sobject", "target-org", "wait"],
+    "flags": [
+      "api-version",
+      "async",
+      "column-delimiter",
+      "file",
+      "flags-dir",
+      "json",
+      "line-ending",
+      "sobject",
+      "target-org",
+      "wait"
+    ],
     "plugin": "@salesforce/plugin-data"
   },
   {
@@ -235,6 +246,25 @@
     "flags": ["api-version", "file", "flags-dir", "json", "query", "result-format", "target-org"],
     "plugin": "@salesforce/plugin-data"
   },
+  {
+    "alias": [],
+    "command": "data:update:bulk",
+    "flagAliases": [],
+    "flagChars": ["a", "f", "o", "s", "w"],
+    "flags": [
+      "api-version",
+      "async",
+      "column-delimiter",
+      "file",
+      "flags-dir",
+      "json",
+      "line-ending",
+      "sobject",
+      "target-org",
+      "wait"
+    ],
+    "plugin": "@salesforce/plugin-data"
+  },
   {
     "alias": ["force:data:record:update"],
     "command": "data:update:record",
@@ -255,6 +285,14 @@
     ],
     "plugin": "@salesforce/plugin-data"
   },
+  {
+    "alias": [],
+    "command": "data:update:resume",
+    "flagAliases": [],
+    "flagChars": ["i", "w"],
+    "flags": ["flags-dir", "job-id", "json", "use-most-recent", "wait"],
+    "plugin": "@salesforce/plugin-data"
+  },
   {
     "alias": [],
     "command": "data:upsert:bulk",
```

messages/bulkIngest.md

Lines changed: 39 additions & 0 deletions
```diff
@@ -0,0 +1,39 @@
+# export.resume
+
+Run "sf %s --job-id %s" to resume the operation.
+
+# error.timeout
+
+The operation timed out after %s minutes.
+
+Run "sf %s --job-id %s" to resume it.
+
+# error.failedRecordDetails
+
+Job finished being processed but failed to process %s records.
+
+To review the details of this job, run this command:
+
+sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
+
+# error.jobFailed
+
+Job failed to be processed due to:
+
+%s
+
+To review the details of this job, run this command:
+
+sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
+
+# error.jobAborted
+
+Job has been aborted.
+
+To review the details of this job, run this command:
+
+sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
+
+# flags.column-delimiter.summary
+
+Column delimiter used in the CSV file. Default is COMMA.
```
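These templates are an ordinary @salesforce/core message bundle shared across the bulk ingest commands. As a rough sketch of how a command resolves them (the resume command name and job ID below are made-up example values):

```typescript
// Sketch: formatting the shared bulkIngest messages with @salesforce/core.
// The bundle name matches the file: messages/bulkIngest.md -> 'bulkIngest'.
import { Messages } from '@salesforce/core';

Messages.importMessagesDirectoryFromMetaUrl(import.meta.url);
const messages = Messages.loadMessages('@salesforce/plugin-data', 'bulkIngest');

// "export.resume" takes two tokens, the command-specific resume command and
// the job ID, which is what lets one bundle serve import, update, and upsert.
const hint = messages.getMessage('export.resume', ['data update resume', '750xx000000005sAAA']);
console.log(hint);
// => Run "sf data update resume --job-id 750xx000000005sAAA" to resume the operation.
```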

messages/data.export.resume.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -1,6 +1,6 @@
 # summary
 
-Resume a bulk export job that you previously started.
+Resume a bulk export job that you previously started. Uses Bulk API 2.0.
 
 # description
 
```
messages/data.import.bulk.md

Lines changed: 3 additions & 35 deletions
```diff
@@ -40,40 +40,8 @@ Time to wait for the command to finish, in minutes.
 
 # flags.line-ending.summary
 
-Line ending used in the CSV file. Default value on Windows is `CRLF`; on macOS and Linux it's `LR`.
+Line ending used in the CSV file. Default value on Windows is `CRLF`; on macOS and Linux it's `LF`.
 
-# export.resume
+# flags.column-delimiter.summary
 
-Run "sf data import resume --job-id %s" to resume the operation.
-
-# error.timeout
-
-The operation timed out after %s minutes.
-
-Run "sf data import resume --job-id %s" to resume it.
-
-# error.failedRecordDetails
-
-Job finished being processed but failed to import %s records.
-
-To review the details of this job, run this command:
-
-sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
-
-# error.jobFailed
-
-Job failed to be processed due to:
-
-%s
-
-To review the details of this job, run this command:
-
-sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
-
-# error.jobAborted
-
-Job has been aborted.
-
-To review the details of this job, run this command:
-
-sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
+Column delimiter used in the CSV file. Default is COMMA.
```
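The changelog also lists "feat: detect column delimiter", meaning the delimiter can be inferred from the file rather than always defaulting to COMMA. The detection code isn't part of this excerpt; a minimal sketch of one plausible approach (a hypothetical helper, not the plugin's implementation) is to tally candidate delimiters in the header row:

```typescript
import { createReadStream } from 'node:fs';
import { createInterface } from 'node:readline';

// Bulk API 2.0 accepts these columnDelimiter values.
type ColumnDelimiter = 'BACKQUOTE' | 'CARET' | 'COMMA' | 'PIPE' | 'SEMICOLON' | 'TAB';

const CANDIDATES: Array<[ColumnDelimiter, string]> = [
  ['COMMA', ','],
  ['SEMICOLON', ';'],
  ['PIPE', '|'],
  ['TAB', '\t'],
  ['BACKQUOTE', '`'],
  ['CARET', '^'],
];

// Hypothetical helper: read only the header line and pick the candidate
// delimiter that occurs most often in it.
async function detectDelimiter(filePath: string): Promise<ColumnDelimiter> {
  const lines = createInterface({ input: createReadStream(filePath) });
  for await (const header of lines) {
    lines.close();
    const [best] = CANDIDATES.map(
      ([name, char]): [ColumnDelimiter, number] => [name, header.split(char).length - 1]
    ).sort((a, b) => b[1] - a[1]);
    return best[0];
  }
  return 'COMMA'; // empty file: fall back to the documented default
}
```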

messages/data.import.resume.md

Lines changed: 0 additions & 32 deletions
```diff
@@ -27,35 +27,3 @@ Job ID of the bulk import.
 # flags.wait.summary
 
 Time to wait for the command to finish, in minutes.
-
-# error.failedRecordDetails
-
-Job finished being processed but failed to import %s records.
-
-To review the details of this job, run this command:
-
-sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
-
-# error.timeout
-
-The operation timed out after %s minutes.
-
-Try re-running "sf data import resume --job-id %s" with a bigger wait time.
-
-# error.jobFailed
-
-Job failed to be processed due to:
-
-%s
-
-To review the details of this job, run this command:
-
-sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
-
-# error.jobAborted
-
-Job has been aborted.
-
-To review the details of this job, run this command:
-
-sf org open --target-org %s --path "/lightning/setup/AsyncApiJobStatus/page?address=%2F%s"
```

messages/data.update.bulk.md

Lines changed: 47 additions & 0 deletions
```diff
@@ -0,0 +1,47 @@
+# summary
+
+Bulk update records to an org from a CSV file. Uses Bulk API 2.0.
+
+# description
+
+You can use this command to update millions of Salesforce object records based on a file in comma-separated values (CSV) format.
+
+All the records in the CSV file must be for the same Salesforce object. Specify the object with the `--sobject` flag. The first column of every line in the CSV file must be an ID of the record you want to update. The CSV file can contain only existing records; if a record in the file doesn't currently exist in the Salesforce object, the command fails. Consider using "sf data upsert bulk" if you also want to insert new records.
+
+Bulk updates can take a while, depending on how many records are in the CSV file. If the command times out, or you specified the --async flag, the command displays the job ID. To see the status and get the results of the job, run "sf data update resume" and pass the job ID to the --job-id flag.
+
+For information and examples about how to prepare your CSV files, see "Prepare Data to Ingest" in the "Bulk API 2.0 and Bulk API Developer Guide" (https://developer.salesforce.com/docs/atlas.en-us.api_asynch.meta/api_asynch/datafiles_prepare_data.htm).
+
+# examples
+
+- Update Account records from a CSV-formatted file into an org with alias "my-scratch"; if the update doesn't complete in 10 minutes, the command ends and displays a job ID:
+
+  <%= config.bin %> <%= command.id %> --file accounts.csv --sobject Account --wait 10 --target-org my-scratch
+
+- Update asynchronously and use the default org; the command immediately returns a job ID that you then pass to the "sf data update resume" command:
+
+  <%= config.bin %> <%= command.id %> --file accounts.csv --sobject Account --async
+
+# flags.async.summary
+
+Don't wait for the command to complete.
+
+# flags.wait.summary
+
+Time to wait for the command to finish, in minutes.
+
+# flags.file.summary
+
+CSV file that contains the Salesforce object records you want to update.
+
+# flags.sobject.summary
+
+API name of the Salesforce object, either standard or custom, which you are updating.
+
+# flags.line-ending.summary
+
+Line ending used in the CSV file. Default value on Windows is `CRLF`; on macOS and Linux it's `LF`.
+
+# flags.column-delimiter.summary
+
+Column delimiter used in the CSV file. Default is COMMA.
```
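For illustration only, a file that satisfies these rules could look like the following (hypothetical record IDs; the Id values must identify records that already exist in the target org):

```csv
Id,Name,Industry
001xx000003DGb2AAG,Acme Corp,Technology
001xx000003DGb3AAG,Global Media,Media
```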

messages/data.update.resume.md

Lines changed: 29 additions & 0 deletions
```diff
@@ -0,0 +1,29 @@
+# summary
+
+Resume a bulk update job that you previously started. Uses Bulk API 2.0.
+
+# description
+
+When the original "sf data update bulk" command either times out or is run with the --async flag, it displays a job ID. To see the status and get the results of the bulk update, run this command by either passing it the job ID or using the --use-most-recent flag to specify the most recent bulk update job.
+
+# examples
+
+- Resume a bulk update job of your default org using a job ID:
+
+  <%= config.bin %> <%= command.id %> --job-id 750xx000000005sAAA
+
+- Resume the most recently run bulk update job for an org with alias "my-scratch":
+
+  <%= config.bin %> <%= command.id %> --use-most-recent --target-org my-scratch
+
+# flags.use-most-recent.summary
+
+Use the job ID of the bulk update job that was most recently run.
+
+# flags.job-id.summary
+
+Job ID of the bulk update.
+
+# flags.wait.summary
+
+Time to wait for the command to finish, in minutes.
```

package.json

Lines changed: 11 additions & 3 deletions
```diff
@@ -67,7 +67,8 @@
     "description": "Query records."
   },
   "update": {
-    "description": "Update a single record."
+    "description": "Update many records.",
+    "external": true
   },
   "upsert": {
     "description": "Upsert many records."
@@ -103,8 +104,15 @@
     "prepack": "sf-prepack",
     "prepare": "sf-install",
     "test": "wireit",
-    "test:nuts": "nyc mocha \"./test/**/*.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
-    "test:nuts:bulk": "nyc mocha \"./test/**/dataBulk.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:bulk:import": "nyc mocha \"./test/commands/data/import/*.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:bulk:export": "nyc mocha \"./test/commands/data/export/*.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:bulk:update": "nyc mocha \"./test/commands/data/update/*.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:data:tree": "nyc mocha \"./test/commands/data/tree/*.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:data:query": "nyc mocha \"./test/commands/data/query/*.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:data:record": "nyc mocha \"./test/commands/data/record/dataRecord.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:data:search": "nyc mocha \"./test/commands/data/search.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:data:create": "nyc mocha \"./test/commands/data/create/*.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
+    "test:nuts:data:bulk-upsert-delete": "nyc mocha \"./test/commands/data/dataBulk.nut.ts\" --slow 4500 --timeout 600000 --parallel --jobs 20",
     "test:only": "wireit",
     "version": "oclif readme"
   },
```

src/bulkDataRequestCache.ts

Lines changed: 69 additions & 0 deletions
```diff
@@ -257,6 +257,75 @@ export class BulkImportRequestCache extends TTLConfig<TTLConfig.Options, BulkExportCacheConfig> {
   }
 }
 
+export class BulkUpdateRequestCache extends TTLConfig<TTLConfig.Options, BulkExportCacheConfig> {
+  public static getDefaultOptions(): TTLConfig.Options {
+    return {
+      isGlobal: true,
+      isState: true,
+      filename: BulkUpdateRequestCache.getFileName(),
+      stateFolder: Global.SF_STATE_FOLDER,
+      ttl: Duration.days(7),
+    };
+  }
+
+  public static getFileName(): string {
+    return 'bulk-data-update-cache.json';
+  }
+
+  public static async unset(key: string): Promise<void> {
+    const cache = await BulkUpdateRequestCache.create();
+    cache.unset(key);
+    await cache.write();
+  }
+
+  /**
+   * Creates a new bulk data update cache entry for the given bulk request id.
+   *
+   * @param bulkRequestId
+   * @param username
+   * @param apiVersion
+   */
+  public async createCacheEntryForRequest(bulkRequestId: string, username: string, apiVersion: string): Promise<void> {
+    this.set(bulkRequestId, {
+      jobId: bulkRequestId,
+      username,
+      apiVersion,
+    });
+    await this.write();
+    Logger.childFromRoot('BulkUpdateCache').debug(`bulk cache saved for ${bulkRequestId}`);
+  }
+
+  public async resolveResumeOptionsFromCache(jobIdOrMostRecent: string | boolean): Promise<ResumeBulkImportOptions> {
+    if (typeof jobIdOrMostRecent === 'boolean') {
+      const key = this.getLatestKey();
+      if (!key) {
+        throw messages.createError('error.missingCacheEntryError');
+      }
+      // key definitely exists because it came from the cache
+      const entry = this.get(key);
+
+      return {
+        jobInfo: { id: entry.jobId },
+        options: {
+          connection: (await Org.create({ aliasOrUsername: entry.username })).getConnection(),
+        },
+      };
+    } else {
+      const entry = this.get(jobIdOrMostRecent);
+      if (!entry) {
+        throw messages.createError('error.bulkRequestIdNotFound', [jobIdOrMostRecent]);
+      }
+
+      return {
+        jobInfo: { id: entry.jobId },
+        options: {
+          connection: (await Org.create({ aliasOrUsername: entry.username })).getConnection(),
+        },
+      };
+    }
+  }
+}
+
 export class BulkExportRequestCache extends TTLConfig<TTLConfig.Options, BulkExportCacheConfig> {
   public static getDefaultOptions(): TTLConfig.Options {
     return {
```
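A rough usage sketch for the new cache (made-up job ID, username, and API version; in a real run the username has to map to an authenticated org for the connection lookup to succeed):

```typescript
// Sketch: how `data update bulk` and `data update resume` might use the cache.
import { BulkUpdateRequestCache } from './bulkDataRequestCache.js';

const cache = await BulkUpdateRequestCache.create();

// "data update bulk" side: record the async job so resume can find it later.
await cache.createCacheEntryForRequest('750xx000000005sAAA', 'user@example.com', '62.0');

// "data update resume" side: `true` stands in for --use-most-recent;
// passing a string instead would look up an explicit --job-id value.
const resume = await cache.resolveResumeOptionsFromCache(true);
console.log(resume.jobInfo.id); // => 750xx000000005sAAA
```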
