The internal implementation for DBrowserX's DWebXArchive
APIs.
Works with DWebX 2.0.
All async methods work with callbacks and promises. If no callback is provided, a promise will be returned.
Any time a ddrive archive
is expected, a scoped-fs instance can be provided, unless otherwise stated.
var ddrive = require('ddrive')
var ScopedFS = require('scoped-fs')
var archive = ddrive('./my-ddrive')
var scopedfs = new ScopedFS('./my-scoped-fs')
await dba.readFile(archive, '/hello.txt') // read the published hello.txt
await dba.readFile(scopedfs, '/hello.txt') // read the local hello.txt
** NOTE: this library is written natively for node 12 and above. **
- Lookup
- Read
- Write
- writeFile(archive, name, data[, opts, cb])
- mkdir(archive, name[, cb])
- symlink(archive, target, linkname[, cb])
- copy(srcArchive, srcName, dstArchive, dstName[, cb])
- rename(srcArchive, srcName, dstArchive, dstName[, cb])
- updateMetadata(archive, path, metadata[, cb])
- deleteMetadata(archive, path, keys[, cb])
- createWriteStream(archive, name[, cb])
- Delete
- Mounts
- Activity Streams
- Exporters
- Manifest
- Diff
const dba = require('dbrowser-api')
archive
dDrive archive (object).name
Entry name (string).opts.lstat
Get symlink information if target is a symlink (boolean).- Returns a dDrive Stat entry (object).
- Throws NotFoundError
// by name:
var st = await dba.stat(archive, '/index.json')
st.isDirectory()
st.isFile()
console.log(st) /* =>
Stat {
dev: 0,
nlink: 1,
rdev: 0,
blksize: 0,
ino: 0,
mode: 16877,
uid: 0,
gid: 0,
size: 0,
offset: 0,
blocks: 0,
atime: 2017-04-10T18:59:00.147Z,
mtime: 2017-04-10T18:59:00.147Z,
ctime: 2017-04-10T18:59:00.147Z,
linkname: undefined } */
archive
dDrive archive (object).name
Entry path (string).opts
. Options (object|string). If a string, will act as opts.encoding
.opts.encoding
Desired output encoding (string). May be 'binary', 'utf8', 'hex', 'base64', or 'json'. Default 'utf8'.- Returns the content of the file in the requested encoding.
- Throws NotFoundError.
var manifestStr = await dba.readFile(archive, '/index.json')
var manifestObj = await dba.readFile(archive, '/index.json', 'json')
var imageBase64 = await dba.readFile(archive, '/favicon.png', 'base64')
archive
dDrive archive (object).path
Target directory path (string).opts.recursive
Read all subfolders and their files as well if true. Note: does not recurse into mounts.opts.includeStats
Output an object which includes the file name, stats object, and parent mount information.- Returns an array of file and folder names.
var listing = await dba.readdir(archive, '/assets')
console.log(listing) // => ['profile.png', 'styles.css']
var listing = await dba.readdir(archive, '/', { recursive: true })
console.log(listing) /* => [
'index.html',
'assets',
'assets/profile.png',
'assets/styles.css'
]*/
var listing = await dba.readdir(archive, '/', { includeStats: true })
console.log(listing) /* => [
{
name: 'profile.png',
stats: { ... },
mount: { ... }
},
...
]*/
archive
dDrive archive (object).path
Target directory path (string).- Returns a number (size in bytes).
This method will recurse on folders.
var size = await dba.readSize(archive, '/assets')
console.log(size) // => 123
archive
dDrive archive (object).name
Entry path (string).opts
. Options (object|string). If a string, will act as opts.encoding
.opts.start
Starting offset (number). Default 0.opts.end
. Ending offset inclusive (number). Default undefined.opts.length
. How many bytes to read (number). Default undefined.- Returns a readable stream.
- Throws NotFoundError.
dba.createReadStream(archive, '/favicon.png')
dba.createReadStream(archive, '/favicon.png', {
start: 1,
end: 3
})
archive
dDrive archive (object).name
Entry path (string).data
Data to write (string|Buffer).opts
. Options (object|string). If a string, will act as opts.encoding
.opts.encoding
Desired file encoding (string). May be 'binary', 'utf8', 'hex', 'base64', or 'json'. Default 'utf8' if data
is a string, 'binary' if data
is a Buffer.- Throws ArchiveNotWritableError, InvalidPathError, EntryAlreadyExistsError, InvalidEncodingError.
await dba.writeFile(archive, '/hello.txt', 'world', 'utf8')
await dba.writeFile(archive, '/thing.json', {hello: 'world'}, 'json')
await dba.writeFile(archive, '/profile.png', fs.readFileSync('/tmp/dog.png'))
archive
dDrive archive (object).name
Directory path (string).- Throws ArchiveNotWritableError, InvalidPathError, EntryAlreadyExistsError, InvalidEncodingError.
await dba.mkdir(archive, '/stuff')
archive
dDrive archive (object).target
Path to symlink to (string).linkname
Path to create the symlink (string).- Throws ArchiveNotWritableError, InvalidPathError, EntryAlreadyExistsError, InvalidEncodingError.
await dba.symlink(archive, '/hello.txt', '/goodbye.txt')
srcArchive
Source dDrive archive (object).srcName
Path to file or directory to copy (string).dstArchive
Destination dDrive archive (object).dstName
Where to copy the file or folder to (string).- Throws ArchiveNotWritableError, InvalidPathError, EntryAlreadyExistsError, InvalidEncodingError.
// copy file:
await dba.copy(archive, '/foo.txt', archive, '/foo.txt.back')
// copy folder:
await dba.copy(archive, '/stuff', otherArchive, '/stuff')
srcArchive
Source dDrive archive (object).srcName
Path to file or directory to rename (string).dstArchive
Destination dDrive archive (object).dstName
What the file or folder should be named (string).- Throws ArchiveNotWritableError, InvalidPathError, EntryAlreadyExistsError, InvalidEncodingError.
This is equivalent to moving a file/folder.
// move file:
await dba.rename(archive, '/foo.txt', archive, '/foo.md')
// move folder:
await dba.rename(archive, '/stuff', otherArchive, '/stuff')
archive
dDrive archive (object).path
Entry path (string).metadata
Metadata values to set (object).
Updates the file/folder metadata. Does not overwrite all values; any existing metadata keys which are not specified in the metadata
param are preserved.
await dba.updateMetadata(archive, '/hello.txt', {foo: 'bar'})
The default encoding for metadata attributes is utf8. Attributes which start with bin:
are encoded in binary.
await dba.updateMetadata(archive, '/hello.txt', {'bin:foo': Buffer.from([1,2,3,4])})
(await dba.stat(archive, '/hello.txt')).metadata['bin:foo'] //=> Buffer([1,2,3,4])
archive
dDrive archive (object).path
Entry path (string).keys
Metadata keys to delete (string | string[]).
await dba.deleteMetadata(archive, '/hello.txt', ['foo'])
archive
dDrive archive (object).name
Entry path (string).- Throws ArchiveNotWritableError, InvalidPathError, EntryAlreadyExistsError.
await dba.createWriteStream(archive, '/hello.txt')
archive
dDrive archive (object).name
Entry path (string).- Throws ArchiveNotWritableError, NotFoundError, NotAFileError
await dba.unlink(archive, '/hello.txt')
archive
dDrive archive (object).name
Entry path (string).opts.recursive
Delete all subfolders and files if the directory is not empty.- Throws ArchiveNotWritableError, NotFoundError, NotAFolderError, DestDirectoryNotEmpty
await dba.rmdir(archive, '/stuff', {recursive: true})
archive
dDrive archive (object).name
Entry path (string).opts
. Options (object|string). If a string or buffer, will act as opts.key
.opts.key
Key of archive to mount. May be a hex string or Buffer.- Throws ArchiveNotWritableError, InvalidPathError
await dba.mount(archive, '/foo', archive2.key)
archive
dDrive archive (object).name
Entry path (string).- Throws ArchiveNotWritableError, InvalidPathError, NotFoundError
await dba.unmount(archive, '/foo')
archive
dDrive archive (object).path
Prefix path. If falsy, will watch all files.- Returns a Readable stream.
Watches the given path for file events, which it emits as an emit-stream. Supported events:
['changed',{path}]
- The contents of the file have changed. path
is the path-string of the file.
var es = dba.watch(archive, 'foo.txt')
es.on('data', ([event, args]) => {
if (event === 'changed') {
console.log(args.path, 'has changed')
}
})
// alternatively, via emit-stream:
var emitStream = require('emit-stream')
var events = emitStream(dba.watch(archive))
events.on('changed', args => {
console.log(args.path, 'has changed')
})
archive
dDrive archive (object). Can not be a scoped-fs object.- Returns a Readable stream.
Watches the archive for network events, which it emits as an emit-stream. Supported events:
['network-changed',{connections}]
- The number of connections has changed.connections
is a number.['download',{feed,block,bytes}]
- A block has been downloaded.feed
will either be "metadata" or "content".block
is the index of data downloaded.bytes
is the number of bytes in the block.['upload',{feed,block,bytes}]
- A block has been uploaded.feed
will either be "metadata" or "content".block
is the index of data uploaded. bytes
is the number of bytes in the block.['sync',{feed}]
- All known blocks have been downloaded.feed
will either be "metadata" or "content".
var es = dba.createNetworkActivityStream(archive)
es.on('data', ([event, args]) => {
if (event === 'network-changed') {
console.log('Connected to %d peers', args.connections)
} else if (event === 'download') {
console.log('Just downloaded %d bytes (block %d) of the %s feed', args.bytes, args.block, args.feed)
} else if (event === 'upload') {
console.log('Just uploaded %d bytes (block %d) of the %s feed', args.bytes, args.block, args.feed)
} else if (event === 'sync') {
console.log('Finished downloading', args.feed)
}
})
// alternatively, via emit-stream:
var emitStream = require('emit-stream')
var events = emitStream(es)
events.on('network-changed', args => {
console.log('Connected to %d peers', args.connections)
})
events.on('download', args => {
console.log('Just downloaded %d bytes (block %d) of the %s feed', args.bytes, args.block, args.feed)
})
events.on('upload', args => {
console.log('Just uploaded %d bytes (block %d) of the %s feed', args.bytes, args.block, args.feed)
})
events.on('sync', args => {
console.log('Finished downloading', args.feed)
})
opts.srcPath
Source path in the filesystem (string). Required.opts.dstArchive
Destination archive (object). Required.opts.dstPath
Destination path within the archive. Optional, defaults to '/'.opts.ignore
Files not to copy (array of strings). Optional. Uses anymatch.opts.inplaceImport
Should import source directory in-place? (boolean). If true and importing a directory, this will cause the directory's content to be copied directly into the dstPath
. If false, will cause the source-directory to become a child of thedstPath
.opts.dryRun
Don't actually make changes, just list what changes will occur. Optional, defaults to false
.opts.progress
Function called with the stats
object on each file updated.- Returns stats on the export.
Copies a file-tree into an archive.
var stats = await dba.exportFilesystemToArchive({
srcPath: '/tmp/mystuff',
dstArchive: archive,
inplaceImport: true
})
console.log(stats) /* => {
addedFiles: ['fuzz.txt', 'foo/bar.txt'],
updatedFiles: ['something.txt'],
removedFiles: [],
addedFolders: ['foo'],
removedFolders: [],
skipCount: 3, // files skipped due to the target already existing
fileCount: 3,
totalSize: 400 // bytes
}*/
opts.srcArchive
Source archive (object). Required.opts.dstPath
Destination path in the filesystem (string). Required.opts.srcPath
Source path within the archive. Optional, defaults to '/'.opts.ignore
Files not to copy (array of strings). Optional. Uses anymatch.opts.overwriteExisting
Proceed if the destination isn't empty (boolean). Default false.opts.skipUndownloadedFiles
Ignore files that haven't been downloaded yet (boolean). Default false. If false, will wait for source files to download.- Returns stats on the export.
Copies an archive into the filesystem.
NOTE
- Unlike exportFilesystemToArchive, this will not compare the target for equality before copying. If
overwriteExisting
is true, it will simply copy all files again.
var stats = await dba.exportArchiveToFilesystem({
srcArchive: archive,
dstPath: '/tmp/mystuff',
skipUndownloadedFiles: true
})
console.log(stats) /* => {
addedFiles: ['fuzz.txt', 'foo/bar.txt'],
updatedFiles: ['something.txt'],
fileCount: 3,
totalSize: 400 // bytes
}*/
opts.srcArchive
Source archive (object). Required.opts.dstArchive
Destination archive (object). Required.opts.srcPath
Source path within the source archive (string). Optional, defaults to '/'.opts.dstPath
Destination path within the destination archive (string). Optional, defaults to '/'.opts.ignore
Files not to copy (array of strings). Optional. Uses anymatch.opts.skipUndownloadedFiles
Ignore files that haven't been downloaded yet (boolean). Default false. If false, will wait for source files to download.
Copies an archive into another archive.
NOTE
- Unlike exportFilesystemToArchive, this will not compare the target for equality before copying. It copies files indiscriminately.
var stats = await dba.exportArchiveToArchive({
srcArchive: archiveA,
dstArchive: archiveB,
skipUndownloadedFiles: true
})
console.log(stats) /* => {
addedFiles: ['fuzz.txt', 'foo/bar.txt'],
updatedFiles: ['something.txt'],
fileCount: 3,
totalSize: 400 // bytes
}*/
archive
dDrive archive (object).
A sugar to get the manifest object.
var manifestObj = await dba.readManifest(archive)
archive
dDrive archive (object).manifest
Manifest values (object).
A sugar to write the manifest object.
await dba.writeManifest(archive, { title: 'My dwebx!' })
archive
dDrive archive (object).manifest
Manifest values (object).
A sugar to modify the manifest object.
await dba.writeManifest(archive, { title: 'My dwebx!', description: 'the desc' })
await dba.writeManifest(archive, { title: 'My new title!' }) // preserves description
opts
Manifest options (object).
Helper to generate a manifest object. Opts in detail:
{
url: String, the dwebx's url
title: String
description: String
type: String
author: String | Object{url: String}
links: Object
web_root: String
fallback_page: String
}
See: https://github.com/distributedweb/index.json
archive
Archive (object). Required.other
Other version to diff against (number|object). Required.prefix
Path prefix to filter down to (string). Optional.- Returns diff data.
Get a list of differences between an archive at two points in its history
await dba.diff(archive, 2)
await dba.diff(archive, await archive.checkout(2))
await dba.diff(archive, 2, '/subfolder')
Output looks like:
[
{type: 'put', name: 'hello.txt', value: {stat: {...}}},
{type: 'mount', name: 'mounted-folder', value: {mount: {...}}},
{type: 'del', name: 'hello.txt'}
]
fn
Function. Required.
Sets a handler for when the daemon fails authentication. This can occur sometimes because the daemon has reset recently, forcing the auth token to change.