diff --git a/barman/clients/cloud_cli.py b/barman/clients/cloud_cli.py
index 5781b0bf4..e902b53c1 100644
--- a/barman/clients/cloud_cli.py
+++ b/barman/clients/cloud_cli.py
@@ -152,7 +152,7 @@ def create_argument_parser(description, source_or_destination=UrlArgumentType.so
     parser.add_argument(
         "--cloud-provider",
         help="The cloud provider to use as a storage backend",
-        choices=["aws-s3", "azure-blob-storage", "google-cloud-storage"],
+        choices=["aws-s3", "azure-blob-storage", "google-cloud-storage", "networker-storage"],
         default="aws-s3",
     )
     s3_arguments = parser.add_argument_group(
diff --git a/barman/cloud_providers/__init__.py b/barman/cloud_providers/__init__.py
index 1da74c6f4..0ee1a85d0 100644
--- a/barman/cloud_providers/__init__.py
+++ b/barman/cloud_providers/__init__.py
@@ -103,6 +103,24 @@ def _make_google_cloud_interface(config, cloud_interface_kwargs):
         cloud_interface_kwargs["jobs"] = 1
     return GoogleCloudInterface(**cloud_interface_kwargs)
 
+
+def _make_networker_interface(config, cloud_interface_kwargs):
+    """
+    :param config: Not used yet
+    :param cloud_interface_kwargs: common parameters
+    :return: NetworkerInterface
+    """
+    from barman.cloud_providers.networker_storage import NetworkerInterface
+
+    cloud_interface_kwargs["jobs"] = 1
+    _update_kwargs(
+        cloud_interface_kwargs,
+        config,
+        ("server_name",),
+    )
+    return NetworkerInterface(**cloud_interface_kwargs)
+
 
 def get_cloud_interface(config):
     """
@@ -122,8 +140,10 @@ def get_cloud_interface(config):
     elif config.cloud_provider == "azure-blob-storage":
         return _make_azure_cloud_interface(config, cloud_interface_kwargs)
     elif config.cloud_provider == "google-cloud-storage":
         return _make_google_cloud_interface(config, cloud_interface_kwargs)
+    elif config.cloud_provider == "networker-storage":
+        return _make_networker_interface(config, cloud_interface_kwargs)
     else:
         raise CloudProviderUnsupported(
             "Unsupported cloud provider: %s" % config.cloud_provider
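A quick illustration of what the new factory wires together. This is a sketch only: the namespace attributes and the URL below are invented for the example and are normally built by the cloud CLI from its command-line arguments.

```python
# Sketch: exercising the new factory directly (all names are hypothetical).
from argparse import Namespace

from barman.cloud_providers import _make_networker_interface

config = Namespace(server_name="pg1.example.com")  # Networker client name
cloud_interface_kwargs = {
    "url": "nw://nsr-server/barman",  # hypothetical Networker server and path
    "tags": None,
}
# The factory forces jobs=1 and copies config.server_name into the kwargs
interface = _make_networker_interface(config, cloud_interface_kwargs)
```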
diff --git a/barman/cloud_providers/networker_storage.py b/barman/cloud_providers/networker_storage.py
new file mode 100644
index 000000000..e2d0dbe77
--- /dev/null
+++ b/barman/cloud_providers/networker_storage.py
@@ -0,0 +1,379 @@
+# -*- coding: utf-8 -*-
+# © Copyright EnterpriseDB UK Limited 2018-2022
+#
+# This file is part of Barman.
+#
+# Barman is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Barman is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Barman.  If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import os
+import re
+import shutil
+import subprocess
+import sys
+
+from barman.clients.cloud_compression import decompress_to_file
+from barman.cloud import CloudInterface, DecompressingStreamingIO, DEFAULT_DELIMITER
+
+try:
+    # Python 3.x
+    from urllib.parse import urlparse
+except ImportError:
+    # Python 2.x
+    from urlparse import urlparse
+
+BASE_DIR = "/nsr/cache/cloudboost/barman"
+
+
+class NetworkerInterface(CloudInterface):
+    """
+    This class implements CloudInterface for Networker.
+    It depends on the installation of the Networker Base Client
+    and the Networker Extended Client (mminfo).
+    On Linux these are packaged as e.g. lgtoclnt and lgtoxtdclnt.
+    They are available for many platforms.
+    """
+
+    # A networker saveset is written in one piece, so each file is one chunk
+    MAX_CHUNKS_PER_FILE = 1
+
+    # Since there is only one chunk, min chunk size is the same as the
+    # maximum archive size
+    MIN_CHUNK_SIZE = 1 << 40
+
+    # As there is no documented limit for the networker maximum save set size,
+    # we just leave the defaults in.
+    # MAX_ARCHIVE_SIZE - we set a maximum of 1TB per file
+    MAX_ARCHIVE_SIZE = 1 << 40
+
+    def __init__(self, url, jobs=1, tags=None, server_name=None):
+        """
+        :param str url: Full URL of the cloud destination/source
+          (ex: nw://server-name/path)
+        :param int jobs: How many sub-processes to use for asynchronous
+          uploading, defaults to 1.
+        :param List[tuple] tags: List of tags as k,v tuples to be added to all
+          uploaded objects
+        :param str server_name: networker client name of this machine
+        """
+        self.bucket_name, self.path = self._parse_url(url)
+        super().__init__(
+            url=url,
+            jobs=jobs,
+            tags=tags,
+        )
+        self.bucket_exists = None
+        self.server_name = server_name
+        # self._reinit_session()
+
+    def __del__(self):
+        """
+        Clean up the local staging area as best as we can. Any earlier point
+        would be too soon and would remove files which are still needed.
+        """
+        command = os.path.basename(sys.argv[0])
+        if 'restore' in command:
+            shutil.rmtree(
+                os.path.join(BASE_DIR, self.path, self.server_name),
+                ignore_errors=True,
+            )
+        # "recover -d" recreates the original absolute path below the
+        # destination directory, so recovered files end up in BASE_DIR + BASE_DIR
+        if os.path.isdir(BASE_DIR + BASE_DIR):
+            shutil.rmtree(BASE_DIR + BASE_DIR, ignore_errors=True)
+
+    @staticmethod
+    def _parse_url(url):
+        """
+        Parse url and return bucket name and path. Raise ValueError otherwise.
+        """
+        if not url.startswith("nw://"):
+            msg = "Networker storage URL {} is malformed. Expected format is '{}'".format(
+                url,
+                "nw://server-name",
+            )
+            raise ValueError(msg)
+        parsed_url = urlparse(url)
+        if not parsed_url.netloc:
+            raise ValueError(
+                "Storage URL {} is malformed. Server name not found".format(url)
+            )
+        return parsed_url.netloc, parsed_url.path.strip("/")
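+
+    # Worked example (hypothetical server name):
+    #   NetworkerInterface._parse_url("nw://nsr-server/barman")
+    #   -> ("nsr-server", "barman"): the Networker server to contact, and the
+    #   relative path used for the staging area and as the save pool name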
+
+    def _reinit_session(self):
+        """
+        Create a new session
+        """
+        self.container_client = self.bucket_name
+
+    def test_connectivity(self):
+        """
+        Test Networker connectivity by trying to reach the backup server
+        """
+        try:
+            # We are not even interested in the existence of the bucket,
+            # we just want to see if the networker server is reachable.
+            self.bucket_exists = self._check_bucket_existence()
+            return True
+        except Exception as e:
+            logging.error("Can't connect to Networker server: %s", e)
+            return False
+
+    def _check_bucket_existence(self):
+        """
+        Check that the Networker server is reachable
+
+        :return: True if the server responds to a ping, False otherwise
+        :rtype: bool
+        """
+        logging.debug("_check_bucket_existence")
+        response = subprocess.run(
+            ["ping", "-q", "-c", "1", self.bucket_name],
+            stdout=subprocess.DEVNULL,
+        )
+        if response.returncode == 0:
+            self._create_bucket()
+            return True
+        else:
+            return False
+
+    def _create_bucket(self):
+        """
+        Create a local temporary directory.
+        This will serve as a staging area for the backup files, until they are
+        saved via the networker commands. The optimal way would be to stream
+        the data directly into networker, but there is no API available that
+        works for us in a usable, compatible way.
+        """
+        logging.debug("_create_bucket")
+        if not os.path.isdir(BASE_DIR):
+            os.mkdir(BASE_DIR)
+            os.chmod(BASE_DIR, 0o777)
+
+    def list_bucket(self, prefix="", delimiter=DEFAULT_DELIMITER):
+        """
+        List networker saveset content in a directory manner
+
+        :param str prefix: Prefix used to filter blobs
+        :param str delimiter: Delimiter, used with prefix to emulate hierarchy
+        :return: List of objects and dirs right under the prefix
+        :rtype: List[str]
+        """
+        logging.debug("list_bucket: {}, {}".format(prefix, delimiter))
+        client = prefix.replace(self.path + '/', "")
+        client = client.split('/')[0]
+        cp = subprocess.run(
+            ['mminfo', '-xc,', '-s', self.bucket_name, '-r', 'name,family',
+             '-q', 'client=' + client],
+            stdout=subprocess.PIPE, encoding='utf-8',
+        )
+        objects = []
+        dirs = []
+        for line in cp.stdout.splitlines():
+            (key, family) = line.split(',')
+            # only savesets staged through this module live in the 'disk' family
+            if family != 'disk':
+                continue
+            if not re.match(prefix, key):
+                continue
+            objects.append(key)
+            if delimiter == '':
+                continue
+            (dir, file) = os.path.split(key)
+            dir += os.sep
+            if dir not in dirs:
+                dirs.append(dir)
+        logging.debug("objects {}".format(objects))
+        logging.debug("dirs {}".format(dirs))
+        return objects + dirs
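+
+    # Worked example (fabricated mminfo output): if mminfo reports the
+    # savesets "a/b/x,disk", "a/b/y,disk" and "other,tape", then
+    #   list_bucket(prefix="a/b/")
+    # returns ["a/b/x", "a/b/y", "a/b/"] - the matching keys of the "disk"
+    # family plus their emulated parent "directory".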
+
+    def download_file(self, key, dest_path, decompress):
+        """
+        Download a file out of networker
+
+        :param str key: The key identifying the file to download
+        :param str dest_path: Where to put the destination file
+        :param str|None decompress: Compression scheme to use for decompression
+        """
+        logging.debug("Cloud download of {} from {}".format(dest_path, key))
+        client = key.replace(self.path + os.sep, "")
+        client = client.split('/')[0]
+        cp = subprocess.run(
+            ['mminfo', '-s', self.bucket_name, '-r', 'ssid',
+             '-q', 'client=' + client + ',name=' + key],
+            stdout=subprocess.PIPE, encoding='utf-8',
+        )
+        if cp.returncode > 0 or not cp.stdout:
+            logging.debug("Key: {} does not exist".format(key))
+            return None
+        # recover needs root permissions; elevate via sudo if necessary
+        cmd = ['recover']
+        if os.geteuid() != 0:
+            cmd = ['sudo', 'recover']
+        cp = subprocess.run(
+            cmd + ['-s', self.bucket_name, '-d', BASE_DIR, '-a',
+                   '-S', cp.stdout.rstrip()],
+            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, encoding='utf-8',
+        )
+        blob_reader = None
+        for s_line in cp.stdout.splitlines():
+            if re.search(key + '$', s_line):
+                s_line = re.sub(r'^\.', '', s_line)
+                logging.debug('Blob Path: ' + BASE_DIR + s_line)
+                blob_reader = open(BASE_DIR + s_line, "rb")
+        if blob_reader is None:
+            logging.error("Networker saveset recovery failed: " + cp.stdout)
+            return
+        with open(dest_path, "wb") as dest_file:
+            os.chmod(dest_path, 0o666)
+            if decompress is None:
+                dest_file.write(blob_reader.read())
+            else:
+                decompress_to_file(blob_reader, dest_file, decompress)
+        # always remove the staged copy, whether or not we decompressed
+        blob_reader.close()
+        os.remove(blob_reader.name)
+
+    def remote_open(self, key, decompressor=None):
+        """
+        Recover remote objects from networker to local disk and return a
+        readable stream
+
+        :param str key: The key identifying the object to open
+        :param barman.clients.cloud_compression.ChunkedCompressor decompressor:
+          A ChunkedCompressor object which will be used to decompress chunks of
+          bytes as they are read from the stream
+        :return: A file-like object from which the stream can be read, wrapped
+          in a DecompressingStreamingIO if a decompressor is given, or None if
+          the key does not exist
+        """
+        logging.debug("remote_open of {}".format(key))
+        client = key.replace(self.path + '/', "")
+        client = client.split('/')[0]
+        cp = subprocess.run(
+            ['mminfo', '-s', self.bucket_name, '-r', 'ssid',
+             '-q', 'client=' + client + ',name=' + key],
+            stdout=subprocess.PIPE, encoding='utf-8',
+        )
+        if cp.returncode > 0 or not cp.stdout:
+            logging.debug("Key: {} does not exist".format(key))
+            return None
+        cmd = ['recover']
+        if os.geteuid() != 0:
+            cmd = ['sudo', 'recover']
+        cp = subprocess.run(
+            cmd + ['-s', self.bucket_name, '-d', BASE_DIR, '-a', '-iY',
+                   '-S', cp.stdout.rstrip()],
+            stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, encoding='utf-8',
+        )
+        blob_reader = None
+        for s_line in cp.stdout.splitlines():
+            if re.search(key + '$', s_line):
+                s_line = re.sub(r'^\.', '', s_line)
+                logging.debug('Blob Path: ' + BASE_DIR + s_line)
+                blob_reader = open(BASE_DIR + s_line, "rb")
+        if blob_reader is None:
+            logging.debug("Blob Stream not available: " + cp.stdout)
+            return None
+        if decompressor:
+            return DecompressingStreamingIO(blob_reader, decompressor)
+        return blob_reader
+
+    def upload_fileobj(self, fileobj, key, override_tags=None):
+        """
+        Create a staging file from a file-like object stream and save it with
+        networker.
+
+        :param fileobj IOBase: File-like object to upload
+        :param str key: The key to identify the uploaded object
+        :param List[tuple] override_tags: List of tags as k,v tuples to be
+          added to the uploaded object
+        """
+        tags = override_tags or self.tags
+        logging.debug("upload_fileobj to {}".format(key))
+        (dir, file) = os.path.split(key)
+
+        dir = os.path.join(BASE_DIR, dir)
+        oldmask = os.umask(0)
+        os.makedirs(dir, mode=0o777, exist_ok=True)
+        os.umask(oldmask)
+
+        with open(os.path.join(BASE_DIR, key), "wb") as f:
+            f.write(fileobj.read())
+
+        args = ['save', '-q', '-b', self.path, '-N', key, os.path.join(BASE_DIR, key)]
+        if tags is not None:
+            for t in tags:
+                # a "nwargs" tag carries extra arguments for the save command
+                if t[0] == 'nwargs':
+                    args[2:2] = t[1].split(' ')
+        subprocess.run(args, stderr=subprocess.DEVNULL, encoding='utf-8')
+
+        if os.path.isdir(dir):
+            shutil.rmtree(dir)
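+
+    # Worked example of the "nwargs" tag splice in upload_fileobj (made-up
+    # values): with
+    #   args = ['save', '-q', '-b', 'barman', '-N', key, stage]
+    #   tags = [('nwargs', '-y 12/31/2024 -L')]
+    # the splice args[2:2] = t[1].split(' ') yields
+    #   ['save', '-q', '-y', '12/31/2024', '-L', '-b', 'barman', '-N', key, stage]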
+
+    def create_multipart_upload(self, key):
+        """
+        Networker does not support multipart savesets,
+        so for now parallel upload is a simple upload.
+
+        :param key: The key to use in the cloud service
+        :return: The multipart upload metadata (here an empty list, as there
+          is no upload handle)
+        :rtype: dict[str, str]|None
+        """
+        return []
+
+    def _upload_part(self, upload_metadata, key, body, part_number):
+        """
+        Upload a file
+
+        The part metadata will be included in a list of metadata for all parts
+        of the upload which is passed to the _complete_multipart_upload method.
+
+        :param dict upload_metadata: Provider-specific metadata for this upload
+          e.g. the multipart upload handle in AWS S3
+        :param str key: The key to use in the cloud service
+        :param object body: A stream-like object to upload
+        :param int part_number: Part number, starting from 1
+        :return: The part metadata
+        :rtype: dict[str, None|str]
+        """
+        self.upload_fileobj(body, key)
+        return {
+            "PartNumber": part_number,
+        }
+
+    def _complete_multipart_upload(self, upload_metadata, key, parts_metadata):
+        """
+        Finish a certain multipart upload.
+        There is nothing to do here as we are not using multipart.
+
+        :param dict upload_metadata: Provider-specific metadata for this upload
+          e.g. the multipart upload handle in AWS S3
+        :param str key: The key to use in the cloud service
+        :param List[dict] parts_metadata: The list of metadata for the parts
+          composing the multipart upload. Each part is guaranteed to provide a
+          PartNumber and may optionally contain additional metadata returned by
+          the cloud provider such as ETags.
+        """
+        pass
+
+    def _abort_multipart_upload(self, upload_metadata, key):
+        """
+        Abort a certain multipart upload
+
+        The implementation of this method should clean up any dangling
+        resources left by the incomplete upload.
+
+        :param dict upload_metadata: Provider-specific metadata for this upload
+          e.g. the multipart upload handle in AWS S3
+        :param str key: The key to use in the cloud service
+        """
+        # Delete anything which may already have been uploaded for this key.
+        # Note that delete_objects raises RuntimeError if the saveset does
+        # not exist (yet).
+        self.delete_objects([key])
+
+    def delete_objects(self, paths):
+        """
+        Delete the objects at the specified paths
+
+        :param List[str] paths:
+        """
+        failures = {}
+        for path in list(set(paths)):
+            client = path.replace(self.path + '/', "")
+            client = client.split('/')[0]
+            cp = subprocess.run(
+                ['mminfo', '-s', self.bucket_name, '-r', 'ssid',
+                 '-q', 'client=' + client + ',name=' + path],
+                stdout=subprocess.PIPE, encoding='utf-8',
+            )
+            if cp.returncode > 0 or not cp.stdout:
+                logging.debug("Key: {} does not exist".format(path))
+                failures[path] = ['Saveset does not exist', path]
+                continue
+            cp = subprocess.run(
+                ['nsrmm', '-s', self.bucket_name, '-dy',
+                 '-S', cp.stdout.rstrip()],
+                stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
+                encoding='utf-8',
+            )
+            if cp.returncode > 0:
+                logging.debug("Key: {} delete failed".format(path))
+                failures[path] = ['Saveset delete failed', path]
+
+        if failures:
+            logging.error(failures)
+            raise RuntimeError("Could not delete all networker savesets")
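Since `MAX_CHUNKS_PER_FILE` is 1, the multipart hooks above degenerate into a single `save` call. Roughly, the upload path driven by the base class reduces to the following sketch (in-memory body and made-up names; actually running it requires a registered Networker client plus the `save` and `mminfo` tools):

```python
import io

from barman.cloud_providers.networker_storage import NetworkerInterface

interface = NetworkerInterface("nw://nsr-server/barman", server_name="pg1.example.com")
key = "pg1.example.com/wals/0000000100000000/000000010000000000000001"
body = io.BytesIO(b"...WAL segment bytes...")

handle = interface.create_multipart_upload(key)            # [] - no real handle
part = interface._upload_part(handle, key, body, 1)        # stages + saves the file
interface._complete_multipart_upload(handle, key, [part])  # no-op
```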
diff --git a/doc/barman-cloud-backup-delete.1 b/doc/barman-cloud-backup-delete.1
index 60328d82d..2be557fe1 100644
--- a/doc/barman-cloud-backup-delete.1
+++ b/doc/barman-cloud-backup-delete.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-CLOUD\-BACKUP\-DELETE" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
@@ -101,7 +101,7 @@ about the objects which would be deleted to stdout
 .RS
 .RE
 .TP
-.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage}
+.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage,networker\-storage}
 the cloud provider to which the backup should be uploaded
 .RS
 .RE
@@ -162,6 +162,10 @@ azure\-identity (optional, if you wish to use DefaultAzureCredential)
 .PP
 If using \f[C]\-\-cloud\-provider=google\-cloud\-storage\f[] *
 google\-cloud\-storage
+.PP
+If using \f[C]\-\-cloud\-provider=networker\-storage\f[] * Networker
+Client and Extended Client Software from DellEMC * Client Registration
+on Networker Server
 .SH EXIT STATUS
 .TP
 .B 0
diff --git a/doc/barman-cloud-backup-delete.1.md b/doc/barman-cloud-backup-delete.1.md
index 04a5ef316..847dae3a7 100644
--- a/doc/barman-cloud-backup-delete.1.md
+++ b/doc/barman-cloud-backup-delete.1.md
@@ -80,7 +80,7 @@ SERVER_NAME
 :   run without actually deleting any objects while printing information
     about the objects which would be deleted to stdout
 
---cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage}
+--cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage,networker-storage}
 :   the cloud provider to which the backup should be uploaded
 
 -P, --profile
@@ -130,6 +130,10 @@ If using `--cloud-provider=azure-blob-storage`:
 If using `--cloud-provider=google-cloud-storage`
 * google-cloud-storage
 
+If using `--cloud-provider=networker-storage`
+* Networker Client and Extended Client Software from DellEMC
+* Client Registration on Networker Server
+
 # EXIT STATUS
 
 0
diff --git a/doc/barman-cloud-backup-keep.1 b/doc/barman-cloud-backup-keep.1
index be39961b7..d98a17c94 100644
--- a/doc/barman-cloud-backup-keep.1
+++ b/doc/barman-cloud-backup-keep.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-CLOUD\-BACKUP\-DELETE" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
@@ -94,7 +94,7 @@ either directly or by retention policy.
 .RS
 .RE
 .TP
-.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage}
+.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage,networker\-storage}
 the cloud provider to which the backup should be uploaded
 .RS
 .RE
@@ -155,6 +155,19 @@ azure\-identity (optional, if you wish to use DefaultAzureCredential)
 .PP
 If using \f[C]\-\-cloud\-provider=google\-cloud\-storage\f[] *
 google\-cloud\-storage
+.PP
+If using \f[C]\-\-cloud\-provider=networker\-storage\f[] * Networker
+Client and Extended Client Software from DellEMC * Client Registration
+on Networker Server
+.IP \[bu] 2
+This command has no effect when using \f[C]networker\-storage\f[],
+because Networker is a fully featured backup application with its own
+retention policy handling.
+Additionally, Networker backup expiration cannot be disabled.
+Networker provides a completely separate command set, storage pools and
+handling for archiving.
+Therefore the decision for long\-term archiving has to be made when
+creating the backup and cannot be reversed later.
 .SH EXIT STATUS
 .TP
 .B 0
diff --git a/doc/barman-cloud-backup-keep.1.md b/doc/barman-cloud-backup-keep.1.md
index 6a2bf0637..69329afca 100644
--- a/doc/barman-cloud-backup-keep.1.md
+++ b/doc/barman-cloud-backup-keep.1.md
@@ -73,7 +73,7 @@ BACKUP_ID
     status and make it available for deletion, either directly or by
     retention policy.
 
---cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage}
+--cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage,networker-storage}
 :   the cloud provider to which the backup should be uploaded
 
 -P, --profile
@@ -123,6 +123,16 @@ If using `--cloud-provider=azure-blob-storage`:
 If using `--cloud-provider=google-cloud-storage`
 * google-cloud-storage
 
+If using `--cloud-provider=networker-storage`
+* Networker Client and Extended Client Software from DellEMC
+* Client Registration on Networker Server
+
+* This command has no effect when using `networker-storage`, because Networker
+  is a fully featured backup application with its own retention policy handling.
+  Additionally, Networker backup expiration cannot be disabled. Networker provides
+  a completely separate command set, storage pools and handling for archiving.
+  Therefore the decision for long-term archiving has to be made when creating the
+  backup and cannot be reversed later.
+
 # EXIT STATUS
 
 0
diff --git a/doc/barman-cloud-backup-list.1 b/doc/barman-cloud-backup-list.1
index d6e44ad89..2e09aa6e8 100644
--- a/doc/barman-cloud-backup-list.1
+++ b/doc/barman-cloud-backup-list.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-CLOUD\-BACKUP\-LIST" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
@@ -63,7 +63,7 @@ output format (default: `console')
 .RS
 .RE
 .TP
-.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage}
+.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage,networker\-storage}
 the cloud provider to which the backup should be uploaded
 .RS
 .RE
@@ -124,6 +124,10 @@ azure\-identity (optional, if you wish to use DefaultAzureCredential)
 .PP
 If using \f[C]\-\-cloud\-provider=google\-cloud\-storage\f[] *
 google\-cloud\-storage
+.PP
+If using \f[C]\-\-cloud\-provider=networker\-storage\f[] * Networker
+Client and Extended Client Software from DellEMC * Client Registration
+on Networker Server
 .SH EXIT STATUS
 .TP
 .B 0
diff --git a/doc/barman-cloud-backup-list.1.md b/doc/barman-cloud-backup-list.1.md
index cf129b755..39dc5372a 100644
--- a/doc/barman-cloud-backup-list.1.md
+++ b/doc/barman-cloud-backup-list.1.md
@@ -52,7 +52,7 @@ SERVER_NAME
 --format {json,console}
 :   output format (default: 'console')
 
---cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage}
+--cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage,networker-storage}
 :   the cloud provider to which the backup should be uploaded
 
 -P, --profile
@@ -102,6 +102,10 @@ If using `--cloud-provider=azure-blob-storage`:
 If using `--cloud-provider=google-cloud-storage`
 * google-cloud-storage
 
+If using `--cloud-provider=networker-storage`
+* Networker Client and Extended Client Software from DellEMC
+* Client Registration on Networker Server
+
 # EXIT STATUS
 
 0
diff --git a/doc/barman-cloud-backup.1 b/doc/barman-cloud-backup.1
index a461aa732..a902d6d32 100644
--- a/doc/barman-cloud-backup.1
+++ b/doc/barman-cloud-backup.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-CLOUD\-BACKUP" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
@@ -125,7 +125,7 @@ maximum size of an archive when uploading to cloud storage (default:
 .RS
 .RE
 .TP
-.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage}
+.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage,networker\-storage}
 the cloud provider to which the backup should be uploaded
 .RS
 .RE
@@ -208,6 +208,10 @@ azure\-identity (optional, if you wish to use DefaultAzureCredential)
 .PP
 If using \f[C]\-\-cloud\-provider=google\-cloud\-storage\f[] *
 google\-cloud\-storage
+.PP
+If using \f[C]\-\-cloud\-provider=networker\-storage\f[] * Networker
+Client and Extended Client Software from DellEMC * Client Registration
+on Networker Server
 .SH EXIT STATUS
 .TP
 .B 0
diff --git a/doc/barman-cloud-backup.1.md b/doc/barman-cloud-backup.1.md
index 4cb97e11f..398f99287 100644
--- a/doc/barman-cloud-backup.1.md
+++ b/doc/barman-cloud-backup.1.md
@@ -91,7 +91,7 @@ SERVER_NAME
 -S MAX_ARCHIVE_SIZE, --max-archive-size MAX_ARCHIVE_SIZE
 :   maximum size of an archive when uploading to cloud storage
     (default: 100GB)
 
---cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage}
+--cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage,networker-storage}
 :   the cloud provider to which the backup should be uploaded
 
 --tags KEY1,VALUE1 KEY2,VALUE2 ...
@@ -157,6 +157,10 @@ If using `--cloud-provider=azure-blob-storage`:
 If using `--cloud-provider=google-cloud-storage`
 * google-cloud-storage
 
+If using `--cloud-provider=networker-storage`
+* Networker Client and Extended Client Software from DellEMC
+* Client Registration on Networker Server
+
 # EXIT STATUS
 
 0
diff --git a/doc/barman-cloud-check-wal-archive.1 b/doc/barman-cloud-check-wal-archive.1
index 53ad98e81..872e56da0 100644
--- a/doc/barman-cloud-check-wal-archive.1
+++ b/doc/barman-cloud-check-wal-archive.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-CLOUD\-CHECK\-WAL\-ARCHIVE" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
@@ -71,7 +71,7 @@ fail.
 .RS
 .RE
 .TP
-.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage}
+.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage,networker\-storage}
 the cloud provider to which the backup should be uploaded
 .RS
 .RE
@@ -132,6 +132,10 @@ azure\-identity (optional, if you wish to use DefaultAzureCredential)
 .PP
 If using \f[C]\-\-cloud\-provider=google\-cloud\-storage\f[] *
 google\-cloud\-storage
+.PP
+If using \f[C]\-\-cloud\-provider=networker\-storage\f[] * Networker
+Client and Extended Client Software from DellEMC * Client Registration
+on Networker Server
 .SH EXIT STATUS
 .TP
 .B 0
diff --git a/doc/barman-cloud-check-wal-archive.1.md b/doc/barman-cloud-check-wal-archive.1.md
index 123c912f6..fcb94da08 100644
--- a/doc/barman-cloud-check-wal-archive.1.md
+++ b/doc/barman-cloud-check-wal-archive.1.md
@@ -57,7 +57,7 @@ SERVER_NAME
     to earlier timelines. If any WAL files are on this timeline or greater
     then the check will fail.
 
---cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage}
+--cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage,networker-storage}
 :   the cloud provider to which the backup should be uploaded
 
 -P, --profile
@@ -107,6 +107,10 @@ If using `--cloud-provider=azure-blob-storage`:
 If using `--cloud-provider=google-cloud-storage`
 * google-cloud-storage
 
+If using `--cloud-provider=networker-storage`
+* Networker Client and Extended Client Software from DellEMC
+* Client Registration on Networker Server
+
 # EXIT STATUS
 
 0
diff --git a/doc/barman-cloud-restore.1 b/doc/barman-cloud-restore.1
index 7ce60f8a3..37c873f79 100644
--- a/doc/barman-cloud-restore.1
+++ b/doc/barman-cloud-restore.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-CLOUD\-RESTORE" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
@@ -74,7 +74,7 @@ original location (you may repeat the option for multiple tablespaces)
 .RS
 .RE
 .TP
-.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage}
+.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage,networker\-storage}
 the cloud provider to which the backup should be uploaded
 .RS
 .RE
@@ -135,6 +135,10 @@ azure\-identity (optional, if you wish to use DefaultAzureCredential)
 .PP
 If using \f[C]\-\-cloud\-provider=google\-cloud\-storage\f[] *
 google\-cloud\-storage
+.PP
+If using \f[C]\-\-cloud\-provider=networker\-storage\f[] * Networker
+Client and Extended Client Software from DellEMC * Client Registration
+on Networker Server
 .SH EXIT STATUS
 .TP
 .B 0
diff --git a/doc/barman-cloud-restore.1.md b/doc/barman-cloud-restore.1.md
index 51470ba5e..0586f1de2 100644
--- a/doc/barman-cloud-restore.1.md
+++ b/doc/barman-cloud-restore.1.md
@@ -59,7 +59,7 @@ RECOVERY_DIR
 :   extract the named tablespace to the given directory instead of its
     original location (you may repeat the option for multiple tablespaces)
 
---cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage}
+--cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage,networker-storage}
 :   the cloud provider to which the backup should be uploaded
 
 -P, --profile
@@ -109,6 +109,10 @@ If using `--cloud-provider=azure-blob-storage`:
 If using `--cloud-provider=google-cloud-storage`
 * google-cloud-storage
 
+If using `--cloud-provider=networker-storage`
+* Networker Client and Extended Client Software from DellEMC
+* Client Registration on Networker Server
+
 # EXIT STATUS
 
 0
diff --git a/doc/barman-cloud-wal-archive.1 b/doc/barman-cloud-wal-archive.1
index d5feea68c..086981df4 100644
--- a/doc/barman-cloud-wal-archive.1
+++ b/doc/barman-cloud-wal-archive.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-CLOUD\-WAL\-ARCHIVE" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
@@ -88,7 +88,7 @@ python\-snappy library and should not be used with python < 3.3)
 .RS
 .RE
 .TP
-.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage}
+.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage,networker\-storage}
 the cloud provider to which the backup should be uploaded
 .RS
 .RE
@@ -206,6 +206,10 @@ azure\-identity (optional, if you wish to use DefaultAzureCredential)
 .PP
 If using \f[C]\-\-cloud\-provider=google\-cloud\-storage\f[] *
 google\-cloud\-storage
+.PP
+If using \f[C]\-\-cloud\-provider=networker\-storage\f[] * Networker
+Client and Extended Client Software from DellEMC * Client Registration
+on Networker Server
 .SH EXIT STATUS
 .TP
 .B 0
diff --git a/doc/barman-cloud-wal-archive.1.md b/doc/barman-cloud-wal-archive.1.md
index 7688619dd..89b5a5c7d 100644
--- a/doc/barman-cloud-wal-archive.1.md
+++ b/doc/barman-cloud-wal-archive.1.md
@@ -71,7 +71,7 @@ WAL_PATH
     (requires optional python-snappy library and should not be used with
     python < 3.3)
 
---cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage}
+--cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage,networker-storage}
 :   the cloud provider to which the backup should be uploaded
 
 --tags KEY1,VALUE1 KEY2,VALUE2 ...
@@ -157,6 +157,10 @@ If using `--cloud-provider=azure-blob-storage`:
 If using `--cloud-provider=google-cloud-storage`
 * google-cloud-storage
 
+If using `--cloud-provider=networker-storage`
+* Networker Client and Extended Client Software from DellEMC
+* Client Registration on Networker Server
+
 # EXIT STATUS
 
 0
diff --git a/doc/barman-cloud-wal-restore.1 b/doc/barman-cloud-wal-restore.1
index a66be340c..e68f4ceb6 100644
--- a/doc/barman-cloud-wal-restore.1
+++ b/doc/barman-cloud-wal-restore.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-CLOUD\-WAL\-RESTORE" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
@@ -71,7 +71,7 @@ test connectivity to the cloud destination and exit
 .RS
 .RE
 .TP
-.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage}
+.B \[en]cloud\-provider {aws\-s3,azure\-blob\-storage,google\-cloud\-storage,networker\-storage}
 the cloud provider to which the backup should be uploaded
 .RS
 .RE
@@ -132,6 +132,10 @@ azure\-identity (optional, if you wish to use DefaultAzureCredential)
 .PP
 If using \f[C]\-\-cloud\-provider=google\-cloud\-storage\f[] *
 google\-cloud\-storage
+.PP
+If using \f[C]\-\-cloud\-provider=networker\-storage\f[] * Networker
+Client and Extended Client Software from DellEMC * Client Registration
+on Networker Server
 .SH EXIT STATUS
 .TP
 .B 0
diff --git a/doc/barman-cloud-wal-restore.1.md b/doc/barman-cloud-wal-restore.1.md
index 6e07941bb..94aa239b8 100644
--- a/doc/barman-cloud-wal-restore.1.md
+++ b/doc/barman-cloud-wal-restore.1.md
@@ -55,7 +55,7 @@ WAL_PATH
 -t, --test
 :   test connectivity to the cloud destination and exit
 
---cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage}
+--cloud-provider {aws-s3,azure-blob-storage,google-cloud-storage,networker-storage}
 :   the cloud provider to which the backup should be uploaded
 
 -P, --profile
@@ -105,6 +105,10 @@ If using `--cloud-provider=azure-blob-storage`:
 If using `--cloud-provider=google-cloud-storage`
 * google-cloud-storage
 
+If using `--cloud-provider=networker-storage`
+* Networker Client and Extended Client Software from DellEMC
+* Client Registration on Networker Server
+
 # EXIT STATUS
 
 0
diff --git a/doc/barman-wal-restore.1 b/doc/barman-wal-restore.1
index e8f0ea45c..afa7c6998 100644
--- a/doc/barman-wal-restore.1
+++ b/doc/barman-wal-restore.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN\-WAL\-RESTORE" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
diff --git a/doc/barman.1 b/doc/barman.1
index 4fa9176b2..ca084b52e 100644
--- a/doc/barman.1
+++ b/doc/barman.1
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN" "1" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
diff --git a/doc/barman.5 b/doc/barman.5
index 23195b015..9acefb110 100644
--- a/doc/barman.5
+++ b/doc/barman.5
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pandoc 2.2.1
+.\" Automatically generated by Pandoc 2.0.6
 .\"
 .TH "BARMAN" "5" "September 14, 2022" "Barman User manuals" "Version 3.1.0"
 .hy
diff --git a/doc/manual/55-barman-cli.en.md b/doc/manual/55-barman-cli.en.md
index 69a85386f..e30a3bdf8 100644
--- a/doc/manual/55-barman-cli.en.md
+++ b/doc/manual/55-barman-cli.en.md
@@ -48,6 +48,7 @@ Supported cloud providers are:
 * AWS S3 (or any S3 compatible object store)
 * Azure Blob Storage
 * Google Cloud Storage (Rest API)
+* Networker Backup Software
 
 These utilities are distributed in the `barman-cli-cloud` RPM/Debian package,
 and can be installed alongside the PostgreSQL server:
@@ -101,6 +102,14 @@ The following environment variables are supported: `AZURE_STORAGE_CONNECTION_STR
 `--credential` option to specify either `azure-cli` or `managed-identity`
 credentials in order to authenticate via Azure Active Directory.
 
+> **WARNING:** Cloud utilities require the appropriate library for the cloud
+> provider you wish to use - either: [boto3][boto3] or
+> [azure-storage-blob][azure-storage-blob] and (optionally)
+> [azure-identity][azure-identity]
+
+For Networker the PostgreSQL server has to be configured as a backup client.
+The Networker Client and Extended Client Software packages have to be installed.
+
 ## Installation
 
 Barman client utilities for the Cloud need to be installed on those PostgreSQL
@@ -156,6 +165,7 @@ and WALs. This can be set to one of the following:
 * `aws-s3` [DEFAULT]: AWS S3 or S3-compatible object store.
 * `azure-blob-storage`: Azure Blob Storage service.
 * `google-cloud-storage`: Google Cloud Storage service.
+* `networker-storage`: Networker Backup Software
 
 ## Specificity by provider
 
@@ -200,5 +210,60 @@ Some details are specific to all barman cloud commands:
    or https://console.cloud.google.com/storage/browser/BUCKET_NAME/path
    ```
 
+### Networker Storage
+
+#### Setup
+
+Copy the necessary software packages to the PostgreSQL server and install the
+client software as follows:
+
+```bash
+dnf -y install lgtoclnt lgtoxtdclnt lgtoman sudo
+```
+
+The barman storage module needs the **mminfo**, **recover** and **nsrmm**
+networker commands.
+The system user for the PostgreSQL database has to be enabled in sudoers,
+because some of the networker functions can only be performed as `root`;
+this restriction is hard-coded into the software. In general, all recover
+operations need `root` permissions, so listing or restoring data is best done
+as `root`. When run as a normal user, the module tries to elevate itself to
+`root` using sudo.
+
+This is especially important when configuring the PostgreSQL
+`restore_command`, e.g.
+
+```
+restore_command = "sudo barman-cloud-wal-restore --cloud-provider=networker-storage nw://..."
+```
+
+The module uses the directory `/nsr/cache/cloudboost/barman` as a local
+staging location and will create this directory if it does not exist. This
+assumes that `/nsr/cache/cloudboost` was created by the networker client;
+older clients may not do this, so check for its existence and create it
+yourself if necessary, e.g. by:
+
+```bash
+mkdir -m 0777 -p /nsr/cache/cloudboost/barman
+```
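+
+At first use the module itself only performs the non-recursive equivalent of
+the manual step above (taken from `_create_bucket`), which is why the parent
+directory `/nsr/cache/cloudboost` must already exist:
+
+```python
+import os
+
+BASE_DIR = "/nsr/cache/cloudboost/barman"
+if not os.path.isdir(BASE_DIR):
+    os.mkdir(BASE_DIR)  # fails if /nsr/cache/cloudboost is missing
+    os.chmod(BASE_DIR, 0o777)
+```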
+
+If you have an `/etc/sudoers.d` directory, create a file `barman.conf` in it
+with the following content; if not, append the line to the `/etc/sudoers`
+file. This assumes that your database user is named `postgres`.
+
+```
+postgres ALL=(ALL) NOPASSWD: ALL
+```
+
+#### Usage
+
+Parameters specific to all of the barman cloud commands:
+
+* Select the networker storage provider by `--cloud-provider=networker-storage`
+* `SOURCE_URL` has to be in the following format:
+  ```
+  nw://<networker-server>/<path>
+  ```
+
+Parameters specific to the barman cloud backup commands:
+
+* Parameters for the networker `save` command can be specified through the
+  barman `--tags "nwargs,..."` parameter, e.g.
+  ```
+  --tags "nwargs,-y ${RETENTION_TIME} -w ${BROWSE_TIME} -L"
+  ```
+  This can be useful because networker has its own retention policies and
+  management: although old backups can be removed by
+  `barman-cloud-backup-delete`, networker will do the same and may even have
+  done it already.
+
+`barman-cloud-backup-keep` has no effect when using networker as a storage
+provider. Regular networker backups will always expire, and networker uses a
+completely separate command set and storage pools for archiving, so the
+decision for archiving has to be made when creating the backup and cannot be
+reversed later. In addition, networker archives are intentionally left out of
+the browse index, which the barman module uses as storage for the backup keys.
+Therefore archiving is not supported at all with networker.
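+
+For reference, this is roughly how the module resolves a backup key to a
+networker saveset id (ssid) and deletes it - a simplified sketch of
+`delete_objects`, with a made-up server and key, and without the URL path
+prefix handling:
+
+```python
+import subprocess
+
+server = "nsr-server"                                  # from nw://<networker-server>/...
+key = "pg1.example.com/base/20220914T120000/data.tar"  # hypothetical backup key
+client = key.split("/")[0]
+cp = subprocess.run(
+    ["mminfo", "-s", server, "-r", "ssid", "-q", "client=" + client + ",name=" + key],
+    stdout=subprocess.PIPE, encoding="utf-8",
+)
+if cp.stdout:
+    subprocess.run(["nsrmm", "-s", server, "-dy", "-S", cp.stdout.rstrip()])
+```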