---
# config-template.yaml
# Configuration template: define the cloud sources to collect from and the
# database destination to sync the collected data into.
# Collection sources. Each top-level key below configures one collector;
# leave a section at its defaults (or remove it) for providers you do not use.
sources:
  # Amazon Web Services collector.
  aws:
    # AWS Access Key ID (null to load from env - recommended)
    access_key_id: null
    # AWS Secret Access Key (null to load from env - recommended)
    secret_access_key: null
    # IAM role name to assume
    role: null
    # List of AWS profiles to collect
    profiles: null
    # List of AWS Account ID(s) to collect (null for all if scrape_org is true)
    account: null
    # List of AWS Regions to collect (null for all)
    region: null
    # Scrape the entire AWS organization
    scrape_org: false
    # Fork collector process instead of using threads
    fork_process: true
    # List of accounts to exclude when scraping the org
    scrape_exclude_account: []
    # Assume given role in current account
    assume_current: false
    # Do not scrape current account
    do_not_scrape_current: false
    # Account thread/process pool size
    account_pool_size: 8
    # Region thread pool size
    region_pool_size: 32
    # Number of threads available shared for all regions
    shared_pool_size: 32
    # Number of threads to collect a single region
    region_resources_pool_size: 2
    # List of AWS services to collect (default: all)
    collect: []
    # List of AWS services to exclude (default: none)
    no_collect: []
    # This value is used to look up atime and mtime for volumes and rds instances.
    # It defines how long Resoto should look back for CloudWatch metrics.
    # If no metric is found, now-period is used as atime and mtime. Defaults to 60 days.
    cloudwatch_metrics_for_atime_mtime_period: '60d'
    # Granularity of atime and mtime.
    # Higher precision is more expensive: Resoto will fetch period * granularity data points.
    # Defaults to 1 hour.
    cloudwatch_metrics_for_atime_mtime_granularity: '1h'
  # Google Cloud Platform collector.
  gcp:
    # GCP service account file(s)
    service_account: []
    # GCP project(s)
    project: []
    # GCP services to collect (default: all)
    collect: []
    # GCP services to exclude (default: none)
    no_collect: []
    # GCP project thread/process pool size
    project_pool_size: 8
    # Fork collector process instead of using threads
    fork_process: true
  # DigitalOcean collector.
  digitalocean:
    # DigitalOcean API tokens for the teams to be collected
    api_tokens: []
    # DigitalOcean Spaces access keys for the teams to be collected, separated by colons
    spaces_access_keys: []
  # Kubernetes collector.
  k8s:
    # Configure access via kubeconfig files.
    # Structure:
    # - path: "/path/to/kubeconfig"
    #   all_contexts: false
    #   contexts: ["context1", "context2"]
    config_files: []
    # Alternative: configure access to k8s clusters directly in the config.
    # Structure:
    # - name: 'k8s-cluster-name'
    #   certificate_authority_data: 'CERT'
    #   server: 'https://k8s-cluster-server.example.com'
    #   token: 'TOKEN'
    configs: []
    # Objects to collect (default: all)
    collect: []
    # Objects to exclude (default: none)
    no_collect: []
    # Thread/process pool size
    pool_size: 8
    # Fork collector process instead of using threads
    fork_process: false
  # Slack collector.
  slack:
    # Bot token
    bot_token: null
    # Include archived channels
    include_archived: false
    # Do not verify the Slack API server TLS certificate
    do_not_verify_ssl: false
  # OneLogin collector.
  onelogin:
    # Onelogin region
    region: 'us'
    # Onelogin client ID
    client_id: null
    # Onelogin client secret
    client_secret: null
# Sync destination. The key under `destinations` selects the target database
# driver; `posgresql` was a typo and would not match any supported target.
destinations:
  # Define the destination to sync.
  # PostgreSQL is listed here as example - see README.md for a list of possible targets.
  postgresql:
    # Database server host or IP address.
    host: 127.0.0.1
    # Database server port.
    port: 5432
    # Login user name.
    user: postgres
    # NOTE(review): do not commit real credentials - inject the password via an
    # environment variable or secret store instead of this placeholder.
    password: changeme
    # Target database name.
    database: cloud2sql