; Licensed to the Apache Software Foundation (ASF) under one
; or more contributor license agreements.  See the NOTICE file
; distributed with this work for additional information
; regarding copyright ownership.  The ASF licenses this file
; to you under the Apache License, Version 2.0 (the
; "License"); you may not use this file except in compliance
; with the License.  You may obtain a copy of the License at
;
;     http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing,
; software distributed under the License is distributed on an
; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
; KIND, either express or implied.  See the License for the
; specific language governing permissions and limitations
; under the License.
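
# Note: this is rDSN-style INI. A section whose name contains "..default"
# ([apps..default], [threadpool..default], [task..default]) supplies
# defaults that the more specific sections of the same prefix inherit
# and may override.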
[apps..default]
    run = true
    count = 1

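# The @META_PORT@, @REPLICA_PORT@ and @PROMETHEUS_PORT@ tokens below are
# placeholders, presumably substituted with concrete port numbers by the
# onebox start scripts before the servers read this file.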
[apps.meta]
    type = meta
    ports = @META_PORT@
    pools = THREAD_POOL_DEFAULT,THREAD_POOL_META_SERVER,THREAD_POOL_META_STATE,THREAD_POOL_FD,THREAD_POOL_DLOCK,THREAD_POOL_BLOCK_SERVICE

[apps.replica]
    type = replica
    ports = @REPLICA_PORT@
    pools = THREAD_POOL_DEFAULT,THREAD_POOL_REPLICATION_LONG,THREAD_POOL_REPLICATION,THREAD_POOL_FD,THREAD_POOL_LOCAL_APP,THREAD_POOL_BLOCK_SERVICE,THREAD_POOL_COMPACT,THREAD_POOL_INGESTION,THREAD_POOL_PLOG,THREAD_POOL_SCAN

[apps.collector]
    type = collector
    ports = 34101
    pools = THREAD_POOL_DEFAULT,THREAD_POOL_REPLICATION

[core]
    tool = nativerun
    toollets = profiler
    enable_default_app_mimic = true
    logging_start_level = LOG_LEVEL_INFO

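# primary_interface presumably selects the interface/hostname used to
# form this server's primary RPC address; the trailing-dot value below
# looks like it was filled in with the local hostname by the onebox
# scripts.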
[network]
    primary_interface = lpf-desk.

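# A block service backed by the local filesystem; args gives its root
# directory (used, for example, to store cold backup files in onebox).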
[block_service.local_service]
    type = local_service
    args = ../block_service/local_service

[tools.simple_logger]
    short_header = false
    stderr_start_level = LOG_LEVEL_ERROR

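# Thread pool tuning. Each pool referenced in the apps' pools lists above
# is configured below; values a pool does not set fall back to
# [threadpool..default].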
[threadpool..default]
    worker_count = 4
    worker_priority = THREAD_xPRIORITY_NORMAL
    partitioned = false

[threadpool.THREAD_POOL_DEFAULT]
    name = default
    # The worker count in THREAD_POOL_DEFAULT must be >= 5, because the
    # info collector server runs four timer tasks (LPC_PEGASUS_APP_STAT_TIMER,
    # LPC_PEGASUS_STORAGE_SIZE_STAT_TIMER, LPC_DETECT_AVAILABLE and
    # LPC_PEGASUS_CAPACITY_UNIT_STAT_TIMER), each of which occupies a thread
    # in this pool. Each timer task issues an RPC to the meta server and then
    # blocks waiting for the RPC's callback, which also needs a thread from
    # this pool. With worker_count <= 4, the four timer tasks can occupy all
    # threads, so no callback can be scheduled: the timer tasks wait for their
    # callbacks while the callbacks wait for a free thread, i.e. a deadlock.
    worker_count = 5

[threadpool.THREAD_POOL_REPLICATION]
    name = replica
    partitioned = true
    worker_count = 2

[threadpool.THREAD_POOL_META_STATE]
    name = meta_state
    partitioned = true
    worker_count = 1

[threadpool.THREAD_POOL_DLOCK]
    name = dist_lock
    partitioned = true
    worker_count = 1

[threadpool.THREAD_POOL_FD]
    name = fd
    worker_count = 2

[threadpool.THREAD_POOL_LOCAL_APP]
    name = local_app
    worker_count = 2

[threadpool.THREAD_POOL_SCAN]
    name = scan_query
    worker_count = 2

[threadpool.THREAD_POOL_REPLICATION_LONG]
    name = rep_long
    worker_count = 2

[threadpool.THREAD_POOL_BLOCK_SERVICE]
    name = block_service
    worker_count = 1

[threadpool.THREAD_POOL_COMPACT]
    name = compact
    worker_count = 1

[threadpool.THREAD_POOL_INGESTION]
    name = ingestion
    partitioned = false
    worker_count = 2

[threadpool.THREAD_POOL_PLOG]
    name = plog
    partitioned = true
    worker_count = 4

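# Meta server ensemble for this cluster. Cluster metadata and the
# distributed lock are kept in ZooKeeper (see [zookeeper] below), under
# the cluster_root / distributed_lock_service_parameters paths.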
[meta_server]
    server_list = lpf-desk.:34601,lpf-desk.:34602,lpf-desk.:34603
    cluster_root = /pegasus/onebox/lpf-desk.
    distributed_lock_service_type = distributed_lock_service_zookeeper
    distributed_lock_service_parameters = /pegasus/onebox/lpf-desk.
    meta_state_service_type = meta_state_service_zookeeper
    stable_rs_min_running_seconds = 0
    server_load_balancer_type = greedy_load_balancer
    min_live_node_count_for_unfreeze = 1
    cold_backup_disabled = false
    recover_from_replica_server = false

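# mutation_2pc_min_replica_count = 1 allows a write to commit with only
# one live replica, which suits a single-host onebox cluster; a real
# deployment would normally require at least 2.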
[replication]
    mutation_2pc_min_replica_count = 1
    disk_min_available_space_ratio = 10
    cold_backup_root = onebox
    cluster_name = onebox

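# Tables that the meta server creates automatically when the cluster is
# first initialized.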
[meta_server.apps.temp]
    app_name = temp
    app_type = pegasus
    partition_count = 8

[meta_server.apps.stat]
    app_name = stat
    app_type = pegasus
    partition_count = 4

[pegasus.server]
    # Where the metrics are collected. If no value is given, no sink is used.
    # Options:
    #   - falcon
    #   - prometheus
    perf_counter_sink =
    # The HTTP port exposed to Prometheus for pulling metrics from pegasus server.
    prometheus_port = @PROMETHEUS_PORT@
    encrypt_data_at_rest = false

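# The collector uses the 'stat' table created above both for availability
# detection (periodic reads and writes) and for storing usage statistics.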
[pegasus.collector]
    available_detect_app = stat
    available_detect_alert_script_dir = ./package/bin
    usage_stat_app = stat
    enable_detect_hotkey = false

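# Known clusters and their meta server address lists; duplication uses
# these addresses to reach the remote cluster.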
[pegasus.clusters]
    onebox = lpf-desk.:34601,lpf-desk.:34602,lpf-desk.:34603
    onebox2 = 0.0.0.0:35601

# The group of clusters participating in duplication.
# Each cluster is assigned a unique cluster id in [1, 127] to identify
# which cluster a write comes from.
[duplication-group]
    onebox = 1
    onebox2 = 2

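# ZooKeeper ensemble backing meta_state_service_zookeeper and
# distributed_lock_service_zookeeper configured in [meta_server] above.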
[zookeeper]
    hosts_list = 127.0.0.1:22181
    timeout_ms = 60000
    logfile = zoo.log

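# Per-task-code tracing, profiling and RPC options. [task..default]
# applies to every task code; a [task.<TASK_CODE>] section overrides it
# for that code only.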
[task..default]
    is_trace = false
    is_profile = false
    allow_inline = false
    fast_execution_in_network_thread = false
    rpc_call_header_format = NET_HDR_DSN
    rpc_call_channel = RPC_CHANNEL_TCP
    rpc_timeout_milliseconds = 5000

[task.RPC_PREPARE]
    is_profile = true

[task.RPC_PREPARE_ACK]
    is_profile = true

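# For the client read/write task codes below: is_profile feeds the
# profiler toollet enabled in [core]; profiler::size.* additionally
# records request/response sizes; rpc_request_throttling_mode = TM_REJECT
# presumably makes the server reject (rather than delay) requests once
# the configured throttling threshold is exceeded.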
[task.RPC_RRDB_RRDB_PUT]
    is_profile = true
    profiler::size.request.server = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_PUT_ACK]
    is_profile = true

[task.RPC_RRDB_RRDB_MULTI_PUT]
    is_profile = true
    profiler::size.request.server = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_MULTI_PUT_ACK]
    is_profile = true

[task.RPC_RRDB_RRDB_REMOVE]
    is_profile = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_REMOVE_ACK]
    is_profile = true

[task.RPC_RRDB_RRDB_MULTI_REMOVE]
    is_profile = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_MULTI_REMOVE_ACK]
    is_profile = true

[task.RPC_RRDB_RRDB_INCR]
    is_profile = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_INCR_ACK]
    is_profile = true

[task.RPC_RRDB_RRDB_CHECK_AND_SET]
    is_profile = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_CHECK_AND_SET_ACK]
    is_profile = true

[task.RPC_RRDB_RRDB_CHECK_AND_MUTATE]
    is_profile = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_CHECK_AND_MUTATE_ACK]
    is_profile = true

[task.RPC_RRDB_RRDB_GET]
    is_profile = true
    profiler::size.response.server = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_GET_ACK]
    is_profile = true

[task.RPC_RRDB_RRDB_MULTI_GET]
    is_profile = true
    profiler::size.response.server = true
    rpc_request_throttling_mode = TM_REJECT

[task.RPC_RRDB_RRDB_BATCH_GET]
    is_profile = true
    profiler::size.response.server = true

[task.RPC_RRDB_RRDB_BATCH_GET_ACK]
    is_profile = true