From 5ca82d67e25bb858ec70aaa7d8eb0500fb7cd695 Mon Sep 17 00:00:00 2001 From: mrdrivingduck Date: Mon, 8 Apr 2024 16:38:15 +0800 Subject: [PATCH 1/2] docs: update contributing guide --- docs/.vuepress/config.ts | 1 + .../contributing/contributing-polardb-docs.md | 16 ++++++------ docs/theory/buffer-management.md | 2 +- docs/theory/logindex.md | 3 ++- .../contributing/contributing-polardb-docs.md | 16 ++++++------ docs/zh/theory/buffer-management.md | 2 +- docs/zh/theory/logindex.md | 3 ++- package.json | 3 ++- pnpm-lock.yaml | 25 ++++++++++++++++--- 9 files changed, 46 insertions(+), 25 deletions(-) diff --git a/docs/.vuepress/config.ts b/docs/.vuepress/config.ts index 123e02a1424..bb74ca725f7 100644 --- a/docs/.vuepress/config.ts +++ b/docs/.vuepress/config.ts @@ -124,6 +124,7 @@ export default defineUserConfig({ }, }), mdEnhancePlugin({ + katex: true, footnote: true, }), registerComponentsPlugin({ diff --git a/docs/contributing/contributing-polardb-docs.md b/docs/contributing/contributing-polardb-docs.md index 487a1b1dff0..8a9f2241969 100644 --- a/docs/contributing/contributing-polardb-docs.md +++ b/docs/contributing/contributing-polardb-docs.md @@ -12,7 +12,7 @@ PolarDB for PostgreSQL 的文档使用 [VuePress 2](https://v2.vuepress.vuejs.or ## 本地文档开发 -若您发现文档中存在内容或格式错误,或者您希望能够贡献新文档,那么您需要在本地安装并配置文档开发环境。本项目的文档是一个 Node.js 工程,以 [Yarn](https://yarnpkg.com/) 作为软件包管理器。[Node.js®](https://nodejs.org/en/) 是一个基于 Chrome V8 引擎的 JavaScript 运行时环境。 +若您发现文档中存在内容或格式错误,或者您希望能够贡献新文档,那么您需要在本地安装并配置文档开发环境。本项目的文档是一个 Node.js 工程,以 [pnpm](https://pnpm.io/) 作为软件包管理器。[Node.js®](https://nodejs.org/en/) 是一个基于 Chrome V8 引擎的 JavaScript 运行时环境。 ### Node 环境准备 @@ -40,19 +40,19 @@ node -v npm -v ``` -使用 `npm` 全局安装软件包管理器 `yarn`: +使用 `npm` 全局安装软件包管理器 `pnpm`: ```bash:no-line-numbers -npm install -g yarn -yarn -v +npm install -g pnpm +pnpm -v ``` ### 文档依赖安装 -在 PolarDB for PostgreSQL 工程的根目录下运行以下命令,`yarn` 将会根据 `package.json` 安装所有依赖: +在 PolarDB for PostgreSQL 工程的根目录下运行以下命令,`pnpm` 将会根据 `package.json` 安装所有依赖: ```bash:no-line-numbers -yarn +pnpm install ``` ### 运行文档开发服务器 @@ -60,7 +60,7 @@ yarn 在 PolarDB for PostgreSQL 工程的根目录下运行以下命令: ```bash:no-line-numbers -yarn docs:dev +pnpm run docs:dev ``` 文档开发服务器将运行于 `http://localhost:8080/PolarDB-for-PostgreSQL/`,打开浏览器即可访问。对 Markdown 文件作出修改后,可以在网页上实时查看变化。 @@ -94,7 +94,7 @@ PolarDB for PostgreSQL 的文档资源位于工程根目录的 `docs/` 目录下 `.vuepress/` 目录下包含文档工程的全局配置信息: -- `config.js`:文档配置 +- `config.ts`:文档配置 - `configs/`:文档配置模块(导航栏 / 侧边栏、英文 / 中文等配置) - `public/`:公共静态资源 - `styles/`:文档主题默认样式覆盖 diff --git a/docs/theory/buffer-management.md b/docs/theory/buffer-management.md index e7fc9b9a938..1016d095de6 100644 --- a/docs/theory/buffer-management.md +++ b/docs/theory/buffer-management.md @@ -58,7 +58,7 @@ To apply the WAL records of a page up to a specified LSN, each read-only node ma For a specific page, more changes mean more LSNs and a longer period of time required to apply WAL records. To minimize the number of WAL records that need to be applied for each page, PolarDB provides consistent LSNs. -After all changes that are made up to the consistent LSN of a page are written to the shared storage, the page is persistently stored. The primary node sends the write LSN and consistent LSN of the page to each read-only node, and each read-only node sends the apply LSN of the page to the primary node. The read-only nodes do not need to apply the WAL records that are generated before the consistent LSN of the page. Therefore, all LSNs that are smaller than the consistent LSN can be removed from the LogIndex of the page. 
This reduces the number of WAL records that the read-only nodes need to apply. This also reduces the storage space that is occupied by LogIndex records.
+After all changes that are made up to the consistent LSN of a page are written to the shared storage, the page is persistently stored. The primary node sends the write LSN and consistent LSN of the page to each read-only node, and each read-only node sends the apply LSN of the page and its min used LSN to the primary node. The read-only nodes do not need to apply the WAL records that are generated before the consistent LSN of a page when they read the page from the shared storage. However, they may still need to apply WAL records that are generated before the consistent LSN when they replay an outdated page that is already in the buffer pool. Therefore, only the LSNs that are smaller than both the consistent LSN and the min used LSN can be removed from the LogIndex of the page. This reduces the number of WAL records that the read-only nodes need to apply. This also reduces the storage space that is occupied by LogIndex records.

### Flush Lists

diff --git a/docs/theory/logindex.md b/docs/theory/logindex.md index 339a55615cc..398b7cdb789 100644 --- a/docs/theory/logindex.md +++ b/docs/theory/logindex.md @@ -80,7 +80,7 @@ After the data in an Inactive LogIndex Memtable is flushed to the disk, the LogI

![image.png](../imgs/58_LogIndex_10.png)

-All modified data pages recorded in WAL logs before the LSN of consistent data are persisted to the shared storage based on the information described in [Buffer Management](./buffer-management.md). The LSN of consistent data is the LSN before which data is consistent between the primary node and read-only nodes. Read-only nodes do not need to replay WAL logs generated before the LSN of consistent data. In this case, the WAL logs for the LSNs that are smaller than the LSN of consistent data can be cleared from LogIndex Tables. This way, the primary node can truncate LogIndex Tables that are no longer used in the storage. This enables more efficient log replay for read-only nodes and reduces the space occupied by LogIndex Tables.
+All modified data pages recorded in WAL logs before the consistent LSN are persisted to the shared storage based on the information described in [Buffer Management](./buffer-management.md). The primary node sends the write LSN and consistent LSN to each read-only node, and each read-only node sends the apply LSN and the min used LSN to the primary node. In this case, the WAL logs whose LSNs are smaller than both the consistent LSN and the min used LSN can be cleared from LogIndex Tables. This way, the primary node can truncate LogIndex Tables that are no longer used in the storage. This enables more efficient log replay for read-only nodes and reduces the space occupied by LogIndex Tables.

## Log replay

@@ -90,6 +90,7 @@ For scenarios in which LogIndex Tables are used, the startup processes of read-o

- The background replay process replays WAL logs in the sequence of WAL logs. The process retrieves modified pages from LogIndex Memtables and LogIndex Tables based on the LSN of a page that you want to replay. If a page exists in a buffer pool, the page is replayed. Otherwise, the page is skipped. The background replay process replays WAL logs generated for the next LSN of a page in a buffer pool in the sequence of LSNs. This prevents a large number of LSNs for a single page that you want to replay from being accumulated.
- The backend process replays only the pages it must access. 
If the backend process must access a page that does not exist in a buffer pool, the process reads this page from the shared storage, writes the page to a buffer pool, and replays this page. If the page exists in a buffer pool and is marked as an outdated page, the process replays the most recent WAL logs of this page. The backend process retrieves the LSNs of the page from LogIndex Memtables and LogIndex Tables based on the value of PageTag. After the process retrieves the LSNs, the process generates the LSNs for the page in sequence. Then, the process reads the complete WAL logs from the shared storage based on the generated LSNs to replay the page.
+- As described in the preceding two points, both the background replay process and the backend processes use LogIndex information to apply WAL logs to pages. Therefore, the min used LSN of a read-only node is defined as the minimum LSN among the WAL records that are currently being applied by the background replay process and all backend processes. The read-only node sends its current min used LSN to the primary node, which uses this LSN to truncate the LogIndex Tables that are no longer used.

![image.png](../imgs/59_LogIndex_11.png)

diff --git a/docs/zh/contributing/contributing-polardb-docs.md b/docs/zh/contributing/contributing-polardb-docs.md index b638fe50bc9..05168618160 100644 --- a/docs/zh/contributing/contributing-polardb-docs.md +++ b/docs/zh/contributing/contributing-polardb-docs.md @@ -8,7 +8,7 @@ PolarDB for PostgreSQL 的文档使用 [VuePress 2](https://v2.vuepress.vuejs.or

## 本地文档开发

-若您发现文档中存在内容或格式错误，或者您希望能够贡献新文档，那么您需要在本地安装并配置文档开发环境。本项目的文档是一个 Node.js 工程，以 [Yarn](https://www.yarnpkg.cn/) 作为软件包管理器。[Node.js®](https://nodejs.org/zh-cn/) 是一个基于 Chrome V8 引擎的 JavaScript 运行时环境。
+若您发现文档中存在内容或格式错误，或者您希望能够贡献新文档，那么您需要在本地安装并配置文档开发环境。本项目的文档是一个 Node.js 工程，以 [pnpm](https://pnpm.io/) 作为软件包管理器。[Node.js®](https://nodejs.org/zh-cn/) 是一个基于 Chrome V8 引擎的 JavaScript 运行时环境。

### Node 环境准备

@@ -36,19 +36,19 @@ node -v npm -v ```

-使用 `npm` 全局安装软件包管理器 `yarn`：
+使用 `npm` 全局安装软件包管理器 `pnpm`：

```bash:no-line-numbers
-npm install -g yarn
-yarn -v
+npm install -g pnpm
+pnpm -v
```

### 文档依赖安装

-在 PolarDB for PostgreSQL 工程的根目录下运行以下命令，`yarn` 将会根据 `package.json` 安装所有依赖：
+在 PolarDB for PostgreSQL 工程的根目录下运行以下命令，`pnpm` 将会根据 `package.json` 安装所有依赖：

```bash:no-line-numbers
-yarn
+pnpm install
```

### 运行文档开发服务器

在 PolarDB for PostgreSQL 工程的根目录下运行以下命令：

```bash:no-line-numbers
-yarn docs:dev
+pnpm run docs:dev
```

文档开发服务器将运行于 `http://localhost:8080/PolarDB-for-PostgreSQL/`，打开浏览器即可访问。对 Markdown 文件作出修改后，可以在网页上实时查看变化。

@@ -90,7 +90,7 @@ PolarDB for PostgreSQL 的文档资源位于工程根目录的 `docs/` 目录下

`.vuepress/` 目录下包含文档工程的全局配置信息：

-- `config.js`：文档配置
+- `config.ts`：文档配置
 - `configs/`：文档配置模块（导航栏 / 侧边栏、英文 / 中文等配置）
 - `public/`：公共静态资源
 - `styles/`：文档主题默认样式覆盖
diff --git a/docs/zh/theory/buffer-management.md b/docs/zh/theory/buffer-management.md index 038203e11f7..c13646343a8 100644 --- a/docs/zh/theory/buffer-management.md +++ b/docs/zh/theory/buffer-management.md @@ -58,7 +58,7 @@ else

可见，数据页上的修改越多，其对应的 LSN 也越多，回放所需耗时也越长。为了尽量减少数据页需要回放的 LSN 数量，PolarDB 中引入了一致性位点的概念。

-一致性位点表示该位点之前的所有 WAL 日志修改的数据页均已经持久化到存储。主备之间，主节点向备节点发送当前 WAL 日志的写入位点和一致性位点，备节点向主节点发送当前回放的位点。由于一致性位点之前的 WAL 修改都已经写入共享存储，备节点无需再回放该位点之前的 WAL 日志。因此，可以将 LogIndex 中所有小于一致性位点的 LSN 清理掉，既加速回放效率，同时还能减少 LogIndex 占用的空间。
+一致性位点表示该位点之前的所有 WAL 日志修改的数据页均已经持久化到存储。主备之间，主节点向备节点发送当前 WAL 日志的写入位点和一致性位点，备节点向主节点反馈当前回放的位点和当前使用的最小 WAL 日志位点。由于一致性位点之前的 WAL 修改都已经写入共享存储，备节点从存储上读取新的数据页面时，无需再回放该位点之前的 WAL 日志，但是备节点回放 Buffer Pool 中的被标记为 Outdate 的数据页面时，有可能需要回放该位点之前的 WAL 
日志。因此,主库节点可以根据备节点传回的‘当前使用的最小 WAL 日志位点’和一致性位点,将 LogIndex 中所有小于两个位点的 LSN 清理掉,既加速回放效率,同时还能减少 LogIndex 占用的空间。 ### FlushList diff --git a/docs/zh/theory/logindex.md b/docs/zh/theory/logindex.md index 3c473ba58d8..889521c3b3e 100644 --- a/docs/zh/theory/logindex.md +++ b/docs/zh/theory/logindex.md @@ -80,7 +80,7 @@ LogIndex 实质为一个 HashTable 结构,其 key 为 PageTag,可标识一 ![image.png](../imgs/58_LogIndex_10.png) -由 [Buffer 管理](./buffer-management.md) 可知,一致性位点之前的所有 WAL 日志修改的数据页均已持久化到共享存储中,RO 节点无需回放该位点之前的 WAL 日志,故 LogIndex Table 中小于一致性位点的 LSN 均可清除。RW 据此 Truncate 掉存储上不再使用的 LogIndex Table,在加速 RO 回放效率的同时还可减少 LogIndex Table 占用的空间。 +由 [Buffer 管理](./buffer-management.md) 可知,一致性位点之前的所有 WAL 日志修改的数据页均已持久化到共享存储中,RO 节点通过流复制向 RW 节点反馈当前回放的位点和当前使用的最小 WAL 日志位点,故 LogIndex Table 中小于两个位点的 LSN 均可清除。RW 据此 Truncate 掉存储上不再使用的 LogIndex Table,在加速 RO 回放效率的同时还可减少 LogIndex Table 占用的空间。 ## 日志回放 @@ -90,6 +90,7 @@ LogIndex 机制下,RO 节点的 Startup 进程基于接收到的 WAL Meta 生 - 背景回放进程按照 WAL 顺序依次进行日志回放操作,根据要回放的 LSN 检索 LogIndex Memtable 及 LogIndex Table,获取该 LSN 修改的 Page List,若某个 Page 存在于 Buffer Pool 中则对其进行回放,否则直接跳过。背景回放进程按照 LSN 的顺序逐步推进 Buffer Pool 中的页面位点,避免单个 Page 需要回放的 LSN 数量堆积太多; - Backend 进程则仅对其实际需要访问的 Page 进行回放,当 Backend 进程需要访问一个 Page 时,如果该 Page 在 Buffer Pool 中不存在,则将该 Page 读到 Buffer Pool 后进行回放;如果该 Page 已经在 Buffer Pool 中且标记为 outdate,则将该 Page 回放到最新。Backend 进程依据 Page TAG 对 LogIndex Memtable 及 LogIndex Table 进行检索,按序生成与该 Page 相关的 LSN List,基于 LSN List 从共享存储中读取完整的 WAL 日志来对该 Page 进行回放。 +- 由上述两点可知:背景回放进程和 Backend 进程均会检索 Logindex,并使用 LogIndex 中记录的信息对 Page 执行回放操作。这两类进程均有一个当前正在回放的 WAL 日志位点信息,因此,我们定义背景回放进程和所有的 Backend 进程正在回放的 WAL 日志位点信息的最小值,为该 RO 节点当前正在使用的最小 WAL 日志位点。RO 节点通过流复制将该位点信息回传给 RW 节点,RW 节点据此来判断存储上的 LogIndex Table 是否可以删除。 ![image.png](../imgs/59_LogIndex_11.png) diff --git a/package.json b/package.json index 8375ea213e9..943f67eeace 100644 --- a/package.json +++ b/package.json @@ -26,9 +26,10 @@ "@vuepress/plugin-docsearch": "^2.0.0-rc.9", "@vuepress/plugin-register-components": "^2.0.0-rc.9", "@vuepress/theme-default": "^2.0.0-rc.9", + "katex": "^0.16.10", + "prettier": "3.2.5", "vue": "^3.4.0", "vuepress": "^2.0.0-rc.9", - "prettier": "3.2.5", "vuepress-plugin-md-enhance": "^2.0.0-rc.33" } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 64493421b1e..b6e02c3bd25 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -17,6 +17,9 @@ devDependencies: '@vuepress/theme-default': specifier: ^2.0.0-rc.9 version: 2.0.0-rc.23(vuepress@2.0.0-rc.9) + katex: + specifier: ^0.16.10 + version: 0.16.10 prettier: specifier: 3.2.5 version: 3.2.5 @@ -28,7 +31,7 @@ devDependencies: version: 2.0.0-rc.9(@vuepress/bundler-vite@2.0.0-rc.9)(vue@3.4.21) vuepress-plugin-md-enhance: specifier: ^2.0.0-rc.33 - version: 2.0.0-rc.33(markdown-it@14.1.0)(vuepress@2.0.0-rc.9) + version: 2.0.0-rc.33(katex@0.16.10)(markdown-it@14.1.0)(vuepress@2.0.0-rc.9) packages: @@ -665,7 +668,7 @@ packages: upath: 2.0.1 dev: true - /@mdit/plugin-katex@0.8.0(markdown-it@14.1.0): + /@mdit/plugin-katex@0.8.0(katex@0.16.10)(markdown-it@14.1.0): resolution: {integrity: sha512-u7CX3Xv5nuc2bu2sHrk1nil83/9ETKTBMmy0icbW8zlqBC0ykLo1xTCEBXmdhXtnJtPi9f/wUZVs6iMZrJzbNg==, tarball: https://registry.npmmirror.com/@mdit/plugin-katex/-/plugin-katex-0.8.0.tgz} engines: {node: '>= 18'} peerDependencies: @@ -680,6 +683,7 @@ packages: '@mdit/plugin-tex': 0.8.0(markdown-it@14.1.0) '@types/katex': 0.16.7 '@types/markdown-it': 13.0.7 + katex: 0.16.10 markdown-it: 14.1.0 dev: true @@ -1660,6 +1664,11 @@ packages: engines: {node: '>=6'} dev: true + /commander@8.3.0: + resolution: {integrity: 
sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==, tarball: https://registry.npmmirror.com/commander/-/commander-8.3.0.tgz} + engines: {node: '>= 12'} + dev: true + /connect-history-api-fallback@2.0.0: resolution: {integrity: sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==, tarball: https://registry.npmmirror.com/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz} engines: {node: '>=0.8'} @@ -2018,6 +2027,13 @@ packages: graceful-fs: 4.2.11 dev: true + /katex@0.16.10: + resolution: {integrity: sha512-ZiqaC04tp2O5utMsl2TEZTXxa6WSC4yo0fv5ML++D3QZv/vx2Mct0mTlRx3O+uUkjfuAgOkzsCmq5MiUEsDDdA==, tarball: https://registry.npmmirror.com/katex/-/katex-0.16.10.tgz} + hasBin: true + dependencies: + commander: 8.3.0 + dev: true + /kind-of@6.0.3: resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==, tarball: https://registry.npmmirror.com/kind-of/-/kind-of-6.0.3.tgz} engines: {node: '>=0.10.0'} @@ -2564,7 +2580,7 @@ packages: '@vue/shared': 3.4.21 dev: true - /vuepress-plugin-md-enhance@2.0.0-rc.33(markdown-it@14.1.0)(vuepress@2.0.0-rc.9): + /vuepress-plugin-md-enhance@2.0.0-rc.33(katex@0.16.10)(markdown-it@14.1.0)(vuepress@2.0.0-rc.9): resolution: {integrity: sha512-yShU1E7K1i5zoI/8KxGsvXbE13ebX0GIbo0e7axlsMNj+xzADzF7sD/JbaPC1cJWn4pIoZ7Rg1jJiIsxWGJ8uw==, tarball: https://registry.npmmirror.com/vuepress-plugin-md-enhance/-/vuepress-plugin-md-enhance-2.0.0-rc.33.tgz} engines: {node: '>=18.16.0', npm: '>=8', pnpm: '>=7', yarn: '>=2'} peerDependencies: @@ -2627,7 +2643,7 @@ packages: '@mdit/plugin-img-mark': 0.8.0(markdown-it@14.1.0) '@mdit/plugin-img-size': 0.8.0(markdown-it@14.1.0) '@mdit/plugin-include': 0.8.0(markdown-it@14.1.0) - '@mdit/plugin-katex': 0.8.0(markdown-it@14.1.0) + '@mdit/plugin-katex': 0.8.0(katex@0.16.10)(markdown-it@14.1.0) '@mdit/plugin-mark': 0.8.0(markdown-it@14.1.0) '@mdit/plugin-mathjax': 0.8.0(markdown-it@14.1.0) '@mdit/plugin-stylize': 0.8.0(markdown-it@14.1.0) @@ -2642,6 +2658,7 @@ packages: '@vueuse/core': 10.9.0(vue@3.4.21) balloon-css: 1.2.0 js-yaml: 4.1.0 + katex: 0.16.10 vue: 3.4.21 vuepress: 2.0.0-rc.9(@vuepress/bundler-vite@2.0.0-rc.9)(vue@3.4.21) vuepress-plugin-sass-palette: 2.0.0-rc.33(vuepress@2.0.0-rc.9) From 7de734f1d500737ce8c53de1e1de04d8cd706e1b Mon Sep 17 00:00:00 2001 From: mrdrivingduck Date: Sun, 14 Apr 2024 15:00:31 +0800 Subject: [PATCH 2/2] feat: support global stat env --- docs/contributing/trouble-issuing.md | 118 ++++++++++++++++++ docs/zh/contributing/trouble-issuing.md | 118 ++++++++++++++++++ .../polar_stat_env/polar_stat_env--1.0.sql | 5 +- external/polar_stat_env/polar_stat_env.c | 92 ++++++++++---- polardb_build.sh | 16 +-- src/backend/px/px_util.c | 17 ++- src/backend/utils/misc/guc.c | 12 ++ src/backend/utils/misc/guc_px.c | 10 ++ src/include/miscadmin.h | 2 - src/include/utils/guc.h | 1 + src/include/utils/px_unsync_guc_name.h | 2 + 11 files changed, 355 insertions(+), 38 deletions(-) diff --git a/docs/contributing/trouble-issuing.md b/docs/contributing/trouble-issuing.md index 8702de87384..d550f734896 100644 --- a/docs/contributing/trouble-issuing.md +++ b/docs/contributing/trouble-issuing.md @@ -10,6 +10,7 @@ polar_stat_env -------------------------------------------------------------------- { + + "Role": "Primary", + "CPU": { + "Architecture": "x86_64", + "Model Name": "Intel(R) Xeon(R) Platinum 8369B CPU @ 2.70GHz",+ @@ -36,3 +37,120 @@ } (1 row) ``` + +通过 ePQ 
功能可以直接获取整个集群中所有计算节点的硬件配置信息: + +```sql:no-line-numbers +=> CREATE EXTENSION polar_stat_env; +=> SET polar_enable_px TO ON; +=> SET polar_px_use_master TO ON; +=> SET polar_px_use_standby TO ON; +=> SELECT * FROM polar_global_function('polar_stat_env'); + polar_stat_env +--------------------------------------------------------------------- + { + + "Role": "Standby", + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8269CY CPU @ 2.50GHz",+ + "CPU Cores": "104", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "26", + + "NUMA Nodes": "2", + + "L1d cache": "32K", + + "L1i cache": "32K", + + "L2 cache": "1024K", + + "L3 cache": "36608K" + + }, + + "Memory": { + + "Memory Total (GB)": "754", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "42" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "500", + + "Min Free KBytes(KB)": "20971520" + + } + + } + { + + "Role": "Replica", + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8269CY CPU @ 2.50GHz",+ + "CPU Cores": "104", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "26", + + "NUMA Nodes": "2", + + "L1d cache": "32K", + + "L1i cache": "32K", + + "L2 cache": "1024K", + + "L3 cache": "36608K" + + }, + + "Memory": { + + "Memory Total (GB)": "754", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "42" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "500", + + "Min Free KBytes(KB)": "20971520" + + } + + } + { + + "Role": "Primary", + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8269CY CPU @ 2.50GHz",+ + "CPU Cores": "104", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "26", + + "NUMA Nodes": "2", + + "L1d cache": "32K", + + "L1i cache": "32K", + + "L2 cache": "1024K", + + "L3 cache": "36608K" + + }, + + "Memory": { + + "Memory Total (GB)": "754", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "42" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "500", + + "Min Free KBytes(KB)": "20971520" + + } + + } + { + + "Role": "Replica", + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8269CY CPU @ 2.50GHz",+ + "CPU Cores": "104", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "26", + + "NUMA Nodes": "2", + + "L1d cache": "32K", + + "L1i cache": "32K", + + "L2 cache": "1024K", + + "L3 cache": "36608K" + + }, + + "Memory": { + + "Memory Total (GB)": "754", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "42" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "500", + + "Min Free KBytes(KB)": "20971520" + + } + + } +(4 rows) +``` diff --git a/docs/zh/contributing/trouble-issuing.md b/docs/zh/contributing/trouble-issuing.md index 8702de87384..d550f734896 100644 --- a/docs/zh/contributing/trouble-issuing.md +++ b/docs/zh/contributing/trouble-issuing.md @@ -10,6 +10,7 @@ polar_stat_env -------------------------------------------------------------------- { + + "Role": "Primary", + "CPU": { + "Architecture": "x86_64", + "Model Name": "Intel(R) Xeon(R) Platinum 8369B CPU @ 2.70GHz",+ @@ -36,3 +37,120 @@ } (1 row) ``` + +通过 ePQ 功能可以直接获取整个集群中所有计算节点的硬件配置信息: + +```sql:no-line-numbers +=> CREATE EXTENSION 
polar_stat_env; +=> SET polar_enable_px TO ON; +=> SET polar_px_use_master TO ON; +=> SET polar_px_use_standby TO ON; +=> SELECT * FROM polar_global_function('polar_stat_env'); + polar_stat_env +--------------------------------------------------------------------- + { + + "Role": "Standby", + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8269CY CPU @ 2.50GHz",+ + "CPU Cores": "104", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "26", + + "NUMA Nodes": "2", + + "L1d cache": "32K", + + "L1i cache": "32K", + + "L2 cache": "1024K", + + "L3 cache": "36608K" + + }, + + "Memory": { + + "Memory Total (GB)": "754", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "42" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "500", + + "Min Free KBytes(KB)": "20971520" + + } + + } + { + + "Role": "Replica", + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8269CY CPU @ 2.50GHz",+ + "CPU Cores": "104", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "26", + + "NUMA Nodes": "2", + + "L1d cache": "32K", + + "L1i cache": "32K", + + "L2 cache": "1024K", + + "L3 cache": "36608K" + + }, + + "Memory": { + + "Memory Total (GB)": "754", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "42" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "500", + + "Min Free KBytes(KB)": "20971520" + + } + + } + { + + "Role": "Primary", + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8269CY CPU @ 2.50GHz",+ + "CPU Cores": "104", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "26", + + "NUMA Nodes": "2", + + "L1d cache": "32K", + + "L1i cache": "32K", + + "L2 cache": "1024K", + + "L3 cache": "36608K" + + }, + + "Memory": { + + "Memory Total (GB)": "754", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "42" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "500", + + "Min Free KBytes(KB)": "20971520" + + } + + } + { + + "Role": "Replica", + + "CPU": { + + "Architecture": "x86_64", + + "Model Name": "Intel(R) Xeon(R) Platinum 8269CY CPU @ 2.50GHz",+ + "CPU Cores": "104", + + "CPU Thread Per Cores": "2", + + "CPU Core Per Socket": "26", + + "NUMA Nodes": "2", + + "L1d cache": "32K", + + "L1i cache": "32K", + + "L2 cache": "1024K", + + "L3 cache": "36608K" + + }, + + "Memory": { + + "Memory Total (GB)": "754", + + "HugePage Size (MB)": "2", + + "HugePage Total Size (GB)": "42" + + }, + + "OS Params": { + + "OS": "5.10.134-16.1.al8.x86_64", + + "Swappiness(1-100)": "0", + + "Vfs Cache Pressure(0-1000)": "500", + + "Min Free KBytes(KB)": "20971520" + + } + + } +(4 rows) +``` diff --git a/external/polar_stat_env/polar_stat_env--1.0.sql b/external/polar_stat_env/polar_stat_env--1.0.sql index 4718dcdfdec..58b6e981bad 100644 --- a/external/polar_stat_env/polar_stat_env--1.0.sql +++ b/external/polar_stat_env/polar_stat_env--1.0.sql @@ -1,13 +1,14 @@ -- Create customized polar stat env func CREATE FUNCTION polar_stat_env( IN format text DEFAULT 'json' -) RETURNS TEXT +) +RETURNS SETOF TEXT AS 'MODULE_PATHNAME', 'polar_stat_env' LANGUAGE C PARALLEL SAFE; CREATE FUNCTION polar_stat_env_no_format( IN format text DEFAULT 'json' ) -RETURNS TEXT +RETURNS SETOF TEXT AS 'MODULE_PATHNAME', 'polar_stat_env_no_format' LANGUAGE C 
PARALLEL SAFE; diff --git a/external/polar_stat_env/polar_stat_env.c b/external/polar_stat_env/polar_stat_env.c index a21f74aa29c..299e94ecfd6 100644 --- a/external/polar_stat_env/polar_stat_env.c +++ b/external/polar_stat_env/polar_stat_env.c @@ -23,19 +23,18 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + +#include "access/xlog.h" +#include "commands/explain.h" #include "funcapi.h" +#include "miscadmin.h" #include "utils/builtins.h" -#include "utils/memutils.h" + #include #include #include -#include "utils/guc.h" -#include "commands/explain.h" -#include "fmgr.h" -#ifdef PG_MODULE_MAGIC PG_MODULE_MAGIC; -#endif const char *cpu_commands[][2] = { @@ -118,7 +117,7 @@ exec_collect_command(const char *command, StringInfoData *result) } static void -polar_collect_cpu(ExplainState *es) +collect_cpu(ExplainState *es) { bool success = false; StringInfoData cur_data; @@ -148,7 +147,7 @@ polar_collect_cpu(ExplainState *es) } static void -polar_collect_mem(ExplainState *es) +collect_mem(ExplainState *es) { bool success = false; StringInfoData cur_data; @@ -178,7 +177,7 @@ polar_collect_mem(ExplainState *es) } static void -polar_collect_os(ExplainState *es) +collect_os(ExplainState *es) { bool success = false; StringInfoData cur_data; @@ -208,16 +207,40 @@ polar_collect_os(ExplainState *es) } static void -polar_collect_env(ExplainState *es) +collect_node_type(ExplainState *es) +{ + static const char *role = "Role"; + + switch (polar_node_type()) + { + case POLAR_MASTER: + ExplainPropertyText(role, "Primary", es); + break; + case POLAR_REPLICA: + ExplainPropertyText(role, "Replica", es); + break; + case POLAR_STANDBY: + ExplainPropertyText(role, "Standby", es); + break; + default: + ExplainPropertyText(role, "Unknown", es); + break; + } +} + +static void +collect_all(ExplainState *es) { ExplainBeginOutput(es); + collect_node_type(es); + /* Collect CPU info */ - polar_collect_cpu(es); + collect_cpu(es); /* Collect memory info */ - polar_collect_mem(es); + collect_mem(es); /* Collect OS info */ - polar_collect_os(es); + collect_os(es); ExplainEndOutput(es); @@ -233,14 +256,40 @@ polar_collect_env(ExplainState *es) } } -static text * +static Datum stat_env(FunctionCallInfo fcinfo, bool need_newline) { + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + TupleDesc tupdesc; + Tuplestorestate *tupstore; + MemoryContext oldcontext; text *format_txt = PG_GETARG_TEXT_PP(0); char *format = text_to_cstring(format_txt); - text *result_text; ExplainState *es = NewExplainState(); + /* check to see if caller supports us returning a tuplestore */ + if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo)) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("set-valued function called in context that cannot accept a set"))); + if (!(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("materialize mode required, but it is not allowed in this context"))); + + /* Build tuplestore to hold the result rows */ + oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory); + + tupdesc = CreateTemplateTupleDesc(1, false); + TupleDescInitEntry(tupdesc, (AttrNumber) 1, "nodeenv", + TEXTOID, -1, 0); + tupstore = tuplestore_begin_heap(true, false, work_mem); + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setResult = tupstore; + rsinfo->setDesc = tupdesc; + + MemoryContextSwitchTo(oldcontext); + if (strcmp(format, "text") == 0) es->format = EXPLAIN_FORMAT_TEXT; else if (strcmp(format, 
"xml") == 0) @@ -255,24 +304,25 @@ stat_env(FunctionCallInfo fcinfo, bool need_newline) errmsg("unrecognized value for output format: \"%s\"", format))); pfree(format); - polar_collect_env(es); + collect_all(es); if (!need_newline && es && es->str) remove_newlines(es->str->data); - result_text = cstring_to_text_with_len(es->str->data, es->str->len); - + tuplestore_puttuple(tupstore, + BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupdesc), + &es->str->data)); pfree(es->str->data); pfree(es); - return result_text; + return (Datum) 0; } PG_FUNCTION_INFO_V1(polar_stat_env); Datum polar_stat_env(PG_FUNCTION_ARGS) { - PG_RETURN_TEXT_P(stat_env(fcinfo, true)); + return stat_env(fcinfo, true); } /* @@ -284,5 +334,5 @@ PG_FUNCTION_INFO_V1(polar_stat_env_no_format); Datum polar_stat_env_no_format(PG_FUNCTION_ARGS) { - PG_RETURN_TEXT_P(stat_env(fcinfo, false)); + return stat_env(fcinfo, false); } diff --git a/polardb_build.sh b/polardb_build.sh index e22adc3ffd0..8c3e63d443a 100755 --- a/polardb_build.sh +++ b/polardb_build.sh @@ -221,7 +221,7 @@ pg_bld_standby_data_dir=$pg_bld_prefix/tmp_standby_datadir_polardb_pg_1100_bld pg_bld_user=`whoami` pg_bld_port=5432 pg_bld_rep_port=5433 -pg_bld_standby_port=5434 +pg_bld_standby_port=5435 pg_db_user=postgres current_branch=`git rev-parse --abbrev-ref HEAD` if [[ $current_branch == "HEAD" ]]; @@ -624,7 +624,7 @@ then polar_enable_flashback_log = off" >> $pg_bld_master_dir/postgresql.conf echo "polar_enable_dma = on polar_dma_repl_user = $pg_db_user" >> $pg_bld_master_dir/polar_dma.conf - su_eval "$pg_bld_basedir/bin/postgres -D $pg_bld_master_dir -p $pg_bld_port -c polar_dma_init_meta=ON -c polar_dma_members_info=\"localhost:$pg_bld_port@1\"" + su_eval "$pg_bld_basedir/bin/postgres -D $pg_bld_master_dir -p $pg_bld_port -c polar_dma_init_meta=ON -c polar_dma_members_info=\"127.0.0.1:$pg_bld_port@1\"" fi fi @@ -638,7 +638,7 @@ then echo "polar_hostid = 2" >> $pg_bld_replica_dir/postgresql.conf echo "synchronous_standby_names='replica1'" >> $pg_bld_master_dir/postgresql.conf - echo "primary_conninfo = 'host=localhost port=$pg_bld_port user=$pg_db_user dbname=postgres application_name=replica1'" >> $pg_bld_replica_dir/recovery.conf + echo "primary_conninfo = 'host=127.0.0.1 port=$pg_bld_port user=$pg_db_user dbname=postgres application_name=replica1'" >> $pg_bld_replica_dir/recovery.conf echo "polar_replica = on" >> $pg_bld_replica_dir/recovery.conf echo "recovery_target_timeline = 'latest'" >> $pg_bld_replica_dir/recovery.conf echo "primary_slot_name = 'replica1'" >> $pg_bld_replica_dir/recovery.conf @@ -672,7 +672,7 @@ then polar_enable_lazy_checkpoint = off" >> $pg_bld_standby_dir/postgresql.conf fi - echo "primary_conninfo = 'host=localhost port=$pg_bld_port user=$pg_db_user dbname=postgres application_name=standby1'" >> $pg_bld_standby_dir/recovery.conf + echo "primary_conninfo = 'host=127.0.0.1 port=$pg_bld_port user=$pg_db_user dbname=postgres application_name=standby1'" >> $pg_bld_standby_dir/recovery.conf echo "standby_mode = on" >> $pg_bld_standby_dir/recovery.conf echo "recovery_target_timeline = 'latest'" >> $pg_bld_standby_dir/recovery.conf echo "primary_slot_name = 'standby1'" >> $pg_bld_standby_dir/recovery.conf @@ -734,7 +734,7 @@ then fi fi - echo "primary_conninfo = 'host=localhost port=$pg_bld_port user=$pg_db_user dbname=postgres application_name=replica${i}'" >> $pg_bld_replica_dir_n/recovery.conf + echo "primary_conninfo = 'host=127.0.0.1 port=$pg_bld_port user=$pg_db_user dbname=postgres application_name=replica${i}'" >> 
$pg_bld_replica_dir_n/recovery.conf echo "primary_slot_name = 'replica${i}'" >> $pg_bld_replica_dir_n/recovery.conf # su_eval "env $pg_bld_basedir/bin/psql -h 127.0.0.1 -d postgres -U $pg_db_user -c \"SELECT * FROM pg_create_physical_replication_slot('replica${i}')\"" su_eval "$pg_bld_basedir/bin/pg_ctl -D $pg_bld_replica_dir_n start -w -c" @@ -804,9 +804,9 @@ if [[ $withstandby == "yes" ]]; then su_eval "env $pg_bld_basedir/bin/psql -h 127.0.0.1 -d postgres -p $pg_bld_port -U $pg_db_user -c \"SELECT * FROM pg_create_physical_replication_slot('standby1')\"" sleep 2 - rm -fr $pg_bld_standby_data_dir - cp -frp $pg_bld_data_dir $pg_bld_standby_data_dir - sed -i -E "s/${pg_bld_data_dir//\//\\/}/${pg_bld_standby_data_dir//\//\\/}/" $pg_bld_standby_dir/postgresql.conf + rm -fr $pg_bld_standby_data_dir + cp -frp $pg_bld_data_dir $pg_bld_standby_data_dir + sed -i -E "s/${pg_bld_data_dir//\//\\/}/${pg_bld_standby_data_dir//\//\\/}/" $pg_bld_standby_dir/postgresql.conf su_eval "$pg_bld_basedir/bin/pg_ctl -D $pg_bld_standby_dir start -w -c -o '-p $pg_bld_standby_port'" fi diff --git a/src/backend/px/px_util.c b/src/backend/px/px_util.c index a0fd1e96f1b..ceea948870b 100644 --- a/src/backend/px/px_util.c +++ b/src/backend/px/px_util.c @@ -158,11 +158,13 @@ GeneratePxNodeConfigs(void) for (i = 0; i < count; i++) { item = &items[i]; - if (item->type != POLAR_STANDBY && item->type != POLAR_REPLICA) + if (item->type != POLAR_MASTER && item->type != POLAR_REPLICA && item->type != POLAR_STANDBY) + continue; + if (item->type == POLAR_MASTER && !px_use_master) continue; if (item->type == POLAR_STANDBY && !px_use_standby) continue; - if (item->state != STANDBY_SNAPSHOT_READY) + if (!(item->state == STANDBY_SNAPSHOT_READY || (item->type == POLAR_MASTER && px_use_master))) continue; config = &configs[idx]; @@ -191,19 +193,24 @@ GeneratePxNodeConfigs(void) if (strcmp(item->name, node_name) == 0) { - if (item->type != POLAR_STANDBY && item->type != POLAR_REPLICA) + if (item->type != POLAR_MASTER && item->type != POLAR_REPLICA && item->type != POLAR_STANDBY) { elog(error_level, "node %s is not useable for PX, consider adjust polar_px_nodes", node_name); goto next; } + if (item->type == POLAR_MASTER && !px_use_master) + { + elog(error_level, "node %s is master, but polar_px_use_master is off, consider adjust polar_px_nodes or enable polar_px_use_master", node_name); + goto next; + } if (item->type == POLAR_STANDBY && !px_use_standby) { elog(error_level, "node %s is standby, but polar_px_use_standby is off, consider adjust polar_px_nodes or enable polar_px_use_standby", node_name); goto next; } - if (item->state != STANDBY_SNAPSHOT_READY) + if (!(item->state == STANDBY_SNAPSHOT_READY || (item->type == POLAR_MASTER && !px_use_master))) { - elog(error_level, "node %s is not ready for PX qeury, consider adjust polar_px_nodes", node_name); + elog(error_level, "node %s is not ready for PX query, consider adjust polar_px_nodes", node_name); goto next; } diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index d52d816fb01..5e13a8a9e0a 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -671,6 +671,7 @@ int polar_max_hashagg_mem = 0; int polar_max_setop_mem = 0; int polar_max_subplan_mem = 0; int polar_max_recursiveunion_mem = 0; +int polar_wait_before_shutdown_timeout = 0; static const struct config_enum_entry client_schedule_options[] = { {"round-robin", CLIENT_SCHEDULE_ROUND_ROBIN, false}, @@ -6833,6 +6834,17 @@ static struct config_int ConfigureNamesInt[] = #endif /* 
POLAR end */

+	{
+		{"polar_wait_before_shutdown_timeout", PGC_SIGHUP, UNGROUPED,
+			gettext_noop("PolarDB wait before shutdown timeout, in seconds."),
+			gettext_noop("0 means no wait.")
+		/* though the unit is GUC_UNIT_S, we omit it so that the value is easy to parse */
+		},
+		&polar_wait_before_shutdown_timeout,
+		0, 0, 300,
+		NULL, NULL, NULL
+	},
+
 	/* End-of-list marker */
 	{
 		{NULL, 0, 0, NULL, NULL}, NULL, 0, 0, 0, NULL, NULL, NULL
diff --git a/src/backend/utils/misc/guc_px.c b/src/backend/utils/misc/guc_px.c index f39c110d6e4..cb261be5187 100644 --- a/src/backend/utils/misc/guc_px.c +++ b/src/backend/utils/misc/guc_px.c @@ -1963,6 +1963,16 @@ struct config_bool ConfigureNamesBool_px[] =
 		NULL, NULL, NULL
 	},

+	{
+		{"polar_px_use_master", PGC_USERSET, UNGROUPED,
+			gettext_noop("Whether PolarDB PX use master"),
+			NULL
+		},
+		&px_use_master,
+		false,
+		NULL, (void (*)(bool, void *))polar_invalid_px_nodes_cache, NULL
+	},
+
 	{
 		{"polar_px_use_standby", PGC_USERSET, UNGROUPED,
 			gettext_noop("Whether PolarDB PX use standby"),
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index 6b895cf7af3..983decb0da7 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -355,8 +355,6 @@ extern char *DatabasePath;

 /* POLAR */
 extern char *polar_database_path;
 extern PGDLLIMPORT int planner_work_mem;
-extern PGDLLIMPORT int work_mem;
-extern PGDLLIMPORT int maintenance_work_mem;

 /* now in utils/init/miscinit.c */
 extern void InitPostmasterChild(void);
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h index 33b0eae79f9..d302522ddd9 100644 --- a/src/include/utils/guc.h +++ b/src/include/utils/guc.h @@ -988,6 +988,7 @@ extern int px_max_slices;
 extern char *polar_px_nodes;
 extern char *polar_px_ignore_function;
 extern PxFunctionOidArray *px_function_oid_array;
+extern bool px_use_master;
 extern bool px_use_standby;
 extern bool polar_px_ignore_unusable_nodes;
 extern bool polar_enable_send_node_info;
diff --git a/src/include/utils/px_unsync_guc_name.h b/src/include/utils/px_unsync_guc_name.h index 206c4d76106..7df95768c38 100644 --- a/src/include/utils/px_unsync_guc_name.h +++ b/src/include/utils/px_unsync_guc_name.h @@ -540,6 +540,7 @@
 	"polar_vfs.max_pfsd_io_size",
 	"polar_vfs.pfs_force_mount",
 	"polar_virtual_pid",
+	"polar_wait_before_shutdown_timeout",
 	"polar_wal_buffer_insert_locks",
 	"polar_wal_snd_reserved_for_superuser",
 	"polar_worker.core_file_outdate_time",
@@ -696,6 +697,7 @@
 	"polar_px_insert_dop_num",
 	"polar_px_optimizer_remove_superfluous_order",
 	"polar_px_update_dop_num",
+	"polar_px_use_master",
 	"polar_px_use_standby",
 	"polar_px_wait_lock_timeout",
 	"polar_flashback_log_insert_list_max_num",
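
The documentation changes above boil down to a single truncation rule: a LogIndex entry may be discarded only when its LSN is older than both the consistent LSN and the min used LSN reported back by the read-only nodes. The sketch below restates that rule in C for illustration only; the names `ReadOnlyNodeFeedback` and `logindex_truncate_upto` are hypothetical and are not part of the PolarDB source tree.

```c
/*
 * Illustrative sketch only: how the LogIndex truncation point could be
 * derived from the consistent LSN and the per-node min used LSN described
 * above. The type and function names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t XLogRecPtr;	/* WAL position, as in PostgreSQL */

/* Feedback a read-only node streams back to the primary node */
typedef struct ReadOnlyNodeFeedback
{
	XLogRecPtr	apply_lsn;		/* LSN the node has replayed up to */
	XLogRecPtr	min_used_lsn;	/* oldest LSN still being applied by its
								 * background replay or backend processes */
} ReadOnlyNodeFeedback;

/*
 * Return the LSN below which LogIndex entries are no longer needed: an entry
 * may be dropped only if it is older than the consistent LSN and older than
 * every read-only node's min used LSN.
 */
static XLogRecPtr
logindex_truncate_upto(XLogRecPtr consistent_lsn,
					   const ReadOnlyNodeFeedback *nodes, int nnodes)
{
	XLogRecPtr	upto = consistent_lsn;
	int			i;

	for (i = 0; i < nnodes; i++)
	{
		if (nodes[i].min_used_lsn < upto)
			upto = nodes[i].min_used_lsn;
	}
	return upto;
}

int
main(void)
{
	ReadOnlyNodeFeedback ros[2] = {
		{.apply_lsn = 120, .min_used_lsn = 110},
		{.apply_lsn = 150, .min_used_lsn = 95},
	};

	/* consistent LSN is 100, but one node is still applying WAL at LSN 95 */
	printf("LogIndex entries below LSN %llu can be truncated\n",
		   (unsigned long long) logindex_truncate_upto(100, ros, 2));
	return 0;
}
```

A read-only node that is still applying old WAL, for example a backend replaying an outdated page in its buffer pool, therefore holds back truncation even after the consistent LSN has advanced, which is why the read-only nodes now report the min used LSN in addition to the apply LSN.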