From 32f6021b85315ffd0e76e680bff8ebcc1adc3e33 Mon Sep 17 00:00:00 2001 From: Andy Breuhan Date: Fri, 17 May 2024 15:24:06 +0200 Subject: [PATCH] Added Lustre integration test Record the lctl/lnetctl invocations behind a record/play CommandMock (src/utils.rs), move the collection logic from main.rs into a library-level parse() function, and add an insta snapshot test that replays the recorded 2.14.0_ddn145 cassettes in CI. --- .github/workflows/rust.yml | 10 +- .../2.14.0_ddn145/lctl_mgs_fs_output.json | 1 + cassettes/2.14.0_ddn145/lctl_output.json | 1 + .../lctl_recovery_status_output.json | 1 + .../2.14.0_ddn145/lnetctl_show_output.json | 1 + .../2.14.0_ddn145/lnetctl_stats_output.json | 1 + src/lib.rs | 148 + src/main.rs | 130 +- src/utils.rs | 106 + tests/lustre_integration_tests.rs | 23 + ...stre_integration_tests__lustre_ddn145.snap | 4594 +++++++++++++++++ 11 files changed, 4901 insertions(+), 115 deletions(-) create mode 100644 cassettes/2.14.0_ddn145/lctl_mgs_fs_output.json create mode 100644 cassettes/2.14.0_ddn145/lctl_output.json create mode 100644 cassettes/2.14.0_ddn145/lctl_recovery_status_output.json create mode 100644 cassettes/2.14.0_ddn145/lnetctl_show_output.json create mode 100644 cassettes/2.14.0_ddn145/lnetctl_stats_output.json create mode 100644 src/utils.rs create mode 100644 tests/lustre_integration_tests.rs create mode 100644 tests/snapshots/lustre_integration_tests__lustre_integration_tests__lustre_ddn145.snap diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 7530351..fadc3b9 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -83,9 +83,17 @@ jobs: set -e set -o pipefail cargo llvm-cov --all-features --workspace --codecov --output-path codecov.json + + - name: Measure code coverage for Lustre integration tests + run: | + # Fail if any tests fail + set -e + set -o pipefail + cargo llvm-cov --codecov --output-path codecov_test_lustre_ddn145.json --package lustre_collector --test lustre_integration_tests -- lustre_integration_tests::test_lustre_ddn145 + - name: Upload coverage to Codecov uses: codecov/codecov-action@v3 with: token: ${{ secrets.CODECOV_TOKEN }} - files: codecov.json + files: codecov.json,codecov_test_lustre_ddn145.json fail_ci_if_error: true diff --git a/cassettes/2.14.0_ddn145/lctl_mgs_fs_output.json b/cassettes/2.14.0_ddn145/lctl_mgs_fs_output.json new file mode 100644 index 0000000..d0d5141 --- /dev/null +++ b/cassettes/2.14.0_ddn145/lctl_mgs_fs_output.json @@ -0,0 +1 @@ +{"command":"lctl","args":["get_param","-N","mgs.*.live.*"],"stdout":"mgs.MGS.live.fs\nmgs.MGS.live.params\n","stderr":""} \ No newline at end of file diff --git a/cassettes/2.14.0_ddn145/lctl_output.json b/cassettes/2.14.0_ddn145/lctl_output.json new file mode 100644 index 0000000..1fabd24 --- /dev/null +++ b/cassettes/2.14.0_ddn145/lctl_output.json @@ -0,0 +1 @@ 
+{"command":"lctl","args":["get_param","memused","memused_max","lnet_memused","health_check","mdt.*.exports.*.uuid","osd-*.*.filesfree","osd-*.*.filestotal","osd-*.*.fstype","osd-*.*.kbytesavail","osd-*.*.kbytesfree","osd-*.*.kbytestotal","osd-*.*.brw_stats","osd-*.*.quota_slave.acct_group","osd-*.*.quota_slave.acct_user","osd-*.*.quota_slave.acct_project","mgs.*.mgs.stats","mgs.*.mgs.threads_max","mgs.*.mgs.threads_min","mgs.*.mgs.threads_started","mgs.*.num_exports","obdfilter.*OST*.job_stats","obdfilter.*OST*.stats","obdfilter.*OST*.num_exports","obdfilter.*OST*.tot_dirty","obdfilter.*OST*.tot_granted","obdfilter.*OST*.tot_pending","obdfilter.*OST*.exports.*.stats","ost.OSS.ost.stats","ost.OSS.ost_io.stats","ost.OSS.ost_create.stats","ost.OSS.ost_out.stats","ost.OSS.ost_seq.stats","mds.MDS.mdt.stats","mds.MDS.mdt_fld.stats","mds.MDS.mdt_io.stats","mds.MDS.mdt_out.stats","mds.MDS.mdt_readpage.stats","mds.MDS.mdt_seqm.stats","mds.MDS.mdt_seqs.stats","mds.MDS.mdt_setattr.stats","mdt.*.job_stats","mdt.*.md_stats","mdt.*MDT*.num_exports","mdt.*MDT*.exports.*.stats","ldlm.namespaces.{mdt-,filter-}*.contended_locks","ldlm.namespaces.{mdt-,filter-}*.contention_seconds","ldlm.namespaces.{mdt-,filter-}*.ctime_age_limit","ldlm.namespaces.{mdt-,filter-}*.early_lock_cancel","ldlm.namespaces.{mdt-,filter-}*.lock_count","ldlm.namespaces.{mdt-,filter-}*.lock_timeouts","ldlm.namespaces.{mdt-,filter-}*.lock_unused_count","ldlm.namespaces.{mdt-,filter-}*.lru_max_age","ldlm.namespaces.{mdt-,filter-}*.lru_size","ldlm.namespaces.{mdt-,filter-}*.max_nolock_bytes","ldlm.namespaces.{mdt-,filter-}*.max_parallel_ast","ldlm.namespaces.{mdt-,filter-}*.resource_count","ldlm.services.ldlm_canceld.stats","ldlm.services.ldlm_cbd.stats","llite.*.stats","mdd.*.changelog_users","qmt.*.*.glb-usr","qmt.*.*.glb-prj","qmt.*.*.glb-grp"],"stdout":"memused=980590419\nmemused_max=981936083\nlnet_memused=53235916\nhealth_check=healthy\nmdt.fs-MDT0000.exports.0@lo.uuid=\n47e3f35a-c809-41d4-861e-1080a289d12f\nfs-MDT0000-lwp-OST0001_UUID\nfs-MDT0000-lwp-OST0000_UUID\nfs-MDT0000-lwp-MDT0000_UUID\nmdt.fs-MDT0000.exports.10.73.20.12@tcp.uuid=\nfs-MDT0001-mdtlov_UUID\nfs-MDT0000-lwp-MDT0001_UUID\nfs-MDT0000-lwp-OST0002_UUID\nfs-MDT0000-lwp-OST0003_UUID\nosd-ldiskfs.MGS.filesfree=32573\nosd-ldiskfs.fs-MDT0000.filesfree=1885355\nosd-ldiskfs.fs-OST0000.filesfree=40592\nosd-ldiskfs.fs-OST0001.filesfree=40592\nosd-ldiskfs.MGS.filestotal=32768\nosd-ldiskfs.fs-MDT0000.filestotal=1885696\nosd-ldiskfs.fs-OST0000.filestotal=40960\nosd-ldiskfs.fs-OST0001.filestotal=40960\nosd-ldiskfs.MGS.fstype=ldiskfs\nosd-ldiskfs.fs-MDT0000.fstype=ldiskfs\nosd-ldiskfs.fs-OST0000.fstype=ldiskfs\nosd-ldiskfs.fs-OST0001.fstype=ldiskfs\nosd-ldiskfs.MGS.kbytesavail=463708\nosd-ldiskfs.fs-MDT0000.kbytesavail=2366504\nosd-ldiskfs.fs-OST0000.kbytesavail=4038040\nosd-ldiskfs.fs-OST0001.kbytesavail=4038040\nosd-ldiskfs.MGS.kbytesfree=489920\nosd-ldiskfs.fs-MDT0000.kbytesfree=2600612\nosd-ldiskfs.fs-OST0000.kbytesfree=4106852\nosd-ldiskfs.fs-OST0001.kbytesfree=4106852\nosd-ldiskfs.MGS.kbytestotal=491092\nosd-ldiskfs.fs-MDT0000.kbytestotal=2602832\nosd-ldiskfs.fs-OST0000.kbytestotal=4108388\nosd-ldiskfs.fs-OST0001.kbytestotal=4108388\nosd-ldiskfs.MGS.brw_stats=\nsnapshot_time: 1715951721.023721178 secs.nsecs\nstart_time: 9538.444882862 secs.nsecs\nelapsed_time: 1715942182.578838316 secs.nsecs\n\n read | write\npages per bulk r/w rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous pages rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous blocks rpcs % cum % | 
rpcs % cum %\n\n read | write\ndisk fragmented I/Os ios % cum % | ios % cum %\n\n read | write\ndisk I/Os in flight ios % cum % | ios % cum %\n\n read | write\nI/O time (1/1000s) ios % cum % | ios % cum %\n\n read | write\ndisk I/O size ios % cum % | ios % cum %\n\n read | write\nblock maps msec maps % cum % | maps % cum %\nosd-ldiskfs.fs-MDT0000.brw_stats=\nsnapshot_time: 1715951721.023756097 secs.nsecs\nstart_time: 9540.843836838 secs.nsecs\nelapsed_time: 1715942180.179919259 secs.nsecs\n\n read | write\npages per bulk r/w rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous pages rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous blocks rpcs % cum % | rpcs % cum %\n\n read | write\ndisk fragmented I/Os ios % cum % | ios % cum %\n\n read | write\ndisk I/Os in flight ios % cum % | ios % cum %\n\n read | write\nI/O time (1/1000s) ios % cum % | ios % cum %\n\n read | write\ndisk I/O size ios % cum % | ios % cum %\n\n read | write\nblock maps msec maps % cum % | maps % cum %\nosd-ldiskfs.fs-OST0000.brw_stats=\nsnapshot_time: 1715951721.023774049 secs.nsecs\nstart_time: 9539.298492617 secs.nsecs\nelapsed_time: 1715942181.725281432 secs.nsecs\n\n read | write\npages per bulk r/w rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous pages rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous blocks rpcs % cum % | rpcs % cum %\n\n read | write\ndisk fragmented I/Os ios % cum % | ios % cum %\n\n read | write\ndisk I/Os in flight ios % cum % | ios % cum %\n\n read | write\nI/O time (1/1000s) ios % cum % | ios % cum %\n\n read | write\ndisk I/O size ios % cum % | ios % cum %\n\n read | write\nblock maps msec maps % cum % | maps % cum %\nosd-ldiskfs.fs-OST0001.brw_stats=\nsnapshot_time: 1715951721.023790659 secs.nsecs\nstart_time: 9539.326245646 secs.nsecs\nelapsed_time: 1715942181.697545013 secs.nsecs\n\n read | write\npages per bulk r/w rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous pages rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous blocks rpcs % cum % | rpcs % cum %\n\n read | write\ndisk fragmented I/Os ios % cum % | ios % cum %\n\n read | write\ndisk I/Os in flight ios % cum % | ios % cum %\n\n read | write\nI/O time (1/1000s) ios % cum % | ios % cum %\n\n read | write\ndisk I/O size ios % cum % | ios % cum %\n\n read | write\nblock maps msec maps % cum % | maps % cum %\nosd-ldiskfs.MGS.brw_stats=\nsnapshot_time: 1715951721.023810944 secs.nsecs\nstart_time: 9538.444882862 secs.nsecs\nelapsed_time: 1715942182.578928082 secs.nsecs\n\n read | write\npages per bulk r/w rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous pages rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous blocks rpcs % cum % | rpcs % cum %\n\n read | write\ndisk fragmented I/Os ios % cum % | ios % cum %\n\n read | write\ndisk I/Os in flight ios % cum % | ios % cum %\n\n read | write\nI/O time (1/1000s) ios % cum % | ios % cum %\n\n read | write\ndisk I/O size ios % cum % | ios % cum %\n\n read | write\nblock maps msec maps % cum % | maps % cum %\nosd-ldiskfs.fs-MDT0000.brw_stats=\nsnapshot_time: 1715951721.023836262 secs.nsecs\nstart_time: 9540.843836838 secs.nsecs\nelapsed_time: 1715942180.179999424 secs.nsecs\n\n read | write\npages per bulk r/w rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous pages rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous blocks rpcs % cum % | rpcs % cum %\n\n read | write\ndisk fragmented I/Os ios % cum % | ios % cum %\n\n read | write\ndisk I/Os in flight ios % cum % | ios % cum %\n\n read | write\nI/O 
time (1/1000s) ios % cum % | ios % cum %\n\n read | write\ndisk I/O size ios % cum % | ios % cum %\n\n read | write\nblock maps msec maps % cum % | maps % cum %\nosd-ldiskfs.fs-OST0000.brw_stats=\nsnapshot_time: 1715951721.023857987 secs.nsecs\nstart_time: 9539.298492617 secs.nsecs\nelapsed_time: 1715942181.725365370 secs.nsecs\n\n read | write\npages per bulk r/w rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous pages rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous blocks rpcs % cum % | rpcs % cum %\n\n read | write\ndisk fragmented I/Os ios % cum % | ios % cum %\n\n read | write\ndisk I/Os in flight ios % cum % | ios % cum %\n\n read | write\nI/O time (1/1000s) ios % cum % | ios % cum %\n\n read | write\ndisk I/O size ios % cum % | ios % cum %\n\n read | write\nblock maps msec maps % cum % | maps % cum %\nosd-ldiskfs.fs-OST0001.brw_stats=\nsnapshot_time: 1715951721.023879482 secs.nsecs\nstart_time: 9539.326245646 secs.nsecs\nelapsed_time: 1715942181.697633836 secs.nsecs\n\n read | write\npages per bulk r/w rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous pages rpcs % cum % | rpcs % cum %\n\n read | write\ndiscontiguous blocks rpcs % cum % | rpcs % cum %\n\n read | write\ndisk fragmented I/Os ios % cum % | ios % cum %\n\n read | write\ndisk I/Os in flight ios % cum % | ios % cum %\n\n read | write\nI/O time (1/1000s) ios % cum % | ios % cum %\n\n read | write\ndisk I/O size ios % cum % | ios % cum %\n\n read | write\nblock maps msec maps % cum % | maps % cum %\nosd-ldiskfs.fs-MDT0000.quota_slave.acct_group=\ngrp_accounting:\n- id: 0\n usage: { inodes: 331, kbytes: 2000 }\nosd-ldiskfs.fs-OST0000.quota_slave.acct_group=\ngrp_accounting:\n- id: 0\n usage: { inodes: 358, kbytes: 1500 }\nosd-ldiskfs.fs-OST0001.quota_slave.acct_group=\ngrp_accounting:\n- id: 0\n usage: { inodes: 358, kbytes: 1500 }\nosd-ldiskfs.fs-MDT0000.quota_slave.acct_user=\nusr_accounting:\n- id: 0\n usage: { inodes: 331, kbytes: 2000 }\nosd-ldiskfs.fs-OST0000.quota_slave.acct_user=\nusr_accounting:\n- id: 0\n usage: { inodes: 358, kbytes: 1500 }\nosd-ldiskfs.fs-OST0001.quota_slave.acct_user=\nusr_accounting:\n- id: 0\n usage: { inodes: 358, kbytes: 1500 }\nosd-ldiskfs.fs-MDT0000.quota_slave.acct_project=\nprj_accounting:\n- id: 0\n usage: { inodes: 331, kbytes: 2000 }\nosd-ldiskfs.fs-OST0000.quota_slave.acct_project=\nprj_accounting:\n- id: 0\n usage: { inodes: 358, kbytes: 1500 }\nosd-ldiskfs.fs-OST0001.quota_slave.acct_project=\nprj_accounting:\n- id: 0\n usage: { inodes: 358, kbytes: 1500 }\nmgs.MGS.mgs.stats=\nsnapshot_time 1715951721.024778141 secs.nsecs\nstart_time 1715712427.194053418 secs.nsecs\nelapsed_time 239293.830724723 secs.nsecs\nreq_waittime 20839 samples [usecs] 10 3463 959329 104351375\nreq_qdepth 20839 samples [reqs] 0 0 0 0\nreq_active 20839 samples [reqs] 1 2 20860 20902\nreq_timeout 20839 samples [secs] 1 15 310704 4658634\nreqbuf_avail 47166 samples [bufs] 62 64 2976844 187885960\nldlm_plain_enqueue 93 samples [reqs] 1 1 93 93\nmgs_connect 2 samples [usecs] 62 84 146 10900\nmgs_target_reg 12 samples [usecs] 138 148848 550720 46243628170\nmgs_config_read 6 samples [usecs] 36 251 742 133396\nobd_ping 20449 samples [usecs] 3 1929 393420 12186244\nllog_origin_handle_open 84 samples [usecs] 13 88 2030 59304\nllog_origin_handle_next_block 119 samples [usecs] 13 11705 24977 206872551\nllog_origin_handle_read_header 74 samples [usecs] 14 23370 90287 
1525693897\nmgs.MGS.mgs.threads_max=32\nmgs.MGS.mgs.threads_min=3\nmgs.MGS.mgs.threads_started=3\nmgs.MGS.num_exports=2\nobdfilter.fs-OST0000.job_stats=job_stats:\nobdfilter.fs-OST0001.job_stats=job_stats:\nobdfilter.fs-OST0000.stats=\nsnapshot_time 1715951721.025272195 secs.nsecs\nstart_time 1715712428.324534805 secs.nsecs\nelapsed_time 239292.700737390 secs.nsecs\ncreate 4 samples [usecs] 3 1058 1459 1227857\nstatfs 93473 samples [usecs] 0 991 503210 4535566\nget_info 2 samples [usecs] 1217 13065 14282 172175314\nobdfilter.fs-OST0001.stats=\nsnapshot_time 1715951721.025302142 secs.nsecs\nstart_time 1715712428.714128597 secs.nsecs\nelapsed_time 239292.311173545 secs.nsecs\ncreate 4 samples [usecs] 3 1049 1428 1198476\nstatfs 93473 samples [usecs] 0 137 270611 1393943\nget_info 2 samples [usecs] 788 213514 214302 45588849140\nobdfilter.fs-OST0000.num_exports=2\nobdfilter.fs-OST0001.num_exports=2\nobdfilter.fs-OST0000.tot_dirty=0\nobdfilter.fs-OST0001.tot_dirty=0\nobdfilter.fs-OST0000.tot_granted=278208\nobdfilter.fs-OST0001.tot_granted=278208\nobdfilter.fs-OST0000.tot_pending=0\nobdfilter.fs-OST0001.tot_pending=0\nobdfilter.fs-OST0000.exports.0@lo.stats=\nsnapshot_time 1715951721.025852282 secs.nsecs\nstart_time 1715712430.020444246 secs.nsecs\nelapsed_time 239291.005408036 secs.nsecs\ncreate 2 samples [usecs] 78 1058 1136 1125448\nstatfs 46737 samples [usecs] 0 196 198283 1167593\nget_info 1 samples [usecs] 1217 1217 1217 1481089\nobdfilter.fs-OST0000.exports.10.73.20.12@tcp.stats=\nsnapshot_time 1715951721.025876695 secs.nsecs\nstart_time 1715712429.388682576 secs.nsecs\nelapsed_time 239291.637194119 secs.nsecs\ncreate 2 samples [usecs] 3 320 323 102409\nstatfs 46736 samples [usecs] 1 991 304927 3367973\nget_info 1 samples [usecs] 13065 13065 13065 170694225\nobdfilter.fs-OST0001.exports.0@lo.stats=\nsnapshot_time 1715951721.025892012 secs.nsecs\nstart_time 1715712429.985647890 secs.nsecs\nelapsed_time 239291.040244122 secs.nsecs\ncreate 2 samples [usecs] 71 1049 1120 1105442\nstatfs 46737 samples [usecs] 1 137 224516 1299078\nget_info 1 samples [usecs] 213514 213514 213514 45588228196\nobdfilter.fs-OST0001.exports.10.73.20.12@tcp.stats=\nsnapshot_time 1715951721.025906266 secs.nsecs\nstart_time 1715712429.339699972 secs.nsecs\nelapsed_time 239291.686206294 secs.nsecs\ncreate 2 samples [usecs] 3 305 308 93034\nstatfs 46736 samples [usecs] 0 125 46095 94865\nget_info 1 samples [usecs] 788 788 788 620944\nost.OSS.ost.stats=\nsnapshot_time 1715951721.025979554 secs.nsecs\nstart_time 1715712428.013543251 secs.nsecs\nelapsed_time 239293.012436303 secs.nsecs\nreq_waittime 21 samples [usecs] 8 149 793 51965\nreq_qdepth 21 samples [reqs] 0 0 0 0\nreq_active 21 samples [reqs] 1 1 21 21\nreq_timeout 21 samples [secs] 1 15 104 1214\nreqbuf_avail 48 samples [bufs] 64 64 3072 196608\nost_create 8 samples [usecs] 13 1080 3089 2564761\nost_get_info 4 samples [usecs] 802 213537 228656 45771310142\nost_connect 6 samples [usecs] 34 133 506 48340\nost_disconnect 2 samples [usecs] 49 64 113 6497\nobd_ping 1 samples [usecs] 11 11 11 121\nost.OSS.ost_io.stats=\nsnapshot_time 1715951721.026036105 secs.nsecs\nstart_time 1715712428.054433211 secs.nsecs\nelapsed_time 239292.971602894 secs.nsecs\nost.OSS.ost_create.stats=\nsnapshot_time 1715951721.026082318 secs.nsecs\nstart_time 1715712428.020769072 secs.nsecs\nelapsed_time 239293.005313246 secs.nsecs\nreq_waittime 186946 samples [usecs] 5 7961 6734080 1766607752\nreq_qdepth 186946 samples [reqs] 0 1 35 35\nreq_active 186946 samples [reqs] 1 2 191005 
199123\nreq_timeout 186946 samples [secs] 1 15 2803288 42048238\nreqbuf_avail 376899 samples [bufs] 63 64 24115843 1543055293\nost_statfs 186946 samples [usecs] 5 3634 4334895 139489167\nost.OSS.ost_out.stats=\nsnapshot_time 1715951721.026150897 secs.nsecs\nstart_time 1715712428.122126572 secs.nsecs\nelapsed_time 239292.904024325 secs.nsecs\nreq_waittime 26391 samples [usecs] 15 6022 1104980 283199020\nreq_qdepth 26391 samples [reqs] 0 0 0 0\nreq_active 26391 samples [reqs] 1 1 26391 26391\nreq_timeout 26391 samples [secs] 4 15 395630 5933420\nreqbuf_avail 53173 samples [bufs] 63 64 3402571 217732981\nmds_connect 3 samples [usecs] 56 1191 1829 1760341\nmds_statfs 26384 samples [usecs] 11 989 827045 28579743\nobd_ping 1 samples [usecs] 14 14 14 196\nout_update 3 samples [usecs] 17 169 230 30786\nost.OSS.ost_seq.stats=\nsnapshot_time 1715951721.026222446 secs.nsecs\nstart_time 1715712428.064339865 secs.nsecs\nelapsed_time 239292.961882581 secs.nsecs\nreq_waittime 9 samples [usecs] 17 35 205 4937\nreq_qdepth 9 samples [reqs] 0 0 0 0\nreq_active 9 samples [reqs] 1 1 9 9\nreq_timeout 9 samples [secs] 1 10 36 306\nreqbuf_avail 23 samples [bufs] 64 64 1472 94208\nseq_query 9 samples [usecs] 9 314216 355979 100237783449\nmds.MDS.mdt.stats=\nsnapshot_time 1715951721.026279487 secs.nsecs\nstart_time 1715712429.574034579 secs.nsecs\nelapsed_time 239291.452244908 secs.nsecs\nreq_waittime 69240 samples [usecs] 3 1896 3787926 246613360\nreq_qdepth 69240 samples [reqs] 0 3 630 1084\nreq_active 69240 samples [reqs] 1 2 92698 139614\nreq_timeout 69240 samples [secs] 1 15 1038275 15573395\nreqbuf_avail 156760 samples [bufs] 63 64 10032151 642026857\nldlm_ibits_enqueue 2 samples [reqs] 1 1 2 2\nost_set_info 151 samples [usecs] 16 63 4789 166837\nmds_connect 11 samples [usecs] 10 1172 2703 2586521\nmds_get_root 1 samples [usecs] 10 10 10 100\nmds_statfs 2 samples [usecs] 24 25 49 1201\nobd_ping 69073 samples [usecs] 2 237 636552 9041646\nmds.MDS.mdt_fld.stats=\nsnapshot_time 1715951721.026335058 secs.nsecs\nstart_time 1715712429.686717143 secs.nsecs\nelapsed_time 239291.339617915 secs.nsecs\nreq_waittime 4 samples [usecs] 33 730 923 547847\nreq_qdepth 4 samples [reqs] 0 0 0 0\nreq_active 4 samples [reqs] 1 1 4 4\nreq_timeout 4 samples [secs] 1 15 27 327\nreqbuf_avail 11 samples [bufs] 64 64 704 45056\nfld_query 2 samples [usecs] 19 22 41 845\nfld_read 2 samples [usecs] 15 33 48 1314\nmds.MDS.mdt_io.stats=\nsnapshot_time 1715951721.026386583 secs.nsecs\nstart_time 1715712429.729962924 secs.nsecs\nelapsed_time 239291.296423659 secs.nsecs\nmds.MDS.mdt_out.stats=\nsnapshot_time 1715951721.026438452 secs.nsecs\nstart_time 1715712429.683176768 secs.nsecs\nelapsed_time 239291.343261684 secs.nsecs\nreq_waittime 20352 samples [usecs] 17 6390 826823 131129985\nreq_qdepth 20352 samples [reqs] 0 0 0 0\nreq_active 20352 samples [reqs] 1 1 20352 20352\nreq_timeout 20352 samples [secs] 15 15 305280 4579200\nreqbuf_avail 41020 samples [bufs] 63 64 2624894 167968898\nmds_statfs 20352 samples [usecs] 12 2198 642351 26507153\nmds.MDS.mdt_readpage.stats=\nsnapshot_time 1715951721.026485706 secs.nsecs\nstart_time 1715712429.577763694 secs.nsecs\nelapsed_time 239291.448722012 secs.nsecs\nreq_waittime 49 samples [usecs] 9 5107 45252 153545540\nreq_qdepth 49 samples [reqs] 0 0 0 0\nreq_active 49 samples [reqs] 1 4 124 380\nreq_timeout 49 samples [secs] 10 15 680 9650\nreqbuf_avail 103 samples [bufs] 62 64 6534 414540\nldlm_ibits_enqueue 24 samples [reqs] 1 1 24 24\nmds_getattr 1 samples [usecs] 97 97 97 9409\ndt_index_read 24 
samples [usecs] 254 2757 30869 61553697\nmds.MDS.mdt_seqm.stats=\nsnapshot_time 1715951721.026558823 secs.nsecs\nstart_time 1715712429.686308763 secs.nsecs\nelapsed_time 239291.340250060 secs.nsecs\nmds.MDS.mdt_seqs.stats=\nsnapshot_time 1715951721.026600085 secs.nsecs\nstart_time 1715712429.685803899 secs.nsecs\nelapsed_time 239291.340796186 secs.nsecs\nreq_waittime 4 samples [usecs] 34 108 307 27633\nreq_qdepth 4 samples [reqs] 0 0 0 0\nreq_active 4 samples [reqs] 1 3 7 15\nreq_timeout 4 samples [secs] 1 10 13 103\nreqbuf_avail 9 samples [bufs] 64 64 576 36864\nseq_query 4 samples [usecs] 20676 323036 665386 161611882868\nmds.MDS.mdt_setattr.stats=\nsnapshot_time 1715951721.026660861 secs.nsecs\nstart_time 1715712429.580198777 secs.nsecs\nelapsed_time 239291.446462084 secs.nsecs\nmdt.fs-MDT0000.job_stats=job_stats:\nmdt.fs-MDT0000.md_stats=\nsnapshot_time 1715951721.026866086 secs.nsecs\nstart_time 1715712429.906866119 secs.nsecs\nelapsed_time 239291.119999967 secs.nsecs\ngetattr 3 samples [usecs] 11 53 117 5739\nstatfs 46738 samples [usecs] 2 68 320391 2612045\nmdt.fs-MDT0000.num_exports=8\nmdt.fs-MDT0000.exports.0@lo.stats=\nsnapshot_time 1715951721.027020316 secs.nsecs\nstart_time 1715712433.065145191 secs.nsecs\nelapsed_time 239287.961875125 secs.nsecs\ngetattr 3 samples [usecs] 11 53 117 5739\nstatfs 2 samples [usecs] 2 6 8 40\nmdt.fs-MDT0000.exports.10.73.20.12@tcp.stats=\nsnapshot_time 1715951721.027040946 secs.nsecs\nstart_time 1715712433.472944418 secs.nsecs\nelapsed_time 239287.554096528 secs.nsecs\nstatfs 46736 samples [usecs] 3 68 320383 2612005\nldlm.namespaces.mdt-fs-MDT0000_UUID.contended_locks=32\nldlm.namespaces.filter-fs-OST0000_UUID.contended_locks=32\nldlm.namespaces.filter-fs-OST0001_UUID.contended_locks=32\nldlm.namespaces.mdt-fs-MDT0000_UUID.contention_seconds=2\nldlm.namespaces.filter-fs-OST0000_UUID.contention_seconds=2\nldlm.namespaces.filter-fs-OST0001_UUID.contention_seconds=2\nldlm.namespaces.mdt-fs-MDT0000_UUID.ctime_age_limit=10\nldlm.namespaces.filter-fs-OST0000_UUID.ctime_age_limit=10\nldlm.namespaces.filter-fs-OST0001_UUID.ctime_age_limit=10\nldlm.namespaces.mdt-fs-MDT0000_UUID.early_lock_cancel=0\nldlm.namespaces.filter-fs-OST0000_UUID.early_lock_cancel=0\nldlm.namespaces.filter-fs-OST0001_UUID.early_lock_cancel=0\nldlm.namespaces.mdt-fs-MDT0000_UUID.lock_count=24\nldlm.namespaces.filter-fs-OST0000_UUID.lock_count=0\nldlm.namespaces.filter-fs-OST0001_UUID.lock_count=0\nldlm.namespaces.mdt-fs-MDT0000_UUID.lock_timeouts=0\nldlm.namespaces.filter-fs-OST0000_UUID.lock_timeouts=0\nldlm.namespaces.filter-fs-OST0001_UUID.lock_timeouts=0\nldlm.namespaces.mdt-fs-MDT0000_UUID.lock_unused_count=0\nldlm.namespaces.filter-fs-OST0000_UUID.lock_unused_count=0\nldlm.namespaces.filter-fs-OST0001_UUID.lock_unused_count=0\nldlm.namespaces.mdt-fs-MDT0000_UUID.lru_max_age=3900000\nldlm.namespaces.filter-fs-OST0000_UUID.lru_max_age=3900000\nldlm.namespaces.filter-fs-OST0001_UUID.lru_max_age=3900000\nldlm.namespaces.mdt-fs-MDT0000_UUID.lru_size=800\nldlm.namespaces.filter-fs-OST0000_UUID.lru_size=800\nldlm.namespaces.filter-fs-OST0001_UUID.lru_size=800\nldlm.namespaces.mdt-fs-MDT0000_UUID.max_nolock_bytes=0\nldlm.namespaces.filter-fs-OST0000_UUID.max_nolock_bytes=0\nldlm.namespaces.filter-fs-OST0001_UUID.max_nolock_bytes=0\nldlm.namespaces.mdt-fs-MDT0000_UUID.max_parallel_ast=1024\nldlm.namespaces.filter-fs-OST0000_UUID.max_parallel_ast=1024\nldlm.namespaces.filter-fs-OST0001_UUID.max_parallel_ast=1024\nldlm.namespaces.mdt-fs-MDT0000_UUID.resource_count=6\nldlm.namespaces.filt
er-fs-OST0000_UUID.resource_count=0\nldlm.namespaces.filter-fs-OST0001_UUID.resource_count=0\nldlm.services.ldlm_canceld.stats=\nsnapshot_time 1715951721.029057007 secs.nsecs\nstart_time 1715712427.162508246 secs.nsecs\nelapsed_time 239293.866548761 secs.nsecs\nreq_waittime 66 samples [usecs] 12 75 3151 162729\nreq_qdepth 66 samples [reqs] 0 0 0 0\nreq_active 66 samples [reqs] 1 2 85 123\nreq_timeout 66 samples [secs] 1 15 709 10219\nreqbuf_avail 145 samples [bufs] 63 64 9208 584776\nldlm_cancel 66 samples [usecs] 3 57 779 15955\nldlm.services.ldlm_cbd.stats=\nsnapshot_time 1715951721.029123609 secs.nsecs\nstart_time 1715712427.157972923 secs.nsecs\nelapsed_time 239293.871150686 secs.nsecs\nreq_waittime 32 samples [usecs] 27 160 2749 267461\nreq_qdepth 32 samples [reqs] 0 0 0 0\nreq_active 32 samples [reqs] 1 1 32 32\nreq_timeout 32 samples [secs] 1 15 344 4934\nreqbuf_avail 70 samples [bufs] 0 1 66 66\nldlm_bl_callback 32 samples [usecs] 3 35 308 5204\nllite.fs-ffff97e895d31000.stats=\nsnapshot_time 1715951721.029201222 secs.nsecs\nstart_time 1715767600.564021089 secs.nsecs\nelapsed_time 184120.465180133 secs.nsecs\ngetattr 2 samples [usecs] 425 427 852 362954\nmdd.fs-MDT0000.changelog_users=\ncurrent_index: 0\nID index (idle) mask\nqmt.fs-QMT0000.dt-0x0.glb-usr=\nglobal_pool0_dt_usr\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.dt-ddn_hdd.glb-usr=\nglobal_index_copy:\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.dt-ddn_ssd.glb-usr=\nglobal_index_copy:\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.md-0x0.glb-usr=\nglobal_pool0_md_usr\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.dt-0x0.glb-prj=\nglobal_pool0_dt_prj\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.dt-ddn_hdd.glb-prj=\nglobal_index_copy:\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.dt-ddn_ssd.glb-prj=\nglobal_index_copy:\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.md-0x0.glb-prj=\nglobal_pool0_md_prj\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.dt-0x0.glb-grp=\nglobal_pool0_dt_grp\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.dt-ddn_hdd.glb-grp=\nglobal_index_copy:\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.dt-ddn_ssd.glb-grp=\nglobal_index_copy:\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\nqmt.fs-QMT0000.md-0x0.glb-grp=\nglobal_pool0_md_grp\n- id: 0\n limits: { hard: 0, soft: 0, granted: 0, time: 604800 }\n","stderr":""} \ No newline at end of file diff --git a/cassettes/2.14.0_ddn145/lctl_recovery_status_output.json b/cassettes/2.14.0_ddn145/lctl_recovery_status_output.json new file mode 100644 index 0000000..6001566 --- /dev/null +++ b/cassettes/2.14.0_ddn145/lctl_recovery_status_output.json @@ -0,0 +1 @@ +{"command":"lctl","args":["get_param","-N","obdfilter.*OST*.recovery_status","mdt.*MDT*.recovery_status"],"stdout":"obdfilter.fs-OST0000.recovery_status\nobdfilter.fs-OST0001.recovery_status\nmdt.fs-MDT0000.recovery_status\n","stderr":""} \ No newline at end of file diff --git a/cassettes/2.14.0_ddn145/lnetctl_show_output.json b/cassettes/2.14.0_ddn145/lnetctl_show_output.json new file mode 100644 index 0000000..e195ee0 --- /dev/null +++ b/cassettes/2.14.0_ddn145/lnetctl_show_output.json @@ -0,0 +1 
@@ +{"command":"lnetctl","args":["net","show","-v","4"],"stdout":"net:\n - net type: lo\n local NI(s):\n - nid: 0@lo\n status: up\n statistics:\n send_count: 284847\n recv_count: 284844\n drop_count: 3\n sent_stats:\n put: 284847\n get: 0\n reply: 0\n ack: 0\n hello: 0\n received_stats:\n put: 284827\n get: 0\n reply: 0\n ack: 17\n hello: 0\n dropped_stats:\n put: 3\n get: 0\n reply: 0\n ack: 0\n hello: 0\n health stats:\n fatal_error: 0\n health value: 1000\n interrupts: 0\n dropped: 0\n aborted: 0\n no route: 0\n timeouts: 0\n error: 0\n ping_count: 0\n next_ping: 0\n tunables:\n peer_timeout: 0\n peer_credits: 0\n peer_buffer_credits: 0\n credits: 0\n lnd tunables:\n dev cpt: 0\n CPT: \"[0,1,2,3]\"\n - net type: tcp\n local NI(s):\n - nid: 10.73.20.11@tcp\n status: up\n interfaces:\n 0: bond0\n statistics:\n send_count: 445690\n recv_count: 445689\n drop_count: 3\n sent_stats:\n put: 350290\n get: 95400\n reply: 0\n ack: 0\n hello: 0\n received_stats:\n put: 350269\n get: 47699\n reply: 47701\n ack: 20\n hello: 0\n dropped_stats:\n put: 3\n get: 0\n reply: 0\n ack: 0\n hello: 0\n health stats:\n fatal_error: 0\n health value: 1000\n interrupts: 0\n dropped: 0\n aborted: 0\n no route: 0\n timeouts: 0\n error: 0\n ping_count: 0\n next_ping: 0\n tunables:\n peer_timeout: 180\n peer_credits: 8\n peer_buffer_credits: 0\n credits: 256\n lnd tunables:\n conns_per_peer: 1\n dev cpt: -1\n CPT: \"[0,1,2,3]\"\n","stderr":""} \ No newline at end of file diff --git a/cassettes/2.14.0_ddn145/lnetctl_stats_output.json b/cassettes/2.14.0_ddn145/lnetctl_stats_output.json new file mode 100644 index 0000000..694bb2f --- /dev/null +++ b/cassettes/2.14.0_ddn145/lnetctl_stats_output.json @@ -0,0 +1 @@ +{"command":"lnetctl","args":["stats","show"],"stdout":"statistics:\n msgs_alloc: 0\n msgs_max: 20\n rst_alloc: 10\n errors: 0\n send_count: 730537\n resend_count: 0\n response_timeout_count: 0\n local_interrupt_count: 0\n local_dropped_count: 0\n local_aborted_count: 0\n local_no_route_count: 0\n local_timeout_count: 0\n local_error_count: 0\n remote_dropped_count: 0\n remote_error_count: 0\n remote_timeout_count: 0\n network_timeout_count: 0\n recv_count: 730533\n route_count: 0\n drop_count: 6\n send_length: 205930288\n recv_length: 192265344\n route_length: 0\n drop_length: 3120\n","stderr":""} \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 12bada7..b4d0d7e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,12 +22,18 @@ mod stats_parser; mod time; mod top_level_parser; pub mod types; +pub mod utils; pub use crate::error::LustreCollectorError; +use crate::mgs::mgs_fs_parser; +use crate::utils::{get_output, CommandMock, CommandMode}; use combine::parser::EasyParser; pub use lnetctl_parser::parse as parse_lnetctl_output; pub use lnetctl_parser::parse_lnetctl_stats; pub use node_stats_parsers::{parse_cpustats_output, parse_meminfo_output}; +use std::panic; +use std::path::PathBuf; +use std::thread; use std::{io, str}; pub use types::*; @@ -79,6 +85,148 @@ pub fn parse_recovery_status_output( check_output(recovery_statuses, state) } +fn get_lctl_output(mode: &CommandMode, path: &PathBuf) -> Result, LustreCollectorError> { + let mock = CommandMock::new("lctl_output") + .with_mode(*mode) + .with_path(path); + let mut args = vec!["get_param".to_string()]; + args.extend(parser::params()); + get_output("lctl", args, mock) +} + +fn get_lctl_mgs_fs_output( + mode: &CommandMode, + path: &PathBuf, +) -> Result, LustreCollectorError> { + let mock = CommandMock::new("lctl_mgs_fs_output") + 
.with_mode(*mode) + .with_path(path); + let mut args = vec!["get_param".to_string(), "-N".to_string()]; + args.extend(mgs_fs_parser::params()); + get_output("lctl", args, mock) +} + +fn get_recovery_status_output( + mode: &CommandMode, + path: &PathBuf, +) -> Result<Vec<u8>, LustreCollectorError> { + let mock = CommandMock::new("lctl_recovery_status_output") + .with_mode(*mode) + .with_path(path); + let mut args: Vec<String> = vec!["get_param".to_string(), "-N".to_string()]; + args.extend(recovery_status_parser::params()); + get_output("lctl", args, mock) +} + +fn get_lnetctl_stats_output( + mode: &CommandMode, + path: &PathBuf, +) -> Result<Vec<u8>, LustreCollectorError> { + let mock = CommandMock::new("lnetctl_stats_output") + .with_mode(*mode) + .with_path(path); + get_output( + "lnetctl", + ["stats", "show"] + .into_iter() + .map(|x| x.to_string()) + .collect(), + mock, + ) +} + +fn get_lnetctl_show_output( + mode: &CommandMode, + path: &PathBuf, +) -> Result<Vec<u8>, LustreCollectorError> { + let mock = CommandMock::new("lnetctl_show_output") + .with_mode(*mode) + .with_path(path); + get_output( + "lnetctl", + ["net", "show", "-v", "4"] + .into_iter() + .map(|x| x.to_string()) + .collect(), + mock, + ) +} + +pub fn parse(mode: &CommandMode, path: &PathBuf) -> Result<Vec<Record>, LustreCollectorError> { + let mode_clone = *mode; + let path_clone = path.clone(); + let handle = thread::spawn(move || -> Result<Vec<Record>, LustreCollectorError> { + let lctl_output = get_lctl_output(&mode_clone, &path_clone)?; + + let lctl_record = parse_lctl_output(&lctl_output)?; + + Ok(lctl_record) + }); + let mode_clone = *mode; + let path_clone = path.clone(); + let mgs_fs_handle = thread::spawn(move || -> Result<Vec<Record>, LustreCollectorError> { + let lctl_output = get_lctl_mgs_fs_output(&mode_clone, &path_clone)?; + let lctl_record = parse_mgs_fs_output(&lctl_output)?; + + Ok(lctl_record) + }); + + let mode_clone = *mode; + let path_clone = path.clone(); + let lnetctl_stats_handle = + thread::spawn(move || -> Result<Vec<Record>, LustreCollectorError> { + let lnetctl_stats_output = get_lnetctl_stats_output(&mode_clone, &path_clone)?; + let lnetctl_stats_record = parse_lnetctl_stats(str::from_utf8(&lnetctl_stats_output)?)?; + + Ok(lnetctl_stats_record) + }); + + let mode_clone = *mode; + let path_clone = path.clone(); + let recovery_status_handle = + thread::spawn(move || -> Result<Vec<Record>, LustreCollectorError> { + let recovery_status_output = get_recovery_status_output(&mode_clone, &path_clone)?; + let recovery_statuses = parse_recovery_status_output(&recovery_status_output)?; + + Ok(recovery_statuses) + }); + + let lnetctl_net_show_output = get_lnetctl_show_output(mode, path)?; + + let lnetctl_net_show_stats = str::from_utf8(&lnetctl_net_show_output) + .expect("while converting 'lnetctl net show -v 4' stdout from utf8"); + + let mut lnet_record = parse_lnetctl_output(lnetctl_net_show_stats) + .expect("while parsing 'lnetctl net show -v 4' stats"); + + let mut lctl_record = match handle.join() { + Ok(r) => r?, + Err(e) => panic::resume_unwind(e), + }; + + let mut mgs_fs_record = match mgs_fs_handle.join() { + Ok(r) => r.unwrap_or_default(), + Err(e) => panic::resume_unwind(e), + }; + + let mut recovery_status_records = match recovery_status_handle.join() { + Ok(r) => r.unwrap_or_default(), + Err(e) => panic::resume_unwind(e), + }; + + let mut lnetctl_stats_record = match lnetctl_stats_handle.join() { + Ok(r) => r.unwrap_or_default(), + Err(e) => panic::resume_unwind(e), + }; + + lctl_record.append(&mut lnet_record); + lctl_record.append(&mut mgs_fs_record); + lctl_record.append(&mut 
recovery_status_records); + lctl_record.append(&mut lnetctl_stats_record); + + Ok(lctl_record) +} + #[cfg(test)] mod tests { use super::{parse_lctl_output, Record}; diff --git a/src/main.rs b/src/main.rs index bf77704..c2bafe6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -3,18 +3,13 @@ // license that can be found in the LICENSE file. use clap::{value_parser, Arg, ValueEnum}; -use lustre_collector::{ - error::LustreCollectorError, mgs::mgs_fs_parser, parse_lctl_output, parse_lnetctl_output, - parse_lnetctl_stats, parse_mgs_fs_output, parse_recovery_status_output, parser, - recovery_status_parser, types::Record, -}; +use lustre_collector::{error::LustreCollectorError, parse, utils::CommandMode}; use std::{ - fmt, panic, - process::{Command, ExitCode}, + fmt, + path::PathBuf, + process::ExitCode, str::{self, FromStr}, - thread, }; -use tracing::debug; #[derive(ValueEnum, PartialEq, Debug, Clone, Copy)] enum Format { @@ -43,44 +38,6 @@ impl fmt::Display for Format { } } -fn get_lctl_output() -> Result<Vec<u8>, LustreCollectorError> { - let lctl_params = parser::params(); - - debug!(lctl_params = lctl_params.join(" ")); - - let r = Command::new("lctl") - .arg("get_param") - .args(lctl_params) - .output()?; - - Ok(r.stdout) -} - -fn get_lctl_mgs_fs_output() -> Result<Vec<u8>, LustreCollectorError> { - let r = Command::new("lctl") - .arg("get_param") - .arg("-N") - .args(mgs_fs_parser::params()) - .output()?; - - Ok(r.stdout) -} - -fn get_recovery_status_output() -> Result<Vec<u8>, LustreCollectorError> { - let r = Command::new("lctl") - .arg("get_param") - .args(recovery_status_parser::params()) - .output()?; - - Ok(r.stdout) -} - -fn get_lnetctl_stats_output() -> Result<Vec<u8>, LustreCollectorError> { - let r = Command::new("lnetctl").arg("stats").arg("show").output()?; - - Ok(r.stdout) -} - fn main() -> ExitCode { match run() { Ok(()) => ExitCode::SUCCESS, @@ -99,86 +56,31 @@ fn run() -> Result<(), LustreCollectorError> { .version(env!("CARGO_PKG_VERSION")) .author("Whamcloud") .about("Grabs various Lustre statistics for display in JSON or YAML") - .arg( + .args(vec![ Arg::new("format") .short('f') .long("format") .value_parser(value_parser!(Format)) .default_value("json") .help("Sets the output formatting"), - ) + Arg::new("mode") + .short('m') + .long("mode") + .value_parser(value_parser!(CommandMode)) + .default_value("none") + .help("Records or replays command output for integration testing"), + ]) .get_matches(); let format = matches .get_one::<Format>("format") .expect("Required argument `format` missing"); - let handle = thread::spawn(move || -> Result<Vec<Record>, LustreCollectorError> { - let lctl_output = get_lctl_output()?; - - let lctl_record = parse_lctl_output(&lctl_output)?; - - Ok(lctl_record) - }); - - let mgs_fs_handle = thread::spawn(move || -> Result<Vec<Record>, LustreCollectorError> { - let lctl_output = get_lctl_mgs_fs_output()?; - let lctl_record = parse_mgs_fs_output(&lctl_output)?; - - Ok(lctl_record) - }); - - let lnetctl_stats_handle = - thread::spawn(move || -> Result<Vec<Record>, LustreCollectorError> { - let lnetctl_stats_output = get_lnetctl_stats_output()?; - let lnetctl_stats_record = parse_lnetctl_stats(str::from_utf8(&lnetctl_stats_output)?)?; - - Ok(lnetctl_stats_record) - }); - - let recovery_status_handle = - thread::spawn(move || -> Result<Vec<Record>, LustreCollectorError> { - let recovery_status_output = get_recovery_status_output()?; - let recovery_statuses = parse_recovery_status_output(&recovery_status_output)?; - - Ok(recovery_statuses) - }); - - let lnetctl_net_show_output = Command::new("lnetctl") - .args(["net", "show", "-v", 
"4"]) - .output() - .expect("failed to get lnetctl stats"); - - let lnetctl_net_show_stats = str::from_utf8(&lnetctl_net_show_output.stdout) - .expect("while converting 'lnetctl net show -v 4' stdout from utf8"); - - let mut lnet_record = parse_lnetctl_output(lnetctl_net_show_stats) - .expect("while parsing 'lnetctl net show -v 4' stats"); - - let mut lctl_record = match handle.join() { - Ok(r) => r?, - Err(e) => panic::resume_unwind(e), - }; - - let mut mgs_fs_record = match mgs_fs_handle.join() { - Ok(r) => r.unwrap_or_default(), - Err(e) => panic::resume_unwind(e), - }; - - let mut recovery_status_records = match recovery_status_handle.join() { - Ok(r) => r.unwrap_or_default(), - Err(e) => panic::resume_unwind(e), - }; - - let mut lnetctl_stats_record = match lnetctl_stats_handle.join() { - Ok(r) => r.unwrap_or_default(), - Err(e) => panic::resume_unwind(e), - }; + let mode = matches + .get_one::("mode") + .expect("Required argument `mode` missing"); - lctl_record.append(&mut lnet_record); - lctl_record.append(&mut mgs_fs_record); - lctl_record.append(&mut recovery_status_records); - lctl_record.append(&mut lnetctl_stats_record); + let lctl_record = parse(mode, &PathBuf::new())?; let x = match format { Format::Json => serde_json::to_string(&lctl_record)?, diff --git a/src/utils.rs b/src/utils.rs new file mode 100644 index 0000000..dff3272 --- /dev/null +++ b/src/utils.rs @@ -0,0 +1,106 @@ +use clap::ValueEnum; +use serde::{Deserialize, Serialize}; +use std::{ + fs::OpenOptions, + io::{Read, Write}, + path::PathBuf, + process::Command, +}; + +use crate::error::LustreCollectorError; + +#[derive(Serialize, Deserialize)] +struct CommandOutput { + command: String, + args: Vec, + stdout: String, + stderr: String, +} + +pub struct CommandMock { + pub name: String, + pub mode: CommandMode, + pub path: Option, +} + +#[derive(ValueEnum, PartialEq, Debug, Clone, Copy)] +pub enum CommandMode { + None, + Record, + Play, +} +impl CommandMock { + pub fn with_mode(mut self, mode: CommandMode) -> Self { + self.mode = mode; + self + } + + pub fn with_path(mut self, path: impl Into) -> Self { + self.path = Some(path.into()); + self + } + + pub fn new(name: impl Into) -> Self { + Self { + name: name.into(), + mode: CommandMode::None, + path: None, + } + } +} + +fn execute_command(command: &str, args: &[String]) -> std::io::Result { + let output = Command::new(command).args(args).output()?; + + Ok(CommandOutput { + command: command.to_string(), + args: args.to_vec(), + stdout: String::from_utf8_lossy(&output.stdout).into_owned(), + stderr: String::from_utf8_lossy(&output.stderr).into_owned(), + }) +} + +fn write_to_file(cmd_output: &CommandOutput, pb: PathBuf) -> std::io::Result<()> { + let buffer = serde_json::to_string(cmd_output)?; + let mut file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(pb)?; + file.write_all(buffer.as_bytes())?; + Ok(()) +} + +fn read_from_file(pb: PathBuf) -> std::io::Result { + let mut file = OpenOptions::new().read(true).open(pb)?; + + let mut buffer = String::new(); + file.read_to_string(&mut buffer)?; + let cmd_output: CommandOutput = serde_json::from_str(&buffer)?; + Ok(cmd_output) +} + +pub fn get_output( + command: &str, + args: Vec, + mock: CommandMock, +) -> Result, LustreCollectorError> { + let CommandMock { name, mode, path } = mock; + let pb = path.unwrap_or(PathBuf::from("")); + let pb = pb.join(format!("{name}.json")); + match mode { + CommandMode::Record => { + let cmd_output = execute_command(command, &args)?; + 
write_to_file(&cmd_output, pb)?; + Ok(cmd_output.stdout.into_bytes()) + } + CommandMode::Play => { + let cmd_output = read_from_file(pb)?; + Ok(cmd_output.stdout.into_bytes()) + } + CommandMode::None => { + let cmd_output = execute_command(command, &args)?; + Ok(cmd_output.stdout.into_bytes()) + } + } +} diff --git a/tests/lustre_integration_tests.rs b/tests/lustre_integration_tests.rs new file mode 100644 index 0000000..bb748ea --- /dev/null +++ b/tests/lustre_integration_tests.rs @@ -0,0 +1,23 @@ +#[cfg(test)] +mod lustre_integration_tests { + use std::path::PathBuf; + + use insta::assert_debug_snapshot; + use lustre_collector::parse; + use lustre_collector::utils::CommandMode; + + macro_rules! generate_test { + ($name:ident, $version:expr, $mode:expr) => { + #[test] + fn $name() { + let cargo_dir: &str = env!("CARGO_MANIFEST_DIR"); + let version: &str = $version; + let path = PathBuf::from(format!("{cargo_dir}/cassettes/{version}/")); + let mode = $mode; + assert_debug_snapshot!(parse(&mode, &path)); + } + }; + } + + generate_test!(test_lustre_ddn145, "2.14.0_ddn145", CommandMode::Play); +} diff --git a/tests/snapshots/lustre_integration_tests__lustre_integration_tests__lustre_ddn145.snap b/tests/snapshots/lustre_integration_tests__lustre_integration_tests__lustre_ddn145.snap new file mode 100644 index 0000000..1d1690a --- /dev/null +++ b/tests/snapshots/lustre_integration_tests__lustre_integration_tests__lustre_ddn145.snap @@ -0,0 +1,4594 @@ +--- +source: tests/lustre_integration_tests.rs +expression: "parse(&mode, &path)" +--- +Ok( + [ + Host( + Memused( + HostStat { + param: Param( + "memused", + ), + value: 980590419, + }, + ), + ), + Host( + MemusedMax( + HostStat { + param: Param( + "memused_max", + ), + value: 981936083, + }, + ), + ), + Host( + LNetMemUsed( + HostStat { + param: Param( + "lnet_memused", + ), + value: 53235916, + }, + ), + ), + Host( + HealthCheck( + HostStat { + param: Param( + "health_check", + ), + value: HealthCheckStat { + healthy: true, + targets: [], + }, + }, + ), + ), + Target( + ConnectedClients( + TargetStat { + kind: Mdt, + param: Param( + "connected_clients", + ), + target: Target( + "fs-MDT0000", + ), + value: 1, + }, + ), + ), + Target( + FilesFree( + TargetStat { + kind: Mgt, + param: Param( + "filesfree", + ), + target: Target( + "MGS", + ), + value: 32573, + }, + ), + ), + Target( + FilesFree( + TargetStat { + kind: Mdt, + param: Param( + "filesfree", + ), + target: Target( + "fs-MDT0000", + ), + value: 1885355, + }, + ), + ), + Target( + FilesFree( + TargetStat { + kind: Ost, + param: Param( + "filesfree", + ), + target: Target( + "fs-OST0000", + ), + value: 40592, + }, + ), + ), + Target( + FilesFree( + TargetStat { + kind: Ost, + param: Param( + "filesfree", + ), + target: Target( + "fs-OST0001", + ), + value: 40592, + }, + ), + ), + Target( + FilesTotal( + TargetStat { + kind: Mgt, + param: Param( + "filestotal", + ), + target: Target( + "MGS", + ), + value: 32768, + }, + ), + ), + Target( + FilesTotal( + TargetStat { + kind: Mdt, + param: Param( + "filestotal", + ), + target: Target( + "fs-MDT0000", + ), + value: 1885696, + }, + ), + ), + Target( + FilesTotal( + TargetStat { + kind: Ost, + param: Param( + "filestotal", + ), + target: Target( + "fs-OST0000", + ), + value: 40960, + }, + ), + ), + Target( + FilesTotal( + TargetStat { + kind: Ost, + param: Param( + "filestotal", + ), + target: Target( + "fs-OST0001", + ), + value: 40960, + }, + ), + ), + Target( + FsType( + TargetStat { + kind: Mgt, + param: Param( + "fstype", + ), + target: 
Target( + "MGS", + ), + value: "ldiskfs", + }, + ), + ), + Target( + FsType( + TargetStat { + kind: Mdt, + param: Param( + "fstype", + ), + target: Target( + "fs-MDT0000", + ), + value: "ldiskfs", + }, + ), + ), + Target( + FsType( + TargetStat { + kind: Ost, + param: Param( + "fstype", + ), + target: Target( + "fs-OST0000", + ), + value: "ldiskfs", + }, + ), + ), + Target( + FsType( + TargetStat { + kind: Ost, + param: Param( + "fstype", + ), + target: Target( + "fs-OST0001", + ), + value: "ldiskfs", + }, + ), + ), + Target( + KBytesAvail( + TargetStat { + kind: Mgt, + param: Param( + "kbytesavail", + ), + target: Target( + "MGS", + ), + value: 463708, + }, + ), + ), + Target( + KBytesAvail( + TargetStat { + kind: Mdt, + param: Param( + "kbytesavail", + ), + target: Target( + "fs-MDT0000", + ), + value: 2366504, + }, + ), + ), + Target( + KBytesAvail( + TargetStat { + kind: Ost, + param: Param( + "kbytesavail", + ), + target: Target( + "fs-OST0000", + ), + value: 4038040, + }, + ), + ), + Target( + KBytesAvail( + TargetStat { + kind: Ost, + param: Param( + "kbytesavail", + ), + target: Target( + "fs-OST0001", + ), + value: 4038040, + }, + ), + ), + Target( + KBytesFree( + TargetStat { + kind: Mgt, + param: Param( + "kbytesfree", + ), + target: Target( + "MGS", + ), + value: 489920, + }, + ), + ), + Target( + KBytesFree( + TargetStat { + kind: Mdt, + param: Param( + "kbytesfree", + ), + target: Target( + "fs-MDT0000", + ), + value: 2600612, + }, + ), + ), + Target( + KBytesFree( + TargetStat { + kind: Ost, + param: Param( + "kbytesfree", + ), + target: Target( + "fs-OST0000", + ), + value: 4106852, + }, + ), + ), + Target( + KBytesFree( + TargetStat { + kind: Ost, + param: Param( + "kbytesfree", + ), + target: Target( + "fs-OST0001", + ), + value: 4106852, + }, + ), + ), + Target( + KBytesTotal( + TargetStat { + kind: Mgt, + param: Param( + "kbytestotal", + ), + target: Target( + "MGS", + ), + value: 491092, + }, + ), + ), + Target( + KBytesTotal( + TargetStat { + kind: Mdt, + param: Param( + "kbytestotal", + ), + target: Target( + "fs-MDT0000", + ), + value: 2602832, + }, + ), + ), + Target( + KBytesTotal( + TargetStat { + kind: Ost, + param: Param( + "kbytestotal", + ), + target: Target( + "fs-OST0000", + ), + value: 4108388, + }, + ), + ), + Target( + KBytesTotal( + TargetStat { + kind: Ost, + param: Param( + "kbytestotal", + ), + target: Target( + "fs-OST0001", + ), + value: 4108388, + }, + ), + ), + Target( + BrwStats( + TargetStat { + kind: Mgt, + param: Param( + "brw_stats", + ), + target: Target( + "MGS", + ), + value: [ + BrwStats { + name: "pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_blocks", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "dio_frags", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "rpc_hist", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "io_time", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "disk_iosize", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "block_maps_msec", + unit: "maps", + buckets: [], + }, + ], + }, + ), + ), + Target( + BrwStats( + TargetStat { + kind: Mdt, + param: Param( + "brw_stats", + ), + target: Target( + "fs-MDT0000", + ), + value: [ + BrwStats { + name: "pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_blocks", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "dio_frags", + unit: 
"ios", + buckets: [], + }, + BrwStats { + name: "rpc_hist", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "io_time", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "disk_iosize", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "block_maps_msec", + unit: "maps", + buckets: [], + }, + ], + }, + ), + ), + Target( + BrwStats( + TargetStat { + kind: Ost, + param: Param( + "brw_stats", + ), + target: Target( + "fs-OST0000", + ), + value: [ + BrwStats { + name: "pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_blocks", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "dio_frags", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "rpc_hist", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "io_time", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "disk_iosize", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "block_maps_msec", + unit: "maps", + buckets: [], + }, + ], + }, + ), + ), + Target( + BrwStats( + TargetStat { + kind: Ost, + param: Param( + "brw_stats", + ), + target: Target( + "fs-OST0001", + ), + value: [ + BrwStats { + name: "pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_blocks", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "dio_frags", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "rpc_hist", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "io_time", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "disk_iosize", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "block_maps_msec", + unit: "maps", + buckets: [], + }, + ], + }, + ), + ), + Target( + BrwStats( + TargetStat { + kind: Mgt, + param: Param( + "brw_stats", + ), + target: Target( + "MGS", + ), + value: [ + BrwStats { + name: "pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_blocks", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "dio_frags", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "rpc_hist", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "io_time", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "disk_iosize", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "block_maps_msec", + unit: "maps", + buckets: [], + }, + ], + }, + ), + ), + Target( + BrwStats( + TargetStat { + kind: Mdt, + param: Param( + "brw_stats", + ), + target: Target( + "fs-MDT0000", + ), + value: [ + BrwStats { + name: "pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_blocks", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "dio_frags", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "rpc_hist", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "io_time", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "disk_iosize", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "block_maps_msec", + unit: "maps", + buckets: [], + }, + ], + }, + ), + ), + Target( + BrwStats( + TargetStat { + kind: Ost, + param: Param( + "brw_stats", + ), + target: Target( + "fs-OST0000", + ), + value: [ + BrwStats { + name: "pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_blocks", + unit: "rpcs", + buckets: 
[], + }, + BrwStats { + name: "dio_frags", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "rpc_hist", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "io_time", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "disk_iosize", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "block_maps_msec", + unit: "maps", + buckets: [], + }, + ], + }, + ), + ), + Target( + BrwStats( + TargetStat { + kind: Ost, + param: Param( + "brw_stats", + ), + target: Target( + "fs-OST0001", + ), + value: [ + BrwStats { + name: "pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_pages", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "discont_blocks", + unit: "rpcs", + buckets: [], + }, + BrwStats { + name: "dio_frags", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "rpc_hist", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "io_time", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "disk_iosize", + unit: "ios", + buckets: [], + }, + BrwStats { + name: "block_maps_msec", + unit: "maps", + buckets: [], + }, + ], + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Mdt, + param: Param( + "quota_slave.acct_group", + ), + target: Target( + "fs-MDT0000", + ), + value: QuotaStatsOsd { + kind: Grp, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 331, + kbytes: 2000, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Ost, + param: Param( + "quota_slave.acct_group", + ), + target: Target( + "fs-OST0000", + ), + value: QuotaStatsOsd { + kind: Grp, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 358, + kbytes: 1500, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Ost, + param: Param( + "quota_slave.acct_group", + ), + target: Target( + "fs-OST0001", + ), + value: QuotaStatsOsd { + kind: Grp, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 358, + kbytes: 1500, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Mdt, + param: Param( + "quota_slave.acct_user", + ), + target: Target( + "fs-MDT0000", + ), + value: QuotaStatsOsd { + kind: Usr, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 331, + kbytes: 2000, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Ost, + param: Param( + "quota_slave.acct_user", + ), + target: Target( + "fs-OST0000", + ), + value: QuotaStatsOsd { + kind: Usr, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 358, + kbytes: 1500, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Ost, + param: Param( + "quota_slave.acct_user", + ), + target: Target( + "fs-OST0001", + ), + value: QuotaStatsOsd { + kind: Usr, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 358, + kbytes: 1500, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Mdt, + param: Param( + "quota_slave.acct_project", + ), + target: Target( + "fs-MDT0000", + ), + value: QuotaStatsOsd { + kind: Prj, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 331, + kbytes: 2000, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Ost, + param: Param( + "quota_slave.acct_project", + ), + target: Target( + "fs-OST0000", + ), + value: QuotaStatsOsd { + kind: Prj, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 358, + kbytes: 1500, + }, + 
}, + ], + }, + }, + ), + ), + Target( + QuotaStatsOsd( + TargetStat { + kind: Ost, + param: Param( + "quota_slave.acct_project", + ), + target: Target( + "fs-OST0001", + ), + value: QuotaStatsOsd { + kind: Prj, + stats: [ + QuotaStatOsd { + id: 0, + usage: QuotaStatUsage { + inodes: 358, + kbytes: 1500, + }, + }, + ], + }, + }, + ), + ), + Target( + Stats( + TargetStat { + kind: Mgt, + param: Param( + "stats", + ), + target: Target( + "MGS", + ), + value: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 20839, + min: Some( + 10, + ), + max: Some( + 3463, + ), + sum: Some( + 959329, + ), + sumsquare: Some( + 104351375, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 20839, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 20839, + min: Some( + 1, + ), + max: Some( + 2, + ), + sum: Some( + 20860, + ), + sumsquare: Some( + 20902, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 20839, + min: Some( + 1, + ), + max: Some( + 15, + ), + sum: Some( + 310704, + ), + sumsquare: Some( + 4658634, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 47166, + min: Some( + 62, + ), + max: Some( + 64, + ), + sum: Some( + 2976844, + ), + sumsquare: Some( + 187885960, + ), + }, + Stat { + name: "ldlm_plain_enqueue", + units: "reqs", + samples: 93, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 93, + ), + sumsquare: Some( + 93, + ), + }, + Stat { + name: "mgs_connect", + units: "usecs", + samples: 2, + min: Some( + 62, + ), + max: Some( + 84, + ), + sum: Some( + 146, + ), + sumsquare: Some( + 10900, + ), + }, + Stat { + name: "mgs_target_reg", + units: "usecs", + samples: 12, + min: Some( + 138, + ), + max: Some( + 148848, + ), + sum: Some( + 550720, + ), + sumsquare: Some( + 46243628170, + ), + }, + Stat { + name: "mgs_config_read", + units: "usecs", + samples: 6, + min: Some( + 36, + ), + max: Some( + 251, + ), + sum: Some( + 742, + ), + sumsquare: Some( + 133396, + ), + }, + Stat { + name: "obd_ping", + units: "usecs", + samples: 20449, + min: Some( + 3, + ), + max: Some( + 1929, + ), + sum: Some( + 393420, + ), + sumsquare: Some( + 12186244, + ), + }, + Stat { + name: "llog_origin_handle_open", + units: "usecs", + samples: 84, + min: Some( + 13, + ), + max: Some( + 88, + ), + sum: Some( + 2030, + ), + sumsquare: Some( + 59304, + ), + }, + Stat { + name: "llog_origin_handle_next_block", + units: "usecs", + samples: 119, + min: Some( + 13, + ), + max: Some( + 11705, + ), + sum: Some( + 24977, + ), + sumsquare: Some( + 206872551, + ), + }, + Stat { + name: "llog_origin_handle_read_header", + units: "usecs", + samples: 74, + min: Some( + 14, + ), + max: Some( + 23370, + ), + sum: Some( + 90287, + ), + sumsquare: Some( + 1525693897, + ), + }, + ], + }, + ), + ), + Target( + ThreadsMax( + TargetStat { + kind: Mgt, + param: Param( + "threads_max", + ), + target: Target( + "MGS", + ), + value: 32, + }, + ), + ), + Target( + ThreadsMin( + TargetStat { + kind: Mgt, + param: Param( + "threads_min", + ), + target: Target( + "MGS", + ), + value: 3, + }, + ), + ), + Target( + ThreadsStarted( + TargetStat { + kind: Mgt, + param: Param( + "threads_started", + ), + target: Target( + "MGS", + ), + value: 3, + }, + ), + ), + Target( + NumExports( + TargetStat { + kind: Mgt, + param: Param( + "num_exports", + ), + target: Target( + "MGS", + ), + value: 2, + }, + ), + ), + Target( + JobStatsOst( + TargetStat { + kind: Ost, + 
param: Param( + "job_stats", + ), + target: Target( + "fs-OST0000", + ), + value: None, + }, + ), + ), + Target( + JobStatsOst( + TargetStat { + kind: Ost, + param: Param( + "job_stats", + ), + target: Target( + "fs-OST0001", + ), + value: None, + }, + ), + ), + Target( + Stats( + TargetStat { + kind: Ost, + param: Param( + "stats", + ), + target: Target( + "fs-OST0000", + ), + value: [ + Stat { + name: "create", + units: "usecs", + samples: 4, + min: Some( + 3, + ), + max: Some( + 1058, + ), + sum: Some( + 1459, + ), + sumsquare: Some( + 1227857, + ), + }, + Stat { + name: "statfs", + units: "usecs", + samples: 93473, + min: Some( + 0, + ), + max: Some( + 991, + ), + sum: Some( + 503210, + ), + sumsquare: Some( + 4535566, + ), + }, + Stat { + name: "get_info", + units: "usecs", + samples: 2, + min: Some( + 1217, + ), + max: Some( + 13065, + ), + sum: Some( + 14282, + ), + sumsquare: Some( + 172175314, + ), + }, + ], + }, + ), + ), + Target( + Stats( + TargetStat { + kind: Ost, + param: Param( + "stats", + ), + target: Target( + "fs-OST0001", + ), + value: [ + Stat { + name: "create", + units: "usecs", + samples: 4, + min: Some( + 3, + ), + max: Some( + 1049, + ), + sum: Some( + 1428, + ), + sumsquare: Some( + 1198476, + ), + }, + Stat { + name: "statfs", + units: "usecs", + samples: 93473, + min: Some( + 0, + ), + max: Some( + 137, + ), + sum: Some( + 270611, + ), + sumsquare: Some( + 1393943, + ), + }, + Stat { + name: "get_info", + units: "usecs", + samples: 2, + min: Some( + 788, + ), + max: Some( + 213514, + ), + sum: Some( + 214302, + ), + sumsquare: Some( + 45588849140, + ), + }, + ], + }, + ), + ), + Target( + NumExports( + TargetStat { + kind: Ost, + param: Param( + "num_exports", + ), + target: Target( + "fs-OST0000", + ), + value: 2, + }, + ), + ), + Target( + NumExports( + TargetStat { + kind: Ost, + param: Param( + "num_exports", + ), + target: Target( + "fs-OST0001", + ), + value: 2, + }, + ), + ), + Target( + TotDirty( + TargetStat { + kind: Ost, + param: Param( + "tot_dirty", + ), + target: Target( + "fs-OST0000", + ), + value: 0, + }, + ), + ), + Target( + TotDirty( + TargetStat { + kind: Ost, + param: Param( + "tot_dirty", + ), + target: Target( + "fs-OST0001", + ), + value: 0, + }, + ), + ), + Target( + TotGranted( + TargetStat { + kind: Ost, + param: Param( + "tot_granted", + ), + target: Target( + "fs-OST0000", + ), + value: 278208, + }, + ), + ), + Target( + TotGranted( + TargetStat { + kind: Ost, + param: Param( + "tot_granted", + ), + target: Target( + "fs-OST0001", + ), + value: 278208, + }, + ), + ), + Target( + TotPending( + TargetStat { + kind: Ost, + param: Param( + "tot_pending", + ), + target: Target( + "fs-OST0000", + ), + value: 0, + }, + ), + ), + Target( + TotPending( + TargetStat { + kind: Ost, + param: Param( + "tot_pending", + ), + target: Target( + "fs-OST0001", + ), + value: 0, + }, + ), + ), + Target( + ExportStats( + TargetStat { + kind: Ost, + param: Param( + "exports", + ), + target: Target( + "fs-OST0000", + ), + value: [ + ExportStats { + nid: "0@lo", + stats: [ + Stat { + name: "create", + units: "usecs", + samples: 2, + min: Some( + 78, + ), + max: Some( + 1058, + ), + sum: Some( + 1136, + ), + sumsquare: Some( + 1125448, + ), + }, + Stat { + name: "statfs", + units: "usecs", + samples: 46737, + min: Some( + 0, + ), + max: Some( + 196, + ), + sum: Some( + 198283, + ), + sumsquare: Some( + 1167593, + ), + }, + Stat { + name: "get_info", + units: "usecs", + samples: 1, + min: Some( + 1217, + ), + max: Some( + 1217, + ), + sum: Some( + 1217, + 
), + sumsquare: Some( + 1481089, + ), + }, + ], + }, + ], + }, + ), + ), + Target( + ExportStats( + TargetStat { + kind: Ost, + param: Param( + "exports", + ), + target: Target( + "fs-OST0000", + ), + value: [ + ExportStats { + nid: "10.73.20.12@tcp", + stats: [ + Stat { + name: "create", + units: "usecs", + samples: 2, + min: Some( + 3, + ), + max: Some( + 320, + ), + sum: Some( + 323, + ), + sumsquare: Some( + 102409, + ), + }, + Stat { + name: "statfs", + units: "usecs", + samples: 46736, + min: Some( + 1, + ), + max: Some( + 991, + ), + sum: Some( + 304927, + ), + sumsquare: Some( + 3367973, + ), + }, + Stat { + name: "get_info", + units: "usecs", + samples: 1, + min: Some( + 13065, + ), + max: Some( + 13065, + ), + sum: Some( + 13065, + ), + sumsquare: Some( + 170694225, + ), + }, + ], + }, + ], + }, + ), + ), + Target( + ExportStats( + TargetStat { + kind: Ost, + param: Param( + "exports", + ), + target: Target( + "fs-OST0001", + ), + value: [ + ExportStats { + nid: "0@lo", + stats: [ + Stat { + name: "create", + units: "usecs", + samples: 2, + min: Some( + 71, + ), + max: Some( + 1049, + ), + sum: Some( + 1120, + ), + sumsquare: Some( + 1105442, + ), + }, + Stat { + name: "statfs", + units: "usecs", + samples: 46737, + min: Some( + 1, + ), + max: Some( + 137, + ), + sum: Some( + 224516, + ), + sumsquare: Some( + 1299078, + ), + }, + Stat { + name: "get_info", + units: "usecs", + samples: 1, + min: Some( + 213514, + ), + max: Some( + 213514, + ), + sum: Some( + 213514, + ), + sumsquare: Some( + 45588228196, + ), + }, + ], + }, + ], + }, + ), + ), + Target( + ExportStats( + TargetStat { + kind: Ost, + param: Param( + "exports", + ), + target: Target( + "fs-OST0001", + ), + value: [ + ExportStats { + nid: "10.73.20.12@tcp", + stats: [ + Stat { + name: "create", + units: "usecs", + samples: 2, + min: Some( + 3, + ), + max: Some( + 305, + ), + sum: Some( + 308, + ), + sumsquare: Some( + 93034, + ), + }, + Stat { + name: "statfs", + units: "usecs", + samples: 46736, + min: Some( + 0, + ), + max: Some( + 125, + ), + sum: Some( + 46095, + ), + sumsquare: Some( + 94865, + ), + }, + Stat { + name: "get_info", + units: "usecs", + samples: 1, + min: Some( + 788, + ), + max: Some( + 788, + ), + sum: Some( + 788, + ), + sumsquare: Some( + 620944, + ), + }, + ], + }, + ], + }, + ), + ), + Target( + Oss( + OssStat { + param: Param( + "ost", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 21, + min: Some( + 8, + ), + max: Some( + 149, + ), + sum: Some( + 793, + ), + sumsquare: Some( + 51965, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 21, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 21, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 21, + ), + sumsquare: Some( + 21, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 21, + min: Some( + 1, + ), + max: Some( + 15, + ), + sum: Some( + 104, + ), + sumsquare: Some( + 1214, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 48, + min: Some( + 64, + ), + max: Some( + 64, + ), + sum: Some( + 3072, + ), + sumsquare: Some( + 196608, + ), + }, + Stat { + name: "ost_create", + units: "usecs", + samples: 8, + min: Some( + 13, + ), + max: Some( + 1080, + ), + sum: Some( + 3089, + ), + sumsquare: Some( + 2564761, + ), + }, + Stat { + name: "ost_get_info", + units: "usecs", + samples: 4, + min: Some( + 802, + ), + max: Some( + 213537, + ), + 
sum: Some( + 228656, + ), + sumsquare: Some( + 45771310142, + ), + }, + Stat { + name: "ost_connect", + units: "usecs", + samples: 6, + min: Some( + 34, + ), + max: Some( + 133, + ), + sum: Some( + 506, + ), + sumsquare: Some( + 48340, + ), + }, + Stat { + name: "ost_disconnect", + units: "usecs", + samples: 2, + min: Some( + 49, + ), + max: Some( + 64, + ), + sum: Some( + 113, + ), + sumsquare: Some( + 6497, + ), + }, + Stat { + name: "obd_ping", + units: "usecs", + samples: 1, + min: Some( + 11, + ), + max: Some( + 11, + ), + sum: Some( + 11, + ), + sumsquare: Some( + 121, + ), + }, + ], + }, + ), + ), + Target( + Oss( + OssStat { + param: Param( + "ost_io", + ), + stats: [], + }, + ), + ), + Target( + Oss( + OssStat { + param: Param( + "ost_create", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 186946, + min: Some( + 5, + ), + max: Some( + 7961, + ), + sum: Some( + 6734080, + ), + sumsquare: Some( + 1766607752, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 186946, + min: Some( + 0, + ), + max: Some( + 1, + ), + sum: Some( + 35, + ), + sumsquare: Some( + 35, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 186946, + min: Some( + 1, + ), + max: Some( + 2, + ), + sum: Some( + 191005, + ), + sumsquare: Some( + 199123, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 186946, + min: Some( + 1, + ), + max: Some( + 15, + ), + sum: Some( + 2803288, + ), + sumsquare: Some( + 42048238, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 376899, + min: Some( + 63, + ), + max: Some( + 64, + ), + sum: Some( + 24115843, + ), + sumsquare: Some( + 1543055293, + ), + }, + Stat { + name: "ost_statfs", + units: "usecs", + samples: 186946, + min: Some( + 5, + ), + max: Some( + 3634, + ), + sum: Some( + 4334895, + ), + sumsquare: Some( + 139489167, + ), + }, + ], + }, + ), + ), + Target( + Oss( + OssStat { + param: Param( + "ost_out", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 26391, + min: Some( + 15, + ), + max: Some( + 6022, + ), + sum: Some( + 1104980, + ), + sumsquare: Some( + 283199020, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 26391, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 26391, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 26391, + ), + sumsquare: Some( + 26391, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 26391, + min: Some( + 4, + ), + max: Some( + 15, + ), + sum: Some( + 395630, + ), + sumsquare: Some( + 5933420, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 53173, + min: Some( + 63, + ), + max: Some( + 64, + ), + sum: Some( + 3402571, + ), + sumsquare: Some( + 217732981, + ), + }, + Stat { + name: "mds_connect", + units: "usecs", + samples: 3, + min: Some( + 56, + ), + max: Some( + 1191, + ), + sum: Some( + 1829, + ), + sumsquare: Some( + 1760341, + ), + }, + Stat { + name: "mds_statfs", + units: "usecs", + samples: 26384, + min: Some( + 11, + ), + max: Some( + 989, + ), + sum: Some( + 827045, + ), + sumsquare: Some( + 28579743, + ), + }, + Stat { + name: "obd_ping", + units: "usecs", + samples: 1, + min: Some( + 14, + ), + max: Some( + 14, + ), + sum: Some( + 14, + ), + sumsquare: Some( + 196, + ), + }, + Stat { + name: "out_update", + units: "usecs", + samples: 3, + min: Some( + 17, + ), + max: Some( + 169, + ), + sum: Some( + 230, + ), 
+ sumsquare: Some( + 30786, + ), + }, + ], + }, + ), + ), + Target( + Oss( + OssStat { + param: Param( + "ost_seq", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 9, + min: Some( + 17, + ), + max: Some( + 35, + ), + sum: Some( + 205, + ), + sumsquare: Some( + 4937, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 9, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 9, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 9, + ), + sumsquare: Some( + 9, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 9, + min: Some( + 1, + ), + max: Some( + 10, + ), + sum: Some( + 36, + ), + sumsquare: Some( + 306, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 23, + min: Some( + 64, + ), + max: Some( + 64, + ), + sum: Some( + 1472, + ), + sumsquare: Some( + 94208, + ), + }, + Stat { + name: "seq_query", + units: "usecs", + samples: 9, + min: Some( + 9, + ), + max: Some( + 314216, + ), + sum: Some( + 355979, + ), + sumsquare: Some( + 100237783449, + ), + }, + ], + }, + ), + ), + Target( + Mds( + MdsStat { + param: Param( + "mdt", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 69240, + min: Some( + 3, + ), + max: Some( + 1896, + ), + sum: Some( + 3787926, + ), + sumsquare: Some( + 246613360, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 69240, + min: Some( + 0, + ), + max: Some( + 3, + ), + sum: Some( + 630, + ), + sumsquare: Some( + 1084, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 69240, + min: Some( + 1, + ), + max: Some( + 2, + ), + sum: Some( + 92698, + ), + sumsquare: Some( + 139614, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 69240, + min: Some( + 1, + ), + max: Some( + 15, + ), + sum: Some( + 1038275, + ), + sumsquare: Some( + 15573395, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 156760, + min: Some( + 63, + ), + max: Some( + 64, + ), + sum: Some( + 10032151, + ), + sumsquare: Some( + 642026857, + ), + }, + Stat { + name: "ldlm_ibits_enqueue", + units: "reqs", + samples: 2, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 2, + ), + sumsquare: Some( + 2, + ), + }, + Stat { + name: "ost_set_info", + units: "usecs", + samples: 151, + min: Some( + 16, + ), + max: Some( + 63, + ), + sum: Some( + 4789, + ), + sumsquare: Some( + 166837, + ), + }, + Stat { + name: "mds_connect", + units: "usecs", + samples: 11, + min: Some( + 10, + ), + max: Some( + 1172, + ), + sum: Some( + 2703, + ), + sumsquare: Some( + 2586521, + ), + }, + Stat { + name: "mds_get_root", + units: "usecs", + samples: 1, + min: Some( + 10, + ), + max: Some( + 10, + ), + sum: Some( + 10, + ), + sumsquare: Some( + 100, + ), + }, + Stat { + name: "mds_statfs", + units: "usecs", + samples: 2, + min: Some( + 24, + ), + max: Some( + 25, + ), + sum: Some( + 49, + ), + sumsquare: Some( + 1201, + ), + }, + Stat { + name: "obd_ping", + units: "usecs", + samples: 69073, + min: Some( + 2, + ), + max: Some( + 237, + ), + sum: Some( + 636552, + ), + sumsquare: Some( + 9041646, + ), + }, + ], + }, + ), + ), + Target( + Mds( + MdsStat { + param: Param( + "mdt_fld", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 4, + min: Some( + 33, + ), + max: Some( + 730, + ), + sum: Some( + 923, + ), + sumsquare: Some( + 547847, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", 
+ samples: 4, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 4, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 4, + ), + sumsquare: Some( + 4, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 4, + min: Some( + 1, + ), + max: Some( + 15, + ), + sum: Some( + 27, + ), + sumsquare: Some( + 327, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 11, + min: Some( + 64, + ), + max: Some( + 64, + ), + sum: Some( + 704, + ), + sumsquare: Some( + 45056, + ), + }, + Stat { + name: "fld_query", + units: "usecs", + samples: 2, + min: Some( + 19, + ), + max: Some( + 22, + ), + sum: Some( + 41, + ), + sumsquare: Some( + 845, + ), + }, + Stat { + name: "fld_read", + units: "usecs", + samples: 2, + min: Some( + 15, + ), + max: Some( + 33, + ), + sum: Some( + 48, + ), + sumsquare: Some( + 1314, + ), + }, + ], + }, + ), + ), + Target( + Mds( + MdsStat { + param: Param( + "mdt_io", + ), + stats: [], + }, + ), + ), + Target( + Mds( + MdsStat { + param: Param( + "mdt_out", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 20352, + min: Some( + 17, + ), + max: Some( + 6390, + ), + sum: Some( + 826823, + ), + sumsquare: Some( + 131129985, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 20352, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 20352, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 20352, + ), + sumsquare: Some( + 20352, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 20352, + min: Some( + 15, + ), + max: Some( + 15, + ), + sum: Some( + 305280, + ), + sumsquare: Some( + 4579200, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 41020, + min: Some( + 63, + ), + max: Some( + 64, + ), + sum: Some( + 2624894, + ), + sumsquare: Some( + 167968898, + ), + }, + Stat { + name: "mds_statfs", + units: "usecs", + samples: 20352, + min: Some( + 12, + ), + max: Some( + 2198, + ), + sum: Some( + 642351, + ), + sumsquare: Some( + 26507153, + ), + }, + ], + }, + ), + ), + Target( + Mds( + MdsStat { + param: Param( + "mdt_readpage", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 49, + min: Some( + 9, + ), + max: Some( + 5107, + ), + sum: Some( + 45252, + ), + sumsquare: Some( + 153545540, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 49, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 49, + min: Some( + 1, + ), + max: Some( + 4, + ), + sum: Some( + 124, + ), + sumsquare: Some( + 380, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 49, + min: Some( + 10, + ), + max: Some( + 15, + ), + sum: Some( + 680, + ), + sumsquare: Some( + 9650, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 103, + min: Some( + 62, + ), + max: Some( + 64, + ), + sum: Some( + 6534, + ), + sumsquare: Some( + 414540, + ), + }, + Stat { + name: "ldlm_ibits_enqueue", + units: "reqs", + samples: 24, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 24, + ), + sumsquare: Some( + 24, + ), + }, + Stat { + name: "mds_getattr", + units: "usecs", + samples: 1, + min: Some( + 97, + ), + max: Some( + 97, + ), + sum: Some( + 97, + ), + sumsquare: Some( + 9409, 
+ ), + }, + Stat { + name: "dt_index_read", + units: "usecs", + samples: 24, + min: Some( + 254, + ), + max: Some( + 2757, + ), + sum: Some( + 30869, + ), + sumsquare: Some( + 61553697, + ), + }, + ], + }, + ), + ), + Target( + Mds( + MdsStat { + param: Param( + "mdt_seqm", + ), + stats: [], + }, + ), + ), + Target( + Mds( + MdsStat { + param: Param( + "mdt_seqs", + ), + stats: [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 4, + min: Some( + 34, + ), + max: Some( + 108, + ), + sum: Some( + 307, + ), + sumsquare: Some( + 27633, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 4, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 4, + min: Some( + 1, + ), + max: Some( + 3, + ), + sum: Some( + 7, + ), + sumsquare: Some( + 15, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 4, + min: Some( + 1, + ), + max: Some( + 10, + ), + sum: Some( + 13, + ), + sumsquare: Some( + 103, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 9, + min: Some( + 64, + ), + max: Some( + 64, + ), + sum: Some( + 576, + ), + sumsquare: Some( + 36864, + ), + }, + Stat { + name: "seq_query", + units: "usecs", + samples: 4, + min: Some( + 20676, + ), + max: Some( + 323036, + ), + sum: Some( + 665386, + ), + sumsquare: Some( + 161611882868, + ), + }, + ], + }, + ), + ), + Target( + Mds( + MdsStat { + param: Param( + "mdt_setattr", + ), + stats: [], + }, + ), + ), + Target( + JobStatsMdt( + TargetStat { + kind: Mdt, + param: Param( + "job_stats", + ), + target: Target( + "fs-MDT0000", + ), + value: None, + }, + ), + ), + Target( + Stats( + TargetStat { + kind: Mdt, + param: Param( + "md_stats", + ), + target: Target( + "fs-MDT0000", + ), + value: [ + Stat { + name: "getattr", + units: "usecs", + samples: 3, + min: Some( + 11, + ), + max: Some( + 53, + ), + sum: Some( + 117, + ), + sumsquare: Some( + 5739, + ), + }, + Stat { + name: "statfs", + units: "usecs", + samples: 46738, + min: Some( + 2, + ), + max: Some( + 68, + ), + sum: Some( + 320391, + ), + sumsquare: Some( + 2612045, + ), + }, + ], + }, + ), + ), + Target( + NumExports( + TargetStat { + kind: Mdt, + param: Param( + "num_exports", + ), + target: Target( + "fs-MDT0000", + ), + value: 8, + }, + ), + ), + Target( + ExportStats( + TargetStat { + kind: Mdt, + param: Param( + "exports", + ), + target: Target( + "fs-MDT0000", + ), + value: [ + ExportStats { + nid: "0@lo", + stats: [ + Stat { + name: "getattr", + units: "usecs", + samples: 3, + min: Some( + 11, + ), + max: Some( + 53, + ), + sum: Some( + 117, + ), + sumsquare: Some( + 5739, + ), + }, + Stat { + name: "statfs", + units: "usecs", + samples: 2, + min: Some( + 2, + ), + max: Some( + 6, + ), + sum: Some( + 8, + ), + sumsquare: Some( + 40, + ), + }, + ], + }, + ], + }, + ), + ), + Target( + ExportStats( + TargetStat { + kind: Mdt, + param: Param( + "exports", + ), + target: Target( + "fs-MDT0000", + ), + value: [ + ExportStats { + nid: "10.73.20.12@tcp", + stats: [ + Stat { + name: "statfs", + units: "usecs", + samples: 46736, + min: Some( + 3, + ), + max: Some( + 68, + ), + sum: Some( + 320383, + ), + sumsquare: Some( + 2612005, + ), + }, + ], + }, + ], + }, + ), + ), + Target( + ContendedLocks( + TargetStat { + kind: Mdt, + param: Param( + "contended_locks", + ), + target: Target( + "fs-MDT0000", + ), + value: 32, + }, + ), + ), + Target( + ContendedLocks( + TargetStat { + kind: Ost, + param: Param( + "contended_locks", + 
), + target: Target( + "fs-OST0000", + ), + value: 32, + }, + ), + ), + Target( + ContendedLocks( + TargetStat { + kind: Ost, + param: Param( + "contended_locks", + ), + target: Target( + "fs-OST0001", + ), + value: 32, + }, + ), + ), + Target( + ContentionSeconds( + TargetStat { + kind: Mdt, + param: Param( + "contention_seconds", + ), + target: Target( + "fs-MDT0000", + ), + value: 2, + }, + ), + ), + Target( + ContentionSeconds( + TargetStat { + kind: Ost, + param: Param( + "contention_seconds", + ), + target: Target( + "fs-OST0000", + ), + value: 2, + }, + ), + ), + Target( + ContentionSeconds( + TargetStat { + kind: Ost, + param: Param( + "contention_seconds", + ), + target: Target( + "fs-OST0001", + ), + value: 2, + }, + ), + ), + Target( + CtimeAgeLimit( + TargetStat { + kind: Mdt, + param: Param( + "ctime_age_limit", + ), + target: Target( + "fs-MDT0000", + ), + value: 10, + }, + ), + ), + Target( + CtimeAgeLimit( + TargetStat { + kind: Ost, + param: Param( + "ctime_age_limit", + ), + target: Target( + "fs-OST0000", + ), + value: 10, + }, + ), + ), + Target( + CtimeAgeLimit( + TargetStat { + kind: Ost, + param: Param( + "ctime_age_limit", + ), + target: Target( + "fs-OST0001", + ), + value: 10, + }, + ), + ), + Target( + EarlyLockCancel( + TargetStat { + kind: Mdt, + param: Param( + "early_lock_cancel", + ), + target: Target( + "fs-MDT0000", + ), + value: 0, + }, + ), + ), + Target( + EarlyLockCancel( + TargetStat { + kind: Ost, + param: Param( + "early_lock_cancel", + ), + target: Target( + "fs-OST0000", + ), + value: 0, + }, + ), + ), + Target( + EarlyLockCancel( + TargetStat { + kind: Ost, + param: Param( + "early_lock_cancel", + ), + target: Target( + "fs-OST0001", + ), + value: 0, + }, + ), + ), + Target( + LockCount( + TargetStat { + kind: Mdt, + param: Param( + "lock_count", + ), + target: Target( + "fs-MDT0000", + ), + value: 24, + }, + ), + ), + Target( + LockCount( + TargetStat { + kind: Ost, + param: Param( + "lock_count", + ), + target: Target( + "fs-OST0000", + ), + value: 0, + }, + ), + ), + Target( + LockCount( + TargetStat { + kind: Ost, + param: Param( + "lock_count", + ), + target: Target( + "fs-OST0001", + ), + value: 0, + }, + ), + ), + Target( + LockTimeouts( + TargetStat { + kind: Mdt, + param: Param( + "lock_timeouts", + ), + target: Target( + "fs-MDT0000", + ), + value: 0, + }, + ), + ), + Target( + LockTimeouts( + TargetStat { + kind: Ost, + param: Param( + "lock_timeouts", + ), + target: Target( + "fs-OST0000", + ), + value: 0, + }, + ), + ), + Target( + LockTimeouts( + TargetStat { + kind: Ost, + param: Param( + "lock_timeouts", + ), + target: Target( + "fs-OST0001", + ), + value: 0, + }, + ), + ), + Target( + LockUnusedCount( + TargetStat { + kind: Mdt, + param: Param( + "lock_unused_count", + ), + target: Target( + "fs-MDT0000", + ), + value: 0, + }, + ), + ), + Target( + LockUnusedCount( + TargetStat { + kind: Ost, + param: Param( + "lock_unused_count", + ), + target: Target( + "fs-OST0000", + ), + value: 0, + }, + ), + ), + Target( + LockUnusedCount( + TargetStat { + kind: Ost, + param: Param( + "lock_unused_count", + ), + target: Target( + "fs-OST0001", + ), + value: 0, + }, + ), + ), + Target( + LruMaxAge( + TargetStat { + kind: Mdt, + param: Param( + "lru_max_age", + ), + target: Target( + "fs-MDT0000", + ), + value: 3900000, + }, + ), + ), + Target( + LruMaxAge( + TargetStat { + kind: Ost, + param: Param( + "lru_max_age", + ), + target: Target( + "fs-OST0000", + ), + value: 3900000, + }, + ), + ), + Target( + LruMaxAge( + TargetStat { + kind: Ost, 
+ param: Param( + "lru_max_age", + ), + target: Target( + "fs-OST0001", + ), + value: 3900000, + }, + ), + ), + Target( + LruSize( + TargetStat { + kind: Mdt, + param: Param( + "lru_size", + ), + target: Target( + "fs-MDT0000", + ), + value: 800, + }, + ), + ), + Target( + LruSize( + TargetStat { + kind: Ost, + param: Param( + "lru_size", + ), + target: Target( + "fs-OST0000", + ), + value: 800, + }, + ), + ), + Target( + LruSize( + TargetStat { + kind: Ost, + param: Param( + "lru_size", + ), + target: Target( + "fs-OST0001", + ), + value: 800, + }, + ), + ), + Target( + MaxNolockBytes( + TargetStat { + kind: Mdt, + param: Param( + "max_nolock_bytes", + ), + target: Target( + "fs-MDT0000", + ), + value: 0, + }, + ), + ), + Target( + MaxNolockBytes( + TargetStat { + kind: Ost, + param: Param( + "max_nolock_bytes", + ), + target: Target( + "fs-OST0000", + ), + value: 0, + }, + ), + ), + Target( + MaxNolockBytes( + TargetStat { + kind: Ost, + param: Param( + "max_nolock_bytes", + ), + target: Target( + "fs-OST0001", + ), + value: 0, + }, + ), + ), + Target( + MaxParallelAst( + TargetStat { + kind: Mdt, + param: Param( + "max_parallel_ast", + ), + target: Target( + "fs-MDT0000", + ), + value: 1024, + }, + ), + ), + Target( + MaxParallelAst( + TargetStat { + kind: Ost, + param: Param( + "max_parallel_ast", + ), + target: Target( + "fs-OST0000", + ), + value: 1024, + }, + ), + ), + Target( + MaxParallelAst( + TargetStat { + kind: Ost, + param: Param( + "max_parallel_ast", + ), + target: Target( + "fs-OST0001", + ), + value: 1024, + }, + ), + ), + Target( + ResourceCount( + TargetStat { + kind: Mdt, + param: Param( + "resource_count", + ), + target: Target( + "fs-MDT0000", + ), + value: 6, + }, + ), + ), + Target( + ResourceCount( + TargetStat { + kind: Ost, + param: Param( + "resource_count", + ), + target: Target( + "fs-OST0000", + ), + value: 0, + }, + ), + ), + Target( + ResourceCount( + TargetStat { + kind: Ost, + param: Param( + "resource_count", + ), + target: Target( + "fs-OST0001", + ), + value: 0, + }, + ), + ), + LustreService( + LdlmCanceld( + [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 66, + min: Some( + 12, + ), + max: Some( + 75, + ), + sum: Some( + 3151, + ), + sumsquare: Some( + 162729, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 66, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + units: "reqs", + samples: 66, + min: Some( + 1, + ), + max: Some( + 2, + ), + sum: Some( + 85, + ), + sumsquare: Some( + 123, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 66, + min: Some( + 1, + ), + max: Some( + 15, + ), + sum: Some( + 709, + ), + sumsquare: Some( + 10219, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 145, + min: Some( + 63, + ), + max: Some( + 64, + ), + sum: Some( + 9208, + ), + sumsquare: Some( + 584776, + ), + }, + Stat { + name: "ldlm_cancel", + units: "usecs", + samples: 66, + min: Some( + 3, + ), + max: Some( + 57, + ), + sum: Some( + 779, + ), + sumsquare: Some( + 15955, + ), + }, + ], + ), + ), + LustreService( + LdlmCbd( + [ + Stat { + name: "req_waittime", + units: "usecs", + samples: 32, + min: Some( + 27, + ), + max: Some( + 160, + ), + sum: Some( + 2749, + ), + sumsquare: Some( + 267461, + ), + }, + Stat { + name: "req_qdepth", + units: "reqs", + samples: 32, + min: Some( + 0, + ), + max: Some( + 0, + ), + sum: Some( + 0, + ), + sumsquare: Some( + 0, + ), + }, + Stat { + name: "req_active", + 
units: "reqs", + samples: 32, + min: Some( + 1, + ), + max: Some( + 1, + ), + sum: Some( + 32, + ), + sumsquare: Some( + 32, + ), + }, + Stat { + name: "req_timeout", + units: "secs", + samples: 32, + min: Some( + 1, + ), + max: Some( + 15, + ), + sum: Some( + 344, + ), + sumsquare: Some( + 4934, + ), + }, + Stat { + name: "reqbuf_avail", + units: "bufs", + samples: 70, + min: Some( + 0, + ), + max: Some( + 1, + ), + sum: Some( + 66, + ), + sumsquare: Some( + 66, + ), + }, + Stat { + name: "ldlm_bl_callback", + units: "usecs", + samples: 32, + min: Some( + 3, + ), + max: Some( + 35, + ), + sum: Some( + 308, + ), + sumsquare: Some( + 5204, + ), + }, + ], + ), + ), + Target( + Llite( + LliteStat { + target: Target( + "fs-ffff97e895d31000", + ), + param: Param( + "stats", + ), + stats: [ + Stat { + name: "getattr", + units: "usecs", + samples: 2, + min: Some( + 425, + ), + max: Some( + 427, + ), + sum: Some( + 852, + ), + sumsquare: Some( + 362954, + ), + }, + ], + }, + ), + ), + Target( + Changelog( + TargetStat { + kind: Mdt, + param: Param( + "changelog_users", + ), + target: Target( + "fs-MDT0000", + ), + value: ChangelogStat { + current_index: 0, + users: [], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "0x0", + manager: "dt", + param: Param( + "usr", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Usr, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "ddn_hdd", + manager: "dt", + param: Param( + "usr", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Usr, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "ddn_ssd", + manager: "dt", + param: Param( + "usr", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Usr, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "0x0", + manager: "md", + param: Param( + "usr", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Usr, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "0x0", + manager: "dt", + param: Param( + "prj", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Prj, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "ddn_hdd", + manager: "dt", + param: Param( + "prj", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Prj, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "ddn_ssd", + manager: "dt", + param: Param( + "prj", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Prj, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + 
TargetQuotaStat { + pool: "0x0", + manager: "md", + param: Param( + "prj", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Prj, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "0x0", + manager: "dt", + param: Param( + "grp", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Grp, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "ddn_hdd", + manager: "dt", + param: Param( + "grp", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Grp, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "ddn_ssd", + manager: "dt", + param: Param( + "grp", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Grp, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + Target( + QuotaStats( + TargetQuotaStat { + pool: "0x0", + manager: "md", + param: Param( + "grp", + ), + target: Target( + "fs-QMT0000", + ), + value: QuotaStats { + kind: Grp, + stats: [ + QuotaStat { + id: 0, + limits: QuotaStatLimits { + hard: 0, + soft: 0, + granted: 0, + time: 604800, + }, + }, + ], + }, + }, + ), + ), + LNetStat( + SendCount( + LNetStat { + nid: "0@lo", + param: Param( + "send_count", + ), + value: 284847, + }, + ), + ), + LNetStat( + RecvCount( + LNetStat { + nid: "0@lo", + param: Param( + "recv_count", + ), + value: 284844, + }, + ), + ), + LNetStat( + DropCount( + LNetStat { + nid: "0@lo", + param: Param( + "drop_count", + ), + value: 3, + }, + ), + ), + LNetStat( + SendCount( + LNetStat { + nid: "10.73.20.11@tcp", + param: Param( + "send_count", + ), + value: 445690, + }, + ), + ), + LNetStat( + RecvCount( + LNetStat { + nid: "10.73.20.11@tcp", + param: Param( + "recv_count", + ), + value: 445689, + }, + ), + ), + LNetStat( + DropCount( + LNetStat { + nid: "10.73.20.11@tcp", + param: Param( + "drop_count", + ), + value: 3, + }, + ), + ), + LNetStat( + SendLength( + LNetStatGlobal { + param: Param( + "send_length", + ), + value: 205930288, + }, + ), + ), + LNetStat( + RecvLength( + LNetStatGlobal { + param: Param( + "recv_length", + ), + value: 192265344, + }, + ), + ), + LNetStat( + DropLength( + LNetStatGlobal { + param: Param( + "drop_length", + ), + value: 3120, + }, + ), + ), + ], +)