文書番号: 000022415
# printf "\n";echo "DATE"; echo "===============";date; printf "\n"; echo "XDOCTOR VERSION";echo "===============";sudo -i xdoctor --version;printf "\n";echo "ECS VERSION";echo "============"; sudo -i ssh provo xdoctor --ecsversion;printf "\n";echo "ECS RACK INFO\TOPOLOGY";echo "======================";sudo -i getrackinfo| grep -v Status | grep -v Epoxy| grep -v Master|grep -v Initializing|grep -v Off|grep -v "Warning/Error"|grep -v "Hostname set to default hostname"|grep -v "private interface" | grep -v "Port ID";sudo -i xdoctor --topology; echo "HARDWARE MODEL";echo "==============";sudo -i ssh provo grep -i -- --sku /opt/emc/caspian/installer/log/installer.log
admin@ecs-n1:~> printf "\n";echo "DATE"; echo "===============";date; printf "\n"; echo "XDOCTOR VERSION";echo "===============";sudo -i xdoctor --version;printf "\n";echo "ECS VERSION";echo "============"; sudo -i xdoctor --ecsversion;printf "\n";echo "ECS RACK INFO\TOPOLOGY";echo "======================";sudo -i getrackinfo| grep -v Status | grep -v Epoxy| grep -v Master|grep -v Initializing|grep -v Off|grep -v "Warning/Error"|grep -v "Hostname set to default hostname"|grep -v "private interface" | grep -v "Port ID";sudo -i xdoctor --topology DATE =============== Wed Jul 19 07:46:49 UTC 2023 XDOCTOR VERSION =============== 4.8-91.1 ECS VERSION ============ Telegraf Version: 3.7.0.5-1423.2a4e0a99 Service Version: 8.2.0.0-22383.aa1ec7e8f8 Fabric-Zookeeper Version: 3.8.0.0-120.4661fac Utilities Version: 3.7.0.4-1166.b78f3fe Influxdb Version: 3.7.0.5-1423.2a4e0a99 Grafana Version: 3.7.0.5-1423.2a4e0a99 Syslog Version: 3.7.0.5-4312.96f4e16 Fabric Version: 3.7.0.5-4312.96f4e16 Fabric-Registry Version: 2.3.1.0-83.763a8ea Os Version: 3.7.0.5-1939.5d82571.103 Fluxd Version: 3.7.0.5-1423.2a4e0a99 Throttler Version: 3.7.0.5-1423.2a4e0a99 Object Image Version: 3.7.0.5-137811.7227d55be7f Object SW Version: 3.7.0.5-137811.7227d55be7f.1 ------------------------- ECS Version: 3.7.0.5 GP 1 ------------------------- HW Gen : 2 HW Model: U-Series HW Code : S2600KP ------------------------- xDoctor Version: 4.8-91.1 ------------------------- ECS RACK INFO\TOPOLOGY ====================== Node private Node Public BMC =============== ====== ====== ================= ================= ================= ================= ========= 192.168.219.1 1 MA a4:bf:01:43:ba:74 10.xx.8.xxx a4:bf:01:4b:08:2c 10.xx.8.xxx provo-red 192.168.219.2 2 SA a4:bf:01:43:bf:c8 10.xx.8.xxx a4:bf:01:4c:2d:b4 10.xx.8.xxx sandy-red 192.168.219.3 3 SA a4:bf:01:43:b9:e0 10.xx.8.xxx a4:bf:01:4d:7f:56 10.xx.8.xxx orem-red 192.168.219.4 4 SA a4:bf:01:43:9e:56 10.xx.8.xxx a4:bf:01:4a:fd:aa 10.xx.8.xxx ogden-red 
192.168.219.5 5 SA a4:bf:01:41:06:5c 10.xx.8.xxx a4:bf:01:3d:be:c1 10.xx.8.xxx layton-red ECS | |- RACK - Name:[red] Primary:[169.254.1.1] PSNT:[CKM00193502048] SWID:[ELMFDJKK9CC9SJ] | |- Node 1, [ provo], NAN.IP:[ 169.254.1.1], Public.IP:[ 10.xx.8.xxx], DNS:[10.xx.28.xxx, 10.xxx.23.xxx], NTP:[10.xxx.53.xxx, 10.xxx.254.xx] |- Node 2, [ sandy], NAN.IP:[ 169.254.1.2], Public.IP:[ 10.xx.8.xxx], DNS:[10.xx.28.xxx, 10.xxx.23.xxx], NTP:[10.xxx.53.xxx, 10.xxx.254.xx] |- Node 3, [ orem], NAN.IP:[ 169.254.1.3], Public.IP:[ 10.xx.8.xxx], DNS:[10.xx.28.xxx, 10.xxx.23.xxx], NTP:[10.xxx.53.108, 10.xxx.254.xx] |- Node 4, [ ogden], NAN.IP:[ 169.254.1.4], Public.IP:[ 10.xx.8.xxx], DNS:[10.xx.28.xxx, 10.xxx.23.xxx], NTP:[10.xxx.53.xxx, 10.xxx.254.xx] |- Node 5, [ layton], NAN.IP:[ 169.254.1.5], Public.IP:[ 10.xx.8.xxx], DNS:[10.xx.28.xxx, 10.xxx.23.xxx], NTP:[10.xxx.53.xxx, 10.xxx.254.xx] Note: 'xdoctor --top --vdc' displays high-level VDC -and Rack information Note: 'xdoctor --top --details' displays detailed VDC -and Rack informatiom
# sudo xdoctor --version
Example:
admin@node1:~> sudo xdoctor --version 4.8-83.0
# sudo xdoctor --upgrade --local=/home/admin/xDoctor4ECS-4.8-92.0.noarch.rpm
admin@ecs-n1:~> sudo xdoctor --upgrade --local=/home/admin/xDoctor4ECS-4.8-84.0.noarch.rpm 2022-07-04 07:41:49,209: xDoctor_4.8-83.0 - INFO : xDoctor Upgrader Instance (1:SFTP_ONLY) 2022-07-04 07:41:49,210: xDoctor_4.8-83.0 - INFO : Local Upgrade (/home/admin/xDoctor4ECS-4.8-84.0.noarch.rpm) 2022-07-04 07:41:49,226: xDoctor_4.8-83.0 - INFO : Current Installed xDoctor version is 4.8-83.0 2022-07-04 07:41:49,242: xDoctor_4.8-83.0 - INFO : Requested package version is 4.8-92.0 2022-07-04 07:41:49,242: xDoctor_4.8-83.0 - INFO : Updating xDoctor RPM Package (RPM) 2022-07-04 07:41:49,293: xDoctor_4.8-83.0 - INFO : - Distribute package 2022-07-04 07:41:50,759: xDoctor_4.8-83.0 - INFO : - Install new rpm package 2022-07-04 07:42:04,401: xDoctor_4.8-83.0 - INFO : xDoctor successfully updated to version 4.8-92.0
# svc_exec -m "ip address show private.4 |grep -w inet"
Example:
admin@ecsnode1~> svc_exec -m "ip address show private.4 |grep -w inet" svc_exec v1.0.2 (svc_tools v2.1.0) Started 2021-12-20 14:03:33 Output from node: r1n1 retval: 0 inet 169.254.1.1/16 brd 169.254.255.255 scope global private.4 Output from node: r2n1 retval: 0 inet 169.254.2.1/16 brd 169.254.255.255 scope global private.4 Output from node: r3n1 retval: 0 inet 169.254.3.1/16 brd 169.254.255.255 scope global private.4 Output from node: r4n1 retval: 0 inet 169.254.4.1/16 brd 169.254.255.255 scope global private.4
admin@ecs-n1: scp xDoctor4ECS-4.8-92.0.noarch.rpm 169.254.2.1:/home/admin/ xDoctor4ECS-4.8-92.0.noarch.rpm 100% 32MB 31.9MB/s 00:00 admin@ecsnode1~> scp xDoctor4ECS-4.8-92.0.noarch.rpm 169.254.3.1:/home/admin/ xDoctor4ECS-4.8-92.0.noarch.rpm 100% 32MB 31.9MB/s 00:00 admin@ecsnode1~> scp xDoctor4ECS-4.8-92.0.noarch.rpm 169.254.4.1:/home/admin/ xDoctor4ECS-4.8-92.0.noarch.rpm 100% 32MB 31.9MB/s 00:00 admin@ecsnode1~>
コマンド:
# sudo xdoctor --upgrade --local=/home/admin/xDoctor4ECS-4.8-x.0.noarch.rpm
# sudo xdoctor
Example:
#sudo xdoctor 2023-07-19 08:59:37,968: xDoctor_4.8-92.0 - INFO : Initializing xDoctor v4.8-92.0 ... 2023-07-19 08:59:38,333: xDoctor_4.8-92.0 - INFO : Removing orphaned session - session_1689710794.260 2023-07-19 08:59:38,335: xDoctor_4.8-92.0 - INFO : Starting xDoctor session_1689757177.745 ... (SYSTEM) 2023-07-19 08:59:38,335: xDoctor_4.8-92.0 - INFO : Primary Node Control Check ... 2023-07-19 08:59:38,486: xDoctor_4.8-92.0 - INFO : xDoctor Composition - Full Diagnostic Suite for ECS 2023-07-19 08:59:38,503: xDoctor_4.8-92.0 - INFO : Session limited to 0:40:00 2023-07-19 08:59:39,501: xDoctor_4.8-92.0 - INFO : -------------------- 2023-07-19 08:59:39,502: xDoctor_4.8-92.0 - INFO : ECS Version: 3.7.0.4 2023-07-19 08:59:39,502: xDoctor_4.8-92.0 - INFO : -------------------- 2023-07-19 08:59:39,503: xDoctor_4.8-92.0 - INFO : xDoctor Pre Features 2023-07-19 08:59:39,503: xDoctor_4.8-92.0 - INFO : Cron Activation 2023-07-19 08:59:43,932: xDoctor_4.8-92.0 - WARNING : Connect Home not configured and no enabled Email Rules found, activation cancelled ... 2023-07-19 08:59:43,932: xDoctor_4.8-92.0 - INFO : -------------------- 2023-07-19 08:59:44,033: xDoctor_4.8-92.0 - INFO : Validating System Version ... 2023-07-19 08:59:45,320: xDoctor_4.8-92.0 - INFO : |- xDoctor version is sealed to 4.8-92.0 2023-07-19 08:59:47,420: xDoctor_4.8-92.0 - INFO : |- System version is sealed to 3.7.0.3-1929.0ce6f9c.101 2023-07-19 08:59:47,421: xDoctor_4.8-92.0 - INFO : Distributing xDoctor session files ... 2023-07-19 08:59:47,762: xDoctor_4.8-92.0 - INFO : Collecting data on designated nodes, please stand by ... (update every 5 to 30 seconds) 2023-07-19 08:59:47,762: xDoctor_4.8-92.0 - INFO : Collection Limit: 0:32:00, Pacemaker Limit: 1800 sec 2023-07-19 08:59:52,769: xDoctor_4.8-92.0 - INFO : Collecting data ... at 0:00:05 2023-07-19 09:00:02,777: xDoctor_4.8-92.0 - INFO : Collecting data ... at 0:00:15 2023-07-19 09:00:17,789: xDoctor_4.8-92.0 - INFO : Collecting data ... 
at 0:00:30 2023-07-19 09:00:37,805: xDoctor_4.8-92.0 - INFO : Collecting data ... at 0:00:50 2023-07-19 09:01:02,830: xDoctor_4.8-92.0 - INFO : Collecting data ... at 0:01:15 2023-07-19 09:01:32,861: xDoctor_4.8-92.0 - INFO : Collecting data ... at 0:01:45 2023-07-19 09:02:02,889: xDoctor_4.8-92.0 - INFO : Collecting data ... at 0:02:15 2023-07-19 09:02:15,086: xDoctor_4.8-92.0 - INFO : Waiting for local data collectors ... 2023-07-19 09:02:32,913: xDoctor_4.8-92.0 - INFO : Collecting data ... at 0:02:45 . . 2023-07-19 09:06:33,083: xDoctor_4.8-92.0 - INFO : Collecting data ... at 0:06:46 2023-07-19 09:06:45,296: xDoctor_4.8-92.0 - INFO : All data collected in 0:06:57 2023-07-19 09:06:45,296: xDoctor_4.8-92.0 - INFO : ------------------------------ 2023-07-19 09:06:45,297: xDoctor_4.8-92.0 - INFO : [169.254.1.8] collected data in 0:02:19 2023-07-19 09:06:45,297: xDoctor_4.8-92.0 - INFO : [169.254.1.7] collected data in 0:02:18 2023-07-19 09:06:45,298: xDoctor_4.8-92.0 - INFO : [169.254.1.6] collected data in 0:02:24 2023-07-19 09:06:45,298: xDoctor_4.8-92.0 - INFO : [169.254.1.5] collected data in 0:02:26 2023-07-19 09:06:45,298: xDoctor_4.8-92.0 - INFO : [169.254.1.4] collected data in 0:02:23 2023-07-19 09:06:45,298: xDoctor_4.8-92.0 - INFO : [169.254.1.3] collected data in 0:02:26 2023-07-19 09:06:45,298: xDoctor_4.8-92.0 - INFO : [169.254.1.2] collected data in 0:02:21 2023-07-19 09:06:45,298: xDoctor_4.8-92.0 - INFO : [169.254.1.1] collected data in 0:06:56 2023-07-19 09:06:45,298: xDoctor_4.8-92.0 - INFO : ------------------------------ 2023-07-19 09:06:45,488: xDoctor_4.8-92.0 - INFO : Analyzing collected data ... 
2023-07-19 09:06:45,489: xDoctor_4.8-92.0 - INFO : Analysis Limit: 0:08:00 2023-07-19 09:06:45,489: xDoctor_4.8-92.0 - INFO : ANALYZER [ac_sel] filtered by HW - System HW=ECSv3_DSS7500, filter=['!ECSv3_R740XD,!ECSv3_DSS7500,!ECSv3_R740XD2,!ECSv3_R740XD_NVMe,!ECSv3_XE7100,!ECSv3_XE7440,!ECSv3_XE7420'] 2023-07-19 09:06:45,489: xDoctor_4.8-92.0 - INFO : ANALYZER [ac_hw_failures] filtered by HW - System HW=ECSv3_DSS7500, filter=['!ECSv3_R740XD,!ECSv3_DSS7500,!ECSv3_R740XD2,!ECSv3_R740XD_NVMe,!ECSv3_XE7100,!ECSv3_XE7440,!ECSv3_XE7420'] 2023-07-19 09:06:45,490: xDoctor_4.8-92.0 - INFO : ANALYZER [ac_sas_check] filtered by HW - System HW=ECSv3_DSS7500, filter=['S2600KP'] 2023-07-19 09:06:45,490: xDoctor_4.8-92.0 - INFO : ANALYZER [ac_switch] filtered by HW - System HW=ECSv3_DSS7500, filter=['!ECSv3_R740XD,!ECSv3_DSS7500,!ECSv3_R740XD2,!ECSv3_R740XD_NVMe,!ECSv3_XE7100,!ECSv3_XE7440,!ECSv3_XE7420'] 2023-07-19 09:06:45,490: xDoctor_4.8-92.0 - INFO : ANALYZER [ac_docker_memory_expansion] filtered by SW - System SW=3.7.0.4, filter=['=3.1.0.0, =3.5.0.0, =3.3.0.0, =3.5.0.0, =3.8.0.0, - /usr/local/xdoctor/archive/full_diagnosis/2023-07-19_085937 2023-07-19 09:07:10,722: xDoctor_4.8-92.0 - INFO : Session Archived as tar - /usr/local/xdoctor/archive/full_diagnosis/xDoctor-CKM00190800120-2023-07-19_085937.tgz 2023-07-19 09:07:10,722: xDoctor_4.8-92.0 - INFO : -------------------------- 2023-07-19 09:07:10,722: xDoctor_4.8-92.0 - INFO : Session Report - xdoctor --report --archive=2023-07-19_085937 2023-07-19 09:07:10,722: xDoctor_4.8-92.0 - INFO : --------------- 2023-07-19 09:07:10,723: xDoctor_4.8-92.0 - INFO : Session Cleaner 2023-07-19 09:07:10,723: xDoctor_4.8-92.0 - INFO : --------------- 2023-07-19 09:07:10,723: xDoctor_4.8-92.0 - INFO : Removing folder (count limit) - /usr/local/xdoctor/archive/full_diagnosis/2023-07-18_092114 2023-07-19 09:07:10,728: xDoctor_4.8-92.0 - INFO : Removing archive (count limit) - 
/usr/local/xdoctor/archive/full_diagnosis/xDoctor-CKM00190800120-2023-07-18_092114.tgz 2023-07-19 09:07:10,731: xDoctor_4.8-92.0 - INFO : Cleaned 2 archived session(s) 2023-07-19 09:07:10,732: xDoctor_4.8-92.0 - INFO : ------------------------------ 2023-07-19 09:07:10,732: xDoctor_4.8-92.0 - INFO : xDoctor session_1689757177.745 finished in 0:07:33 2023-07-19 09:07:10,757: xDoctor_4.8-92.0 - INFO : Successful Job:1689757177 Exit Code:191
sudo xdoctor --report --archive=2023-07-19_085937 -CEW xDoctor 4.8-92.0 CKM00190800120 - ECS 3.7.0.4 Displaying xDoctor Report (2023-07-19_085937) Filter:['CRITICAL', 'ERROR', 'WARNING'] ... ---------------------------------------------------- WARNING - The following nodes were recently rebooted ---------------------------------------------------- Node = Multiple_Nodes Extra = Threshold=[5 days, 0:00:00], Node:Value=[169.254.1.7:22:46:58] Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 ------------------------------ ERROR - Disk Marked as Suspect ------------------------------ Node = Nodes Extra = {"Nodes": {"169.254.1.7": {"8HKNU6EH": {"attributes": [{"id": "198", "raw_value": 112, "name": "Offline_Uncorrectable", "when_failed": "-"}], "dev": "/dev/sdx"}}}} RAP = RAP016 Solution = KB 470792 Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 ---------------------------------------------------- ERROR - (Cached) Fabric indicates a non-healthy disk ---------------------------------------------------- Extra = 169.254.1.7 RAP = RAP014 Solution = KB 486393 Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 ----------------------------------------------------------------------- ERROR - (Cached) Detected port state in bonded interface is not correct ----------------------------------------------------------------------- Node = 169.254.1.1 Extra = {'169.254.1.6': {'slave-1': {'1': '1', '2': '69'}}} RAP = RAP092 Solution = KB 534377 Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 ------------------------------------------- ERROR - (Cached) Critical System CMOS Event ------------------------------------------- Node = Nodes Extra = {"Nodes": {"169.254.1.7": ["Battery CMOS Battery - Failed (01/27/2023 01:22:13)"]}} RAP = RAP164 Solution = KB 215722 Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 ------------------------------- CRITICAL - (Cached) Fan Failure ------------------------------- Node = 169.254.1.4 
Extra = {'node': '169.254.1.4', 'item': 'Fan3', 'status': 'CRIT', 'fan': 'System Board', 'info': '0 RPM, below critical threshold'} RAP = RAP015 Solution = KB 470284 Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 ------------------------------------------------------------ WARNING - One or more network interfaces are down or missing ------------------------------------------------------------ Node = 169.254.1.1 Extra = {"169.254.1.1": {"169.254.1.6": [{"slave-1": "down"}], "169.254.1.5": [{"slave-1": "down"}], "169.254.1.4": [{"slave-1": "down"}], "169.254.1.3": [{"slave-1": "down"}], "169.254.1.2": [{"slave-1": "down"}], "169.254.1.1": [{"slave-1": "down"}]}} Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 -------------------------------------- WARNING - xDoctor detected a PSU issue -------------------------------------- Node = Nodes Extra = {"Nodes": {"169.254.1.7": {"1": "AC-Lost"}}} Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 -------------------------------------------------- WARNING - Detected VLT port channel down on switch -------------------------------------------------- Node = 169.254.1.1 Extra = {"169.254.1.1": {"hare": {"port_channels": {"6": {"units": ["1"]}}}, "rabbit": {"port_channels": {"6": {"units": ["1"]}}}}} Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 -------------------------------------------------------------------------------------------- ERROR - Working switch configuration (in memory) and saved switch configuration do not match -------------------------------------------------------------------------------------------- Node = 169.254.1.1 Extra = {'switches': ['hare', 'hound', 'fox']} RAP = RAP065 Solution = KB 508485 Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 -------------------------------------------- WARNING - Unable to resolve hostname via DNS -------------------------------------------- Node = Nodes Extra = {"Nodes": ["169.254.1.1", "169.254.1.2", 
"169.254.1.3", "169.254.1.4", "169.254.1.5", "169.254.1.6", "169.254.1.7", "169.254.1.8"]} Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0 ------------------------------------------------------------ WARNING - Total swap memory inconsistent across the ECS rack ------------------------------------------------------------ Node = Nodes Extra = {"Nodes": {"8191": ["169.254.1.8", "169.254.1.6"], "16383": ["169.254.1.7", "169.254.1.5", "169.254.1.4", "169.254.1.3", "169.254.1.2", "169.254.1.1"]}} Timestamp = 2023-07-19_085937 PSNT = CKM00190800120 @ 4.8-92.0
# service-console --version
# service-console run Node_Maintenance_List
# service-console run Health_Check
# /service-console run Node_Maintenance_List Service Console is running on node 169.254.1.1 (suite 20230621_112130_Node_Maintenance_List) Service console version: 8.2.0.0-22383.aa1ec7e8f8 Debug log: /opt/emc/caspian/service-console/log/20230621_112128_run_Node_Maintenance_List/dbg_robot.log ================================================================================ Node Maintenance List ---------------------------------------------------------------------------------------- IP FQDN MODE Recovery POWER ---------------------------------------------------------------------------------------- 169.254.1.1 ecs-n1.nas2008.com ACTIVE Enabled ON 169.254.1.2 ecs-n2.nas2008.com ACTIVE Enabled ON 169.254.1.3 ecs-n3.nas2008.com ACTIVE Enabled ON 169.254.1.4 ecs-n4.nas2008.com ACTIVE Enabled ON 169.254.1.5 ecs-n5.nas2008.com ACTIVE Enabled ON ---------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------- ================================================================================ Status: PASS Time Elapsed: 24 sec Debug log: /opt/emc/caspian/service-console/log/20230621_112128_run_Node_Maintenance_List/dbg_robot.log HTML log: /opt/emc/caspian/service-console/log/20230621_112128_run_Node_Maintenance_List/log.html ================================================================================上記の出力をリモート サポートに送信して、ノードがアクティブでないか有効になっていないかを分析する必要があります。
/opt/emc/bin/service-console run Health_Check Service Console is running on node 169.254.1.1 (suite 20230703_095838_Health_Check) Service console version: 8.2.0.0-22383.aa1ec7e8f8 Debug log: /opt/emc/caspian/service-console/log/20230703_095836_run_Health_Check/dbg_robot.log ================================================================================ Health Check 20230703 09:58:49.612: Execute Health Checks 20230703 09:58:49.614: | Get VNEST details VNEST current members : ['26170825-2670-40b2-aa27-71e60e6ce88a', '32d1f76c-4dfa-4447-97d1-4ee0edd02a41', '383b71f6-aecd-4d69-8407-e4eeb62cb8c2', 'bd856c33-2f98-463b-93c6-b86634172f6f', 'eebf8388-7da9-4bcd-b755-c6c5f076eace'] VNEST current members count : 5 VNEST reconfiguration status: False VNEST size: 130133210 20230703 09:58:50.695: | | PASS (1 sec) 20230703 09:58:50.697: | Validate that all nodes are available - Fabric 20230703 09:59:02.946: | | PASS (12 sec) 20230703 09:59:02.948: | Confirm that docker health is GOOD and docker exec works on all nodes 20230703 09:59:17.228: | | PASS (14 sec) 20230703 09:59:17.230: | Validate docker containers are running where they should be 20230703 09:59:23.320: | | PASS (6 sec) 20230703 09:59:23.322: | Validate that provisioned drives are GOOD [WARN] On node 169.254.1.4, Disk /dev/sdl is FAILED, skipping its validation 20230703 09:59:40.075: | | PASS (16 sec) 20230703 09:59:40.077: | Validate that expected number of drives is formatted for Object 20230703 09:59:40.088: | | PASS 20230703 09:59:40.089: | Validate that the correct number of disks are mounted inside the object container 20230703 09:59:52.763: | | PASS (12 sec) 20230703 09:59:52.764: | Validate that ports for fabric services are open 20230703 09:59:56.031: | | PASS (3 sec) 20230703 09:59:56.032: | Validate API availability of fabric services 20230703 10:00:04.263: | | PASS (8 sec) 20230703 10:00:04.265: | Validate that there is cluster master 20230703 10:00:08.275: | | PASS (4 sec) . . . 
20230703 10:51:55.634: | Check FE switches uplink status 20230703 10:51:56.958: | | PASS (1 sec) 20230703 10:51:56.959: | Check FE switches nodes links 20230703 10:51:57.712: | | PASS 20230703 10:51:57.713: | Check for stuck disk subsystem processes 20230703 10:51:58.732: | | PASS (1 sec) 20230703 10:51:58.734: | Check for VIPs No network Separation with VIPs detected. [WARN] Important note! This pre-upgrade health check needs to be run on all sites of the federation as a precursor to upgrade and if any site reports it has VIP configuration, s hould not proceed with upgrade. 20230703 10:51:58.736: | | PASS 20230703 10:51:58.737: | Check NIC Skip: there are no nodes with Gen3 hardware. 20230703 10:51:58.743: | | PASS 20230703 10:51:58.744: | Check NIC FW versions for consistency 20230703 10:52:01.107: | | PASS (2 sec) 20230703 10:52:01.109: | Check EXF900 appliance OS version Skip - not EXF900 20230703 10:52:01.111: | | PASS 20230703 10:52:01.112: | Check EXF900 Aggregation switches model Skip - not EXF900 20230703 10:52:02.448: | | PASS (1 sec) 20230703 10:52:02.449: | Check EXF900 BE switches model Skip - not EXF900 20230703 10:52:03.776: | | PASS (1 sec) 20230703 10:52:03.778: | Check EXF900 BE private network Skip - not EXF900 20230703 10:52:03.780: | | PASS 20230703 10:52:03.780: | Check lossless status Skip - not EXF900 20230703 10:52:04.522: | | PASS 20230703 10:52:04.524: | Check disks CRC error count 20230703 10:52:21.435: | | PASS (16 sec) 20230703 10:52:21.437: | Check BMC IP subnet 20230703 10:52:24.099: | | PASS (2 sec) 20230703 10:52:24.101: | Check data2 interface 20230703 10:52:24.839: | | PASS 20230703 10:52:24.841: | Validate system load on nodes 20230703 10:52:25.636: | | PASS 20230703 10:52:25.637: | PASS (53 min 36 sec) ================================================================================ Status: PASS Time Elapsed: 53 min 53 sec Debug log: /opt/emc/caspian/service-console/log/20230703_095836_run_Health_Check/dbg_robot.log HTML log: 
/opt/emc/caspian/service-console/log/20230703_095836_run_Health_Check/log.html ================================================================================
# svc_dt check
未読のDTが表示されていないことを確認します。DTが未読の場合、ローカル フィールド リソース/オンサイトECS SMEは、このKBの上部にあるトラブルシューティング ガイド「ECS: DTの未読の問題をトラブルシューティングし、リモート サポートに収集された詳細を提供する方法」を参照する必要があります。注: このKBはお客様には表示できません。
admin@ecsnode1:~> svc_dt check -ac svc_dt v1.0.20 (svc_tools v1.6.6) Started 2019-11-20 14:24:01 Date Total DT Unknown # Unready # Check type Time since check 2019-11-20 14:23:11 2432 0 0 AutoCheck 0m 50s 2019-11-20 14:21:56 2432 0 0 AutoCheck 2m 5s 2019-11-20 14:20:42 2432 0 0 AutoCheck 3m 19s 2019-11-20 14:19:29 2432 0 0 AutoCheck 4m 32s 2019-11-20 14:18:15 2432 0 0 AutoCheck 5m 46s 2019-11-20 14:16:59 2432 0 0 AutoCheck 7m 2s 2019-11-20 14:15:43 2432 0 0 AutoCheck 8m 18s BR1 BR2 CT1 CT2 ET0 LS0 MA0 MR0 OB0 PR1 PR2 RR0 RT0 SS1 SS2 TT0 Date Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr|Unk Unr| 2019-11-20 14:23:11 0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 | 2019-11-20 14:21:56 0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 | 2019-11-20 14:20:42 0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 | 2019-11-20 14:19:29 0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 | 2019-11-20 14:18:15 0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 | 2019-11-20 14:16:59 0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 | 2019-11-20 14:15:43 0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |0 0 |
# svc_dt dist
DTがノードに均等に分散されていることを確認します。すべてのノードが同じ番号を持つ必要はありませんが、互いに近い値にする必要があります。
admin@ecsnode1:~> svc_dt dist svc_dt v1.0.20 (svc_tools v1.6.6) Started 2019-11-20 14:23:27 IP BR1 BR2 CT1 CT2 ET0 LS0 MA0 MR0 OB0 PR1 PR2 RR0 RT0 SS1 SS2 TT0 TOT 10.60.18.159 16 16 16 16 16 48 16 16 48 16 16 16 16 16 16 xx 304 10.60.18.160 16 16 16 16 16 48 16 16 48 16 16 16 16 16 16 xx 304 10.60.18.161 16 16 16 16 16 48 16 16 48 16 16 16 16 16 16 xx 304 10.60.18.162 16 16 16 16 16 48 16 16 48 16 16 16 16 16 16 xx 304 10.60.18.163 16 16 16 16 16 48 16 16 48 16 16 16 16 16 16 xx 304 10.60.18.164 16 16 16 16 16 48 16 16 48 16 16 16 16 16 16 xx 304 10.60.18.165 16 16 16 16 16 48 16 16 48 16 16 16 16 16 16 xx 304 10.60.18.166 16 16 16 16 16 48 16 16 48 16 16 16 16 16 16 xx 304 Total (8) 128 128 128 128 128 384 128 128 384 128 128 128 128 128 128 NOTE: Results are cached and may be out of date by several minutes. Use '-f' option to force a refresh.
さらにサポートが必要な場合は、リモート サポートに連絡し、可能な場合はスクリーンショットを取得してください。
# svc_vdc capacity
Elastic Cloud Storage
Elastic Cloud Storage
19 10月 2023
5
How To