forked from ricardobranco777/hawk_test
-
Notifications
You must be signed in to change notification settings - Fork 0
/
hawk_test_ssh.py
84 lines (75 loc) · 3.97 KB
/
hawk_test_ssh.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
#!/usr/bin/python3
# Copyright (C) 2019 SUSE LLC
"""Define SSH related functions to test the HAWK GUI"""
from distutils.version import LooseVersion as Version
import paramiko
class HawkTestSSH:
    """SSH helper used to verify HAWK GUI actions against the cluster CLI.

    Opens a root SSH session to *hostname* and exposes ``verify_*`` checks
    that inspect ``crm`` command output and record pass/fail status in a
    caller-supplied results object (anything with ``set_test_status``).
    """

    def __init__(self, hostname, secret=None):
        """Connect to *hostname* as root using password *secret* (if any).

        NOTE(review): unknown host keys are auto-accepted — acceptable for
        throwaway test clusters, not for production use.
        """
        self.ssh = paramiko.SSHClient()
        self.ssh.load_system_host_keys()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(hostname=hostname.lower(), username="root", password=secret)

    def check_cluster_conf_ssh(self, command, mustmatch):
        """Run *command* over SSH and check its stdout against *mustmatch*.

        *mustmatch* semantics:
          * non-empty ``str`` -> it must appear as a substring of the output
          * empty ``str``     -> the output must be empty
          * ``list`` of str   -> every element must appear in the output

        Returns True on match, False on mismatch or on any stderr output.
        Raises ValueError when *mustmatch* is neither str nor list.
        """
        _, out, err = self.ssh.exec_command(command)
        out, err = map(lambda f: f.read().decode().rstrip('\n'), (out, err))
        print(f"INFO: ssh command [{command}] got output [{out}] and error [{err}]")
        if err:
            print(f"ERROR: got an error over SSH: [{err}]")
            return False
        if isinstance(mustmatch, str):
            # Empty string means "expect no output at all".
            return mustmatch in out if mustmatch else out == mustmatch
        if isinstance(mustmatch, list):
            return all(expected in out for expected in mustmatch)
        raise ValueError("check_cluster_conf_ssh: mustmatch must be str or list")

    @staticmethod
    def set_test_status(results, test, status):
        """Record *status* ('passed'/'failed') for *test* in *results*."""
        results.set_test_status(test, status)

    def _check_and_report(self, results, test, command, mustmatch, msg_ok, msg_fail):
        """Run one SSH check, print the outcome and record pass/fail.

        Shared tail of every ``verify_*`` method: returns True and records
        'passed' when the check matches, otherwise prints *msg_fail*,
        records 'failed' and returns False.
        """
        if self.check_cluster_conf_ssh(command, mustmatch):
            print(msg_ok)
            self.set_test_status(results, test, 'passed')
            return True
        print(msg_fail)
        self.set_test_status(results, test, 'failed')
        return False

    def verify_stonith_in_maintenance(self, results):
        """Check that stonith-sbd shows as unmanaged and in maintenance."""
        print("TEST: verify_stonith_in_maintenance")
        return self._check_and_report(
            results, 'verify_stonith_in_maintenance',
            "crm status | grep stonith-sbd", ["unmanaged", "maintenance"],
            "INFO: stonith-sbd is unmanaged",
            "ERROR: stonith-sbd is not unmanaged but should be")

    def verify_node_maintenance(self, results):
        """Check that a cluster node is in maintenance mode."""
        print("TEST: verify_node_maintenance: check cluster node is in maintenance mode")
        return self._check_and_report(
            results, 'verify_node_maintenance',
            "crm status | grep -i node", "maintenance",
            "INFO: cluster node set successfully in maintenance mode",
            "ERROR: cluster node failed to switch to maintenance mode")

    def verify_primitive(self, primitive, version, results):
        """Check that *primitive* exists with the expected ops and meta.

        *version* is the product version string; the expected ``op stop``
        line differs for versions below 15.
        """
        print(f"TEST: verify_primitive: check primitive [{primitive}] exists")
        matches = [f"{primitive} anything", "binfile=file", "op start timeout=35s",
                   "op monitor timeout=9s interval=13s", "meta target-role=Started"]
        # `crm configure show` output differs before/after version 15.
        if Version(version) < Version('15'):
            matches.append("op stop timeout=15s")
        else:
            matches.append("op stop timeout=15s on-fail=stop")
        return self._check_and_report(
            results, 'verify_primitive', "crm configure show", matches,
            f"INFO: primitive [{primitive}] correctly defined in the cluster configuration",
            f"ERROR: primitive [{primitive}] missing from cluster configuration")

    def verify_primitive_removed(self, primitive, results):
        """Check that no ocf::heartbeat:anything resource remains."""
        print(f"TEST: verify_primitive_removed: check primitive [{primitive}] is removed")
        return self._check_and_report(
            results, 'verify_primitive_removed',
            "crm resource status | grep ocf::heartbeat:anything", '',
            "INFO: primitive successfully removed",
            f"ERROR: primitive [{primitive}] still present in the cluster while checking with SSH")