From f77e16570f64a789885dd2e66cfb13c97d20776f Mon Sep 17 00:00:00 2001
From: morenod
Date: Wed, 27 Sep 2023 15:32:01 +0200
Subject: [PATCH] AWS role patch required by OCM-3187

---
 libs/platforms/rosa/hypershift/hypershift.py | 22 ++++++++++++++++++++
 libs/platforms/rosa/rosa.py                  |  2 +-
 libs/utils.py                                |  7 ++++---
 rosa-burner.ini                              |  2 +-
 4 files changed, 28 insertions(+), 5 deletions(-)

diff --git a/libs/platforms/rosa/hypershift/hypershift.py b/libs/platforms/rosa/hypershift/hypershift.py
index 0356d80..e7f20f2 100644
--- a/libs/platforms/rosa/hypershift/hypershift.py
+++ b/libs/platforms/rosa/hypershift/hypershift.py
@@ -410,6 +410,16 @@ def delete_cluster(self, platform, cluster_name):
             cluster_info["timestamp"] = datetime.datetime.utcnow().isoformat()
             self.es.index_metadata(cluster_info)
 
+    def _get_aws_role_name(self, cluster_name):
+        # Required by OCM-3187 (https://issues.redhat.com/browse/OCM-3187), remove when fixed
+        (role_policy_code, role_policy_out, role_policy_err) = self.utils.subprocess_exec("rosa describe cluster -c " + cluster_name + " -o json")
+        if role_policy_code == 0:
+            for role in json.loads(role_policy_out.decode("utf-8")).get("aws", {}).get("sts", {}).get("operator_iam_roles", []):
+                if role.get("name", "") == "kube-controller-manager":
+                    return role.get("role_arn").split("/")[-1]
+        self.logging.error(f"No Role named kube-controller-manager found on Cluster {cluster_name}")
+        return None
+
     def create_cluster(self, platform, cluster_name):
         super().create_cluster(platform, cluster_name)
         cluster_info = platform.environment["clusters"][cluster_name]
@@ -462,6 +472,18 @@ def create_cluster(self, platform, cluster_name):
                     return 1
             else:
                 break
+
+        # Required by OCM-3187 (https://issues.redhat.com/browse/OCM-3187), remove when fixed
+        self.logging.info(f"Getting kube-controller-manager role for cluster {cluster_name}")
+        aws_role_name = self._get_aws_role_name(cluster_name)
+        self.logging.info(f"Found kube-controller-manager role {aws_role_name} for cluster {cluster_name}")
+        (aws_policy_code, aws_policy_out, aws_policy_err) = self.utils.subprocess_exec("aws iam attach-role-policy --role-name " + aws_role_name + " --policy-arn arn:aws:iam::415909267177:policy/hack-414-custom-policy")
+        if aws_policy_code != 0:
+            cluster_info['status'] = "aws policy failed"
+            return 1
+        else:
+            self.logging.info(f"Patched kube-controller-manager role {aws_role_name} for cluster {cluster_name} with policy arn:aws:iam::415909267177:policy/hack-414-custom-policy")
+
         cluster_info['status'] = "Installing"
         self.logging.info(f"Cluster {cluster_name} installation started on the {trying} try")
         cluster_info["metadata"] = self.get_metadata(cluster_name)
diff --git a/libs/platforms/rosa/rosa.py b/libs/platforms/rosa/rosa.py
index b095269..fe8d4fb 100644
--- a/libs/platforms/rosa/rosa.py
+++ b/libs/platforms/rosa/rosa.py
@@ -22,7 +22,7 @@ def __init__(self, arguments, logging, utils, es):
         aws.set_aws_envvars(arguments['aws_profile'], arguments['aws_region'])
         self.environment['aws'] = aws.set_aws_environment(arguments['aws_profile'], arguments['aws_region'])
         self.environment["commands"].append("rosa")
-        # self.environment["commands"].append("aws")
+        self.environment["commands"].append("aws")
 
         self.environment["rosa_env"] = arguments["rosa_env"]
 
diff --git a/libs/utils.py b/libs/utils.py
index 136ce18..26ae112 100644
--- a/libs/utils.py
+++ b/libs/utils.py
@@ -50,10 +50,11 @@ def generate_cluster_name_seed(self, seed):
         return cluster_name_seed
 
     def verify_cmnd(self, command):
-        (cmd_code, cmd_out, cmd_err) = self.subprocess_exec(command + " -h")
+        help_command = command + " help" if command != "terraform" else command + " -h"
+        (cmd_code, cmd_out, cmd_err) = self.subprocess_exec(help_command)
         if cmd_code != 0:
-            self.logging(cmd_out)
-            self.logging(cmd_err)
+            self.logging.error(cmd_out)
+            self.logging.error(cmd_err)
             sys.exit("Exiting...")
         else:
             self.logging.info(f"{command} command validated with -h")
diff --git a/rosa-burner.ini b/rosa-burner.ini
index 5f2a97f..eb5a9fe 100644
--- a/rosa-burner.ini
+++ b/rosa-burner.ini
@@ -1,7 +1,7 @@
 [Defaults]
 install_clusters = True
 cluster_count = 1
-cluster_name_seed = gitci
+cluster_name_seed = rbur
 workers = 3
 workers_wait_time = 60
 wait_for_workers = True