diff --git a/libs/platforms/platform.py b/libs/platforms/platform.py
index 761599e..ca128f4 100644
--- a/libs/platforms/platform.py
+++ b/libs/platforms/platform.py
@@ -184,6 +184,9 @@ def create_cluster(self, platform, cluster_name):
     def delete_cluster(self, platform, cluster_name):
         pass
 
+    def get_metadata(self, platform, cluster_name):
+        pass
+
     def platform_cleanup(self):
         pass
 
diff --git a/libs/platforms/rosa/hypershift/hypershift.py b/libs/platforms/rosa/hypershift/hypershift.py
index d8d682a..5346c29 100644
--- a/libs/platforms/rosa/hypershift/hypershift.py
+++ b/libs/platforms/rosa/hypershift/hypershift.py
@@ -96,7 +96,7 @@ def _verify_provision_shard(self):
         self.logging.error(f"No Provision Shard found for Service Cluster {self.environment['service_cluster']} on {self.environment['aws']['region']}")
         return None
 
-    def get_mc(self, cluster_id):
+    def _get_mc(self, cluster_id):
         self.logging.debug(f"Get the mgmt cluster of cluster {cluster_id}")
         resp_code, resp_out, resp_err = self.utils.subprocess_exec(
             "ocm get /api/clusters_mgmt/v1/clusters/" + cluster_id + "/hypershift",
@@ -104,6 +104,27 @@
         )
         return json.loads(resp_out).get("management_cluster", None) if resp_code == 0 else None
 
+    # Get Hypershift cluster metadata and set required platform environment variables
+    def get_metadata(self, platform, cluster_name):
+        metadata = super().get_metadata(platform, cluster_name)
+        self.logging.info(f"Getting information for cluster {cluster_name}")
+        metadata_code, metadata_out, metadata_err = self.utils.subprocess_exec(
+            "rosa describe cluster -c " + cluster_name + " -o json",
+            extra_params={"universal_newlines": True}
+        )
+        try:
+            status = json.loads(metadata_out)["state"]
+        except Exception as err:
+            self.logging.error(f"Cannot load metadata for cluster {cluster_name}")
+            self.logging.error(err)
+            status = ""
+        if status == "ready":
+            cluster_mc = self._get_mc(self.get_cluster_id(cluster_name))
+            metadata["mgmt_cluster_name"] = cluster_mc
+            platform.environment["mc_kubeconfig"] = platform.environment["path"] + "/kubeconfig_" + cluster_mc
+
+        return metadata
+
     def platform_cleanup(self):
         super().platform_cleanup()
         self.logging.info("Cleaning resources")
@@ -268,7 +289,7 @@ def delete_cluster(self, platform, cluster_name):
         cluster_start_time = int(datetime.datetime.utcnow().timestamp())
         cluster_info["uuid"] = self.environment["uuid"]
         cluster_info["install_method"] = "rosa"
-        cluster_info["mgmt_cluster_name"] = self.get_mc(cluster_info["metadata"]["cluster_id"])
+        cluster_info["mgmt_cluster_name"] = self._get_mc(cluster_info["metadata"]["cluster_id"])
         self.logging.info(f"Deleting cluster {cluster_name} on Hypershift Platform")
         cleanup_code, cleanup_out, cleanup_err = self.utils.subprocess_exec("rosa delete cluster -c " + cluster_name + " -y --watch", cluster_info["path"] + "/cleanup.log", {'preexec_fn': self.utils.disable_signals})
         cluster_delete_end_time = int(datetime.datetime.utcnow().timestamp())
@@ -473,7 +494,7 @@ def create_cluster(self, platform, cluster_name):
             cluster_info['status'] = "Installing"
             self.logging.info(f"Cluster {cluster_name} installation started on the {trying} try")
 
-            cluster_info["metadata"] = self.get_metadata(cluster_name)
+            cluster_info["metadata"] = self.get_metadata(platform, cluster_name)
             cluster_info["install_try"] = trying
             with concurrent.futures.ThreadPoolExecutor() as executor:
                 preflight_ch = executor.submit(self._preflight_wait, cluster_info["metadata"]["cluster_id"], cluster_name)
@@ -481,7 +502,7 @@ def create_cluster(self, platform, cluster_name):
                 cluster_info["preflight_checks"] = preflight_ch.result()
                 cluster_info["sc_namespace_timing"] = sc_namespace.result() - cluster_start_time if platform.environment["sc_kubeconfig"] != "" else None
 
-                mgmt_cluster_name = self.get_mc(cluster_info["metadata"]["cluster_id"])
+                mgmt_cluster_name = self._get_mc(cluster_info["metadata"]["cluster_id"])
                 self.environment["mc_kubeconfig"] = self.download_kubeconfig(mgmt_cluster_name, self.environment["path"])
                 mc_namespace = executor.submit(self._namespace_wait, platform.environment["mc_kubeconfig"], cluster_info["metadata"]["cluster_id"], cluster_name, "Management") if platform.environment["mc_kubeconfig"] != "" else 0
                 cluster_info["mc_namespace_timing"] = mc_namespace.result() - cluster_start_time if platform.environment["mc_kubeconfig"] != "" else None
@@ -495,7 +516,7 @@ def create_cluster(self, platform, cluster_name):
         cluster_end_time = int(datetime.datetime.utcnow().timestamp())
         index_time = datetime.datetime.utcnow().isoformat()
         # Getting againg metadata to update the cluster status
-        cluster_info["metadata"] = self.get_metadata(cluster_name)
+        cluster_info["metadata"] = self.get_metadata(platform, cluster_name)
         cluster_info["install_duration"] = cluster_end_time - cluster_start_time
         access_timers = self.get_cluster_admin_access(cluster_name, cluster_info["path"])
         cluster_info["kubeconfig"] = access_timers.get("kubeconfig", None)
diff --git a/libs/platforms/rosa/rosa.py b/libs/platforms/rosa/rosa.py
index 834b3bf..6dd4f9e 100644
--- a/libs/platforms/rosa/rosa.py
+++ b/libs/platforms/rosa/rosa.py
@@ -200,7 +200,8 @@ def get_workers_ready(self, kubeconfig, cluster_name):
         super().get_workers_ready(kubeconfig, cluster_name)
         return Platform.get_workers_ready(self, kubeconfig, cluster_name)
 
-    def get_metadata(self, cluster_name):
+    def get_metadata(self, platform, cluster_name):
+        super().get_metadata(platform, cluster_name)
         metadata = {}
         self.logging.info(f"Getting information for cluster {cluster_name}")
         metadata_code, metadata_out, metadata_err = self.utils.subprocess_exec("rosa describe cluster -c " + cluster_name + " -o json", extra_params={"universal_newlines": True})
diff --git a/libs/platforms/rosa/terraform/terraform.py b/libs/platforms/rosa/terraform/terraform.py
index 15c222b..0711736 100644
--- a/libs/platforms/rosa/terraform/terraform.py
+++ b/libs/platforms/rosa/terraform/terraform.py
@@ -174,7 +174,7 @@ def create_cluster(self, platform, cluster_name):
             cluster_info['status'] = "installed"
             self.logging.info(f"Cluster {cluster_name} installation finished on the {trying} try")
 
-            cluster_info["metadata"] = self.get_metadata(cluster_name)
+            cluster_info["metadata"] = self.get_metadata(platform, cluster_name)
             cluster_info["install_try"] = trying
             cluster_info["install_duration"] = cluster_end_time - cluster_start_time
             access_timers = self.get_cluster_admin_access(cluster_name, cluster_info["path"])
diff --git a/libs/utils.py b/libs/utils.py
index 3df94ad..709aa19 100644
--- a/libs/utils.py
+++ b/libs/utils.py
@@ -127,13 +127,11 @@ def get_cluster_info(self, platform):
             loop_counter += 1
             cluster_name = platform.environment["cluster_name_seed"] + "-" + str(loop_counter).zfill(4)
             platform.environment["clusters"][cluster_name] = {}
-            platform.environment["clusters"][cluster_name]["metadata"] = platform.get_metadata(cluster_name)
+            platform.environment["clusters"][cluster_name]["metadata"] = platform.get_metadata(platform, cluster_name)
             platform.environment["clusters"][cluster_name]["status"] = platform.environment["clusters"][cluster_name]["metadata"]["status"]
             platform.environment["clusters"][cluster_name]["path"] = platform.environment["path"] + "/" + cluster_name
             platform.environment["clusters"][cluster_name]["kubeconfig"] = platform.environment["clusters"][cluster_name]["path"] + "/kubeconfig"
             platform.environment['clusters'][cluster_name]['workers'] = int(platform.environment["workers"].split(",")[(loop_counter - 1) % len(platform.environment["workers"].split(","))])
-            cluster_mc = platform.get_mc(platform.get_cluster_id(cluster_name))
-            platform.environment["mc_kubeconfig"] = platform.environment["path"] + "/kubeconfig_" + cluster_mc
         return platform
 
     def load_scheduler(self, platform):
@@ -220,7 +218,8 @@ def cluster_load(self, platform, cluster_name, load=""):
         # Copy executor to the local folder because we shaw in the past that we cannot use kube-burner with multiple executions at the same time
         shutil.copy2(platform.environment['load']['executor'], my_path)
         load_env["ITERATIONS"] = str(platform.environment['clusters'][cluster_name]['workers'] * platform.environment['load']['jobs'])
-        load_env["EXTRA_FLAGS"] = "--churn-duration=" + platform.environment['load']['duration'] + " --churn-percent=10 --churn-delay=30s --timeout=24h"
+        if load != "index":
+            load_env["EXTRA_FLAGS"] = "--churn-duration=" + platform.environment['load']['duration'] + " --churn-percent=10 --churn-delay=30s --timeout=24h"
        # if es_url is not None:
        #     load_env["ES_SERVER"] = es_url
         load_env["LOG_LEVEL"] = "debug"