Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cnf ran: add oran pre-provision test cases #411

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 12 additions & 5 deletions tests/cnf/ran/internal/ranconfig/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,9 @@ type RANConfig struct {
PtpOperatorNamespace string `yaml:"ptpOperatorNamespace" envconfig:"ECO_CNF_RAN_PTP_OPERATOR_NAMESPACE"`
TalmPreCachePolicies []string `yaml:"talmPreCachePolicies" envconfig:"ECO_CNF_RAN_TALM_PRECACHE_POLICIES"`
ZtpSiteGenerateImage string `yaml:"ztpSiteGenerateImage" envconfig:"ECO_CNF_RAN_ZTP_SITE_GENERATE_IMAGE"`
// ClusterTemplateAffix is the version-dependent affix used for naming ClusterTemplates and other O-RAN
// resources.
ClusterTemplateAffix string `envconfig:"ECO_CNF_RAN_CLUSTER_TEMPLATE_AFFIX"`
}

// HubConfig contains the configuration for the hub cluster, if present.
Expand All @@ -53,8 +56,12 @@ type HubConfig struct {
type Spoke1Config struct {
Spoke1BMC *bmc.BMC
Spoke1APIClient *clients.Settings
Spoke1Name string
Spoke1OCPVersion string
// Spoke1Name is automatically updated if Spoke1Kubeconfig exists, otherwise it can be provided as an input.
Spoke1Name string `envconfig:"ECO_CNF_RAN_SPOKE1_NAME"`
// Spoke1Hostname is not automatically updated but instead used as an input for the O-RAN suite.
Spoke1Hostname string `envconfig:"ECO_CNF_RAN_SPOKE1_HOSTNAME"`
Spoke1Kubeconfig string `envconfig:"KUBECONFIG"`
BMCUsername string `envconfig:"ECO_CNF_RAN_BMC_USERNAME"`
BMCPassword string `envconfig:"ECO_CNF_RAN_BMC_PASSWORD"`
BMCHosts []string `envconfig:"ECO_CNF_RAN_BMC_HOSTS"`
Expand Down Expand Up @@ -168,12 +175,12 @@ func (ranconfig *RANConfig) newSpoke1Config(configFile string) {

ranconfig.Spoke1Config.Spoke1APIClient = inittools.APIClient

spoke1Kubeconfig := os.Getenv("KUBECONFIG")
if spoke1Kubeconfig != "" {
ranconfig.Spoke1Config.Spoke1Name, err = version.GetClusterName(spoke1Kubeconfig)

if spoke1Kubeconfig := ranconfig.Spoke1Config.Spoke1Kubeconfig; spoke1Kubeconfig != "" {
spoke1Name, err := version.GetClusterName(spoke1Kubeconfig)
if err != nil {
glog.V(ranparam.LogLevel).Infof("Failed to get spoke 1 name from kubeconfig at %s: %v", spoke1Kubeconfig, err)
} else {
ranconfig.Spoke1Config.Spoke1Name = spoke1Name
}
} else {
glog.V(ranparam.LogLevel).Infof("No spoke 1 kubeconfig specified in KUBECONFIG environment variable")
Expand Down
86 changes: 86 additions & 0 deletions tests/cnf/ran/oran/internal/helper/helper.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
package helper

import (
"fmt"

"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/oran"
. "github.com/openshift-kni/eco-gotests/tests/cnf/ran/internal/raninittools"
"github.com/openshift-kni/eco-gotests/tests/cnf/ran/oran/internal/tsparams"
pluginv1alpha1 "github.com/openshift-kni/oran-hwmgr-plugin/api/hwmgr-plugin/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// NewProvisioningRequest creates a ProvisioningRequest builder for templateVersion, filling in every required
// template parameter from RANConfig and prefixing the version with the configured ClusterTemplateAffix.
func NewProvisioningRequest(client *clients.Settings, templateVersion string) *oran.ProvisioningRequestBuilder {
	// The ClusterInstance parameters name the spoke cluster and its single node.
	clusterInstanceParams := map[string]any{
		"clusterName": RANConfig.Spoke1Name,
		"nodes": []map[string]any{{
			"hostName": RANConfig.Spoke1Hostname,
		}},
	}

	return oran.NewPRBuilder(client, tsparams.TestPRName, tsparams.ClusterTemplateName,
		RANConfig.ClusterTemplateAffix+"-"+templateVersion).
		WithTemplateParameter("nodeClusterName", RANConfig.Spoke1Name).
		WithTemplateParameter("oCloudSiteId", RANConfig.Spoke1Name).
		WithTemplateParameter("policyTemplateParameters", map[string]any{}).
		WithTemplateParameter("clusterInstanceParameters", clusterInstanceParams)
}

// NewNoTemplatePR creates a ProvisioningRequest builder for templateVersion, following the schema used when no
// HardwareTemplate is present. All required parameters and the affix come from RANConfig. The BMC and network
// data are deliberately incorrect so that a ClusterInstance is generated but will not actually provision.
func NewNoTemplatePR(client *clients.Settings, templateVersion string) *oran.ProvisioningRequestBuilder {
	// Single node entry with bogus BMC and network data. 192.0.2.0 is a reserved documentation/test address
	// (TEST-NET-1) so we never accidentally target a valid IP.
	node := map[string]any{
		"hostName":   RANConfig.Spoke1Hostname,
		"bmcAddress": "redfish-VirtualMedia://192.0.2.0/redfish/v1/Systems/System.Embedded.1",
		"bmcCredentialsDetails": map[string]any{
			"username": tsparams.TestBase64Credential,
			"password": tsparams.TestBase64Credential,
		},
		"bootMACAddress": "01:23:45:67:89:AB",
		"nodeNetwork": map[string]any{
			"interfaces": []map[string]any{{
				"macAddress": "01:23:45:67:89:AB",
			}},
		},
	}

	return oran.NewPRBuilder(client, tsparams.TestPRName, tsparams.ClusterTemplateName,
		RANConfig.ClusterTemplateAffix+"-"+templateVersion).
		WithTemplateParameter("nodeClusterName", RANConfig.Spoke1Name).
		WithTemplateParameter("oCloudSiteId", RANConfig.Spoke1Name).
		WithTemplateParameter("policyTemplateParameters", map[string]any{}).
		WithTemplateParameter("clusterInstanceParameters", map[string]any{
			"clusterName": RANConfig.Spoke1Name,
			"nodes":       []map[string]any{node},
		})
}

// GetValidDellHwmgr returns the first HardwareManager whose AdaptorID is dell-hwmgr and whose Validation
// condition is True. It returns an error when listing fails or when no such HardwareManager exists.
func GetValidDellHwmgr(client *clients.Settings) (*oran.HardwareManagerBuilder, error) {
	hwmgrs, err := oran.ListHardwareManagers(client, runtimeclient.ListOptions{
		Namespace: tsparams.HardwareManagerNamespace,
	})
	if err != nil {
		return nil, err
	}

	for _, hwmgr := range hwmgrs {
		isDell := hwmgr.Definition.Spec.AdaptorID == pluginv1alpha1.SupportedAdaptors.Dell
		if isDell && hwmgrValidationTrue(hwmgr) {
			return hwmgr, nil
		}
	}

	return nil, fmt.Errorf("no valid HardwareManager with AdaptorID dell-hwmgr exists")
}

// hwmgrValidationTrue reports whether hwmgr has a Validation condition with status True.
func hwmgrValidationTrue(hwmgr *oran.HardwareManagerBuilder) bool {
	for _, condition := range hwmgr.Definition.Status.Conditions {
		if condition.Type == string(pluginv1alpha1.ConditionTypes.Validation) && condition.Status == metav1.ConditionTrue {
			return true
		}
	}

	return false
}
92 changes: 92 additions & 0 deletions tests/cnf/ran/oran/internal/tsparams/consts.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
package tsparams

import "github.com/golang/glog"

// Ginkgo labels used to select the whole O-RAN suite or one of its phases.
const (
	// LabelSuite is the label applied to all cases in the oran suite.
	LabelSuite = "oran"
	// LabelPreProvision is the label applied to just the pre-provision test cases.
	LabelPreProvision = "pre-provision"
	// LabelProvision is the label applied to just the provision test cases.
	LabelProvision = "provision"
	// LabelPostProvision is the label applied to just the post-provision test cases.
	LabelPostProvision = "post-provision"
)

// Resource names, namespaces, and expected condition/Policy messages shared across the O-RAN test cases.
const (
	// ClusterTemplateName is the name without the version of the ClusterTemplate used in the ORAN tests. It is also
	// the namespace the ClusterTemplates are in.
	ClusterTemplateName = "sno-ran-du"
	// HardwareManagerNamespace is the namespace that HardwareManagers and their secrets use.
	HardwareManagerNamespace = "oran-hwmgr-plugin"
	// O2IMSNamespace is the namespace used by the oran-o2ims operator.
	O2IMSNamespace = "oran-o2ims"
	// ExtraManifestsName is the name of the generated extra manifests ConfigMap in the cluster Namespace.
	ExtraManifestsName = "sno-ran-du-extra-manifest-1"
	// ClusterInstanceParamsKey is the key in the TemplateParameters map for the ClusterInstance parameters.
	ClusterInstanceParamsKey = "clusterInstanceParameters"
	// PolicyTemplateParamsKey is the key in the TemplateParameters map for the policy template parameters.
	PolicyTemplateParamsKey = "policyTemplateParameters"
	// HugePagesSizeKey is the key in TemplateParameters.policyTemplateParameters that sets the hugepages size.
	HugePagesSizeKey = "hugepages-size"

	// ImmutableMessage is the message to expect in a Policy's history when an immutable field cannot be updated.
	ImmutableMessage = "cannot be updated, likely due to immutable fields not matching"
	// CTMissingSchemaMessage is the ClusterTemplate condition message for when required schema is missing.
	CTMissingSchemaMessage = "Error validating the clusterInstanceParameters schema"
	// CTMissingLabelMessage is the ClusterTemplate condition message for when the default ConfigMap is missing an
	// interface label.
	CTMissingLabelMessage = "failed to validate the default ConfigMap: 'label' is missing for interface"
)

// ClusterTemplate versions, one per test scenario. Each version is combined with
// RANConfig.ClusterTemplateAffix when a ProvisioningRequest is built (see the oran helper package).
const (
	// TemplateValid is the valid ClusterTemplate used for the provision tests.
	TemplateValid = "v1"
	// TemplateNonexistentProfile is the ClusterTemplate version for the nonexistent hardware profile test.
	TemplateNonexistentProfile = "v2"
	// TemplateNoHardware is the ClusterTemplate version for the no hardware available test.
	TemplateNoHardware = "v3"
	// TemplateMissingLabels is the ClusterTemplate version for the missing interface labels test.
	TemplateMissingLabels = "v4"
	// TemplateIncorrectLabel is the ClusterTemplate version for the incorrect boot interface label test.
	TemplateIncorrectLabel = "v5"
	// TemplateUpdateProfile is the ClusterTemplate version for the hardware profile update test.
	TemplateUpdateProfile = "v6"
	// TemplateInvalid is the ClusterTemplate version for the invalid ClusterTemplate test.
	TemplateInvalid = "v7"
	// TemplateUpdateDefaults is the ClusterTemplate version for the ClusterInstance defaults update test.
	TemplateUpdateDefaults = "v8"
	// TemplateUpdateExisting is the ClusterTemplate version for the update existing PG manifest test.
	TemplateUpdateExisting = "v9"
	// TemplateAddNew is the ClusterTemplate version for the add new manifest to existing PG test.
	TemplateAddNew = "v10"
	// TemplateUpdateSchema is the ClusterTemplate version for the policyTemplateParameters schema update test.
	TemplateUpdateSchema = "v11"
	// TemplateMissingSchema is the ClusterTemplate version for the missing schema without HardwareTemplate test.
	TemplateMissingSchema = "v12"
	// TemplateNoHWTemplate is the ClusterTemplate version for the successful no HardwareTemplate test.
	TemplateNoHWTemplate = "v13"
)

const (
	// TestName is the name to use for various test items, such as labels, annotations, and the test ConfigMap in
	// post-provision tests. This constant consolidates all these names so there is a single constant rather than
	// separate TestLabel, TestAnnotation, etc. constants that would all have the same value.
	TestName = "oran-test"
	// TestName2 is the secondary test name to use for various test items, for example, the second test ConfigMap
	// for test cases that use it in the post-provision tests.
	TestName2 = "oran-test-2"
	// TestOriginalValue is the original value to expect when checking the test ConfigMap.
	TestOriginalValue = "original-value"
	// TestNewValue is the new value to set in the test ConfigMap.
	TestNewValue = "new-value"
	// TestPRName is the UUID used for naming ProvisioningRequests. Since metadata.name must be a UUID, just use a
	// constant one for consistency.
	TestPRName = "9c5372f3-ea1d-4a96-8157-b3b874a55cf9"
	// TestBase64Credential is a base64 encoded version of the string "wrongpassword" for when an obviously invalid
	// credential is needed.
	TestBase64Credential = "d3JvbmdwYXNzd29yZA=="
)

// LogLevel is the glog verbosity level to use for logs in this suite or its helpers.
const LogLevel glog.Level = 80
102 changes: 102 additions & 0 deletions tests/cnf/ran/oran/internal/tsparams/oranvars.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
package tsparams

import (
siteconfigv1alpha1 "github.com/openshift-kni/eco-goinfra/pkg/schemes/siteconfig/v1alpha1"
"github.com/openshift-kni/eco-gotests/tests/cnf/ran/internal/ranparam"
"github.com/openshift-kni/k8sreporter"
pluginv1alpha1 "github.com/openshift-kni/oran-hwmgr-plugin/api/hwmgr-plugin/v1alpha1"
provisioningv1alpha1 "github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/ptr"
policiesv1 "open-cluster-management.io/governance-policy-propagator/api/v1"
)

var (
	// Labels is the set of labels applied to all test cases in the suite: the shared RAN labels plus LabelSuite.
	// NOTE(review): append may reuse ranparam.Labels' backing array when it has spare capacity; if another suite
	// appends to the same slice, consider cloning before appending — confirm how ranparam.Labels is declared.
	Labels = append(ranparam.Labels, LabelSuite)

	// ReporterHubNamespacesToDump tells the reporter which namespaces on the hub to collect pod logs from.
	ReporterHubNamespacesToDump = map[string]string{
		TestName:       "",
		O2IMSNamespace: "",
	}

	// ReporterSpokeNamespacesToDump tells the reporter which namespaces on the spoke to collect pod logs from.
	ReporterSpokeNamespacesToDump = map[string]string{
		TestName: "",
	}

	// ReporterHubCRsToDump is the CRs the reporter should dump on the hub. Only HardwareManagers are restricted
	// to a namespace; the other lists are cluster-wide.
	ReporterHubCRsToDump = []k8sreporter.CRData{
		{Cr: &pluginv1alpha1.HardwareManagerList{}, Namespace: ptr.To(HardwareManagerNamespace)},
		{Cr: &provisioningv1alpha1.ProvisioningRequestList{}},
		{Cr: &policiesv1.PolicyList{}},
		{Cr: &siteconfigv1alpha1.ClusterInstanceList{}},
	}

	// ReporterSpokeCRsToDump is the CRs the reporter should dump on the spoke.
	ReporterSpokeCRsToDump = []k8sreporter.CRData{
		{Cr: &corev1.ConfigMapList{}, Namespace: ptr.To(TestName)},
		{Cr: &policiesv1.PolicyList{}},
	}
)

// Expected metav1.Condition values used by the test cases when asserting on HardwareManager,
// ProvisioningRequest, and ClusterTemplate status conditions.
var (
	// HwmgrFailedAuthCondition is the condition to match for when the HardwareManager fails to authenticate with
	// the DTIAS. The "401" message presumably matches the HTTP Unauthorized status surfaced in the condition
	// message — confirm against the matcher used by the test.
	HwmgrFailedAuthCondition = metav1.Condition{
		Type:    string(pluginv1alpha1.ConditionTypes.Validation),
		Reason:  string(pluginv1alpha1.ConditionReasons.Failed),
		Status:  metav1.ConditionFalse,
		Message: "401",
	}

	// PRHardwareProvisionFailedCondition is the ProvisioningRequest condition where hardware provisioning failed.
	PRHardwareProvisionFailedCondition = metav1.Condition{
		Type:   string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned),
		Reason: string(provisioningv1alpha1.CRconditionReasons.Failed),
		Status: metav1.ConditionFalse,
	}
	// PRValidationFailedCondition is the ProvisioningRequest condition where ProvisioningRequest validation failed.
	PRValidationFailedCondition = metav1.Condition{
		Type:   string(provisioningv1alpha1.PRconditionTypes.Validated),
		Reason: string(provisioningv1alpha1.CRconditionReasons.Failed),
		Status: metav1.ConditionFalse,
	}
	// PRValidationSucceededCondition is the ProvisioningRequest condition where ProvisioningRequest validation
	// succeeded.
	PRValidationSucceededCondition = metav1.Condition{
		Type:   string(provisioningv1alpha1.PRconditionTypes.Validated),
		Reason: string(provisioningv1alpha1.CRconditionReasons.Completed),
		Status: metav1.ConditionTrue,
	}
	// PRNodeConfigFailedCondition is the ProvisioningRequest condition where applying the node configuration
	// failed.
	PRNodeConfigFailedCondition = metav1.Condition{
		Type:   string(provisioningv1alpha1.PRconditionTypes.HardwareNodeConfigApplied),
		Reason: string(provisioningv1alpha1.CRconditionReasons.NotApplied),
		Status: metav1.ConditionFalse,
	}
	// PRConfigurationAppliedCondition is the ProvisioningRequest condition where applying day2 configuration
	// succeeds.
	PRConfigurationAppliedCondition = metav1.Condition{
		Type:   string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied),
		Reason: string(provisioningv1alpha1.CRconditionReasons.Completed),
		Status: metav1.ConditionTrue,
	}
	// PRCIProcesssedCondition is the ProvisioningRequest condition where the ClusterInstance has successfully been
	// processed. NOTE(review): the identifier carries a triple-s "Processsed" typo; it is exported, so renaming
	// it here would break any callers — fix repo-wide in a follow-up.
	PRCIProcesssedCondition = metav1.Condition{
		Type:   string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceProcessed),
		Reason: string(provisioningv1alpha1.CRconditionReasons.Completed),
		Status: metav1.ConditionTrue,
	}

	// CTValidationFailedCondition is the ClusterTemplate condition where the validation failed.
	CTValidationFailedCondition = metav1.Condition{
		Type:   string(provisioningv1alpha1.CTconditionTypes.Validated),
		Reason: string(provisioningv1alpha1.CTconditionReasons.Failed),
		Status: metav1.ConditionFalse,
	}
)
58 changes: 58 additions & 0 deletions tests/cnf/ran/oran/oran_suite_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
package oran

import (
"fmt"
"path"
"runtime"
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/openshift-kni/eco-goinfra/pkg/clients"
"github.com/openshift-kni/eco-goinfra/pkg/reportxml"
"github.com/openshift-kni/eco-gotests/tests/cnf/ran/internal/rancluster"
. "github.com/openshift-kni/eco-gotests/tests/cnf/ran/internal/raninittools"
"github.com/openshift-kni/eco-gotests/tests/cnf/ran/oran/internal/tsparams"
_ "github.com/openshift-kni/eco-gotests/tests/cnf/ran/oran/tests"
"github.com/openshift-kni/eco-gotests/tests/internal/reporter"
)

// currentFile is the absolute path of this source file, captured at init time and used to derive
// the JUnit and hub report paths below.
var _, currentFile, _, _ = runtime.Caller(0)

// TestORAN is the go test entry point that runs the RAN O-RAN Ginkgo suite.
func TestORAN(t *testing.T) {
	// Route the suite's JUnit output to a path derived from this file's location.
	_, reporterConfig := GinkgoConfiguration()
	reporterConfig.JUnitReport = RANConfig.GetJunitReportPath(currentFile)

	RegisterFailHandler(Fail)
	RunSpecs(t, "RAN O-RAN Suite", Label(tsparams.Labels...), reporterConfig)
}

// Before any spec runs, require a reachable hub cluster — every O-RAN test talks to the hub.
var _ = BeforeSuite(func() {
	By("checking that the hub cluster is present")
	Expect(rancluster.AreClustersPresent([]*clients.Settings{HubAPIClient})).
		To(BeTrue(), "Hub cluster must be present for O-RAN tests")
})

// After each spec, collect failure reports: spoke logs/CRs when a spoke client exists, and hub
// logs/CRs written next to this file with a "hub_" prefix.
var _ = JustAfterEach(func() {
	report := CurrentSpecReport()
	reportDir, reportFilename := path.Split(currentFile)
	hubReportPath := fmt.Sprintf("%shub_%s", reportDir, reportFilename)

	// The spoke may not be provisioned yet; only report on it when a client is available.
	if Spoke1APIClient != nil {
		reporter.ReportIfFailed(
			report, currentFile, tsparams.ReporterSpokeNamespacesToDump, tsparams.ReporterSpokeCRsToDump)
	}

	reporter.ReportIfFailedOnCluster(
		RANConfig.HubKubeconfig,
		report,
		hubReportPath,
		tsparams.ReporterHubNamespacesToDump,
		tsparams.ReporterHubCRsToDump)
})

// Once the whole suite finishes, emit the reportxml results using the configured path and test-case prefix.
var _ = ReportAfterSuite("", func(report Report) {
	reportxml.Create(report, RANConfig.GetReportPath(), RANConfig.TCPrefix)
})
1 change: 1 addition & 0 deletions tests/cnf/ran/oran/tests/oran-post-provision.go
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
package tests
Loading