Add TestPassThroughValues
@@ -67,3 +67,68 @@ func TestPersistenceDisabled(t *testing.T) {
 		t.Errorf("Expected no PVC, found %v (%+v)", len(pvcs.Items), pvcs.Items)
 	}
 }
+
+// TestPassThroughValues tests several values which are set when installing
+// the Helm chart, and should be passed straight through to Kubernetes
+func TestPassThroughValues(t *testing.T) {
+	cs := kubeLogin(t)
+	release := strings.ToLower(t.Name())
+	queueManagerName := "foo"
+	requestCPU := "501m"
+	requestMem := "501Mi"
+	limitCPU := "502m"
+	limitMem := "502Mi"
+	helmInstall(t, cs, release,
+		"license=accept",
+		"persistence.enabled=false",
+		"resources.requests.cpu="+requestCPU,
+		"resources.requests.memory="+requestMem,
+		"resources.limits.cpu="+limitCPU,
+		"resources.limits.memory="+limitMem,
+		"queueManager.name="+queueManagerName,
+	)
+	defer helmDelete(t, cs, release)
+	waitForReady(t, cs, release)
+	pods := getPodsForHelmRelease(t, cs, release)
+	pod := pods.Items[0]
+
+	t.Run("resources.requests.cpu", func(t *testing.T) {
+		cpu := pod.Spec.Containers[0].Resources.Requests.Cpu()
+		if cpu.String() != requestCPU {
+			t.Errorf("Expected requested CPU to be %v, got %v", requestCPU, cpu.String())
+		}
+	})
+	t.Run("resources.requests.memory", func(t *testing.T) {
+		mem := pod.Spec.Containers[0].Resources.Requests.Memory()
+		if mem.String() != requestMem {
+			t.Errorf("Expected requested memory to be %v, got %v", requestMem, mem.String())
+		}
+	})
+	t.Run("resources.limits.cpu", func(t *testing.T) {
+		cpu := pod.Spec.Containers[0].Resources.Limits.Cpu()
+		if cpu.String() != limitCPU {
+			t.Errorf("Expected CPU limits to be %v, got %v", limitCPU, cpu.String())
+		}
+	})
+	t.Run("resources.limits.memory", func(t *testing.T) {
+		mem := pod.Spec.Containers[0].Resources.Limits.Memory()
+		if mem.String() != limitMem {
+			t.Errorf("Expected memory to be %v, got %v", limitMem, mem.String())
+		}
+	})
+	t.Run("queueManager.name", func(t *testing.T) {
+		out, _, err := kubeExec(t, pod.Name, "dspmq", "-n")
+		if err != nil {
+			t.Fatal(err)
+		}
+		// Example output of `dspmq -n`:
+		// QMNAME(qm1) STATUS(RUNNING)
+		n := strings.Fields(out)[0]
+		n = strings.Split(n, "(")[1]
+		n = strings.Trim(n, "() ")
+		t.Logf("Queue manager name detected: %v", n)
+		if n != queueManagerName {
+			t.Errorf("Expected queue manager name to be %v, got %v", queueManagerName, n)
+		}
+	})
+}
@@ -81,8 +81,6 @@ func inspectLogs(t *testing.T, cs *kubernetes.Clientset, release string) string
 
 func helmInstall(t *testing.T, cs *kubernetes.Clientset, release string, values ...string) {
 	chart := "../../charts/ibm-mqadvanced-server-prod"
-	//image := "mycluster.icp:8500/default/mq-devserver"
-	//image := "ibmcom/mq"
 	tag := "latest"
 	arg := []string{
 		"install",
@@ -110,6 +108,7 @@ func helmInstall(t *testing.T, cs *kubernetes.Clientset, release string, values
 }
 
 func helmDelete(t *testing.T, cs *kubernetes.Clientset, release string) {
+	t.Log("Deleting Helm release")
 	t.Log(inspectLogs(t, cs, release))
 	out, _, err := runCommand(t, "helm", "delete", "--purge", release)
 	if err != nil {
@@ -161,17 +160,22 @@ func waitForReady(t *testing.T, cs *kubernetes.Clientset, release string) {
 	}
 	pod := pods.Items[0]
 	podName := pod.Name
-	// Wait for the queue manager container to be started...
-	for {
+	// Wait for the queue manager container to be started
+	running := false
+	for !running {
 		pod, err := cs.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
 		if err != nil {
 			t.Fatal(err)
 		}
 		if len(pod.Status.ContainerStatuses) > 0 {
-			// Got a container now, but it could still be in state "ContainerCreating"
-			// TODO: Check the status here properly
-			time.Sleep(3 * time.Second)
-			break
+			state := pod.Status.ContainerStatuses[0].State
+			switch {
+			case state.Waiting != nil:
+				t.Logf("Waiting for container")
+				time.Sleep(1 * time.Second)
+			case state.Running != nil:
+				running = true
+			}
 		}
 	}
 	// Exec into the container to check if it's ready
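For reference, the queueManager.name subtest above derives the name by parsing the first field of the `dspmq -n` output (for example `QMNAME(qm1) STATUS(RUNNING)`), splitting on the opening bracket and trimming the closing one. A minimal standalone sketch of that parsing step, using an illustrative parseQueueManagerName helper that is not part of this commit:

package main

import (
	"fmt"
	"strings"
)

// parseQueueManagerName extracts the queue manager name from dspmq -n
// output of the form "QMNAME(qm1)   STATUS(RUNNING)".
func parseQueueManagerName(out string) string {
	// Take the first whitespace-separated field, e.g. "QMNAME(qm1)"
	field := strings.Fields(out)[0]
	// Keep the text after the opening bracket, e.g. "qm1)"
	name := strings.Split(field, "(")[1]
	// Trim the closing bracket and any stray spaces, leaving "qm1"
	return strings.Trim(name, "() ")
}

func main() {
	fmt.Println(parseQueueManagerName("QMNAME(foo)  STATUS(RUNNING)")) // prints "foo"
}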