Add TestPassThroughValues
@@ -67,3 +67,68 @@ func TestPersistenceDisabled(t *testing.T) {
 		t.Errorf("Expected no PVC, found %v (%+v)", len(pvcs.Items), pvcs.Items)
 	}
 }
+
+// TestPassThroughValues tests several values which are set when installing
+// the Helm chart, and should be passed straight through to Kubernetes
+func TestPassThroughValues(t *testing.T) {
+	cs := kubeLogin(t)
+	release := strings.ToLower(t.Name())
+	queueManagerName := "foo"
+	requestCPU := "501m"
+	requestMem := "501Mi"
+	limitCPU := "502m"
+	limitMem := "502Mi"
+	helmInstall(t, cs, release,
+		"license=accept",
+		"persistence.enabled=false",
+		"resources.requests.cpu="+requestCPU,
+		"resources.requests.memory="+requestMem,
+		"resources.limits.cpu="+limitCPU,
+		"resources.limits.memory="+limitMem,
+		"queueManager.name="+queueManagerName,
+	)
+	defer helmDelete(t, cs, release)
+	waitForReady(t, cs, release)
+	pods := getPodsForHelmRelease(t, cs, release)
+	pod := pods.Items[0]
+
+	t.Run("resources.requests.cpu", func(t *testing.T) {
+		cpu := pod.Spec.Containers[0].Resources.Requests.Cpu()
+		if cpu.String() != requestCPU {
+			t.Errorf("Expected requested CPU to be %v, got %v", requestCPU, cpu.String())
+		}
+	})
+	t.Run("resources.requests.memory", func(t *testing.T) {
+		mem := pod.Spec.Containers[0].Resources.Requests.Memory()
+		if mem.String() != requestMem {
+			t.Errorf("Expected requested memory to be %v, got %v", requestMem, mem.String())
+		}
+	})
+	t.Run("resources.limits.cpu", func(t *testing.T) {
+		cpu := pod.Spec.Containers[0].Resources.Limits.Cpu()
+		if cpu.String() != limitCPU {
+			t.Errorf("Expected CPU limits to be %v, got %v", limitCPU, cpu.String())
+		}
+	})
+	t.Run("resources.limits.memory", func(t *testing.T) {
+		mem := pod.Spec.Containers[0].Resources.Limits.Memory()
+		if mem.String() != limitMem {
+			t.Errorf("Expected memory to be %v, got %v", limitMem, mem.String())
+		}
+	})
+	t.Run("queueManager.name", func(t *testing.T) {
+		out, _, err := kubeExec(t, pod.Name, "dspmq", "-n")
+		if err != nil {
+			t.Fatal(err)
+		}
+		// Example output of `dspmq -n`:
+		// QMNAME(qm1) STATUS(RUNNING)
+		n := strings.Fields(out)[0]
+		n = strings.Split(n, "(")[1]
+		n = strings.Trim(n, "() ")
+		t.Logf("Queue manager name detected: %v", n)
+		if n != queueManagerName {
+			t.Errorf("Expected queue manager name to be %v, got %v", queueManagerName, n)
+		}
+	})
+}
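The resource sub-tests above compare quantities by their canonical string form, which works here because values such as "501m" and "501Mi" are already in canonical form. A minimal alternative sketch (not part of this commit; the helper name is illustrative) compares the parsed quantities numerically instead, using resource.MustParse and Quantity.Cmp from k8s.io/apimachinery:

// Sketch only: numeric comparison of a CPU request against the value passed
// to Helm. Assumes imports corev1 "k8s.io/api/core/v1" and
// "k8s.io/apimachinery/pkg/api/resource" in the test package.
func checkCPURequest(t *testing.T, pod corev1.Pod, want string) {
	expected := resource.MustParse(want)
	got := pod.Spec.Containers[0].Resources.Requests.Cpu()
	if got.Cmp(expected) != 0 {
		t.Errorf("Expected requested CPU to be %v, got %v", expected.String(), got.String())
	}
}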
@@ -81,8 +81,6 @@ func inspectLogs(t *testing.T, cs *kubernetes.Clientset, release string) string
 
 func helmInstall(t *testing.T, cs *kubernetes.Clientset, release string, values ...string) {
 	chart := "../../charts/ibm-mqadvanced-server-prod"
-	//image := "mycluster.icp:8500/default/mq-devserver"
-	//image := "ibmcom/mq"
 	tag := "latest"
 	arg := []string{
 		"install",
@@ -110,6 +108,7 @@ func helmInstall(t *testing.T, cs *kubernetes.Clientset, release string, values
 }
 
 func helmDelete(t *testing.T, cs *kubernetes.Clientset, release string) {
 	t.Log("Deleting Helm release")
+	t.Log(inspectLogs(t, cs, release))
 	out, _, err := runCommand(t, "helm", "delete", "--purge", release)
 	if err != nil {
@@ -161,17 +160,22 @@ func waitForReady(t *testing.T, cs *kubernetes.Clientset, release string) {
 	}
 	pod := pods.Items[0]
 	podName := pod.Name
-	// Wait for the queue manager container to be started...
-	for {
+	// Wait for the queue manager container to be started
+	running := false
+	for !running {
 		pod, err := cs.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
 		if err != nil {
 			t.Fatal(err)
 		}
 		if len(pod.Status.ContainerStatuses) > 0 {
-			// Got a container now, but it could still be in state "ContainerCreating"
-			// TODO: Check the status here properly
-			time.Sleep(3 * time.Second)
-			break
+			state := pod.Status.ContainerStatuses[0].State
+			switch {
+			case state.Waiting != nil:
+				t.Logf("Waiting for container")
+				time.Sleep(1 * time.Second)
+			case state.Running != nil:
+				running = true
+			}
 		}
 	}
 	// Exec into the container to check if it's ready
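The reworked loop above polls the first container's state and retries while it is still Waiting. As a sketch only (not part of this commit; the helper name, timeout parameter, and failure message are illustrative), the same poll can be bounded by a deadline so a container that never starts fails the test instead of blocking it, reusing the same client-go calls and the package-level namespace variable seen in waitForReady:

// Sketch: poll the pod's first container until it reports Running, or fail
// the test after the given timeout.
func waitForContainerRunning(t *testing.T, cs *kubernetes.Clientset, podName string, timeout time.Duration) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		pod, err := cs.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
		if err != nil {
			t.Fatal(err)
		}
		if len(pod.Status.ContainerStatuses) > 0 && pod.Status.ContainerStatuses[0].State.Running != nil {
			return
		}
		time.Sleep(1 * time.Second)
	}
	t.Fatalf("Timed out after %v waiting for container in pod %v to start", timeout, podName)
}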