first commit
707
test/container/containerengine/containerengine.go
Normal file
@@ -0,0 +1,707 @@
package containerengine

/*
© Copyright IBM Corporation 2017, 2024

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

import (
	"context"
	"encoding/json"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/ibm-messaging/mq-container/test/container/pathutils"
)

type ContainerInterface interface {
	ContainerCreate(config *ContainerConfig, hostConfig *ContainerHostConfig, networkingConfig *ContainerNetworkSettings, containerName string) (string, error)
	ContainerStop(container string, timeout *time.Duration) error
	ContainerKill(container string, signal string) error
	ContainerRemove(container string, options ContainerRemoveOptions) error
	ContainerStart(container string, options ContainerStartOptions) error
	ContainerWait(ctx context.Context, container string, condition string) (<-chan int64, <-chan error)
	GetContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (string, error)
	CopyFromContainer(container, srcPath string) ([]byte, error)

	GetContainerPort(ID string, hostPort int) (string, error)
	GetContainerIPAddress(ID string) (string, error)
	ContainerInspectWithFormat(format string, ID string) (string, error)
	ExecContainer(ID string, user string, cmd []string) (int, string)
	GetMQVersion(image string) (string, error)
	ContainerInspect(containerID string) (ContainerDetails, error)

	NetworkCreate(name string, options NetworkCreateOptions) (string, error)
	NetworkRemove(network string) error

	VolumeCreate(options VolumeCreateOptions) (string, error)
	VolumeRemove(volumeID string, force bool) error

	ImageBuild(context io.Reader, tag string, dockerfilename string) (string, error)
	ImageRemove(image string, options ImageRemoveOptions) (bool, error)
	ImageInspectWithFormat(format string, ID string) (string, error)
}
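// Illustrative sketch (not part of the original file): a typical lifecycle for a
// test that uses this interface. The image name and environment values below are
// placeholders, not values defined by this commit.
//
//	cli := NewContainerClient()
//	id, err := cli.ContainerCreate(
//		&ContainerConfig{Image: "example-mq-image", Env: []string{"LICENSE=accept", "MQ_QMGR_NAME=QM1"}},
//		&ContainerHostConfig{PortBindings: []PortBinding{{HostIP: "0.0.0.0", ContainerPort: "1414/tcp"}}},
//		&ContainerNetworkSettings{},
//		"example-qm",
//	)
//	if err == nil {
//		_ = cli.ContainerStart(id, ContainerStartOptions{})
//		defer cli.ContainerRemove(id, ContainerRemoveOptions{Force: true, RemoveVolumes: true})
//	}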
|
||||
|
||||
type ContainerClient struct {
|
||||
ContainerTool string
|
||||
Version string
|
||||
logger commandLogger
|
||||
logOptions logOptions
|
||||
}
|
||||
|
||||
type commandLogger interface {
|
||||
Logf(format string, args ...any)
|
||||
}
|
||||
|
||||
type logOptions struct {
|
||||
logCommands bool
|
||||
}
|
||||
|
||||
// objects
|
||||
var objVolume = "volume"
|
||||
var objImage = "image"
|
||||
var objPort = "port"
|
||||
var objNetwork = "network"
|
||||
|
||||
// verbs
|
||||
var listContainers = "ps"
|
||||
var listImages = "images"
|
||||
var create = "create"
|
||||
var startContainer = "start"
|
||||
var waitContainer = "wait"
|
||||
var execContainer = "exec"
|
||||
var getLogs = "logs"
|
||||
var stopContainer = "stop"
|
||||
var remove = "rm"
|
||||
var inspect = "inspect"
|
||||
var copyFile = "cp"
|
||||
var build = "build"
|
||||
var killContainer = "kill"
|
||||
|
||||
// args
|
||||
var argEntrypoint = "--entrypoint"
|
||||
var argUser = "--user"
|
||||
var argExpose = "--expose"
|
||||
var argVolume = "--volume"
|
||||
var argPublish = "--publish"
|
||||
var argPrivileged = "--privileged"
|
||||
var argAddCapability = "--cap-add"
|
||||
var argDropCapability = "--cap-drop"
|
||||
var argName = "--name"
|
||||
var argCondition = "--condition"
|
||||
var argEnvironmentVariable = "--env"
|
||||
var argTail = "--tail"
|
||||
var argForce = "--force"
|
||||
var argVolumes = "--volumes"
|
||||
var argHostname = "--hostname"
|
||||
var argDriver = "--driver"
|
||||
var argFile = "--file"
|
||||
var argQuiet = "--quiet"
|
||||
var argTag = "--tag"
|
||||
var argFormat = "--format"
|
||||
var argNetwork = "--network"
|
||||
var argSecurityOptions = "--security-opt"
|
||||
var argSignal = "--signal"
|
||||
var argReadOnlyRootfs = "--read-only"
|
||||
|
||||
// generic
|
||||
var toolVersion = "version"
|
||||
var ContainerStateNotRunning = "not-running"
|
||||
var ContainerStateStopped = "stopped"
|
||||
|
||||
type ContainerConfig struct {
|
||||
Image string
|
||||
Hostname string
|
||||
User string
|
||||
Entrypoint []string
|
||||
Env []string
|
||||
ExposedPorts []string
|
||||
}
|
||||
|
||||
type ContainerDetails struct {
|
||||
ID string
|
||||
Name string
|
||||
Image string
|
||||
Path string
|
||||
Args []string
|
||||
Config ContainerConfig
|
||||
HostConfig ContainerHostConfig
|
||||
}
|
||||
|
||||
type ContainerDetailsLogging struct {
|
||||
ID string
|
||||
Name string
|
||||
Image string
|
||||
Path string
|
||||
Args []string
|
||||
CapAdd []string
|
||||
CapDrop []string
|
||||
User string
|
||||
Env []string
|
||||
}
|
||||
|
||||
type ContainerHostConfig struct {
|
||||
Binds []string // Bindings onto a volume
|
||||
PortBindings []PortBinding //Bindings from a container port to a port on the host
|
||||
Privileged bool // Give extended privileges to container
|
||||
CapAdd []string // Linux capabilities to add to the container
|
||||
CapDrop []string // Linux capabilities to drop from the container
|
||||
SecurityOpt []string
|
||||
ReadOnlyRootfs bool // Readonly root file system
|
||||
}
|
||||
|
||||
type ContainerNetworkSettings struct {
|
||||
Networks []string // A list of networks to connect the container to
|
||||
}
|
||||
|
||||
type ContainerRemoveOptions struct {
|
||||
Force bool
|
||||
RemoveVolumes bool
|
||||
}
|
||||
|
||||
type ContainerStartOptions struct {
|
||||
}
|
||||
|
||||
type NetworkCreateOptions struct {
|
||||
}
|
||||
|
||||
type ContainerLogsOptions struct {
|
||||
}
|
||||
|
||||
type ImageRemoveOptions struct {
|
||||
Force bool
|
||||
}
|
||||
|
||||
type VolumeCreateOptions struct {
|
||||
Name string
|
||||
Driver string
|
||||
}
|
||||
|
||||
// Binding from a container port to a port on the host
|
||||
type PortBinding struct {
|
||||
HostIP string
|
||||
HostPort string //Port to map to on the host
|
||||
ContainerPort string //Exposed port on the container
|
||||
}
|
||||
|
||||
// NewContainerClient returns a new container client
|
||||
// Defaults to using podman
|
||||
func NewContainerClient(options ...ContainterClientOption) ContainerClient {
|
||||
tool, set := os.LookupEnv("COMMAND")
|
||||
if !set {
|
||||
tool = "podman"
|
||||
}
|
||||
client := ContainerClient{
|
||||
ContainerTool: tool,
|
||||
Version: GetContainerToolVersion(tool),
|
||||
}
|
||||
for _, option := range options {
|
||||
option(&client)
|
||||
}
|
||||
return client
|
||||
}
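// Usage sketch (assumed, not part of the original file): the container tool is
// selected with the COMMAND environment variable and falls back to podman; the
// option used below is defined in containerengine_options.go in this commit.
//
//	os.Setenv("COMMAND", "docker")                      // otherwise "podman" is used
//	cli := NewContainerClient(WithTestCommandLogger(t)) // t is a *testing.T
//	t.Logf("using %v version %v", cli.ContainerTool, cli.Version)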
|
||||
|
||||
// GetContainerToolVersion returns the version of the container tool being used
|
||||
func GetContainerToolVersion(containerTool string) string {
|
||||
if containerTool == "docker" {
|
||||
args := []string{"version", "--format", "'{{.Client.Version}}'"}
|
||||
v, err := exec.Command("docker", args...).Output()
|
||||
if err != nil {
|
||||
return "0.0.0"
|
||||
}
|
||||
return string(v)
|
||||
} else if containerTool == "podman" {
|
||||
//Default to checking the version of podman
|
||||
args := []string{"version", "--format", "'{{.Version}}'"}
|
||||
v, err := exec.Command("podman", args...).Output()
|
||||
if err != nil {
|
||||
return "0.0.0"
|
||||
}
|
||||
return string(v)
|
||||
}
|
||||
return "0.0.0"
|
||||
}
|
||||
|
||||
// GetMQVersion returns the MQ version of a given container image
|
||||
func (cli ContainerClient) GetMQVersion(image string) (string, error) {
|
||||
v, err := cli.ImageInspectWithFormat("{{.Config.Labels.version}}", image)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// ImageInspectWithFormat inspects an image with a given formatting string
|
||||
func (cli ContainerClient) ImageInspectWithFormat(format string, ID string) (string, error) {
|
||||
args := []string{
|
||||
objImage,
|
||||
inspect,
|
||||
ID,
|
||||
}
|
||||
if format != "" {
|
||||
args = append(args, []string{argFormat, format}...)
|
||||
}
|
||||
output, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
// ContainerInspectWithFormat inspects a container with a given formatting string
|
||||
func (cli ContainerClient) ContainerInspectWithFormat(format string, ID string) (string, error) {
|
||||
args := []string{
|
||||
inspect,
|
||||
ID,
|
||||
}
|
||||
if format != "" {
|
||||
args = append(args, []string{argFormat, format}...)
|
||||
}
|
||||
output, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
// GetContainerPort gets the ports on a container
|
||||
func (cli ContainerClient) GetContainerPort(ID string, hostPort int) (string, error) {
|
||||
args := []string{
|
||||
objPort,
|
||||
ID,
|
||||
strconv.Itoa(hostPort),
|
||||
}
|
||||
output, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
o := SanitizeString(string(output))
|
||||
return strings.Split((o), ":")[1], nil
|
||||
}
|
||||
|
||||
// GetContainerIPAddress gets the IP address of a container
|
||||
func (cli ContainerClient) GetContainerIPAddress(ID string) (string, error) {
|
||||
v, err := cli.ContainerInspectWithFormat("{{.NetworkSettings.IPAddress}}", ID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// CopyFromContainer copies a file from a container and returns its contents
|
||||
func (cli ContainerClient) CopyFromContainer(container, srcPath string) ([]byte, error) {
|
||||
tmpDir, err := os.MkdirTemp("", "tmp")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
args := []string{
|
||||
copyFile,
|
||||
container + ":" + srcPath,
|
||||
tmpDir + "/.",
|
||||
}
|
||||
_, err = cli.logCommand(cli.ContainerTool, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//Get file name
|
||||
fname := filepath.Base(srcPath)
|
||||
data, err := os.ReadFile(pathutils.CleanPath(tmpDir, fname))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
//Remove the file
|
||||
err = os.Remove(pathutils.CleanPath(tmpDir, fname))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (cli ContainerClient) ContainerInspect(containerID string) (ContainerDetails, error) {
|
||||
args := []string{
|
||||
inspect,
|
||||
containerID,
|
||||
}
|
||||
output, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
return ContainerDetails{}, err
|
||||
}
|
||||
|
||||
var container ContainerDetails
|
||||
err = json.Unmarshal(output, &container)
|
||||
if err != nil {
|
||||
return ContainerDetails{}, err
|
||||
}
|
||||
return container, err
|
||||
}
|
||||
|
||||
func (cli ContainerClient) ContainerStop(container string, timeout *time.Duration) error {
|
||||
args := []string{
|
||||
stopContainer,
|
||||
container,
|
||||
}
|
||||
_, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
return err
|
||||
}
|
||||
|
||||
func (cli ContainerClient) ContainerKill(container string, signal string) error {
|
||||
args := []string{
|
||||
killContainer,
|
||||
container,
|
||||
}
|
||||
if signal != "" {
|
||||
args = append(args, []string{argSignal, signal}...)
|
||||
}
|
||||
_, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
return err
|
||||
}
|
||||
|
||||
func (cli ContainerClient) ContainerRemove(container string, options ContainerRemoveOptions) error {
|
||||
args := []string{
|
||||
remove,
|
||||
container,
|
||||
}
|
||||
if options.Force {
|
||||
args = append(args, argForce)
|
||||
}
|
||||
if options.RemoveVolumes {
|
||||
args = append(args, argVolumes)
|
||||
}
|
||||
_, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
		// Silently ignore the error, as exit code 125 can be present even on successful deletion
|
||||
if strings.Contains(err.Error(), "125") {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cli ContainerClient) ExecContainer(ID string, user string, cmd []string) (int, string) {
|
||||
args := []string{
|
||||
execContainer,
|
||||
}
|
||||
if user != "" {
|
||||
args = append(args, []string{argUser, user}...)
|
||||
}
|
||||
args = append(args, ID)
|
||||
args = append(args, cmd...)
|
||||
ctx := context.Background()
|
||||
output, err := cli.logCommandContext(ctx, cli.ContainerTool, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
		// Only type-assert when the error really is an *exec.ExitError,
		// otherwise return a sentinel exit code
		if exitErr, ok := err.(*exec.ExitError); ok {
			return exitErr.ExitCode(), string(output)
		}
		return 9897, string(output)
|
||||
}
|
||||
return 0, string(output)
|
||||
}
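// Usage sketch (assumed, not part of the original file): ExecContainer returns the
// command's exit code and its combined output; errors that are not an
// *exec.ExitError are reported with the sentinel code 9897.
//
//	rc, out := cli.ExecContainer(id, "mqm", []string{"dspmq", "-m", "QM1"})
//	if rc != 0 {
//		t.Errorf("dspmq failed with rc=%v: %v", rc, out)
//	}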
|
||||
|
||||
func (cli ContainerClient) ContainerStart(container string, options ContainerStartOptions) error {
|
||||
args := []string{
|
||||
startContainer,
|
||||
container,
|
||||
}
|
||||
_, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
return err
|
||||
}
|
||||
|
||||
// ContainerWait starts waiting for a container. It returns an int64 channel for receiving an exit code and an error channel for receiving errors.
|
||||
// The channels returned from this function should be used to receive the results from the wait command.
|
||||
func (cli ContainerClient) ContainerWait(ctx context.Context, container string, condition string) (<-chan int64, <-chan error) {
|
||||
args := []string{
|
||||
waitContainer,
|
||||
container,
|
||||
}
|
||||
if cli.ContainerTool == "podman" {
|
||||
if condition == ContainerStateNotRunning {
|
||||
condition = ContainerStateStopped
|
||||
}
|
||||
args = append(args, []string{argCondition, string(condition)}...)
|
||||
}
|
||||
|
||||
resultC := make(chan int64)
|
||||
errC := make(chan error, 1)
|
||||
|
||||
output, err := cli.logCommandContext(ctx, cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return resultC, errC
|
||||
}
|
||||
|
||||
go func() {
|
||||
out := strings.TrimSuffix(string(output), "\n")
|
||||
exitCode, err := strconv.Atoi(out)
|
||||
if err != nil {
|
||||
errC <- err
|
||||
return
|
||||
}
|
||||
resultC <- int64(exitCode)
|
||||
}()
|
||||
|
||||
return resultC, errC
|
||||
}
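// Usage sketch (assumed, not part of the original file): receive the result of
// ContainerWait from both channels, honouring the caller's context.
//
//	exitC, errC := cli.ContainerWait(ctx, id, ContainerStateNotRunning)
//	select {
//	case rc := <-exitC:
//		t.Logf("container exited with rc=%v", rc)
//	case err := <-errC:
//		t.Error(err)
//	case <-ctx.Done():
//		t.Error(ctx.Err())
//	}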
|
||||
|
||||
func (cli ContainerClient) GetContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (string, error) {
|
||||
args := []string{
|
||||
getLogs,
|
||||
container,
|
||||
}
|
||||
output, err := cli.logCommand(cli.ContainerTool, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
func (cli ContainerClient) NetworkCreate(name string, options NetworkCreateOptions) (string, error) {
|
||||
	args := []string{
		objNetwork,
		create,
		name,
	}
|
||||
netID, err := cli.logCommand(cli.ContainerTool, args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
networkID := SanitizeString(string(netID))
|
||||
|
||||
return networkID, nil
|
||||
}
|
||||
|
||||
func (cli ContainerClient) NetworkRemove(network string) error {
|
||||
	args := []string{
		objNetwork,
		remove,
		network,
	}
|
||||
_, err := cli.logCommand(cli.ContainerTool, args...).CombinedOutput()
|
||||
return err
|
||||
}
|
||||
|
||||
func (cli ContainerClient) VolumeCreate(options VolumeCreateOptions) (string, error) {
|
||||
args := []string{
|
||||
objVolume,
|
||||
create,
|
||||
options.Name,
|
||||
}
|
||||
if options.Driver != "" {
|
||||
args = append(args, []string{argDriver, options.Driver}...)
|
||||
}
|
||||
output, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
name := SanitizeString(string(output))
|
||||
return name, nil
|
||||
}
|
||||
|
||||
func (cli ContainerClient) VolumeRemove(volumeID string, force bool) error {
|
||||
args := []string{
|
||||
objVolume,
|
||||
remove,
|
||||
volumeID,
|
||||
}
|
||||
if force {
|
||||
args = append(args, argForce)
|
||||
}
|
||||
_, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
return err
|
||||
}
|
||||
|
||||
func (cli ContainerClient) ImageBuild(context io.Reader, tag string, dockerfilename string) (string, error) {
|
||||
args := []string{
|
||||
objImage,
|
||||
build,
|
||||
}
|
||||
//dockerfilename includes the path to the dockerfile
|
||||
//When using podman use the full path including the name of the Dockerfile
|
||||
if cli.ContainerTool == "podman" {
|
||||
args = append(args, []string{argFile, dockerfilename}...)
|
||||
}
|
||||
if tag != "" {
|
||||
args = append(args, []string{argTag, tag}...)
|
||||
}
|
||||
args = append(args, argQuiet)
|
||||
//When using docker remove the name 'DockerFile' from the string
|
||||
if cli.ContainerTool == "docker" {
|
||||
dfn := strings.ReplaceAll(dockerfilename, "Dockerfile", "")
|
||||
args = append(args, dfn)
|
||||
}
|
||||
output, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
sha := SanitizeString(string(output))
|
||||
return sha, nil
|
||||
}
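// Note (sketch, paths are placeholders): the resulting build commands differ by
// tool. For a dockerfilename of, say, "test/tls/Dockerfile" and tag "mq-tls" this
// runs roughly:
//
//	podman image build --file test/tls/Dockerfile --tag mq-tls --quiet
//	docker image build --tag mq-tls --quiet test/tls/
//
// i.e. podman is given the Dockerfile path directly, while docker is given the
// build-context directory with the "Dockerfile" name stripped.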
|
||||
|
||||
func (cli ContainerClient) ImageRemove(image string, options ImageRemoveOptions) (bool, error) {
|
||||
args := []string{
|
||||
objImage,
|
||||
remove,
|
||||
image,
|
||||
}
|
||||
if options.Force {
|
||||
args = append(args, argForce)
|
||||
}
|
||||
_, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (cli ContainerClient) ContainerCreate(config *ContainerConfig, hostConfig *ContainerHostConfig, networkingConfig *ContainerNetworkSettings, containerName string) (string, error) {
|
||||
args := []string{
|
||||
create,
|
||||
argName,
|
||||
containerName,
|
||||
}
|
||||
args = getHostConfigArgs(args, hostConfig)
|
||||
args = getNetworkConfigArgs(args, networkingConfig)
|
||||
args = getContainerConfigArgs(args, config, cli.ContainerTool)
|
||||
output, err := cli.logCommand(cli.ContainerTool, args...).Output()
|
||||
lines := strings.Split(strings.ReplaceAll(string(output), "\r\n", "\n"), "\n")
|
||||
if err != nil {
|
||||
return strings.Join(lines, "\n"), err
|
||||
}
|
||||
return lines[0], nil
|
||||
}
|
||||
|
||||
func (cli ContainerClient) logCommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd {
|
||||
if cli.logger != nil && cli.logOptions.logCommands {
|
||||
cli.logger.Logf("Running command: %s %s", name, strings.Join(arg, " "))
|
||||
}
|
||||
return exec.CommandContext(ctx, name, arg...)
|
||||
}
|
||||
|
||||
func (cli ContainerClient) logCommand(name string, arg ...string) *exec.Cmd {
|
||||
if cli.logger != nil && cli.logOptions.logCommands {
|
||||
cli.logger.Logf("Running command: %s %s", name, strings.Join(arg, " "))
|
||||
}
|
||||
return exec.Command(name, arg...)
|
||||
}
|
||||
|
||||
// getContainerConfigArgs converts a ContainerConfig into a set of cli arguments
|
||||
func getContainerConfigArgs(args []string, config *ContainerConfig, toolName string) []string {
|
||||
argList := []string{}
|
||||
if config.Entrypoint != nil && toolName == "podman" {
|
||||
entrypoint := "[\""
|
||||
for i, commandPart := range config.Entrypoint {
|
||||
if i != len(config.Entrypoint)-1 {
|
||||
entrypoint += commandPart + "\",\""
|
||||
} else {
|
||||
//terminate list
|
||||
entrypoint += commandPart + "\"]"
|
||||
}
|
||||
}
|
||||
args = append(args, []string{argEntrypoint, entrypoint}...)
|
||||
}
|
||||
if config.Entrypoint != nil && toolName == "docker" {
|
||||
ep1 := ""
|
||||
for i, commandPart := range config.Entrypoint {
|
||||
if i == 0 {
|
||||
ep1 = commandPart
|
||||
} else {
|
||||
argList = append(argList, commandPart)
|
||||
}
|
||||
}
|
||||
args = append(args, []string{argEntrypoint, ep1}...)
|
||||
}
|
||||
if config.User != "" {
|
||||
args = append(args, []string{argUser, config.User}...)
|
||||
}
|
||||
if config.ExposedPorts != nil {
|
||||
for _, port := range config.ExposedPorts {
|
||||
args = append(args, []string{argExpose, port}...)
|
||||
}
|
||||
}
|
||||
if config.Hostname != "" {
|
||||
args = append(args, []string{argHostname, config.Hostname}...)
|
||||
}
|
||||
for _, env := range config.Env {
|
||||
args = append(args, []string{argEnvironmentVariable, env}...)
|
||||
}
|
||||
if config.Image != "" {
|
||||
args = append(args, config.Image)
|
||||
}
|
||||
if config.Entrypoint != nil && toolName == "docker" {
|
||||
args = append(args, argList...)
|
||||
}
|
||||
return args
|
||||
}
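// Example mapping (sketch, not part of the original file): a config such as
//
//	ContainerConfig{Image: "mq", User: "1001", Env: []string{"LICENSE=accept"}, ExposedPorts: []string{"1414"}}
//
// produces arguments equivalent to
//
//	--user 1001 --expose 1414 --env LICENSE=accept mq
//
// With an Entrypoint, podman receives it as a single JSON-style list via
// --entrypoint '["cmd","arg"]', whereas docker receives only the first element via
// --entrypoint and the remaining elements are appended after the image name.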
|
||||
|
||||
// getHostConfigArgs converts a ContainerHostConfig into a set of cli arguments
|
||||
func getHostConfigArgs(args []string, hostConfig *ContainerHostConfig) []string {
|
||||
if hostConfig.Binds != nil {
|
||||
for _, volume := range hostConfig.Binds {
|
||||
args = append(args, []string{argVolume, volume}...)
|
||||
}
|
||||
}
|
||||
if hostConfig.PortBindings != nil {
|
||||
for _, binding := range hostConfig.PortBindings {
|
||||
pub := binding.HostIP + ":" + binding.HostPort + ":" + binding.ContainerPort
|
||||
args = append(args, []string{argPublish, pub}...)
|
||||
}
|
||||
}
|
||||
if hostConfig.Privileged {
|
||||
args = append(args, []string{argPrivileged}...)
|
||||
}
|
||||
if hostConfig.CapAdd != nil {
|
||||
for _, capability := range hostConfig.CapAdd {
|
||||
args = append(args, []string{argAddCapability, string(capability)}...)
|
||||
}
|
||||
}
|
||||
if hostConfig.CapDrop != nil {
|
||||
for _, capability := range hostConfig.CapDrop {
|
||||
args = append(args, []string{argDropCapability, string(capability)}...)
|
||||
}
|
||||
}
|
||||
if hostConfig.SecurityOpt != nil {
|
||||
for _, securityOption := range hostConfig.SecurityOpt {
|
||||
args = append(args, []string{argSecurityOptions, string(securityOption)}...)
|
||||
}
|
||||
}
|
||||
// Add --read-only flag to enable Read Only Root File system on the container
|
||||
if hostConfig.ReadOnlyRootfs {
|
||||
args = append(args, []string{argReadOnlyRootfs}...)
|
||||
}
|
||||
|
||||
return args
|
||||
}
|
||||
|
||||
// getNetworkConfigArgs converts a set of ContainerNetworkSettings into a set of cli arguments
|
||||
func getNetworkConfigArgs(args []string, networkingConfig *ContainerNetworkSettings) []string {
|
||||
if networkingConfig.Networks != nil {
|
||||
for _, netID := range networkingConfig.Networks {
|
||||
args = append(args, []string{argNetwork, netID}...)
|
||||
}
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
func SanitizeString(s string) string {
|
||||
s = strings.Replace(s, " ", "", -1)
|
||||
s = strings.Replace(s, "\t", "", -1)
|
||||
s = strings.Replace(s, "\n", "", -1)
|
||||
return s
|
||||
}
|
||||
18
test/container/containerengine/containerengine_options.go
Normal file
@@ -0,0 +1,18 @@
package containerengine

import (
	"os"
	"strings"
	"testing"
)

type ContainterClientOption func(*ContainerClient)

func WithTestCommandLogger(t *testing.T) ContainterClientOption {
	return func(cc *ContainerClient) {
		cc.logger = t
		cc.logOptions = logOptions{
			logCommands: strings.ToLower(os.Getenv("TEST_LOG_CONTAINER_COMMANDS")) == "true",
		}
	}
}
|
||||
911
test/container/devconfig_test.go
Normal file
@@ -0,0 +1,911 @@
|
||||
//go:build mqdev
|
||||
// +build mqdev
|
||||
|
||||
/*
|
||||
© Copyright IBM Corporation 2018, 2023
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
)
|
||||
|
||||
// TestDevGoldenPath tests using the default values for the default developer config.
|
||||
// Note: This test requires a separate container image to be available for the JMS tests.
|
||||
func TestDevGoldenPath(t *testing.T) {
|
||||
t.Parallel()
|
||||
cli := ce.NewContainerClient()
|
||||
qm := "qm1"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=" + qm,
|
||||
"DEBUG=true",
|
||||
"MQ_CONNAUTH_USE_HTP=true",
|
||||
"MQ_APP_PASSWORD=" + defaultAppPasswordWeb,
|
||||
"MQ_ADMIN_PASSWORD=" + defaultAdminPassword,
|
||||
},
|
||||
}
|
||||
id := runContainerWithPorts(t, cli, &containerConfig, []int{9443, 1414})
|
||||
defer cleanContainer(t, cli, id)
|
||||
waitForReady(t, cli, id)
|
||||
waitForWebReady(t, cli, id, insecureTLSConfig)
|
||||
t.Run("JMS", func(t *testing.T) {
|
||||
// Run the JMS tests, with no password specified.
|
||||
// Use OpenJDK JRE for running the tests, so pass "false" for the 7th parameter.
|
||||
// Last parameter is blank as the test doesn't use TLS.
|
||||
runJMSTests(t, cli, id, false, "app", defaultAppPasswordOS, "false", "")
|
||||
})
|
||||
t.Run("REST admin", func(t *testing.T) {
|
||||
testRESTAdmin(t, cli, id, insecureTLSConfig, "")
|
||||
})
|
||||
t.Run("REST messaging", func(t *testing.T) {
|
||||
testRESTMessaging(t, cli, id, insecureTLSConfig, qm, "app", defaultAppPasswordWeb, "")
|
||||
})
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
// TestDevSecure tests the default developer config using a custom TLS key store and password.
|
||||
// Note: This test requires a separate container image to be available for the JMS tests
|
||||
func TestDevSecure(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
const tlsPassPhrase string = "passw0rd"
|
||||
qm := "qm1"
|
||||
appPassword := "differentPassw0rd"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=" + qm,
|
||||
"MQ_CONNAUTH_USE_HTP=true",
|
||||
"MQ_APP_PASSWORD=" + appPassword,
|
||||
"MQ_ADMIN_PASSWORD=" + defaultAdminPassword,
|
||||
"DEBUG=1",
|
||||
"WLP_LOGGING_MESSAGE_FORMAT=JSON",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER_LOG=true",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
// Assign a random port for the web server on the host
|
||||
// TODO: Don't do this for all tests
|
||||
var binding ce.PortBinding
|
||||
ports := []int{9443, 1414}
|
||||
for _, p := range ports {
|
||||
port := fmt.Sprintf("%v/tcp", p)
|
||||
binding = ce.PortBinding{
|
||||
ContainerPort: port,
|
||||
HostIP: "0.0.0.0",
|
||||
}
|
||||
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
cert := filepath.Join(tlsDir(t, true), "server.crt")
|
||||
waitForWebReady(t, cli, ID, createTLSConfig(t, cert, tlsPassPhrase))
|
||||
|
||||
t.Run("JMS", func(t *testing.T) {
|
||||
// OpenJDK is used for running tests, hence pass "false" for 7th parameter.
|
||||
// Cipher name specified is compliant with non-IBM JRE naming.
|
||||
runJMSTests(t, cli, ID, true, "app", appPassword, "false", "*TLS12ORHIGHER")
|
||||
})
|
||||
t.Run("REST admin", func(t *testing.T) {
|
||||
testRESTAdmin(t, cli, ID, insecureTLSConfig, "")
|
||||
})
|
||||
t.Run("REST messaging", func(t *testing.T) {
|
||||
testRESTMessaging(t, cli, ID, insecureTLSConfig, qm, "app", appPassword, "")
|
||||
})
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
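// Sketch (not part of the original tests): because the PortBindings above leave
// HostPort empty, the container engine assigns a random host port. A test that
// needs to connect from the host could recover it with GetContainerPort, e.g.:
//
//	hostPort, err := cli.GetContainerPort(ID, 9443)
//	if err != nil {
//		t.Fatal(err)
//	}
//	url := fmt.Sprintf("https://localhost:%v/ibmmq/console", hostPort) // example URL only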
|
||||
|
||||
func TestDevWebDisabled(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=qm1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
"MQ_APP_PASSWORD=" + defaultAppPasswordOS,
|
||||
},
|
||||
}
|
||||
id := runContainerWithPorts(t, cli, &containerConfig, []int{1414})
|
||||
defer cleanContainer(t, cli, id)
|
||||
waitForReady(t, cli, id)
|
||||
t.Run("Web", func(t *testing.T) {
|
||||
_, dspmqweb := cli.ExecContainer(id, "", []string{"dspmqweb"})
|
||||
if !strings.Contains(dspmqweb, "Server mqweb is not running.") && !strings.Contains(dspmqweb, "MQWB1125I") {
|
||||
t.Errorf("Expected dspmqweb to say 'Server is not running' or 'MQWB1125I'; got \"%v\"", dspmqweb)
|
||||
}
|
||||
})
|
||||
t.Run("JMS", func(t *testing.T) {
|
||||
// Run the JMS tests, with no password specified
|
||||
// OpenJDK is used for running tests, hence pass "false" for 7th parameter.
|
||||
// Last parameter is blank as the test doesn't use TLS.
|
||||
runJMSTests(t, cli, id, false, "app", defaultAppPasswordOS, "false", "")
|
||||
})
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestDevConfigDisabled(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=qm1",
|
||||
"MQ_DEV=false",
|
||||
},
|
||||
}
|
||||
id := runContainerWithPorts(t, cli, &containerConfig, []int{9443})
|
||||
defer cleanContainer(t, cli, id)
|
||||
waitForReady(t, cli, id)
|
||||
rc, _ := execContainer(t, cli, id, "", []string{"bash", "-c", "echo 'display qlocal(DEV*)' | runmqsc"})
|
||||
if rc == 0 {
|
||||
t.Errorf("Expected DEV queues to be missing")
|
||||
}
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
// Test if SSLKEYR and CERTLABL attributes are not set when key and certificate
|
||||
// are not supplied.
|
||||
func TestSSLKEYRBlank(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
},
|
||||
}
|
||||
id := runContainerWithPorts(t, cli, &containerConfig, []int{9443})
|
||||
defer cleanContainer(t, cli, id)
|
||||
waitForReady(t, cli, id)
|
||||
|
||||
	// Execute runmqsc to display the qmgr SSLKEYR and CERTLABL attributes.
	// Search the console output for the expected values.
|
||||
_, sslkeyROutput := execContainer(t, cli, id, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||
if !strings.Contains(sslkeyROutput, "SSLKEYR( )") || !strings.Contains(sslkeyROutput, "CERTLABL( )") {
|
||||
// Although queue manager is ready, it may be that MQSC scripts have not been applied yet.
|
||||
// Hence wait for a second and retry a few times before giving up.
|
||||
waitCount := 30
|
||||
var i int
|
||||
for i = 0; i < waitCount; i++ {
|
||||
time.Sleep(1 * time.Second)
|
||||
_, sslkeyROutput = execContainer(t, cli, id, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||
if strings.Contains(sslkeyROutput, "SSLKEYR( )") && strings.Contains(sslkeyROutput, "CERTLABL( )") {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Failed to get the expected output? Dump the contents of the MQSC files.
|
||||
if i == waitCount {
|
||||
_, tls15mqsc := execContainer(t, cli, id, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
||||
_, autoMQSC := execContainer(t, cli, id, "", []string{"cat", "/mnt/mqm/data/qmgrs/QM1/autocfg/cached.mqsc"})
|
||||
t.Errorf("Expected SSLKEYR to be blank but it is not; got \"%v\"\n AutoConfig MQSC file contents %v\n 15-tls: %v", sslkeyROutput, autoMQSC, tls15mqsc)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
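// Sketch (not part of the original tests): the wait-and-retry pattern above recurs
// in several of these tests; a hypothetical helper like the one below could
// centralise it. execContainer is the existing helper used throughout this file.
//
//	func waitForMQSCOutput(t *testing.T, cli ce.ContainerClient, id string, want ...string) string {
//		var out string
//		for i := 0; i < 30; i++ {
//			_, out = execContainer(t, cli, id, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
//			ok := true
//			for _, w := range want {
//				ok = ok && strings.Contains(out, w)
//			}
//			if ok {
//				return out
//			}
//			time.Sleep(1 * time.Second)
//		}
//		return out
//	}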
|
||||
|
||||
// Test if SSLKEYR and CERTLABL attributes are set when key and certificate
|
||||
// are supplied.
|
||||
func TestSSLKEYRWithSuppliedKeyAndCert(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
|
||||
	// Execute runmqsc to display the qmgr SSLKEYR and CERTLABL attributes.
	// Search the console output for the expected values.
|
||||
_, sslkeyROutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||
if !strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") || !strings.Contains(sslkeyROutput, "CERTLABL(default)") {
|
||||
// Although queue manager is ready, it may be that MQSC scripts have not been applied yet.
|
||||
// Hence wait for a second and retry a few times before giving up.
|
||||
waitCount := 30
|
||||
var i int
|
||||
for i = 0; i < waitCount; i++ {
|
||||
time.Sleep(1 * time.Second)
|
||||
_, sslkeyROutput = execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||
if strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") && strings.Contains(sslkeyROutput, "CERTLABL(default)") {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Failed to get the expected output? Dump the contents of the MQSC files.
|
||||
if i == waitCount {
|
||||
_, tls15mqsc := execContainer(t, cli, ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
||||
_, autoMQSC := execContainer(t, cli, ID, "", []string{"cat", "/mnt/mqm/data/qmgrs/QM1/autocfg/cached.mqsc"})
|
||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\" \n AutoConfig MQSC file contents %v\n 15-tls: %v", sslkeyROutput, autoMQSC, tls15mqsc)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// Test with CA cert
|
||||
func TestSSLKEYRWithCACert(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDirWithCA(t, false) + ":/etc/mqm/pki/keys/QM1CA",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
// Assign a random port for the web server on the host
|
||||
var binding ce.PortBinding
|
||||
ports := []int{9443}
|
||||
for _, p := range ports {
|
||||
port := fmt.Sprintf("%v/tcp", p)
|
||||
binding = ce.PortBinding{
|
||||
ContainerPort: port,
|
||||
HostIP: "0.0.0.0",
|
||||
}
|
||||
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
|
||||
	// Execute runmqsc to display the qmgr SSLKEYR and CERTLABL attributes.
	// Search the console output for the expected values.
|
||||
_, sslkeyROutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||
|
||||
if !strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||
// Although queue manager is ready, it may be that MQSC scripts have not been applied yet.
|
||||
// Hence wait for a second and retry a few times before giving up.
|
||||
waitCount := 30
|
||||
var i int
|
||||
for i = 0; i < waitCount; i++ {
|
||||
time.Sleep(1 * time.Second)
|
||||
_, sslkeyROutput = execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||
if strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Failed to get the expected output? Dump the contents of the MQSC files.
|
||||
if i == waitCount {
|
||||
_, tls15mqsc := execContainer(t, cli, ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
||||
_, autoMQSC := execContainer(t, cli, ID, "", []string{"cat", "/mnt/mqm/data/qmgrs/QM1/autocfg/cached.mqsc"})
|
||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"\n AutoConfig MQSC file contents %v\n 15-tls: %v", sslkeyROutput, autoMQSC, tls15mqsc)
|
||||
}
|
||||
}
|
||||
|
||||
if !strings.Contains(sslkeyROutput, "CERTLABL(QM1CA)") {
|
||||
_, autoMQSC := execContainer(t, cli, ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
||||
t.Errorf("Expected CERTLABL to be 'QM1CA' but it is not; got \"%v\" \n MQSC File contents %v", sslkeyROutput, autoMQSC)
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// Verifies SSLFIPS is set to NO if MQ_ENABLE_FIPS=false
|
||||
func TestSSLFIPSNO(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
"MQ_ENABLE_FIPS=false",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
|
||||
	// Execute runmqsc to display the qmgr SSLKEYR, SSLFIPS and CERTLABL attributes.
	// Search the console output for the expected values.
|
||||
_, sslFIPSOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
|
||||
if !strings.Contains(sslFIPSOutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
if !strings.Contains(sslFIPSOutput, "CERTLABL(default)") {
|
||||
t.Errorf("Expected CERTLABL to be 'default' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
if !strings.Contains(sslFIPSOutput, "SSLFIPS(NO)") {
|
||||
t.Errorf("Expected SSLFIPS to be 'NO' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// Verifies SSLFIPS is set to YES if certificates for queue manager
|
||||
// are supplied and MQ_ENABLE_FIPS=true
|
||||
func TestSSLFIPSYES(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
appPassword := "differentPassw0rd"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_APP_PASSWORD=" + appPassword,
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
"MQ_ENABLE_FIPS=true",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
var binding ce.PortBinding
|
||||
ports := []int{1414}
|
||||
for _, p := range ports {
|
||||
port := fmt.Sprintf("%v/tcp", p)
|
||||
binding = ce.PortBinding{
|
||||
ContainerPort: port,
|
||||
HostIP: "0.0.0.0",
|
||||
}
|
||||
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
|
||||
// Check for expected message on container log
|
||||
logs := inspectLogs(t, cli, ID)
|
||||
if !strings.Contains(logs, "FIPS cryptography is enabled.") {
|
||||
t.Errorf("Expected 'FIPS cryptography is enabled.' but got %v\n", logs)
|
||||
}
|
||||
|
||||
	// Execute runmqsc to display the qmgr SSLKEYR, SSLFIPS and CERTLABL attributes.
	// Search the console output for the expected values.
|
||||
_, sslFIPSOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
|
||||
if !strings.Contains(sslFIPSOutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
if !strings.Contains(sslFIPSOutput, "CERTLABL(default)") {
|
||||
t.Errorf("Expected CERTLABL to be 'default' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
if !strings.Contains(sslFIPSOutput, "SSLFIPS(YES)") {
|
||||
t.Errorf("Expected SSLFIPS to be 'YES' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
t.Run("JMS", func(t *testing.T) {
|
||||
// Run the JMS tests, with no password specified
|
||||
runJMSTests(t, cli, ID, true, "app", appPassword, "false", "*TLS12ORHIGHER")
|
||||
})
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// TestDevSecureFIPSTrueWeb verifies that the MQ Web Server is running in FIPS mode
|
||||
func TestDevSecureFIPSTrueWeb(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
const tlsPassPhrase string = "passw0rd"
|
||||
qm := "qm1"
|
||||
appPassword := "differentPassw0rd"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=" + qm,
|
||||
"MQ_CONNAUTH_USE_HTP=true",
|
||||
"MQ_APP_PASSWORD=" + appPassword,
|
||||
"MQ_ADMIN_PASSWORD=" + defaultAdminPassword,
|
||||
"DEBUG=1",
|
||||
"WLP_LOGGING_MESSAGE_FORMAT=JSON",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER_LOG=true",
|
||||
"MQ_ENABLE_FIPS=true",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/trust/default",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
// Assign a random port for the web server on the host
|
||||
// TODO: Don't do this for all tests
|
||||
var binding ce.PortBinding
|
||||
ports := []int{9443}
|
||||
for _, p := range ports {
|
||||
port := fmt.Sprintf("%v/tcp", p)
|
||||
binding = ce.PortBinding{
|
||||
ContainerPort: port,
|
||||
HostIP: "0.0.0.0",
|
||||
}
|
||||
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
cert := filepath.Join(tlsDir(t, true), "server.crt")
|
||||
waitForWebReady(t, cli, ID, createTLSConfig(t, cert, tlsPassPhrase))
|
||||
|
||||
// Create a TLS Config with a cipher to use when connecting over HTTPS
|
||||
var secureTLSConfig *tls.Config = createTLSConfig(t, cert, tlsPassPhrase, withMinTLSVersion(tls.VersionTLS12))
|
||||
// Put a message to queue
|
||||
t.Run("REST messaging", func(t *testing.T) {
|
||||
testRESTMessaging(t, cli, ID, secureTLSConfig, qm, "app", appPassword, "")
|
||||
})
|
||||
|
||||
// Create a TLS Config with a non-FIPS cipher to use when connecting over HTTPS
|
||||
var secureNonFIPSCipherConfig *tls.Config = createTLSConfig(t, cert, tlsPassPhrase, withMinTLSVersion(tls.VersionTLS12))
|
||||
// Put a message to the queue - the attempt to put the message will fail with an EOF return message.
|
||||
t.Run("REST messaging", func(t *testing.T) {
|
||||
testRESTMessaging(t, cli, ID, secureNonFIPSCipherConfig, qm, "app", appPassword, "EOF")
|
||||
})
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// TestDevSecureFalseFIPSWeb verifies that the MQ Web Server is not running in FIPS mode
|
||||
func TestDevSecureFalseFIPSWeb(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
const tlsPassPhrase string = "passw0rd"
|
||||
qm := "qm1"
|
||||
appPassword := "differentPassw0rd"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=" + qm,
|
||||
"MQ_CONNAUTH_USE_HTP=true",
|
||||
"MQ_APP_PASSWORD=" + appPassword,
|
||||
"MQ_ADMIN_PASSWORD=" + defaultAdminPassword,
|
||||
"DEBUG=1",
|
||||
"WLP_LOGGING_MESSAGE_FORMAT=JSON",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER_LOG=true",
|
||||
"MQ_ENABLE_FIPS=false",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/trust/default",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
// Assign a random port for the web server on the host
|
||||
var binding ce.PortBinding
|
||||
ports := []int{9443}
|
||||
for _, p := range ports {
|
||||
port := fmt.Sprintf("%v/tcp", p)
|
||||
binding = ce.PortBinding{
|
||||
ContainerPort: port,
|
||||
HostIP: "0.0.0.0",
|
||||
}
|
||||
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
|
||||
cert := filepath.Join(tlsDir(t, true), "server.crt")
|
||||
waitForWebReady(t, cli, ID, createTLSConfig(t, cert, tlsPassPhrase))
|
||||
|
||||
	// As FIPS is not enabled, the MQ Web Server (actually Java) will choose a JSSE provider from the list
	// specified in the java.security file. We would need to enable java.net.debug and then parse the web server
	// logs to check which JSSE provider is being used. Hence just check that the jvm.options file does not
	// contain the -Dcom.ibm.jsse2.usefipsprovider line.
|
||||
_, jvmOptionsOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "cat /var/mqm/web/installations/Installation1/servers/mqweb/configDropins/defaults/jvm.options"})
|
||||
if strings.Contains(jvmOptionsOutput, "-Dcom.ibm.jsse2.usefipsprovider") {
|
||||
t.Errorf("Did not expect -Dcom.ibm.jsse2.usefipsprovider but it is not; got \"%v\"", jvmOptionsOutput)
|
||||
}
|
||||
|
||||
// Just do a HTTPS GET as well to query installation details.
|
||||
var secureTLSConfig *tls.Config = createTLSConfig(t, cert, tlsPassPhrase, withMinTLSVersion(tls.VersionTLS12))
|
||||
t.Run("REST admin", func(t *testing.T) {
|
||||
testRESTAdmin(t, cli, ID, secureTLSConfig, "")
|
||||
})
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// Verify SSLFIPS is set to NO if no certificates were supplied
|
||||
func TestSSLFIPSTrueNoCerts(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
appPassword := "differentPassw0rd"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_CONNAUTH_USE_HTP=true",
|
||||
"MQ_APP_PASSWORD=" + appPassword,
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
"MQ_ENABLE_FIPS=true",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
|
||||
	// Execute runmqsc to display the qmgr SSLKEYR, SSLFIPS and CERTLABL attributes.
	// Search the console output for the expected values.
|
||||
_, sslFIPSOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
|
||||
if !strings.Contains(sslFIPSOutput, "SSLKEYR( )") {
|
||||
t.Errorf("Expected SSLKEYR to be ' ' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
if !strings.Contains(sslFIPSOutput, "CERTLABL( )") {
|
||||
t.Errorf("Expected CERTLABL to be blank but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
if !strings.Contains(sslFIPSOutput, "SSLFIPS(NO)") {
|
||||
t.Errorf("Expected SSLFIPS to be 'NO' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// Verifies SSLFIPS is set to NO if MQ_ENABLE_FIPS=tru (invalid value)
|
||||
func TestSSLFIPSInvalidValue(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
"MQ_ENABLE_FIPS=tru",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
waitForReady(t, cli, ID)
|
||||
|
||||
	// Execute runmqsc to display the qmgr SSLKEYR, SSLFIPS and CERTLABL attributes.
	// Search the console output for the expected values.
|
||||
_, sslFIPSOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
|
||||
if !strings.Contains(sslFIPSOutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
if !strings.Contains(sslFIPSOutput, "CERTLABL(default)") {
|
||||
t.Errorf("Expected CERTLABL to be 'default' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
if !strings.Contains(sslFIPSOutput, "SSLFIPS(NO)") {
|
||||
t.Errorf("Expected SSLFIPS to be 'NO' but it is not; got \"%v\"", sslFIPSOutput)
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// Container creation fails when invalid certs are passed and MQ_ENABLE_FIPS set true
|
||||
func TestSSLFIPSBadCerts(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||
"MQ_ENABLE_FIPS=true",
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDirInvalid(t, false) + ":/etc/mqm/pki/keys/default",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, ID)
|
||||
startContainer(t, cli, ID)
|
||||
|
||||
rc := waitForContainer(t, cli, ID, 20*time.Second)
|
||||
// Expect return code 1 if container failed to create.
|
||||
if rc == 1 {
|
||||
// Get container logs and search for specific message.
|
||||
logs := inspectLogs(t, cli, ID)
|
||||
if strings.Contains(logs, "Failed to parse private key") {
|
||||
t.Logf("Container creating failed because of invalid certifates")
|
||||
}
|
||||
} else {
|
||||
// Some other error occurred.
|
||||
t.Errorf("Expected rc=0, got rc=%v", rc)
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, ID)
|
||||
}
|
||||
|
||||
// Test REST messaging with default developer configuration
|
||||
// MQ_CONNAUTH_USE_HTP is set to true in the dev image. The test
|
||||
// specifies password for admin userId via MQ_ADMIN_PASSWORD
|
||||
// environment variable but then attempts to do REST messaging
|
||||
// using the 'app' userId. HTTP 401 is expected.
|
||||
func TestDevNoDefCreds(t *testing.T) {
|
||||
t.Parallel()
|
||||
cli := ce.NewContainerClient()
|
||||
qm := "qm1"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=" + qm,
|
||||
"DEBUG=true",
|
||||
"MQ_ADMIN_PASSWORD=" + defaultAdminPassword,
|
||||
},
|
||||
}
|
||||
id := runContainerWithPorts(t, cli, &containerConfig, []int{9443, 1414})
|
||||
defer cleanContainer(t, cli, id)
|
||||
waitForReady(t, cli, id)
|
||||
waitForWebReady(t, cli, id, insecureTLSConfig)
|
||||
// Expect a 401 Unauthorized HTTP Response
|
||||
testRESTMessaging(t, cli, id, insecureTLSConfig, qm, "app", defaultAppPasswordWeb, "401 Unauthorized")
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
// MQ_CONNAUTH_USE_HTP is set to false. There should be no 'mqsimpleauth:' entries in pod log
|
||||
// even though MQ_ADMIN_PASSWORD is also specified.
|
||||
func TestDevNoDefCredsLogMessageConnAuthFalse(t *testing.T) {
|
||||
t.Parallel()
|
||||
testDevNoDefaultCredsUtil(t, []string{"MQ_CONNAUTH_USE_HTP=false", "MQ_ADMIN_PASSWORD=passw0rd"}, false)
|
||||
}
|
||||
|
||||
// MQ_CONNAUTH_USE_HTP is true with neither Admin nor App password specified,
|
||||
// so there should be no 'mqsimpleauth:' entries in the pod log
|
||||
func TestDevNoDefCredsLogMessageConnAuthTrue(t *testing.T) {
|
||||
t.Parallel()
|
||||
testDevNoDefaultCredsUtil(t, []string{"MQ_CONNAUTH_USE_HTP=true"}, false)
|
||||
}
|
||||
|
||||
// MQ_CONNAUTH_USE_HTP is true with App password specified,
|
||||
// there should be at least one 'mqsimpleauth:' entry in the pod log
|
||||
func TestDevNoDefCredsLogMessageConnAuthTrueWithPwd(t *testing.T) {
|
||||
t.Parallel()
|
||||
testDevNoDefaultCredsUtil(t, []string{"MQ_CONNAUTH_USE_HTP=true", "MQ_APP_PASSWORD=passw0rd"}, true)
|
||||
}
|
||||
|
||||
// Utility function for testing mqsimpleauth
|
||||
func testDevNoDefaultCredsUtil(t *testing.T, mqsimpleauthEnvs []string, htpwdInLog bool) {
|
||||
cli := ce.NewContainerClient()
|
||||
qm := "QM1"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=" + qm,
|
||||
"DEBUG=true",
|
||||
},
|
||||
}
|
||||
|
||||
containerConfig.Env = append(containerConfig.Env, mqsimpleauthEnvs...)
|
||||
|
||||
id := runContainerWithPorts(t, cli, &containerConfig, []int{1414})
|
||||
defer cleanContainer(t, cli, id)
|
||||
waitForReady(t, cli, id)
|
||||
defer stopContainer(t, cli, id)
|
||||
|
||||
logs := inspectLogs(t, cli, id)
|
||||
if htpwdInLog {
|
||||
if !strings.Contains(logs, "mqsimpleauth:") {
|
||||
t.Errorf("Exepcted mqsimpleauth keyword in pod logs but did not find any.")
|
||||
}
|
||||
} else {
|
||||
if strings.Contains(logs, "mqsimpleauth:") {
|
||||
t.Errorf("Didn't exepct mqsimpleauth keyword in pod logs but found at least one.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test REST messaging with default developer configuration
|
||||
// MQ_CONNAUTH_USE_HTP is set to true in the dev image with
|
||||
// read only root filesystem enabled. The test
|
||||
// specifies a password for the admin userId via the MQ_ADMIN_PASSWORD
|
||||
// environment variable but then attempts to do REST messaging
|
||||
// using the 'app' userId. HTTP 401 is expected.
|
||||
func TestRORFSDevNoAppPassword(t *testing.T) {
|
||||
t.Parallel()
|
||||
cli := ce.NewContainerClient()
|
||||
qm := "QM1"
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=" + qm,
|
||||
"DEBUG=true",
|
||||
"MQ_CONNAUTH_USE_HTP=true",
|
||||
"MQ_ADMIN_PASSWORD=" + defaultAdminPassword,
|
||||
},
|
||||
Image: imageName(),
|
||||
}
|
||||
|
||||
// Create volumes for mounting into container
|
||||
ephData := createVolume(t, cli, "ephData"+t.Name())
|
||||
defer removeVolume(t, cli, ephData)
|
||||
ephRun := createVolume(t, cli, "ephRun"+t.Name())
|
||||
defer removeVolume(t, cli, ephRun)
|
||||
ephTmp := createVolume(t, cli, "ephTmp"+t.Name())
|
||||
defer removeVolume(t, cli, ephTmp)
|
||||
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
ephRun + ":/run",
|
||||
ephTmp + ":/tmp",
|
||||
ephData + ":/mnt/mqm",
|
||||
},
|
||||
ReadOnlyRootfs: true, // Enable read-only root filesystem
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
// Assign a random port for the web server on the host
|
||||
var binding ce.PortBinding
|
||||
ports := []int{9443}
|
||||
for _, p := range ports {
|
||||
port := fmt.Sprintf("%v/tcp", p)
|
||||
binding = ce.PortBinding{
|
||||
ContainerPort: port,
|
||||
HostIP: "0.0.0.0",
|
||||
}
|
||||
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
id, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainer(t, cli, id)
|
||||
startContainer(t, cli, id)
|
||||
|
||||
waitForReady(t, cli, id)
|
||||
waitForWebReady(t, cli, id, insecureTLSConfig)
|
||||
// Expect a 401 Unauthorized HTTP Response
|
||||
testRESTMessaging(t, cli, id, insecureTLSConfig, qm, "app", defaultAppPasswordWeb, "401 Unauthorized")
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
345
test/container/devconfig_test_util.go
Normal file
345
test/container/devconfig_test_util.go
Normal file
@@ -0,0 +1,345 @@
|
||||
//go:build mqdev
|
||||
// +build mqdev
|
||||
|
||||
/*
|
||||
© Copyright IBM Corporation 2018, 2024
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
"github.com/ibm-messaging/mq-container/test/container/pathutils"
|
||||
)
|
||||
|
||||
const defaultAdminPassword string = "passw0rd"
|
||||
const defaultAppPasswordOS string = "passw0rd"
|
||||
const defaultAppPasswordWeb string = "passw0rd"
|
||||
|
||||
// Disable TLS verification (server uses a self-signed certificate by default,
|
||||
// so verification isn't useful anyway)
|
||||
var insecureTLSConfig *tls.Config = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
func waitForWebReady(t *testing.T, cli ce.ContainerInterface, ID string, tlsConfig *tls.Config) {
|
||||
t.Logf("%s Waiting for web server to be ready", time.Now().Format(time.RFC3339))
|
||||
httpClient := http.Client{
|
||||
Timeout: time.Duration(10 * time.Second),
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
port, err := cli.GetContainerPort(ID, 9443)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/admin/installation", port)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
req.SetBasicAuth("admin", defaultAdminPassword)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
t.Logf("%s MQ web server is ready", time.Now().Format(time.RFC3339))
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatalf("%s Timed out waiting for web server to become ready", time.Now().Format(time.RFC3339))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tlsDir returns the host directory where the test certificate(s) are located
|
||||
func tlsDir(t *testing.T, unixPath bool) string {
|
||||
return pathutils.CleanPath(filepath.Dir(getCwd(t, unixPath)), "../tls")
|
||||
}
|
||||
|
||||
func tlsDirWithCA(t *testing.T, unixPath bool) string {
|
||||
return pathutils.CleanPath(filepath.Dir(getCwd(t, unixPath)), "../tlscacert")
|
||||
}
|
||||
|
||||
func tlsDirInvalid(t *testing.T, unixPath bool) string {
|
||||
return pathutils.CleanPath(filepath.Dir(getCwd(t, unixPath)), "../tlsinvalidcert")
|
||||
}
|
||||
|
||||
// runJMSTests runs a container with a JMS client, which connects to the queue manager container with the specified ID
|
||||
func runJMSTests(t *testing.T, cli ce.ContainerInterface, ID string, tls bool, user, password string, ibmjre string, cipherName string) {
|
||||
port, err := cli.GetContainerPort(ID, 1414)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
containerConfig := ce.ContainerConfig{
|
||||
// -e MQ_PORT_1414_TCP_ADDR=9.145.14.173 -e MQ_USERNAME=app -e MQ_PASSWORD=passw0rd -e MQ_CHANNEL=DEV.APP.SVRCONN -e MQ_TLS_TRUSTSTORE=/tls/test.p12 -e MQ_TLS_PASSPHRASE=passw0rd -v /Users/arthurbarr/go/src/github.com/ibm-messaging/mq-container/test/tls:/tls msgtest
|
||||
Env: []string{
|
||||
"MQ_PORT_1414_TCP_ADDR=127.0.0.1",
|
||||
"MQ_PORT_1414_OVERRIDE=" + port,
|
||||
"MQ_USERNAME=" + user,
|
||||
"MQ_CHANNEL=DEV.APP.SVRCONN",
|
||||
"IBMJRE=" + ibmjre,
|
||||
},
|
||||
Image: imageNameDevJMS(),
|
||||
}
|
||||
// Set a password for the client to use, if one is specified
|
||||
if password != "" {
|
||||
containerConfig.Env = append(containerConfig.Env, "MQ_PASSWORD="+password)
|
||||
}
|
||||
if tls {
|
||||
t.Log("Using TLS from JMS client")
|
||||
containerConfig.Env = append(containerConfig.Env, []string{
|
||||
"MQ_TLS_TRUSTSTORE=/var/tls/client-trust.jks",
|
||||
"MQ_TLS_PASSPHRASE=passw0rd",
|
||||
"MQ_TLS_CIPHER=" + cipherName,
|
||||
}...)
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
tlsDir(t, false) + ":/var/tls",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
networkingConfig := ce.ContainerNetworkSettings{
|
||||
Networks: []string{"host"},
|
||||
}
|
||||
jmsID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, strings.Replace(t.Name()+"JMS", "/", "", -1))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
startContainer(t, cli, jmsID)
|
||||
rc := waitForContainer(t, cli, jmsID, 2*time.Minute)
|
||||
if rc != 0 {
|
||||
t.Errorf("JUnit container failed with rc=%v", rc)
|
||||
}
|
||||
|
||||
// Get console output of the container and process the lines
|
||||
// to see if we have any failures
|
||||
scanner := bufio.NewScanner(strings.NewReader(inspectLogs(t, cli, jmsID)))
|
||||
for scanner.Scan() {
|
||||
s := scanner.Text()
|
||||
if processJunitLogLine(s) {
|
||||
t.Errorf("JUnit container tests failed. Reason: %s", s)
|
||||
}
|
||||
}
|
||||
|
||||
defer cleanContainer(t, cli, jmsID)
|
||||
}
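// Illustrative sketch (not part of the original test suite): a typical invocation of
// runJMSTests against an already-running queue manager container. The user, password
// and cipher suite name here are example values only, and "IBMJRE=false" is simply
// passed through to the client container.
func exampleRunJMSTests(t *testing.T, cli ce.ContainerInterface, qmID string) {
// Run the JMS client container with TLS enabled against the queue manager container qmID
runJMSTests(t, cli, qmID, true, "app", defaultAppPasswordOS, "false", "TLS_RSA_WITH_AES_128_CBC_SHA256")
}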
|
||||
|
||||
// Parse JUnit log line and return true if line contains failed or aborted tests
|
||||
func processJunitLogLine(outputLine string) bool {
|
||||
var failedLine bool
|
||||
// Sample JUnit test run output
|
||||
//[ 2 containers found ]
|
||||
//[ 0 containers skipped ]
|
||||
//[ 2 containers started ]
|
||||
//[ 0 containers aborted ]
|
||||
//[ 2 containers successful ]
|
||||
//[ 0 containers failed ]
|
||||
//[ 0 tests found ]
|
||||
//[ 0 tests skipped ]
|
||||
//[ 0 tests started ]
|
||||
//[ 0 tests aborted ]
|
||||
//[ 0 tests successful ]
|
||||
//[ 0 tests failed ]
|
||||
|
||||
// Consider only those lines that begin with '[' and end with ']'
|
||||
if strings.HasPrefix(outputLine, "[") && strings.HasSuffix(outputLine, "]") {
|
||||
// Strip off [] and whitespaces
|
||||
trimmed := strings.Trim(outputLine, "[] ")
|
||||
if strings.Contains(trimmed, "aborted") || strings.Contains(trimmed, "failed") {
|
||||
// Tokenize on whitespace
|
||||
tokens := strings.Split(trimmed, " ")
|
||||
// Determine the count of aborted or failed tests
|
||||
count, err := strconv.Atoi(tokens[0])
|
||||
if err == nil {
|
||||
if count > 0 {
|
||||
failedLine = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return failedLine
|
||||
}
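// Illustrative sketch (not part of the original test suite): how processJunitLogLine
// classifies a few sample lines of the JUnit summary output documented above. The
// sample strings are assumptions based on that format.
func exampleProcessJunitLogLine() {
samples := []string{
"[         2 tests successful      ]",
"[         1 tests failed          ]",
"plain console output is ignored",
}
for _, s := range samples {
// Only the "1 tests failed" line should be reported as a failing line
fmt.Printf("%q -> failed=%v\n", s, processJunitLogLine(s))
}
}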
|
||||
|
||||
// createTLSConfig creates a tls.Config which trusts the specified certificate
|
||||
func createTLSConfig(t *testing.T, certFile, password string, tlsConfigOptions ...tlsConfigOption) *tls.Config {
|
||||
// Get the SystemCertPool; fail the test if it cannot be loaded
|
||||
certs, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Read in the cert file
|
||||
cert, err := os.ReadFile(certFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Append our cert to the system pool
|
||||
ok := certs.AppendCertsFromPEM(cert)
|
||||
if !ok {
|
||||
t.Fatal("No certs appended")
|
||||
}
|
||||
// Trust the augmented cert pool in our client
|
||||
config := &tls.Config{
|
||||
InsecureSkipVerify: false,
|
||||
RootCAs: certs,
|
||||
}
|
||||
// Apply any additional config options
|
||||
for _, applyOpt := range tlsConfigOptions {
|
||||
applyOpt(config)
|
||||
}
|
||||
return config
|
||||
}
|
||||
|
||||
func testRESTAdmin(t *testing.T, cli ce.ContainerInterface, ID string, tlsConfig *tls.Config, errorExpected string) {
|
||||
httpClient := http.Client{
|
||||
Timeout: time.Duration(30 * time.Second),
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
port, err := cli.GetContainerPort(ID, 9443)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/admin/installation", port)
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
req.SetBasicAuth("admin", defaultAdminPassword)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
if len(errorExpected) > 0 {
|
||||
if !strings.Contains(err.Error(), errorExpected) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if resp != nil && resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Expected HTTP status code %v from 'GET installation'; got %v", http.StatusOK, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
|
||||
// curl -i -k https://localhost:1234/ibmmq/rest/v1/messaging/qmgr/qm1/queue/DEV.QUEUE.1/message -X POST -u app -H "ibm-mq-rest-csrf-token: N/A" -H "Content-Type: text/plain;charset=utf-8" -d "Hello World"
|
||||
|
||||
func logHTTPRequest(t *testing.T, req *http.Request) {
|
||||
d, err := httputil.DumpRequestOut(req, true)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
t.Logf("HTTP request: %v", string(d))
|
||||
}
|
||||
|
||||
func logHTTPResponse(t *testing.T, resp *http.Response) {
|
||||
d, err := httputil.DumpResponse(resp, true)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
t.Logf("HTTP response: %v", string(d))
|
||||
}
|
||||
|
||||
func testRESTMessaging(t *testing.T, cli ce.ContainerInterface, ID string, tlsConfig *tls.Config, qmName string, user string, password string, errorExpected string) {
|
||||
httpClient := http.Client{
|
||||
Timeout: time.Duration(30 * time.Second),
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
},
|
||||
}
|
||||
q := "DEV.QUEUE.1"
|
||||
port, err := cli.GetContainerPort(ID, 9443)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/messaging/qmgr/%s/queue/%s/message", port, qmName, q)
|
||||
putMessage := []byte("Hello")
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(putMessage))
|
||||
req.SetBasicAuth(user, password)
|
||||
req.Header.Add("ibm-mq-rest-csrf-token", "n/a")
|
||||
req.Header.Add("Content-Type", "text/plain;charset=utf-8")
|
||||
logHTTPRequest(t, req)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
if len(errorExpected) > 0 {
|
||||
if strings.Contains(err.Error(), errorExpected) {
|
||||
t.Logf("Error contains expected '%s' value", errorExpected)
|
||||
return
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
logHTTPResponse(t, resp)
|
||||
if resp != nil && resp.StatusCode != http.StatusCreated {
|
||||
if strings.Contains(resp.Status, errorExpected) {
|
||||
t.Logf("HTTP Response code is as expected. %s", resp.Status)
|
||||
return
|
||||
} else {
|
||||
t.Errorf("Expected HTTP status code %v from 'POST to queue'; got %v", http.StatusOK, resp.StatusCode)
|
||||
t.Logf("HTTP response: %+v", resp)
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
req, err = http.NewRequest("DELETE", url, nil)
|
||||
req.Header.Add("ibm-mq-rest-csrf-token", "n/a")
|
||||
req.SetBasicAuth(user, password)
|
||||
logHTTPRequest(t, req)
|
||||
resp, err = httpClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
logHTTPResponse(t, resp)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
t.Errorf("Expected HTTP status code %v from 'DELETE from queue'; got %v", http.StatusOK, resp.StatusCode)
|
||||
t.Logf("HTTP response: %+v", resp)
|
||||
t.Fail()
|
||||
}
|
||||
gotMessage, err := io.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
|
||||
if string(gotMessage) != string(putMessage) {
|
||||
t.Errorf("Expected payload to be \"%s\"; got \"%s\"", putMessage, gotMessage)
|
||||
}
|
||||
}
|
||||
|
||||
type tlsConfigOption func(*tls.Config)
|
||||
|
||||
// withMinTLSVersion is a functional option to set the minimum version for TLS
|
||||
func withMinTLSVersion(version uint16) tlsConfigOption {
|
||||
return func(cfg *tls.Config) {
|
||||
cfg.MinVersion = version
|
||||
}
|
||||
}
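// Illustrative sketch (not part of the original test suite): combining createTLSConfig
// with the withMinTLSVersion functional option to build an HTTPS client for the web
// server. The certificate file name and password are hypothetical values.
func exampleCreateTLSConfigUsage(t *testing.T) {
certFile := pathutils.CleanPath(tlsDir(t, false), "server.crt") // hypothetical file name
tlsConfig := createTLSConfig(t, certFile, "passw0rd", withMinTLSVersion(tls.VersionTLS12))
httpClient := http.Client{
Timeout: 10 * time.Second,
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
}
_ = httpClient // the client would then be used for requests like those in testRESTAdmin
}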
|
||||
2209
test/container/docker_api_test.go
Normal file
2209
test/container/docker_api_test.go
Normal file
File diff suppressed because it is too large
889
test/container/docker_api_test_util.go
Normal file
889
test/container/docker_api_test_util.go
Normal file
@@ -0,0 +1,889 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2017, 2024
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
"github.com/ibm-messaging/mq-container/test/container/pathutils"
|
||||
)
|
||||
|
||||
func imageName() string {
|
||||
image, ok := os.LookupEnv("TEST_IMAGE")
|
||||
if !ok {
|
||||
image = "mq-devserver:latest-x86-64"
|
||||
}
|
||||
return image
|
||||
}
|
||||
|
||||
func imageNameDevJMS() string {
|
||||
image, ok := os.LookupEnv("DEV_JMS_IMAGE")
|
||||
if !ok {
|
||||
image = "mq-dev-jms-test"
|
||||
}
|
||||
return image
|
||||
}
|
||||
|
||||
// baseImage returns the ID of the underlying base image (e.g. "ubuntu" or "rhel")
|
||||
func baseImage(t *testing.T, cli ce.ContainerInterface) string {
|
||||
rc, out := runContainerOneShot(t, cli, "grep", "^ID=", "/etc/os-release")
|
||||
if rc != 0 {
|
||||
t.Fatal("Couldn't determine base image")
|
||||
}
|
||||
s := strings.Split(out, "=")
|
||||
if len(s) < 2 {
|
||||
t.Fatal("Couldn't determine base image string")
|
||||
}
|
||||
return s[1]
|
||||
}
|
||||
|
||||
// devImage returns true if the image under test is a developer image,
|
||||
// determined by use of the MQ_ADMIN_PASSWORD environment variable
|
||||
func devImage(t *testing.T, cli ce.ContainerInterface) bool {
|
||||
rc, _ := runContainerOneShot(t, cli, "printenv", "MQ_ADMIN_PASSWORD")
|
||||
if rc == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isWSL returns whether we are running in the Windows Subsystem for Linux
|
||||
func isWSL(t *testing.T) bool {
|
||||
if runtime.GOOS == "linux" {
|
||||
uname, err := exec.Command("uname", "-r").Output()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return strings.Contains(string(uname), "Microsoft")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isARM returns whether we are running on an arm64 machine (such as Apple silicon macOS)
|
||||
func isARM(t *testing.T) bool {
|
||||
return runtime.GOARCH == "arm64"
|
||||
}
|
||||
|
||||
// getCwd returns the working directory, in an os-specific or UNIX form
|
||||
func getCwd(t *testing.T, unixPath bool) string {
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if isWSL(t) {
|
||||
// Check if the cwd is a symlink
|
||||
dir, err = filepath.EvalSymlinks(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !unixPath {
|
||||
dir = strings.Replace(dir, getWindowsRoot(true), getWindowsRoot(false), 1)
|
||||
}
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// getWindowsRoot gets the path of the root directory on Windows, in UNIX or OS-specific style
|
||||
func getWindowsRoot(unixStylePath bool) string {
|
||||
if unixStylePath {
|
||||
return "/mnt/c/"
|
||||
}
|
||||
return "C:/"
|
||||
}
|
||||
|
||||
func coverage() bool {
|
||||
cover := os.Getenv("TEST_COVER")
|
||||
if cover == "true" || cover == "1" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// coverageDir returns the host directory to use for code coverage data
|
||||
func coverageDir(t *testing.T, unixStylePath bool) string {
|
||||
return pathutils.CleanPath(getCwd(t, unixStylePath), "coverage")
|
||||
}
|
||||
|
||||
// coverageBind returns a string to use to add a bind-mounted directory for code coverage data
|
||||
func coverageBind(t *testing.T) string {
|
||||
return coverageDir(t, false) + ":/var/coverage"
|
||||
}
|
||||
|
||||
func addCoverageBindIfAvailable(t *testing.T, cfg *ce.ContainerHostConfig) {
|
||||
info, err := os.Stat(coverageDir(t, false))
|
||||
if err != nil || !info.IsDir() {
|
||||
return
|
||||
}
|
||||
cfg.Binds = append(cfg.Binds, coverageBind(t))
|
||||
}
|
||||
|
||||
// getTempDir gets the path of the tmp directory, in UNIX or OS-specific style
|
||||
func getTempDir(t *testing.T, unixStylePath bool) string {
|
||||
if isWSL(t) {
|
||||
return getWindowsRoot(unixStylePath) + "Temp/"
|
||||
}
|
||||
return "/tmp/"
|
||||
}
|
||||
|
||||
// terminationMessage returns the termination message, or an empty string if not set
|
||||
func terminationMessage(t *testing.T, cli ce.ContainerInterface, ID string) string {
|
||||
r, err := cli.CopyFromContainer(ID, "/run/termination-log")
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
t.Log(string(r))
|
||||
return ""
|
||||
}
|
||||
return string(r)
|
||||
}
|
||||
|
||||
func expectTerminationMessage(t *testing.T, cli ce.ContainerInterface, ID string) {
|
||||
m := terminationMessage(t, cli, ID)
|
||||
if m == "" {
|
||||
t.Error("Expected termination message to be set")
|
||||
}
|
||||
}
|
||||
|
||||
// logContainerDetails logs selected details about the container
|
||||
func logContainerDetails(t *testing.T, cli ce.ContainerInterface, ID string) {
|
||||
i, err := cli.ContainerInspect(ID)
|
||||
if err == nil {
|
||||
d := ce.ContainerDetailsLogging{
|
||||
ID: ID,
|
||||
Name: i.Name,
|
||||
Image: i.Image,
|
||||
Path: i.Path,
|
||||
Args: i.Args,
|
||||
CapAdd: i.HostConfig.CapAdd,
|
||||
CapDrop: i.HostConfig.CapDrop,
|
||||
User: i.Config.User,
|
||||
Env: i.Config.Env,
|
||||
}
|
||||
// If you need more details, you can always just run `json.MarshalIndent(i, "", " ")` to see everything.
|
||||
t.Logf("Container details: %+v", d)
|
||||
}
|
||||
}
|
||||
|
||||
func cleanContainerQuiet(t *testing.T, cli ce.ContainerInterface, ID string) {
|
||||
timeout := 10 * time.Second
|
||||
err := cli.ContainerStop(ID, &timeout)
|
||||
if err != nil {
|
||||
// Just log the error and continue
|
||||
t.Log(err)
|
||||
}
|
||||
opts := ce.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
}
|
||||
err = cli.ContainerRemove(ID, opts)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func cleanContainer(t *testing.T, cli ce.ContainerInterface, ID string) {
|
||||
logContainerDetails(t, cli, ID)
|
||||
t.Logf("Stopping container: %v", ID)
|
||||
timeout := 10 * time.Second
|
||||
// Stop the container. This allows the coverage output to be generated.
|
||||
err := cli.ContainerStop(ID, &timeout)
|
||||
if err != nil {
|
||||
// Just log the error and continue
|
||||
t.Log(err)
|
||||
}
|
||||
t.Log("Container stopped")
|
||||
|
||||
// If a code coverage file has been generated, then rename it to match the test name
|
||||
os.Rename(pathutils.CleanPath(coverageDir(t, true), "container.cov"), pathutils.CleanPath(coverageDir(t, true), t.Name()+".cov"))
|
||||
// Log the container output for any container we're about to delete
|
||||
t.Logf("Console log from container %v:\n%v", ID, inspectTextLogs(t, cli, ID))
|
||||
|
||||
m := terminationMessage(t, cli, ID)
|
||||
if m != "" {
|
||||
t.Logf("Termination message: %v", m)
|
||||
}
|
||||
|
||||
t.Logf("Removing container: %s", ID)
|
||||
opts := ce.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
}
|
||||
err = cli.ContainerRemove(ID, opts)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func generateRandomUID() string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
min := 1000
|
||||
max := 9999
|
||||
return fmt.Sprint(rand.Intn(max-min) + min)
|
||||
}
|
||||
|
||||
// getDefaultHostConfig creates a HostConfig and populates it with the defaults used in testing
|
||||
func getDefaultHostConfig(t *testing.T, cli ce.ContainerInterface) *ce.ContainerHostConfig {
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
PortBindings: []ce.PortBinding{},
|
||||
CapDrop: []string{
|
||||
"ALL",
|
||||
},
|
||||
Privileged: false,
|
||||
}
|
||||
if coverage() {
|
||||
hostConfig.Binds = append(hostConfig.Binds, coverageBind(t))
|
||||
}
|
||||
if devImage(t, cli) {
|
||||
// Only needed for a RHEL-based image
|
||||
if baseImage(t, cli) != "ubuntu" {
|
||||
hostConfig.CapAdd = append(hostConfig.CapAdd, "DAC_OVERRIDE")
|
||||
}
|
||||
} else {
|
||||
t.Logf("Detected MQ Advanced image - dropping all capabilities")
|
||||
}
|
||||
return &hostConfig
|
||||
}
|
||||
|
||||
// runContainerWithHostConfig creates and starts a container, using the supplied HostConfig.
|
||||
// Note that a default HostConfig can be created using getDefaultHostConfig.
|
||||
func runContainerWithHostConfig(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig, hostConfig *ce.ContainerHostConfig) string {
|
||||
if containerConfig.Image == "" {
|
||||
containerConfig.Image = imageName()
|
||||
}
|
||||
// Always run as a random user, unless the test has specified otherwise
|
||||
if containerConfig.User == "" {
|
||||
containerConfig.User = generateRandomUID()
|
||||
}
|
||||
if coverage() {
|
||||
containerConfig.Env = append(containerConfig.Env, "COVERAGE_FILE="+t.Name()+".cov")
|
||||
containerConfig.Env = append(containerConfig.Env, "EXIT_CODE_FILE="+getExitCodeFilename(t))
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
t.Logf("Running container (%s)", containerConfig.Image)
|
||||
ID, err := cli.ContainerCreate(containerConfig, hostConfig, &networkingConfig, t.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
startContainer(t, cli, ID)
|
||||
return ID
|
||||
}
|
||||
|
||||
// runContainerWithAllConfig creates and starts a container, using the supplied ContainerConfig, HostConfig,
|
||||
// NetworkingConfig, and container name (or the value of t.Name if containerName="").
|
||||
func runContainerWithAllConfig(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig, hostConfig *ce.ContainerHostConfig, networkingConfig *ce.ContainerNetworkSettings, containerName string) string {
|
||||
if containerName == "" {
|
||||
containerName = t.Name()
|
||||
}
|
||||
if containerConfig.Image == "" {
|
||||
containerConfig.Image = imageName()
|
||||
}
|
||||
// Always run as a random user, unless the test has specified otherwise
|
||||
if containerConfig.User == "" {
|
||||
containerConfig.User = generateRandomUID()
|
||||
}
|
||||
if coverage() {
|
||||
containerConfig.Env = append(containerConfig.Env, "COVERAGE_FILE="+t.Name()+".cov")
|
||||
containerConfig.Env = append(containerConfig.Env, "EXIT_CODE_FILE="+getExitCodeFilename(t))
|
||||
}
|
||||
t.Logf("Running container (%s)", containerConfig.Image)
|
||||
ID, err := cli.ContainerCreate(containerConfig, hostConfig, networkingConfig, containerName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
startContainer(t, cli, ID)
|
||||
return ID
|
||||
}
|
||||
|
||||
// runContainerWithPorts creates and starts a container, exposing the specified ports on the host.
|
||||
// If no image is specified in the container config, then the image name is retrieved from the TEST_IMAGE
|
||||
// environment variable.
|
||||
func runContainerWithPorts(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig, ports []int) string {
|
||||
hostConfig := getDefaultHostConfig(t, cli)
|
||||
var binding ce.PortBinding
|
||||
for _, p := range ports {
|
||||
port := fmt.Sprintf("%v/tcp", p)
|
||||
binding = ce.PortBinding{
|
||||
ContainerPort: port,
|
||||
HostIP: "0.0.0.0",
|
||||
}
|
||||
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||
}
|
||||
return runContainerWithHostConfig(t, cli, containerConfig, hostConfig)
|
||||
}
|
||||
|
||||
// runContainer creates and starts a container. If no image is specified in
|
||||
// the container config, then the image name is retrieved from the TEST_IMAGE
|
||||
// environment variable.
|
||||
func runContainer(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig) string {
|
||||
return runContainerWithPorts(t, cli, containerConfig, nil)
|
||||
}
|
||||
|
||||
// runContainerOneShot runs a container with a custom entrypoint, as the root
|
||||
// user and with default capabilities
|
||||
func runContainerOneShot(t *testing.T, cli ce.ContainerInterface, command ...string) (int64, string) {
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Entrypoint: command,
|
||||
User: "root",
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
t.Logf("Running one shot container (%s): %v", containerConfig.Image, command)
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name()+"OneShot")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
startOptions := ce.ContainerStartOptions{}
|
||||
err = cli.ContainerStart(ID, startOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainerQuiet(t, cli, ID)
|
||||
rc := waitForContainer(t, cli, ID, 20*time.Second)
|
||||
out := inspectLogs(t, cli, ID)
|
||||
t.Logf("One shot container finished with rc=%v, output=%v", rc, out)
|
||||
return rc, out
|
||||
}
|
||||
|
||||
// runContainerOneShotWithVolume runs a container with a custom entrypoint, as the root
|
||||
// user, with default capabilities, and a volume mounted
|
||||
func runContainerOneShotWithVolume(t *testing.T, cli ce.ContainerInterface, bind string, command ...string) (int64, string) {
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Entrypoint: command,
|
||||
User: "root",
|
||||
Image: imageName(),
|
||||
}
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
bind,
|
||||
},
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
t.Logf("Running one shot container with volume (%s): %v", containerConfig.Image, command)
|
||||
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name()+"OneShotVolume")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
startOptions := ce.ContainerStartOptions{}
|
||||
err = cli.ContainerStart(ID, startOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanContainerQuiet(t, cli, ID)
|
||||
rc := waitForContainer(t, cli, ID, 20*time.Second)
|
||||
out := inspectLogs(t, cli, ID)
|
||||
t.Logf("One shot container finished with rc=%v, output=%v", rc, out)
|
||||
return rc, out
|
||||
}
|
||||
|
||||
func startMultiVolumeQueueManager(t *testing.T, cli ce.ContainerInterface, dataVol bool, qmsharedlogs string, qmshareddata string, env []string, qmRun string, qmTmp string, readOnlyRootFs bool) (error, string, string) {
|
||||
id := strconv.FormatInt(time.Now().UnixNano(), 10)
|
||||
volume := createVolume(t, cli, id)
|
||||
containerConfig := ce.ContainerConfig{
|
||||
Image: imageName(),
|
||||
Env: env,
|
||||
}
|
||||
var hostConfig ce.ContainerHostConfig
|
||||
|
||||
if !dataVol {
|
||||
hostConfig = ce.ContainerHostConfig{}
|
||||
if readOnlyRootFs {
|
||||
hostConfig.Binds = append(hostConfig.Binds, qmRun+":/run")
|
||||
hostConfig.Binds = append(hostConfig.Binds, qmTmp+":/tmp")
|
||||
hostConfig.ReadOnlyRootfs = true
|
||||
}
|
||||
} else if qmsharedlogs == "" && qmshareddata == "" {
|
||||
hostConfig = getHostConfig(t, 1, "", "", volume, qmRun, qmTmp, readOnlyRootFs)
|
||||
} else if qmsharedlogs == "" {
|
||||
hostConfig = getHostConfig(t, 2, "", qmshareddata, volume, qmRun, qmTmp, readOnlyRootFs)
|
||||
} else if qmshareddata == "" {
|
||||
hostConfig = getHostConfig(t, 3, qmsharedlogs, "", volume, qmRun, qmTmp, readOnlyRootFs)
|
||||
} else {
|
||||
hostConfig = getHostConfig(t, 4, qmsharedlogs, qmshareddata, volume, qmRun, qmTmp, readOnlyRootFs)
|
||||
}
|
||||
networkingConfig := ce.ContainerNetworkSettings{}
|
||||
qmID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name()+id)
|
||||
if err != nil {
|
||||
return err, "", ""
|
||||
}
|
||||
startContainer(t, cli, qmID)
|
||||
|
||||
return nil, qmID, volume
|
||||
}
|
||||
|
||||
func getHostConfig(t *testing.T, mounts int, qmsharedlogs string, qmshareddata string, qmdata string, qmRun string, qmTmp string, readOnlyRootFS bool) ce.ContainerHostConfig {
|
||||
|
||||
var hostConfig ce.ContainerHostConfig
|
||||
|
||||
switch mounts {
|
||||
case 1:
|
||||
hostConfig = ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
qmdata + ":/mnt/mqm",
|
||||
},
|
||||
}
|
||||
case 2:
|
||||
hostConfig = ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
qmdata + ":/mnt/mqm",
|
||||
qmshareddata + ":/mnt/mqm-data",
|
||||
},
|
||||
}
|
||||
case 3:
|
||||
hostConfig = ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
qmdata + ":/mnt/mqm",
|
||||
qmsharedlogs + ":/mnt/mqm-log",
|
||||
},
|
||||
}
|
||||
case 4:
|
||||
hostConfig = ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
qmdata + ":/mnt/mqm",
|
||||
qmsharedlogs + ":/mnt/mqm-log",
|
||||
qmshareddata + ":/mnt/mqm-data",
|
||||
},
|
||||
}
|
||||
}
|
||||
if coverage() {
|
||||
hostConfig.Binds = append(hostConfig.Binds, coverageBind(t))
|
||||
}
|
||||
if readOnlyRootFS {
|
||||
hostConfig.Binds = append(hostConfig.Binds, qmRun+":/run")
|
||||
hostConfig.Binds = append(hostConfig.Binds, qmTmp+":/tmp")
|
||||
hostConfig.ReadOnlyRootfs = true
|
||||
}
|
||||
return hostConfig
|
||||
}
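// Illustrative sketch (not part of the original test suite): the four "mounts" modes
// of getHostConfig map to these bind combinations (volume names below are hypothetical):
//
// 1: qmdata:/mnt/mqm
// 2: qmdata:/mnt/mqm, qmshareddata:/mnt/mqm-data
// 3: qmdata:/mnt/mqm, qmsharedlogs:/mnt/mqm-log
// 4: qmdata:/mnt/mqm, qmsharedlogs:/mnt/mqm-log, qmshareddata:/mnt/mqm-data
func exampleGetHostConfig(t *testing.T) {
hostConfig := getHostConfig(t, 4, "qmsharedlogs", "qmshareddata", "qmdata", "qmrun", "qmtmp", true)
t.Logf("Binds for a fully-shared, read-only-rootfs queue manager: %v", hostConfig.Binds)
}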
|
||||
|
||||
func startContainer(t *testing.T, cli ce.ContainerInterface, ID string) {
|
||||
t.Logf("Starting container: %v", ID)
|
||||
startOptions := ce.ContainerStartOptions{}
|
||||
err := cli.ContainerStart(ID, startOptions)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func stopContainer(t *testing.T, cli ce.ContainerInterface, ID string) {
|
||||
t.Logf("Stopping container: %v", ID)
|
||||
timeout := 10 * time.Second
|
||||
err := cli.ContainerStop(ID, &timeout) //Duration(20)*time.Second)
|
||||
if err != nil {
|
||||
// Just log the error and continue
|
||||
t.Log(err)
|
||||
}
|
||||
}
|
||||
|
||||
func killContainer(t *testing.T, cli ce.ContainerInterface, ID string, signal string) {
|
||||
t.Logf("Killing container: %v", ID)
|
||||
err := cli.ContainerKill(ID, signal)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func getExitCodeFilename(t *testing.T) string {
|
||||
return t.Name() + "ExitCode"
|
||||
}
|
||||
|
||||
func getCoverageExitCode(t *testing.T, orig int64) int64 {
|
||||
f := pathutils.CleanPath(coverageDir(t, true), getExitCodeFilename(t))
|
||||
_, err := os.Stat(f)
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
return orig
|
||||
}
|
||||
// Remove the file, ready for the next test
|
||||
defer os.Remove(f)
|
||||
buf, err := os.ReadFile(f)
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
return orig
|
||||
}
|
||||
rc, err := strconv.Atoi(string(buf))
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
return orig
|
||||
}
|
||||
t.Logf("Retrieved exit code %v from file", rc)
|
||||
return int64(rc)
|
||||
}
|
||||
|
||||
// waitForContainer waits until a container has exited
|
||||
func waitForContainer(t *testing.T, cli ce.ContainerInterface, ID string, timeout time.Duration) int64 {
|
||||
c, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
t.Logf("Waiting for container for %s", timeout)
|
||||
okC, errC := cli.ContainerWait(c, ID, ce.ContainerStateNotRunning)
|
||||
var rc int64
|
||||
select {
|
||||
case err := <-errC:
|
||||
t.Fatal(err)
|
||||
case ok := <-okC:
|
||||
rc = ok
|
||||
}
|
||||
if coverage() {
|
||||
// COVERAGE: When running coverage, the exit code is written to a file,
|
||||
// to allow the coverage to be generated (which doesn't happen for non-zero
|
||||
// exit codes)
|
||||
rc = getCoverageExitCode(t, rc)
|
||||
}
|
||||
return rc
|
||||
}
|
||||
|
||||
// execContainer runs a command in a running container, and returns the exit code and output
|
||||
func execContainer(t *testing.T, cli ce.ContainerInterface, ID string, user string, cmd []string) (int, string) {
|
||||
t.Logf("Running command: %v", cmd)
|
||||
exitcode, outputStr := cli.ExecContainer(ID, user, cmd)
|
||||
return exitcode, outputStr
|
||||
}
|
||||
|
||||
func waitForReady(t *testing.T, cli ce.ContainerInterface, ID string) {
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
rc, _ := execContainer(t, cli, ID, "", []string{"chkmqready"})
|
||||
|
||||
if rc == 0 {
|
||||
t.Log("MQ is ready")
|
||||
return
|
||||
} else if rc == 10 {
|
||||
t.Log("MQ Readiness: Queue Manager Running as Standby")
|
||||
return
|
||||
} else if rc == 20 {
|
||||
t.Log("MQ Readiness: Queue Manager Running as Replica")
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("Timed out waiting for container to become ready")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createNetwork(t *testing.T, cli ce.ContainerInterface) string {
|
||||
name := "test"
|
||||
t.Logf("Creating network: %v", name)
|
||||
opts := ce.NetworkCreateOptions{}
|
||||
netID, err := cli.NetworkCreate(name, opts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("Created network %v with ID %v", name, netID)
|
||||
return netID
|
||||
}
|
||||
|
||||
func removeNetwork(t *testing.T, cli ce.ContainerInterface, ID string) {
|
||||
t.Logf("Removing network ID: %v", ID)
|
||||
err := cli.NetworkRemove(ID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func createVolume(t *testing.T, cli ce.ContainerInterface, name string) string {
|
||||
v, err := cli.VolumeCreate(ce.VolumeCreateOptions{
|
||||
Driver: "local",
|
||||
Name: name,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("Created volume %v", v)
|
||||
return v
|
||||
}
|
||||
|
||||
func removeVolume(t *testing.T, cli ce.ContainerInterface, name string) {
|
||||
t.Logf("Removing volume %v", name)
|
||||
err := cli.VolumeRemove(name, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func inspectTextLogs(t *testing.T, cli ce.ContainerInterface, ID string) string {
|
||||
jsonLogs := inspectLogs(t, cli, ID)
|
||||
scanner := bufio.NewScanner(strings.NewReader(jsonLogs))
|
||||
b := make([]byte, 64*1024)
|
||||
buf := bytes.NewBuffer(b)
|
||||
for scanner.Scan() {
|
||||
text := scanner.Text()
|
||||
if strings.HasPrefix(text, "{") {
|
||||
// If it's a JSON log message, it makes it hard to debug the test, as the JSON
|
||||
// is embedded in the long test output. So just summarize the JSON instead.
|
||||
var e map[string]interface{}
|
||||
json.Unmarshal([]byte(text), &e)
|
||||
fmt.Fprintf(buf, "{\"ibm_datetime\": \"%v\", \"message\": \"%v\", ...}\n", e["ibm_datetime"], e["message"])
|
||||
} else {
|
||||
fmt.Fprintln(buf, text)
|
||||
}
|
||||
}
|
||||
err := scanner.Err()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func inspectLogs(t *testing.T, cli ce.ContainerInterface, ID string) string {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
logs, err := cli.GetContainerLogs(ctx, ID, ce.ContainerLogsOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return logs
|
||||
}
|
||||
|
||||
// generateTAR creates a TAR-formatted []byte, with the specified files included.
|
||||
func generateTAR(t *testing.T, files []struct{ Name, Body string }) []byte {
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buf)
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.Name,
|
||||
Mode: 0600,
|
||||
Size: int64(len(file.Body)),
|
||||
}
|
||||
err := tw.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, err = tw.Write([]byte(file.Body))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
err := tw.Close()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
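// Illustrative sketch (not part of the original test suite): building a small TAR
// archive in memory with generateTAR and counting its lines with countTarLines.
// The file names and contents are hypothetical.
func exampleGenerateTAR(t *testing.T) {
files := []struct{ Name, Body string }{
{"Dockerfile", "FROM scratch\n"},
{"hello.txt", "hello\nworld\n"},
}
b := generateTAR(t, files)
t.Logf("TAR archive is %d bytes and contains %d lines of file content", len(b), countTarLines(t, b))
}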
|
||||
|
||||
// createImage creates a new Docker image with the specified files included.
|
||||
func createImage(t *testing.T, cli ce.ContainerInterface, files []struct{ Name, Body string }) string {
|
||||
r := bytes.NewReader(generateTAR(t, files))
|
||||
tag := strings.ToLower(t.Name())
|
||||
|
||||
tmpDir, err := os.MkdirTemp("", "tmp")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
//Write files to temp directory
|
||||
for _, file := range files {
|
||||
//Add tag to file name to allow parallel testing
|
||||
f, err := os.Create(pathutils.CleanPath(tmpDir, file.Name))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
body := []byte(file.Body)
|
||||
_, err = f.Write(body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
_, err = cli.ImageBuild(r, tag, pathutils.CleanPath(tmpDir, files[0].Name))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
// deleteImage deletes a Docker image
|
||||
func deleteImage(t *testing.T, cli ce.ContainerInterface, id string) {
|
||||
cli.ImageRemove(id, ce.ImageRemoveOptions{
|
||||
Force: true,
|
||||
})
|
||||
}
|
||||
|
||||
func copyFromContainer(t *testing.T, cli ce.ContainerInterface, id string, file string) []byte {
|
||||
b, err := cli.CopyFromContainer(id, file)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func countLines(t *testing.T, r io.Reader) int {
|
||||
scanner := bufio.NewScanner(r)
|
||||
count := 0
|
||||
for scanner.Scan() {
|
||||
count++
|
||||
}
|
||||
err := scanner.Err()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func countTarLines(t *testing.T, b []byte) int {
|
||||
r := bytes.NewReader(b)
|
||||
tr := tar.NewReader(r)
|
||||
total := 0
|
||||
for {
|
||||
_, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
// End of TAR
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
total += countLines(t, tr)
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// scanForExcludedEntries scans for default excluded messages
|
||||
func scanForExcludedEntries(msg string) bool {
|
||||
if strings.Contains(msg, "AMQ5041I") || strings.Contains(msg, "AMQ5052I") ||
|
||||
strings.Contains(msg, "AMQ5051I") || strings.Contains(msg, "AMQ5037I") ||
|
||||
strings.Contains(msg, "AMQ5975I") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkLogForValidJSON checks that every log line is valid JSON
|
||||
func checkLogForValidJSON(jsonLogs string) bool {
|
||||
scanner := bufio.NewScanner(strings.NewReader(jsonLogs))
|
||||
for scanner.Scan() {
|
||||
var obj map[string]interface{}
|
||||
s := scanner.Text()
|
||||
err := json.Unmarshal([]byte(s), &obj)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// runContainerWithAllConfigError creates and starts a container, using the supplied ContainerConfig, HostConfig,
|
||||
// NetworkingConfig, and container name (or the value of t.Name if containerName="").
|
||||
func runContainerWithAllConfigError(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig, hostConfig *ce.ContainerHostConfig, networkingConfig *ce.ContainerNetworkSettings, containerName string) (string, error) {
|
||||
if containerName == "" {
|
||||
containerName = t.Name()
|
||||
}
|
||||
if containerConfig.Image == "" {
|
||||
containerConfig.Image = imageName()
|
||||
}
|
||||
// Always run as a random user, unless the test has specified otherwise
|
||||
if containerConfig.User == "" {
|
||||
containerConfig.User = generateRandomUID()
|
||||
}
|
||||
if coverage() {
|
||||
containerConfig.Env = append(containerConfig.Env, "COVERAGE_FILE="+t.Name()+".cov")
|
||||
containerConfig.Env = append(containerConfig.Env, "EXIT_CODE_FILE="+getExitCodeFilename(t))
|
||||
}
|
||||
t.Logf("Running container (%s)", containerConfig.Image)
|
||||
ID, err := cli.ContainerCreate(containerConfig, hostConfig, networkingConfig, containerName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = startContainerError(t, cli, ID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return ID, nil
|
||||
}
|
||||
|
||||
func startContainerError(t *testing.T, cli ce.ContainerInterface, ID string) error {
|
||||
t.Logf("Starting container: %v", ID)
|
||||
startOptions := ce.ContainerStartOptions{}
|
||||
err := cli.ContainerStart(ID, startOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// testLogFilePages validates that the specified number of logFilePages is present in the qm.ini file.
|
||||
func testLogFilePages(t *testing.T, cli ce.ContainerInterface, id string, qmName string, expectedLogFilePages string) {
|
||||
catIniFileCommand := fmt.Sprintf("cat /var/mqm/qmgrs/" + qmName + "/qm.ini")
|
||||
_, iniContent := execContainer(t, cli, id, "", []string{"bash", "-c", catIniFileCommand})
|
||||
|
||||
if !strings.Contains(iniContent, "LogFilePages="+expectedLogFilePages) {
|
||||
t.Errorf("Expected qm.ini to contain LogFilePages="+expectedLogFilePages+"; got qm.ini \"%v\"", iniContent)
|
||||
}
|
||||
}
|
||||
|
||||
// waitForMessageInLog waits for a particular message to appear in the container log
|
||||
func waitForMessageInLog(t *testing.T, cli ce.ContainerInterface, id string, expectedMessageId string) (string, error) {
|
||||
var jsonLogs string
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
jsonLogs = inspectLogs(t, cli, id)
|
||||
if strings.Contains(jsonLogs, expectedMessageId) {
|
||||
return jsonLogs, nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return "", fmt.Errorf("expected message Id %s was not logged", expectedMessageId)
|
||||
}
|
||||
}
|
||||
}
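// Illustrative sketch (not part of the original test suite): waiting for a specific
// message identifier to appear in a container's log. "AMQ5806I" is used purely as an
// example of an MQ message identifier; any expected identifier could be passed.
func exampleWaitForMessageInLog(t *testing.T, cli ce.ContainerInterface, id string) {
if _, err := waitForMessageInLog(t, cli, id, "AMQ5806I"); err != nil {
t.Error(err)
}
}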
|
||||
|
||||
// waitForMessageCountInLog waits for a particular message to appear in the container log exactly the number of times specified by count
|
||||
func waitForMessageCountInLog(t *testing.T, cli ce.ContainerInterface, id string, expectedMessageId string, count int) (string, error) {
|
||||
var jsonLogs string
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
jsonLogs = inspectLogs(t, cli, id)
|
||||
if strings.Contains(jsonLogs, expectedMessageId) && strings.Count(jsonLogs, expectedMessageId) == count {
|
||||
return jsonLogs, nil
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return "", fmt.Errorf("expected message Id %s was not logged or it was not logged %v times", expectedMessageId, count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// tlsDirDN returns the fully qualified path to the specified certificate directory
|
||||
func tlsDirDN(t *testing.T, unixPath bool, certPath string) string {
|
||||
return pathutils.CleanPath(filepath.Dir(getCwd(t, unixPath)), certPath)
|
||||
}
|
||||
3
test/container/go.mod
Normal file
3
test/container/go.mod
Normal file
@@ -0,0 +1,3 @@
|
||||
module github.com/ibm-messaging/mq-container/test/container
|
||||
|
||||
go 1.19
|
||||
0
test/container/go.sum
Normal file
0
test/container/go.sum
Normal file
19
test/container/main.go
Normal file
19
test/container/main.go
Normal file
@@ -0,0 +1,19 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2017
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
func main() {
|
||||
}
|
||||
269
test/container/mq_multi_instance_test.go
Normal file
269
test/container/mq_multi_instance_test.go
Normal file
@@ -0,0 +1,269 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2019, 2023
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
)
|
||||
|
||||
var miEnv = []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"MQ_MULTI_INSTANCE=true",
|
||||
}
|
||||
|
||||
// TestMultiInstanceStartStop creates 2 containers in a multi instance queue manager configuration
|
||||
// and starts/stops them, checking that we always have an active and a standby instance
|
||||
func TestMultiInstanceStartStop(t *testing.T) {
|
||||
t.Skipf("Skipping %v until test defect fixed", t.Name())
|
||||
cli := ce.NewContainerClient()
|
||||
err, qm1aId, qm1bId, volumes := configureMultiInstance(t, cli, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, volume := range volumes {
|
||||
defer removeVolume(t, cli, volume)
|
||||
}
|
||||
defer cleanContainer(t, cli, qm1aId)
|
||||
defer cleanContainer(t, cli, qm1bId)
|
||||
|
||||
waitForReady(t, cli, qm1aId)
|
||||
waitForReady(t, cli, qm1bId)
|
||||
|
||||
err, active, standby := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
killContainer(t, cli, active, "SIGTERM")
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
if status := getQueueManagerStatus(t, cli, standby, "QM1"); strings.Compare(status, "Running") != 0 {
|
||||
t.Fatalf("Expected QM1 to be running as active queue manager, dspmq returned status of %v", status)
|
||||
}
|
||||
|
||||
startContainer(t, cli, qm1aId)
|
||||
waitForReady(t, cli, qm1aId)
|
||||
|
||||
err, _, _ = getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestMultiInstanceContainerStop starts 2 containers in a multi instance queue manager configuration,
|
||||
// stops the active queue manager, then checks to ensure the backup queue manager becomes active
|
||||
func TestMultiInstanceContainerStop(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
err, qm1aId, qm1bId, volumes := configureMultiInstance(t, cli, false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, volume := range volumes {
|
||||
defer removeVolume(t, cli, volume)
|
||||
}
|
||||
defer cleanContainer(t, cli, qm1aId)
|
||||
defer cleanContainer(t, cli, qm1bId)
|
||||
|
||||
waitForReady(t, cli, qm1aId)
|
||||
waitForReady(t, cli, qm1bId)
|
||||
|
||||
err, originalActive, originalStandby := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
stopContainer(t, cli, originalActive)
|
||||
|
||||
for {
|
||||
status := getQueueManagerStatus(t, cli, originalStandby, "QM1")
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
if status == "Running" {
|
||||
t.Logf("Original standby is now the active")
|
||||
return
|
||||
} else if status == "Starting" {
|
||||
t.Logf("Original standby is starting")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatalf("%s Timed out waiting for standby to become the active. Status=%v", time.Now().Format(time.RFC3339), status)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiInstanceRace starts 2 containers in separate goroutines in a multi instance queue manager
|
||||
// configuration, then checks to ensure that both an active and standby queue manager have been started
|
||||
func TestMultiInstanceRace(t *testing.T) {
|
||||
t.Skipf("Skipping %v until file lock is implemented", t.Name())
|
||||
cli := ce.NewContainerClient()
|
||||
qmsharedlogs := createVolume(t, cli, "qmsharedlogs")
|
||||
defer removeVolume(t, cli, qmsharedlogs)
|
||||
qmshareddata := createVolume(t, cli, "qmshareddata")
|
||||
defer removeVolume(t, cli, qmshareddata)
|
||||
|
||||
qmsChannel := make(chan QMChan)
|
||||
|
||||
go singleMultiInstanceQueueManager(t, cli, qmsharedlogs, qmshareddata, qmsChannel)
|
||||
go singleMultiInstanceQueueManager(t, cli, qmsharedlogs, qmshareddata, qmsChannel)
|
||||
|
||||
qm1a := <-qmsChannel
|
||||
if qm1a.Error != nil {
|
||||
t.Fatal(qm1a.Error)
|
||||
}
|
||||
|
||||
qm1b := <-qmsChannel
|
||||
if qm1b.Error != nil {
|
||||
t.Fatal(qm1b.Error)
|
||||
}
|
||||
|
||||
qm1aId, qm1aData := qm1a.QMId, qm1a.QMData
|
||||
qm1bId, qm1bData := qm1b.QMId, qm1b.QMData
|
||||
|
||||
defer removeVolume(t, cli, qm1aData)
|
||||
defer removeVolume(t, cli, qm1bData)
|
||||
defer cleanContainer(t, cli, qm1aId)
|
||||
defer cleanContainer(t, cli, qm1bId)
|
||||
|
||||
waitForReady(t, cli, qm1aId)
|
||||
waitForReady(t, cli, qm1bId)
|
||||
|
||||
err, _, _ := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultiInstanceNoSharedMounts starts a multi instance queue manager without providing shared log/data
|
||||
// mounts, then checks to ensure that the container terminates with the expected message
|
||||
func TestMultiInstanceNoSharedMounts(t *testing.T) {
|
||||
t.Parallel()
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, "", "", miEnv, "", "", false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer removeVolume(t, cli, qm1aData)
|
||||
defer cleanContainer(t, cli, qm1aId)
|
||||
|
||||
waitForTerminationMessage(t, cli, qm1aId, "Missing required mount '/mnt/mqm-log'", 30*time.Second)
|
||||
}
|
||||
|
||||
// TestMultiInstanceNoSharedLogs starts a multi instance queue manager without providing a shared log
|
||||
// mount, then checks to ensure that the container terminates with the expected message
|
||||
func TestMultiInstanceNoSharedLogs(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
qmshareddata := createVolume(t, cli, "qmshareddata")
|
||||
defer removeVolume(t, cli, qmshareddata)
|
||||
|
||||
err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, "", qmshareddata, miEnv, "", "", false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer removeVolume(t, cli, qm1aData)
|
||||
defer cleanContainer(t, cli, qm1aId)
|
||||
|
||||
waitForTerminationMessage(t, cli, qm1aId, "Missing required mount '/mnt/mqm-log'", 30*time.Second)
|
||||
}
|
||||
|
||||
// TestMultiInstanceNoSharedData starts a multi instance queue manager without providing a shared data
|
||||
// mount, then checks to ensure that the container terminates with the expected message
|
||||
func TestMultiInstanceNoSharedData(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
qmsharedlogs := createVolume(t, cli, "qmsharedlogs")
|
||||
defer removeVolume(t, cli, qmsharedlogs)
|
||||
|
||||
err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs, "", miEnv, "", "", false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer removeVolume(t, cli, qm1aData)
|
||||
defer cleanContainer(t, cli, qm1aId)
|
||||
|
||||
waitForTerminationMessage(t, cli, qm1aId, "Missing required mount '/mnt/mqm-data'", 30*time.Second)
|
||||
}
|
||||
|
||||
// TestMultiInstanceNoMounts starts a multi instance queue manager without providing any volume
// mounts, then checks to ensure that the container terminates with the expected message
|
||||
func TestMultiInstanceNoMounts(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, false, "", "", miEnv, "", "", false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer removeVolume(t, cli, qm1aData)
|
||||
defer cleanContainer(t, cli, qm1aId)
|
||||
|
||||
waitForTerminationMessage(t, cli, qm1aId, "Missing required mount '/mnt/mqm'", 30*time.Second)
|
||||
}
|
||||
|
||||
// TestRoRFsMultiInstanceContainerStop starts 2 containers in a multi instance queue manager configuration
// with a read-only root filesystem, stops the active queue manager, then checks to ensure the standby
// queue manager becomes active
|
||||
func TestRoRFsMultiInstanceContainerStop(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
err, qm1aId, qm1bId, volumes := configureMultiInstance(t, cli, true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, volume := range volumes {
|
||||
defer removeVolume(t, cli, volume)
|
||||
}
|
||||
defer cleanContainer(t, cli, qm1aId)
|
||||
defer cleanContainer(t, cli, qm1bId)
|
||||
|
||||
waitForReady(t, cli, qm1aId)
|
||||
waitForReady(t, cli, qm1bId)
|
||||
|
||||
err, originalActive, originalStandby := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
stopContainer(t, cli, originalActive)
|
||||
|
||||
for {
|
||||
status := getQueueManagerStatus(t, cli, originalStandby, "QM1")
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
if status == "Running" {
|
||||
t.Logf("Original standby is now the active")
|
||||
return
|
||||
} else if status == "Starting" {
|
||||
t.Logf("Original standby is starting")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatalf("%s Timed out waiting for standby to become the active. Status=%v", time.Now().Format(time.RFC3339), status)
|
||||
}
|
||||
}
|
||||
}
|
||||
114
test/container/mq_multi_instance_test_util.go
Normal file
@@ -0,0 +1,114 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2019, 2023
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
)
|
||||
|
||||
type QMChan struct {
|
||||
QMId string
|
||||
QMData string
|
||||
Error error
|
||||
}
|
||||
|
||||
// configureMultiInstance creates the volumes and containers required for basic testing
|
||||
// of multi instance queue managers. Returns error, qm1a ID, qm1b ID, slice of volume names
|
||||
func configureMultiInstance(t *testing.T, cli ce.ContainerInterface, readOnlyRootFs bool) (error, string, string, []string) {
|
||||
|
||||
qmsharedlogs := createVolume(t, cli, "qmsharedlogs")
|
||||
qmshareddata := createVolume(t, cli, "qmshareddata")
|
||||
|
||||
// Create tmp and run volumes
|
||||
var qmRunVol, qmTmpVol string
|
||||
if readOnlyRootFs {
|
||||
qmRunVol = createVolume(t, cli, "qmRunVolume")
|
||||
qmTmpVol = createVolume(t, cli, "qmTmpVolume")
|
||||
}
|
||||
|
||||
err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs, qmshareddata, miEnv, qmRunVol, qmTmpVol, readOnlyRootFs)
|
||||
if err != nil {
|
||||
return err, "", "", []string{}
|
||||
}
|
||||
time.Sleep(10 * time.Second)
|
||||
err, qm1bId, qm1bData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs, qmshareddata, miEnv, qmRunVol, qmTmpVol, readOnlyRootFs)
|
||||
if err != nil {
|
||||
return err, "", "", []string{}
|
||||
}
|
||||
|
||||
volumes := []string{qmsharedlogs, qmshareddata, qm1aData, qm1bData}
|
||||
if readOnlyRootFs {
|
||||
volumes = append(volumes, qmRunVol)
|
||||
volumes = append(volumes, qmTmpVol)
|
||||
}
|
||||
return nil, qm1aId, qm1bId, volumes
|
||||
}
|
||||
|
||||
func singleMultiInstanceQueueManager(t *testing.T, cli ce.ContainerInterface, qmsharedlogs string, qmshareddata string, qmsChannel chan QMChan) {
|
||||
err, qmId, qmData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs, qmshareddata, miEnv, "", "", false)
|
||||
if err != nil {
qmsChannel <- QMChan{Error: err}
return
}
qmsChannel <- QMChan{QMId: qmId, QMData: qmData}
|
||||
}
|
||||
|
||||
func getActiveStandbyQueueManager(t *testing.T, cli ce.ContainerInterface, qm1aId string, qm1bId string) (error, string, string) {
|
||||
qm1aStatus := getQueueManagerStatus(t, cli, qm1aId, "QM1")
|
||||
qm1bStatus := getQueueManagerStatus(t, cli, qm1bId, "QM1")
|
||||
|
||||
if qm1aStatus == "Running" && qm1bStatus == "Running as standby" {
|
||||
return nil, qm1aId, qm1bId
|
||||
} else if qm1bStatus == "Running" && qm1aStatus == "Running as standby" {
|
||||
return nil, qm1bId, qm1aId
|
||||
}
|
||||
err := fmt.Errorf("Expected to be running in multi instance configuration, got status 1) %v status 2) %v", qm1aStatus, qm1bStatus)
|
||||
return err, "", ""
|
||||
}
|
||||
|
||||
func getQueueManagerStatus(t *testing.T, cli ce.ContainerInterface, containerID string, queueManagerName string) string {
|
||||
_, dspmqOut := execContainer(t, cli, containerID, "", []string{"bash", "-c", fmt.Sprintf("dspmq -m %s", queueManagerName)})
|
||||
t.Logf("dspmq for %v (%v) returned: %v", containerID, queueManagerName, dspmqOut)
|
||||
regex := regexp.MustCompile(`STATUS\(.*\)`)
|
||||
status := regex.FindString(dspmqOut)
|
||||
status = strings.TrimSuffix(strings.TrimPrefix(status, "STATUS("), ")")
|
||||
return status
|
||||
}
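
// Illustrative sketch (not part of the original commit): the status extraction that
// getQueueManagerStatus performs on dspmq output. The sample line below is an assumed
// example of typical dspmq formatting, for illustration only.
func exampleParseDspmqStatus() string {
	sample := `QMNAME(QM1)   STATUS(Running as standby)`
	status := regexp.MustCompile(`STATUS\(.*\)`).FindString(sample)
	// Strip the STATUS(...) wrapper, leaving "Running as standby"
	return strings.TrimSuffix(strings.TrimPrefix(status, "STATUS("), ")")
}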
|
||||
|
||||
func waitForTerminationMessage(t *testing.T, cli ce.ContainerInterface, qmId string, terminationString string, timeout time.Duration) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
m := terminationMessage(t, cli, qmId)
|
||||
if m != "" {
|
||||
if !strings.Contains(m, terminationString) {
|
||||
t.Fatalf("Expected container to fail with termination message %v. Got termination message: %v", terminationString, m)
|
||||
}
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("Timed out waiting for container to terminate")
|
||||
}
|
||||
}
|
||||
}
|
||||
346
test/container/mq_native_ha_test.go
Normal file
@@ -0,0 +1,346 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2021, 2023
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
)
|
||||
|
||||
// TestNativeHABasic creates 3 containers in a Native HA queue manager configuration
|
||||
// and ensures the queue manager and replicas start as expected
|
||||
func TestNativeHABasic(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
version, err := cli.GetMQVersion(imageName())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if version < "9.2.2.0" {
|
||||
t.Skipf("Skipping %s as test requires at least MQ 9.2.2.0, but image is version %s", t.Name(), version)
|
||||
}
|
||||
|
||||
containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
|
||||
qmReplicaIDs := [3]string{}
|
||||
qmVolumes := []string{}
|
||||
//Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
|
||||
//If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
|
||||
basePort := 14551
|
||||
for i := 0; i <= 2; i++ {
|
||||
nhaPort := basePort + i
|
||||
vol := createVolume(t, cli, containerNames[i])
|
||||
defer removeVolume(t, cli, vol)
|
||||
qmVolumes = append(qmVolumes, vol)
|
||||
containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, basePort)
|
||||
hostConfig := getHostConfig(t, 1, "", "", vol, "", "", false)
|
||||
hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
|
||||
networkingConfig := getNativeHANetworkConfig("host")
|
||||
ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
|
||||
defer cleanContainer(t, cli, ctr)
|
||||
qmReplicaIDs[i] = ctr
|
||||
}
|
||||
|
||||
waitForReadyHA(t, cli, qmReplicaIDs)
|
||||
|
||||
_, err = getActiveReplicaInstances(t, cli, qmReplicaIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestNativeHAFailover creates 3 containers in a Native HA queue manager configuration,
|
||||
// stops the active queue manager, checks a replica becomes active, and ensures the stopped
|
||||
// queue manager comes back as a replica
|
||||
func TestNativeHAFailover(t *testing.T) {
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
version, err := cli.GetMQVersion(imageName())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if version < "9.2.2.0" {
|
||||
t.Skipf("Skipping %s as test requires at least MQ 9.2.2.0, but image is version %s", t.Name(), version)
|
||||
}
|
||||
|
||||
containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
|
||||
qmReplicaIDs := [3]string{}
|
||||
qmVolumes := []string{}
|
||||
//Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
|
||||
//If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
|
||||
basePort := 14551
|
||||
for i := 0; i <= 2; i++ {
|
||||
nhaPort := basePort + i
|
||||
vol := createVolume(t, cli, containerNames[i])
|
||||
defer removeVolume(t, cli, vol)
|
||||
qmVolumes = append(qmVolumes, vol)
|
||||
containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, basePort)
|
||||
hostConfig := getHostConfig(t, 1, "", "", vol, "", "", false)
|
||||
hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
|
||||
networkingConfig := getNativeHANetworkConfig("host")
|
||||
ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
|
||||
defer cleanContainer(t, cli, ctr)
|
||||
qmReplicaIDs[i] = ctr
|
||||
}
|
||||
|
||||
waitForReadyHA(t, cli, qmReplicaIDs)
|
||||
|
||||
haStatus, err := getActiveReplicaInstances(t, cli, qmReplicaIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stopContainer(t, cli, haStatus.Active)
|
||||
waitForFailoverHA(t, cli, haStatus.Replica)
|
||||
startContainer(t, cli, haStatus.Active)
|
||||
waitForReady(t, cli, haStatus.Active)
|
||||
|
||||
_, err = getActiveReplicaInstances(t, cli, qmReplicaIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestNativeHASecure creates 3 containers in a Native HA queue manager configuration
|
||||
// with HA TLS enabled, and ensures the queue manager and replicas start as expected
|
||||
func TestNativeHASecure(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
version, err := cli.GetMQVersion(imageName())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if version < "9.2.2.0" {
|
||||
t.Skipf("Skipping %s as test requires at least MQ 9.2.2.0, but image is version %s", t.Name(), version)
|
||||
}
|
||||
if isARM(t) {
|
||||
t.Skip("Skipping as an issue has been identified for the arm64 MQ image")
|
||||
}
|
||||
|
||||
containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
|
||||
qmReplicaIDs := [3]string{}
|
||||
//Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
|
||||
//If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
|
||||
basePort := 14551
|
||||
for i := 0; i <= 2; i++ {
|
||||
nhaPort := basePort + i
|
||||
containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
|
||||
containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true")
|
||||
hostConfig := getNativeHASecureHostConfig(t)
|
||||
hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
|
||||
networkingConfig := getNativeHANetworkConfig("host")
|
||||
ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
|
||||
defer cleanContainer(t, cli, ctr)
|
||||
qmReplicaIDs[i] = ctr
|
||||
}
|
||||
|
||||
waitForReadyHA(t, cli, qmReplicaIDs)
|
||||
|
||||
_, err = getActiveReplicaInstances(t, cli, qmReplicaIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestNativeHASecureCipherSpec creates 3 containers in a Native HA queue manager configuration
// with HA TLS enabled, overrides the default CipherSpec, and ensures the queue manager
// and replicas start as expected
|
||||
func TestNativeHASecureCipherSpec(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
version, err := cli.GetMQVersion(imageName())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if version < "9.2.2.0" {
|
||||
t.Skipf("Skipping %s as test requires at least MQ 9.2.2.0, but image is version %s", t.Name(), version)
|
||||
}
|
||||
|
||||
containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
|
||||
qmReplicaIDs := [3]string{}
|
||||
//Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
|
||||
//If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
|
||||
basePort := 14551
|
||||
for i := 0; i <= 2; i++ {
|
||||
nhaPort := basePort + i
|
||||
containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
|
||||
containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true", "MQ_NATIVE_HA_CIPHERSPEC=TLS_AES_256_GCM_SHA384")
|
||||
hostConfig := getNativeHASecureHostConfig(t)
|
||||
hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
|
||||
networkingConfig := getNativeHANetworkConfig("host")
|
||||
ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
|
||||
defer cleanContainer(t, cli, ctr)
|
||||
qmReplicaIDs[i] = ctr
|
||||
}
|
||||
|
||||
waitForReadyHA(t, cli, qmReplicaIDs)
|
||||
|
||||
_, err = getActiveReplicaInstances(t, cli, qmReplicaIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestNativeHASecureCipherSpecFIPS creates 3 containers in a Native HA queue manager configuration
// with FIPS enabled for HA TLS, overrides the default CipherSpec with a FIPS-compliant cipher,
// and ensures the queue manager and replicas start as expected
|
||||
func TestNativeHASecureCipherSpecFIPS(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
version, err := cli.GetMQVersion(imageName())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if version < "9.2.2.0" {
|
||||
t.Skipf("Skipping %s as test requires at least MQ 9.2.2.0, but image is version %s", t.Name(), version)
|
||||
}
|
||||
|
||||
containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
|
||||
qmReplicaIDs := [3]string{}
|
||||
//Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
|
||||
//If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
|
||||
basePort := 14551
|
||||
for i := 0; i <= 2; i++ {
|
||||
nhaPort := basePort + i
|
||||
containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
|
||||
// MQ_NATIVE_HA_CIPHERSPEC is set to a FIPS-compliant CipherSpec
|
||||
containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true", "MQ_NATIVE_HA_CIPHERSPEC=ANY_TLS12_OR_HIGHER", "MQ_ENABLE_FIPS=true")
|
||||
hostConfig := getNativeHASecureHostConfig(t)
|
||||
hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
|
||||
networkingConfig := getNativeHANetworkConfig("host")
|
||||
ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
|
||||
defer cleanContainer(t, cli, ctr)
|
||||
qmReplicaIDs[i] = ctr
|
||||
}
|
||||
|
||||
waitForReadyHA(t, cli, qmReplicaIDs)
|
||||
// Display the contents of qm.ini
|
||||
_, qmini := execContainer(t, cli, qmReplicaIDs[0], "", []string{"cat", "/var/mqm/qmgrs/QM1/qm.ini"})
|
||||
if !strings.Contains(qmini, "SSLFipsRequired=Yes") {
|
||||
t.Errorf("Expected SSLFipsRequired=Yes but it is not; got \"%v\"", qmini)
|
||||
}
|
||||
|
||||
_, err = getActiveReplicaInstances(t, cli, qmReplicaIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestNativeHASecureCipherSpecNonFIPSCipher creates 3 containers in a Native HA queue manager
// configuration with FIPS enabled for HA TLS but a non-FIPS CipherSpec override, then ensures
// the queue manager and replicas fail to start, as expected
|
||||
func TestNativeHASecureCipherSpecNonFIPSCipher(t *testing.T) {
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
version, err := cli.GetMQVersion(imageName())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if version < "9.2.2.0" {
|
||||
t.Skipf("Skipping %s as test requires at least MQ 9.2.2.0, but image is version %s", t.Name(), version)
|
||||
}
|
||||
|
||||
containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
|
||||
qmReplicaIDs := [3]string{}
|
||||
//Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
|
||||
//If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
|
||||
basePort := 14551
|
||||
for i := 0; i <= 2; i++ {
|
||||
nhaPort := basePort + i
|
||||
containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
|
||||
// MQ_NATIVE_HA_CIPHERSPEC is set to a non-FIPS-compliant CipherSpec (SSL_ECDHE_ECDSA_WITH_RC4_128_SHA)
|
||||
containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true", "MQ_NATIVE_HA_CIPHERSPEC=SSL_ECDHE_ECDSA_WITH_RC4_128_SHA", "MQ_ENABLE_FIPS=true")
|
||||
hostConfig := getNativeHASecureHostConfig(t)
|
||||
hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
|
||||
networkingConfig := getNativeHANetworkConfig("host")
|
||||
ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
|
||||
defer cleanContainer(t, cli, ctr)
|
||||
// We expect container to fail in this case because the cipher is non-FIPS and we have asked for FIPS compliance
|
||||
// by setting MQ_ENABLE_FIPS=true
|
||||
qmReplicaIDs[i] = ctr
|
||||
}
|
||||
for i := 0; i <= 2; i++ {
|
||||
waitForTerminationMessage(t, cli, qmReplicaIDs[i], "/opt/mqm/bin/strmqm: exit status 23", 60*time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// TestNativeHAFailoverWithRoRFs creates 3 containers in a Native HA queue manager configuration
// with a read-only root filesystem, stops the active queue manager, checks a replica becomes
// active, and ensures the stopped queue manager comes back as a replica
|
||||
func TestNativeHAFailoverWithRoRFs(t *testing.T) {
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
|
||||
version, err := cli.GetMQVersion(imageName())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if version < "9.2.2.0" {
|
||||
t.Skipf("Skipping %s as test requires at least MQ 9.2.2.0, but image is version %s", t.Name(), version)
|
||||
}
|
||||
|
||||
containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
|
||||
qmReplicaIDs := [3]string{}
|
||||
qmVolumes := []string{}
|
||||
//Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
|
||||
//If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
|
||||
basePort := 14551
|
||||
for i := 0; i <= 2; i++ {
|
||||
nhaPort := basePort + i
|
||||
vol := createVolume(t, cli, containerNames[i])
|
||||
defer removeVolume(t, cli, vol)
|
||||
volRun := createVolume(t, cli, "ephRun"+containerNames[i])
|
||||
defer removeVolume(t, cli, volRun)
|
||||
volTmp := createVolume(t, cli, "ephTmp"+containerNames[i])
|
||||
defer removeVolume(t, cli, volTmp)
|
||||
|
||||
qmVolumes = append(qmVolumes, vol)
|
||||
qmVolumes = append(qmVolumes, volRun)
|
||||
qmVolumes = append(qmVolumes, volTmp)
|
||||
|
||||
containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, basePort)
|
||||
hostConfig := getHostConfig(t, 1, "", "", vol, volRun, volTmp, true)
|
||||
hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
|
||||
networkingConfig := getNativeHANetworkConfig("host")
|
||||
ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
|
||||
defer cleanContainer(t, cli, ctr)
|
||||
qmReplicaIDs[i] = ctr
|
||||
}
|
||||
|
||||
waitForReadyHA(t, cli, qmReplicaIDs)
|
||||
|
||||
haStatus, err := getActiveReplicaInstances(t, cli, qmReplicaIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stopContainer(t, cli, haStatus.Active)
|
||||
waitForFailoverHA(t, cli, haStatus.Replica)
|
||||
startContainer(t, cli, haStatus.Active)
|
||||
waitForReady(t, cli, haStatus.Active)
|
||||
|
||||
_, err = getActiveReplicaInstances(t, cli, qmReplicaIDs)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
159
test/container/mq_native_ha_test_util.go
Normal file
@@ -0,0 +1,159 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2021, 2023
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
"github.com/ibm-messaging/mq-container/test/container/pathutils"
|
||||
)
|
||||
|
||||
const defaultHAPort = 9414
|
||||
|
||||
// HAReplicaStatus represents the Active/Replica/Replica container status of the queue manager
|
||||
type HAReplicaStatus struct {
|
||||
Active string
|
||||
Replica [2]string
|
||||
}
|
||||
|
||||
func getNativeHAContainerConfig(containerName string, replicaNames [3]string, haPort int) ce.ContainerConfig {
|
||||
return ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=QM1",
|
||||
"AMQ_CLOUD_PAK=true",
|
||||
"MQ_NATIVE_HA=true",
|
||||
fmt.Sprintf("HOSTNAME=%s", containerName),
|
||||
fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_0_NAME=%s", replicaNames[0]),
|
||||
fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_1_NAME=%s", replicaNames[1]),
|
||||
fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_2_NAME=%s", replicaNames[2]),
|
||||
fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_0_REPLICATION_ADDRESS=%s(%d)", "127.0.0.1", haPort+0),
|
||||
fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_1_REPLICATION_ADDRESS=%s(%d)", "127.0.0.1", haPort+1),
|
||||
fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_2_REPLICATION_ADDRESS=%s(%d)", "127.0.0.1", haPort+2),
|
||||
},
|
||||
// When using host networking, a consistent user is required. If a random user is used,
// errors like the following are recorded:
//   AMQ3209E: Native HA connection rejected due to configuration mismatch of 'QmgrUserId=5024'
|
||||
User: "1111",
|
||||
}
|
||||
}
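
// Illustrative sketch (not part of the original commit): the instance names and replication
// addresses that getNativeHAContainerConfig generates for the three replicas. The base port
// 14551 matches the value used by the tests above; everything else here is for illustration.
func exampleNativeHAReplicationAddresses() []string {
	basePort := 14551
	names := [3]string{"QM1_1", "QM1_2", "QM1_3"}
	env := make([]string, 0, 6)
	for i, name := range names {
		env = append(env, fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_%d_NAME=%s", i, name))
		// Instances land on consecutive loopback ports: 14551, 14552, 14553
		env = append(env, fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_%d_REPLICATION_ADDRESS=127.0.0.1(%d)", i, basePort+i))
	}
	return env
}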
|
||||
|
||||
func getNativeHASecureHostConfig(t *testing.T) ce.ContainerHostConfig {
|
||||
hostConfig := ce.ContainerHostConfig{
|
||||
Binds: []string{
|
||||
pathutils.CleanPath(filepath.Dir(getCwd(t, true)), "../tls") + ":/etc/mqm/ha/pki/keys/ha",
|
||||
},
|
||||
}
|
||||
addCoverageBindIfAvailable(t, &hostConfig)
|
||||
return hostConfig
|
||||
}
|
||||
|
||||
func getNativeHANetworkConfig(networkID string) ce.ContainerNetworkSettings {
|
||||
return ce.ContainerNetworkSettings{
|
||||
Networks: []string{networkID},
|
||||
}
|
||||
}
|
||||
|
||||
// populateNativeHAPortBindings writes the Native HA port bindings to the host config
|
||||
func populateNativeHAPortBindings(ports []int, nativeHaPort int, hostConfig ce.ContainerHostConfig) ce.ContainerHostConfig {
|
||||
hostConfig.PortBindings = []ce.PortBinding{}
|
||||
var binding ce.PortBinding
|
||||
for i, p := range ports {
|
||||
port := fmt.Sprintf("%v/tcp", p)
|
||||
binding = ce.PortBinding{
|
||||
ContainerPort: port,
|
||||
HostIP: "0.0.0.0",
|
||||
//Offset the ports by 50 if there are multiple
|
||||
HostPort: strconv.Itoa(nativeHaPort + 50*i),
|
||||
}
|
||||
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||
}
|
||||
return hostConfig
|
||||
}
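
// Illustrative sketch (not part of the original commit): how populateNativeHAPortBindings maps
// container ports to host ports. The second container port (9443) is an assumption for
// illustration; the tests currently bind only 9414.
func examplePortBindings() []ce.PortBinding {
	hostConfig := populateNativeHAPortBindings([]int{9414, 9443}, 14551, ce.ContainerHostConfig{})
	// hostConfig.PortBindings[0]: ContainerPort "9414/tcp" -> HostPort "14551"
	// hostConfig.PortBindings[1]: ContainerPort "9443/tcp" -> HostPort "14601" (offset by 50)
	return hostConfig.PortBindings
}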
|
||||
|
||||
func getActiveReplicaInstances(t *testing.T, cli ce.ContainerInterface, qmReplicaIDs [3]string) (HAReplicaStatus, error) {
|
||||
|
||||
var actives []string
|
||||
var replicas []string
|
||||
|
||||
for _, id := range qmReplicaIDs {
|
||||
qmReplicaStatus := getQueueManagerStatus(t, cli, id, "QM1")
|
||||
if qmReplicaStatus == "Running" {
|
||||
actives = append(actives, id)
|
||||
} else if qmReplicaStatus == "Replica" {
|
||||
replicas = append(replicas, id)
|
||||
} else {
|
||||
err := fmt.Errorf("Expected status to be Running or Replica, got status: %s", qmReplicaStatus)
|
||||
return HAReplicaStatus{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(actives) != 1 || len(replicas) != 2 {
|
||||
err := fmt.Errorf("Expected 1 Active and 2 Replicas, got: %d Active and %d Replica", len(actives), len(replicas))
|
||||
return HAReplicaStatus{}, err
|
||||
}
|
||||
|
||||
return HAReplicaStatus{actives[0], [2]string{replicas[0], replicas[1]}}, nil
|
||||
}
|
||||
|
||||
func waitForReadyHA(t *testing.T, cli ce.ContainerInterface, qmReplicaIDs [3]string) {
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
for _, id := range qmReplicaIDs {
|
||||
rc, _ := execContainer(t, cli, id, "", []string{"chkmqready"})
|
||||
if rc == 0 {
|
||||
t.Log("MQ is ready")
|
||||
rc, _ := execContainer(t, cli, id, "", []string{"chkmqstarted"})
|
||||
if rc == 0 {
|
||||
t.Log("MQ has started")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("Timed out waiting for HA Queue Manager to become ready")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func waitForFailoverHA(t *testing.T, cli ce.ContainerInterface, replicas [2]string) {
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-time.After(1 * time.Second):
|
||||
for _, id := range replicas {
|
||||
if status := getQueueManagerStatus(t, cli, id, "QM1"); status == "Running" {
|
||||
return
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("Timed out waiting for Native HA Queue Manager to failover to an available replica")
|
||||
}
|
||||
}
|
||||
}
|
||||
420
test/container/mqmetric_test.go
Normal file
@@ -0,0 +1,420 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2018, 2023
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
)
|
||||
|
||||
func TestGoldenPathMetric(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics := getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Error("Expected some metrics to be returned but had none...")
|
||||
}
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestMetricNames(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics := getMetrics(t, port)
|
||||
names := metricNames()
|
||||
if len(metrics) != len(names) {
|
||||
t.Errorf("Expected %d metrics to be returned, received %d", len(names), len(metrics))
|
||||
}
|
||||
|
||||
// Check all the metrics have the correct names
|
||||
for _, metric := range metrics {
|
||||
ok := false
|
||||
for _, name := range names {
|
||||
if metric.Key == "ibmmq_qmgr_"+name {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
t.Errorf("Metric '%s' does not have the expected name", metric.Key)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestMetricLabels(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
requiredLabels := []string{"qmgr"}
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics := getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Error("Expected some metrics to be returned but had none")
|
||||
}
|
||||
|
||||
// Check all the metrics have the required labels
|
||||
for _, metric := range metrics {
|
||||
found := false
|
||||
for key := range metric.Labels {
|
||||
for _, e := range requiredLabels {
|
||||
if key == e {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
t.Errorf("Metric '%s' with labels %s does not have one or more required labels - %s", metric.Key, metric.Labels, requiredLabels)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestRapidFirePrometheus(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
|
||||
// Rapid fire it then check we're still happy
|
||||
for i := 0; i < 30; i++ {
|
||||
getMetrics(t, port)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
time.Sleep(11 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics := getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Error("Expected some metrics to be returned but had none")
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestSlowPrometheus(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
|
||||
// Send a request twice over a long period and check we're still happy
|
||||
for i := 0; i < 2; i++ {
|
||||
time.Sleep(30 * time.Second)
|
||||
metrics := getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Error("Expected some metrics to be returned but had none")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestContainerRestart(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics := getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Fatal("Expected some metrics to be returned before the restart but had none...")
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
// Start the container cleanly
|
||||
startContainer(t, cli, id)
|
||||
port, err = cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics = getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Error("Expected some metrics to be returned after the restart but had none...")
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestQMRestart(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics := getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Fatal("Expected some metrics to be returned before the restart but had none...")
|
||||
}
|
||||
|
||||
// Restart just the QM (to simulate a lost connection)
|
||||
t.Log("Stopping queue manager\n")
|
||||
rc, out := execContainer(t, cli, id, "", []string{"endmqm", "-w", "-r", defaultMetricQMName})
|
||||
if rc != 0 {
|
||||
t.Fatalf("Failed to stop the queue manager. rc=%d, err=%s", rc, out)
|
||||
}
|
||||
t.Log("starting queue manager\n")
|
||||
rc, out = execContainer(t, cli, id, "", []string{"strmqm", defaultMetricQMName})
|
||||
if rc != 0 {
|
||||
t.Fatalf("Failed to start the queue manager. rc=%d, err=%s", rc, out)
|
||||
}
|
||||
|
||||
// Wait for the queue manager to come back up
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics = getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Errorf("Expected some metrics to be returned after the restart but had none...")
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestValidValues(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
// hostname := getIPAddress(t, cli, id)
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics := getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Fatal("Expected some metrics to be returned but had none...")
|
||||
}
|
||||
|
||||
// Check that the values for each metric are valid numbers
|
||||
// can be either int, float or exponential - all these can be parsed by ParseFloat function
|
||||
for _, e := range metrics {
|
||||
if _, err := strconv.ParseFloat(e.Value, 64); err != nil {
|
||||
t.Errorf("Value (%s) for key (%s) is not a valid number", e.Value, e.Key)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
|
||||
func TestChangingValues(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cli := ce.NewContainerClient()
|
||||
id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{1414, defaultMetricPort})
|
||||
defer cleanContainer(t, cli, id)
|
||||
// hostname := getIPAddress(t, cli, id)
|
||||
port, err := cli.GetContainerPort(id, defaultMetricPort)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Now the container is ready, we prod the Prometheus endpoint until it's up.
|
||||
waitForMetricReady(t, port)
|
||||
|
||||
// Call once as mq_prometheus 'ignores' the first call and will not return any metrics
|
||||
getMetrics(t, port)
|
||||
time.Sleep(15 * time.Second)
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
metrics := getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Fatal("Expected some metrics to be returned but had none...")
|
||||
}
|
||||
|
||||
// Check we have no FDC files to start
|
||||
for _, e := range metrics {
|
||||
if e.Key == "ibmmq_qmgr_mq_fdc_file_count" {
|
||||
if e.Value != "0" {
|
||||
t.Fatalf("Expected %s to have a value of 0 but was %s", e.Key, e.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Send invalid data to the MQ listener to generate a FDC
|
||||
noport, err := cli.GetContainerPort(id, 1414)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
listener := fmt.Sprintf("localhost:%s", noport)
|
||||
conn, err := net.Dial("tcp", listener)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not connect to the listener - %v", err)
|
||||
}
|
||||
fmt.Fprintf(conn, "THIS WILL GENERATE A FDC!")
|
||||
conn.Close()
|
||||
|
||||
// Now actually get the metrics (after waiting for some to become available)
|
||||
time.Sleep(25 * time.Second)
|
||||
metrics = getMetrics(t, port)
|
||||
if len(metrics) <= 0 {
|
||||
t.Fatal("Expected some metrics to be returned but had none...")
|
||||
}
|
||||
|
||||
// Check that there is now 1 FDC file
|
||||
for _, e := range metrics {
|
||||
if e.Key == "ibmmq_qmgr_mq_fdc_file_count" {
|
||||
if e.Value != "1" {
|
||||
t.Fatalf("Expected %s to have a value of 1 but was %s", e.Key, e.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop the container cleanly
|
||||
stopContainer(t, cli, id)
|
||||
}
|
||||
259
test/container/mqmetric_test_util.go
Normal file
@@ -0,0 +1,259 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2018, 2023
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||
)
|
||||
|
||||
type mqmetric struct {
|
||||
Key string
|
||||
Value string
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
const defaultMetricURL = "/metrics"
|
||||
const defaultMetricPort = 9157
|
||||
const defaultMQNamespace = "ibmmq"
|
||||
const defaultMetricQMName = "qm1"
|
||||
|
||||
func getMetrics(t *testing.T, port string) []mqmetric {
|
||||
returned := []mqmetric{}
|
||||
urlToUse := fmt.Sprintf("http://localhost:%s%s", port, defaultMetricURL)
|
||||
resp, err := http.Get(urlToUse)
|
||||
if err != nil {
|
||||
t.Fatalf("Error from HTTP GET for metrics: %v", err)
|
||||
return returned
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
metricsRaw, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading metrics data: %v", err)
|
||||
return returned
|
||||
}
|
||||
return convertRawMetricToMap(t, string(metricsRaw))
|
||||
}
|
||||
|
||||
// Also filters out all non "ibmmq" metrics
|
||||
func convertRawMetricToMap(t *testing.T, input string) []mqmetric {
|
||||
returnList := []mqmetric{}
|
||||
scanner := bufio.NewScanner(strings.NewReader(input))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.HasPrefix(line, "#") {
|
||||
// Comment line of HELP or TYPE. Ignore
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(line, defaultMQNamespace) {
|
||||
// Not an ibmmq_ metric. Ignore
|
||||
continue
|
||||
}
|
||||
//It's an IBM MQ metric!
|
||||
key, value, labelMap, err := convertMetricLineToMetric(line)
|
||||
if err != nil {
|
||||
t.Fatalf("ibmmq_ metric could not be deciphered - %v", err)
|
||||
}
|
||||
|
||||
toAdd := mqmetric{
|
||||
Key: key,
|
||||
Value: value,
|
||||
Labels: labelMap,
|
||||
}
|
||||
|
||||
returnList = append(returnList, toAdd)
|
||||
}
|
||||
|
||||
return returnList
|
||||
}
|
||||
|
||||
func convertMetricLineToMetric(input string) (string, string, map[string]string, error) {
|
||||
// Lines are in the form "<key>{<labels>} <value>" or "<key> <value>"
|
||||
// Get the key and value while skipping the label
|
||||
var key, value string
|
||||
labelMap := make(map[string]string)
|
||||
if strings.Contains(input, "{") {
|
||||
// Get key
|
||||
splitted := strings.Split(input, "{")
|
||||
if len(splitted) != 2 {
|
||||
return "", "", labelMap, fmt.Errorf("Could not split by { Expected 2 but got %d - %s", len(splitted), input)
|
||||
}
|
||||
key = strings.TrimSpace(splitted[0])
|
||||
|
||||
// Get value
|
||||
splitted = strings.Split(splitted[1], "}")
|
||||
if len(splitted) != 2 {
|
||||
return "", "", labelMap, fmt.Errorf("Could not split by } Expected 2 but got %d - %s", len(splitted), input)
|
||||
}
|
||||
value = strings.TrimSpace(splitted[1])
|
||||
|
||||
// Get labels
|
||||
allLabels := strings.Split(splitted[0], ",")
|
||||
for _, e := range allLabels {
|
||||
labelPair := strings.Split(e, "=")
|
||||
if len(labelPair) != 2 {
|
||||
return "", "", labelMap, fmt.Errorf("Could not split label by '=' Expected 2 but got %d - %s", len(labelPair), e)
|
||||
}
|
||||
lkey := strings.TrimSpace(labelPair[0])
|
||||
lvalue := strings.TrimSpace(labelPair[1])
|
||||
lvalue = strings.Trim(lvalue, "\"")
|
||||
labelMap[lkey] = lvalue
|
||||
}
|
||||
|
||||
} else {
|
||||
splitted := strings.Split(input, " ")
|
||||
if len(splitted) != 2 {
|
||||
return "", "", labelMap, fmt.Errorf("Could not split by ' ' Expected 2 but got %d - %s", len(splitted), input)
|
||||
}
|
||||
key = strings.TrimSpace(splitted[0])
|
||||
value = strings.TrimSpace(splitted[1])
|
||||
}
|
||||
return key, value, labelMap, nil
|
||||
}
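
// Illustrative sketch (not part of the original commit): parsing a typical Prometheus
// exposition line with convertMetricLineToMetric. The sample line is an assumption based
// on the mq_prometheus output format.
func exampleParseMetricLine() (string, string, map[string]string, error) {
	line := `ibmmq_qmgr_user_cpu_time_percentage{qmgr="qm1"} 1.42`
	// Returns key "ibmmq_qmgr_user_cpu_time_percentage", value "1.42",
	// and labels map[qmgr:qm1]
	return convertMetricLineToMetric(line)
}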
|
||||
|
||||
func waitForMetricReady(t *testing.T, port string) {
|
||||
timeout := 12 // 12 * 10s = 2 minutes
|
||||
for i := 0; i < timeout; i++ {
|
||||
urlToUse := fmt.Sprintf("http://localhost:%s", port)
|
||||
resp, err := http.Get(urlToUse)
|
||||
if err == nil {
|
||||
resp.Body.Close()
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(time.Second * 10)
|
||||
}
|
||||
t.Fatalf("Metric endpoint failed to startup in timely manner")
|
||||
}
|
||||
|
||||
func metricsContainerConfig() *ce.ContainerConfig {
|
||||
return &ce.ContainerConfig{
|
||||
Env: []string{
|
||||
"LICENSE=accept",
|
||||
"MQ_QMGR_NAME=" + defaultMetricQMName,
|
||||
"MQ_ENABLE_METRICS=true",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func metricNames() []string {
|
||||
|
||||
// NB: There are currently a total of 93 metrics, but the following 3 do not generate values (based on the queue manager configuration)
|
||||
// - log_occupied_by_reusable_extents_bytes
|
||||
// - log_occupied_by_extents_waiting_to_be_archived_bytes
|
||||
// - log_required_for_media_recovery_bytes
|
||||
|
||||
names := []string{
|
||||
"cpu_load_one_minute_average_percentage",
|
||||
"cpu_load_five_minute_average_percentage",
|
||||
"cpu_load_fifteen_minute_average_percentage",
|
||||
"system_cpu_time_percentage",
|
||||
"user_cpu_time_percentage",
|
||||
"ram_free_percentage",
|
||||
// disabled : "system_ram_size_bytes",
|
||||
"system_cpu_time_estimate_for_queue_manager_percentage",
|
||||
"user_cpu_time_estimate_for_queue_manager_percentage",
|
||||
"ram_usage_estimate_for_queue_manager_bytes",
|
||||
"trace_file_system_free_space_percentage",
|
||||
"trace_file_system_in_use_bytes",
|
||||
"errors_file_system_free_space_percentage",
|
||||
"errors_file_system_in_use_bytes",
|
||||
"fdc_files",
|
||||
"queue_manager_file_system_free_space_percentage",
|
||||
"queue_manager_file_system_in_use_bytes",
|
||||
"log_logical_written_bytes_total",
|
||||
"log_physical_written_bytes_total",
|
||||
"log_primary_space_in_use_percentage",
|
||||
"log_workload_primary_space_utilization_percentage",
|
||||
"log_write_latency_seconds",
|
||||
"log_max_bytes",
|
||||
"log_write_size_bytes",
|
||||
"log_in_use_bytes",
|
||||
"log_file_system_max_bytes",
|
||||
"log_file_system_in_use_bytes",
|
||||
"durable_subscription_create_total",
|
||||
"durable_subscription_alter_total",
|
||||
"durable_subscription_resume_total",
|
||||
"durable_subscription_delete_total",
|
||||
"non_durable_subscription_create_total",
|
||||
"non_durable_subscription_delete_total",
|
||||
"failed_subscription_create_alter_resume_total",
|
||||
"failed_subscription_delete_total",
|
||||
"mqsubrq_total",
|
||||
"failed_mqsubrq_total",
|
||||
// disabled : "durable_subscriber_high_water_mark",
|
||||
// disabled : "durable_subscriber_low_water_mark",
|
||||
// disabled : "non_durable_subscriber_high_water_mark",
|
||||
// disabled : "non_durable_subscriber_low_water_mark",
|
||||
"topic_mqput_mqput1_total",
|
||||
"topic_put_bytes_total",
|
||||
"failed_topic_mqput_mqput1_total",
|
||||
"persistent_topic_mqput_mqput1_total",
|
||||
"non_persistent_topic_mqput_mqput1_total",
|
||||
"published_to_subscribers_message_total",
|
||||
"published_to_subscribers_bytes_total",
|
||||
"mqconn_mqconnx_total",
|
||||
"failed_mqconn_mqconnx_total",
|
||||
"mqdisc_total",
|
||||
// disabled : "concurrent_connections_high_water_mark",
|
||||
"mqopen_total",
|
||||
"failed_mqopen_total",
|
||||
"mqclose_total",
|
||||
"failed_mqclose_total",
|
||||
"mqinq_total",
|
||||
"failed_mqinq_total",
|
||||
"mqset_total",
|
||||
"failed_mqset_total",
|
||||
"persistent_message_mqput_total",
|
||||
"persistent_message_mqput1_total",
|
||||
"persistent_message_put_bytes_total",
|
||||
"non_persistent_message_mqput_total",
|
||||
"non_persistent_message_mqput1_total",
|
||||
"non_persistent_message_put_bytes_total",
|
||||
"mqput_mqput1_total",
|
||||
"mqput_mqput1_bytes_total",
|
||||
"failed_mqput_total",
|
||||
"failed_mqput1_total",
|
||||
"mqstat_total",
|
||||
"persistent_message_destructive_get_total",
|
||||
"persistent_message_browse_total",
|
||||
"persistent_message_get_bytes_total",
|
||||
"persistent_message_browse_bytes_total",
|
||||
"non_persistent_message_destructive_get_total",
|
||||
"non_persistent_message_browse_total",
|
||||
"non_persistent_message_get_bytes_total",
|
||||
"non_persistent_message_browse_bytes_total",
|
||||
"destructive_get_total",
|
||||
"destructive_get_bytes_total",
|
||||
"failed_mqget_total",
|
||||
"failed_browse_total",
|
||||
"mqctl_total",
|
||||
"expired_message_total",
|
||||
"purged_queue_total",
|
||||
"mqcb_total",
|
||||
"failed_mqcb_total",
|
||||
"commit_total",
|
||||
"rollback_total",
|
||||
}
|
||||
return names
|
||||
}
|
||||
39
test/container/pathutils/path_clean.go
Normal file
@@ -0,0 +1,39 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2024
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package pathutils contains code to provide sanitised file paths
|
||||
package pathutils
|
||||
|
||||
import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// CleanPath returns the result of joining a series of sanitised file paths (preventing directory traversal for each path)
|
||||
// If the first path is relative, a relative path is returned
|
||||
func CleanPath(paths ...string) string {
|
||||
if len(paths) == 0 {
|
||||
return ""
|
||||
}
|
||||
var combined string
|
||||
if !path.IsAbs(paths[0]) {
|
||||
combined = "./"
|
||||
}
|
||||
for _, part := range paths {
|
||||
combined = filepath.Join(combined, filepath.FromSlash(path.Clean("/"+part)))
|
||||
}
|
||||
return combined
|
||||
}
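
// Illustrative sketch (not part of the original commit): traversal elements in later paths
// are cleaned against the root before joining, so the result stays beneath the first path.
// The value noted below assumes Linux-style path separators.
func exampleCleanPath() string {
	// Returns "/var/mqm/etc/passwd" rather than escaping to "/etc/passwd"
	return CleanPath("/var/mqm", "../../etc/passwd")
}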