Merge pull request #9 from arthurbarr/master
Add probe values and tidy up README
@@ -17,3 +17,4 @@ description: IBM MQ queue manager
 name: ibm-mqadvanced-server-dev
 version: 1.0.0-beta
 icon: https://developer.ibm.com/messaging/wp-content/uploads/sites/18/2017/07/IBM-MQ-Square-200.png
+tillerVersion: ">=2.4.0"
@@ -10,14 +10,14 @@ This chart deploys a single IBM MQ Advanced for Developers server (queue manager
 
 ## Prerequisites
 
-- Kubernetes 1.5 or greater, with beta APIs enabled
+- Kubernetes 1.7 or greater, with beta APIs enabled
 - If persistence is enabled (see [configuration](#configuration)), then you either need to create a PersistentVolume, or specify a Storage Class if classes are defined in your cluster.
 
 ## Installing the Chart
 
 To install the chart with the release name `foo`:
 
-```bash
+```sh
 helm install --name foo stable/ibm-mqadvanced-server-dev --set license=accept
 ```
 
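After running the install command above you can check that the release has deployed and that the queue manager pod becomes ready. A minimal sketch, assuming the `foo` release name and the `release` label applied by the chart:

```sh
# Show the status of the Helm release
helm status foo

# Watch the queue manager pod until its readiness probe passes
kubectl get pods -l release=foo --watch
```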
@@ -29,13 +29,13 @@ This command accepts the [IBM MQ Advanced for Developers license](LICENSE) and d
 
 To uninstall/delete the `foo` release:
 
-```bash
+```sh
 helm delete foo
 ```
 
 The command removes all the Kubernetes components associated with the chart, except any Persistent Volume Claims (PVCs). This is the default behavior of Kubernetes, and ensures that valuable data is not deleted. In order to delete the Queue Manager's data, you can delete the PVC using the following command:
 
-```bash
+```sh
 kubectl delete pvc -l release=foo
 ```
 
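Before deleting the Queue Manager's data, it can help to check which claims the selector matches. A small sketch for the same `foo` release:

```sh
# List the PVCs that belong to the release before removing them
kubectl get pvc -l release=foo
```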
@@ -49,9 +49,11 @@ The following table lists the configurable parameters of the `ibm-mqadvanced-ser
 | `image.tag` | Image tag | `9` |
 | `image.pullPolicy` | Image pull policy | `IfNotPresent` |
 | `image.pullSecret` | Image pull secret, if you are using a private Docker registry | `nil` |
-| `persistence.enabled` | Use a PersistentVolume to persist MQ data (under `/var/mqm`) | `true` |
-| `persistence.storageClass` | Storage class of backing Persistent Volume | `nil` |
-| `persistence.size` | Size of data volume | `2Gi` |
+| `persistence.enabled` | Use persistent volumes for all defined volumes | `true` |
+| `persistence.useDynamicProvisioning` | Use dynamic provisioning (storage classes) for all volumes | `true` |
+| `dataPVC.name` | Suffix for the PVC name | `"data"` |
+| `dataPVC.storageClassName` | Storage class of volume for main MQ data (under `/var/mqm`) | `""` |
+| `dataPVC.size` | Size of volume for main MQ data (under `/var/mqm`) | `2Gi` |
 | `service.name` | Name of the Kubernetes service to create | `qmgr` |
 | `service.type` | Kubernetes service type exposing ports, e.g. `NodePort` | `ClusterIP` |
 | `resources.limits.cpu` | Kubernetes CPU limit for the Queue Manager container | `500m` |
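With the renamed storage parameters above, an install of the developer chart might override them like this (illustrative sizes; `my-storage-class` is a placeholder for a class defined in your cluster):

```sh
# Install the developer chart with a larger data volume and an explicit storage class
helm install --name foo stable/ibm-mqadvanced-server-dev \
  --set license=accept \
  --set persistence.useDynamicProvisioning=true \
  --set dataPVC.storageClassName=my-storage-class \
  --set dataPVC.size=10Gi
```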
@@ -24,6 +24,8 @@ metadata:
 spec:
   serviceName: {{ .Values.service.name }}
   replicas: 1
+  updateStrategy:
+    type: RollingUpdate
   template:
     metadata:
       labels:
@@ -17,3 +17,4 @@ description: IBM MQ queue manager
 name: ibm-mqadvanced-server-prod
 version: 1.0.0-beta
 icon: https://developer.ibm.com/messaging/wp-content/uploads/sites/18/2017/07/IBM-MQ-Square-200.png
+tillerVersion: ">=2.4.0"
@@ -10,14 +10,14 @@ This chart deploys a single IBM MQ Advanced server (queue manager) into an IBM C
 
 ## Prerequisites
 
-- Kubernetes 1.5 or greater, with beta APIs enabled
+- Kubernetes 1.7 or greater, with beta APIs enabled
 - If persistence is enabled (see [configuration](#configuration)), then you either need to create a PersistentVolume, or specify a Storage Class if classes are defined in your cluster.
 
 ## Installing the Chart
 
 To install the chart with the release name `foo`:
 
-```bash
+```sh
 helm install --name foo stable/ibm-mqadvanced-server-prod --set license=accept
 ```
 
@@ -29,38 +29,47 @@ This command accepts the [IBM MQ Advanced license](LICENSE) and deploys an MQ Ad
 
 To uninstall/delete the `foo` release:
 
-```bash
+```sh
 helm delete foo
 ```
 
 The command removes all the Kubernetes components associated with the chart, except any Persistent Volume Claims (PVCs). This is the default behavior of Kubernetes, and ensures that valuable data is not deleted. In order to delete the Queue Manager's data, you can delete the PVC using the following command:
 
-```bash
+```sh
 kubectl delete pvc -l release=foo
 ```
 
 ## Configuration
 The following table lists the configurable parameters of the `ibm-mqadvanced-server-prod` chart and their default values.
 
-| Parameter | Description | Default |
-| ------------------------------- | ----------------------------------------------- | ---------------------------------------------------------- |
-| `license` | Set to `accept` to accept the terms of the IBM license | `not accepted` |
-| `image.repository` | Image full name including repository | `nil` |
-| `image.tag` | Image tag | `nil` |
-| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
-| `image.pullSecret` | Image pull secret, if you are using a private Docker registry | `nil` |
-| `persistence.enabled` | Use a PersistentVolume to persist MQ data (under `/var/mqm`) | `true` |
-| `persistence.storageClass` | Storage class of backing Persistent Volume | `nil` |
-| `persistence.size` | Size of data volume | `2Gi` |
-| `service.name` | Name of the Kubernetes service to create | `qmgr` |
-| `service.type` | Kubernetes service type exposing ports, e.g. `NodePort` | `ClusterIP` |
-| `resources.limits.cpu` | Kubernetes CPU limit for the Queue Manager container | `1` |
-| `resources.limits.memory` | Kubernetes memory limit for the Queue Manager container | `1Gi` |
-| `resources.requests.cpu` | Kubernetes CPU request for the Queue Manager container | `1` |
-| `resources.requests.memory` | Kubernetes memory request for the Queue Manager container | `1Gi` |
-| `queueManager.name` | MQ Queue Manager name | Helm release name |
+| Parameter | Description | Default |
+| ------------------------------- | --------------------------------------------------------------- | ------------------------------------------ |
+| `license` | Set to `accept` to accept the terms of the IBM license | `"not accepted"` |
+| `image.repository` | Image full name including repository | `nil` |
+| `image.tag` | Image tag | `nil` |
+| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `image.pullSecret` | Image pull secret, if you are using a private Docker registry | `nil` |
+| `persistence.enabled` | Use persistent volumes for all defined volumes | `true` |
+| `persistence.useDynamicProvisioning` | Use dynamic provisioning (storage classes) for all volumes | `true` |
+| `dataPVC.name` | Suffix for the PVC name | `"data"` |
+| `dataPVC.storageClassName` | Storage class of volume for main MQ data (under `/var/mqm`) | `""` |
+| `dataPVC.size` | Size of volume for main MQ data (under `/var/mqm`) | `2Gi` |
+| `service.name` | Name of the Kubernetes service to create | `"qmgr"` |
+| `service.type` | Kubernetes service type exposing ports, e.g. `NodePort` | `ClusterIP` |
+| `resources.limits.cpu` | Kubernetes CPU limit for the Queue Manager container | `1` |
+| `resources.limits.memory` | Kubernetes memory limit for the Queue Manager container | `1Gi` |
+| `resources.requests.cpu` | Kubernetes CPU request for the Queue Manager container | `1` |
+| `resources.requests.memory` | Kubernetes memory request for the Queue Manager container | `1Gi` |
+| `queueManager.name` | MQ Queue Manager name | Helm release name |
 | `nameOverride` | Set to partially override the resource names used in this chart | `nil` |
-| `livenessDelay` | Raises the time out before Kubernetes checks for Queue Manager's health. Useful for slower systems that take longer to start the Queue Manager. | 60 |
+| `livenessProbe.initialDelaySeconds` | The initial delay before starting the liveness probe. Useful for slower systems that take longer to start the Queue Manager. | 60 |
+| `livenessProbe.periodSeconds` | How often to run the probe | 10 |
+| `livenessProbe.timeoutSeconds` | Number of seconds after which the probe times out | 5 |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | 1 |
+| `readinessProbe.initialDelaySeconds` | The initial delay before starting the readiness probe | 10 |
+| `readinessProbe.periodSeconds` | How often to run the probe | 5 |
+| `readinessProbe.timeoutSeconds` | Number of seconds after which the probe times out | 3 |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded | 1 |
 
 Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
 
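As an illustration, the production chart needs an image location as well as the license acceptance, and the new probe parameters can be tuned in the same command (the angle-bracketed values and the probe timings below are placeholders):

```sh
# Install the production chart, pointing at your own registry, and give a
# slow system extra time before the liveness and readiness probes start
helm install --name foo stable/ibm-mqadvanced-server-prod \
  --set license=accept \
  --set image.repository=<registry>/<image> \
  --set image.tag=<tag> \
  --set livenessProbe.initialDelaySeconds=120 \
  --set readinessProbe.initialDelaySeconds=30
```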
@@ -77,9 +86,10 @@ The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/pers
 You have two major options for configuring the MQ queue manager itself:
 
 1. Use existing tools, such as `runmqsc`, MQ Explorer or the MQ Command Server to configure your queue manager directly.
-2. Create a new image with your configuration baked-in
+2. Create a new image layer with your configuration baked-in
 
-If you decide to opt for option 1 you will need to create any administrative entry point to your Queue Manager. This can be completed by either manually running kubectl commands to execute `runmqsc` and configure your entry point or creating a new image which automatically does this. At a minimum you will need to:
+## Configuring MQ using existing tools
+You will need to create any administrative entry point to your Queue Manager. This can be completed by either manually running kubectl commands to execute `runmqsc` and configure your entry point or creating a new image which automatically does this. At a minimum you will need to:
 
 * Create a user with MQ administrative permissions (is a member of the `mqm` group) which you can use to log into your Queue Manager.
 * Enable `ADOPTCTX` so we use the user for authorization as well as authentication when connecting a MQ Application.
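Running `runmqsc` directly in this way needs a shell prompt on the queue manager pod; a minimal sketch of opening one (the pod name is a placeholder for the pod created by your release):

```sh
# Find the pod for the release, then open an interactive shell inside it
kubectl get pods -l release=foo
kubectl exec -it <pod name> -- /bin/bash
```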
@@ -87,15 +97,15 @@ If you decide to opt for option 1 you will need to create any administrative ent
 * Create a channel to use as our entrypoint.
 * Create a channel authentication rule to allow access for administrative users to connect through the channel.
 
-For the above minimum you should execute the following commands through a shell prompt on the pod, if you choose to do this then you should replace `mquser` with your own username:
+For the above minimum, you should execute the following commands through a shell prompt on the pod. If you choose to do this then you should replace `mquser` with your own username:
 
-```
+```sh
 useradd --gid mqm mquser
 passwd mquser
 runmqsc <QM Name>
 ```
 
-Then in the runmqsc program i would execute the following MQSC commands:
+Then in the `runmqsc` program, you could execute the following MQSC commands:
 
 ```
 DEFINE CHANNEL('EXAMPLE.ENTRYPOINT') CHLTYPE(SVRCONN)
@@ -104,25 +114,25 @@ REFRESH SECURITY(*) TYPE(CONNAUTH)
 SET CHLAUTH('EXAMPLE.ENTRYPOINT') TYPE(BLOCKUSER) USERLIST('nobody')
 ```
 
-At this point you can now connect a MQ Explorer or other remote MQ administrative client using the channel `EXAMPLE.ENTRYPOINT` and user `mquser`.
+At this point you could now connect a MQ Explorer or other remote MQ administrative client using the channel `EXAMPLE.ENTRYPOINT` and user `mquser`.
 
 > **Tip**: If you are using a client that has a compatibility mode option for user authentication to connect to your IBM MQ Queue Manager. Make sure you have compatibility mode turned off.
 
-## Configuring MQ objects with a new image
+## Configuring MQ using a new image layer
 You can create a new container image layer, on top of the IBM MQ Advanced base image. You can add MQSC files to define MQ objects such as queues and topics, and place these files into `/etc/mqm` in your image. When the MQ pod starts, it will run any MQSC files found in this directory (in sorted order).
 
-## Example Dockerfile and MQSC script for creating a new image
+### Example Dockerfile and MQSC script for creating a new image
 In this example you will create a Dockerfile that creates two users:
-* `admin` - Administrator users which is a member of the `mqm` group
+* `admin` - Administrator user which is a member of the `mqm` group
 * `app` - Client application user which is a member of the `mqclient` group. (You will also create this group)
 
-You will also create a MQSC Script file called `config.mqsc` that will be copied to `/etc/mqm` on my pod so it is ran automatically at startup. This script will do multiple things including:
-* Creating default Local Queues for my applications
-* Creating channels for my Admin and App users
-* Configuring security to allow use of the channels by remote applications
-* Creating authority records to allow members of the `mqclient` group to access the Queue Manager and the default Local Queues.
+You will also create a MQSC Script file called `config.mqsc` that will be run automatically when your container starts. This script will do the following:
+* Create default local queues for my applications
+* Create channels for use by the `admin` and `app` users
+* Configure security to allow use of the channels by remote applications
+* Create authority records to allow members of the `mqclient` group to access the Queue Manager and the default local queues.
 
-First create a file called `config.MQSC`. This the MQSC file that will be ran at startup. It should contain the following:
+First create a file called `config.mqsc`. This is the MQSC file that will be run when an MQ container starts. It should contain the following:
 
 ```
 * Create Local Queues that my application(s) can use.
@@ -152,11 +162,10 @@ SET AUTHREC OBJTYPE(QMGR) GROUP('mqclient') AUTHADD(CONNECT,INQ)
 SET AUTHREC PROFILE('EXAMPLE.**') OBJTYPE(QUEUE) GROUP('mqclient') AUTHADD(INQ,PUT,GET,BROWSE)
 ```
 
-Next create a `Dockerfile` that expands on the MQ Advanced server image to create the users and groups. It should contain the following, replacing <IMAGE NAME> with the ibm-mq-prod image you want to base this new image off:
+Next create a `Dockerfile` that expands on the MQ Advanced Server image to create the users and groups. It should contain the following, replacing `<IMAGE NAME>` with the MQ image you want to use as a base:
 
-```
+```dockerfile
 FROM <IMAGE NAME>
 
 # Add the admin user as a member of the mqm group and set their password
 RUN useradd admin -G mqm \
   && echo admin:passw0rd | chpasswd \
@@ -165,12 +174,11 @@ RUN useradd admin -G mqm \
   # Create the app user as a member of the mqclient group and set their password
   && useradd app -G mqclient \
   && echo app:passw0rd | chpasswd
 
 # Copy the configuration script to /etc/mqm where it will be picked up automatically
 COPY config.mqsc /etc/mqm/
 ```
 
-Finally build and push the image to your registry.
+Finally, build and push the image to your registry.
 
 You can then use the new image when you deploy MQ into your cluster. You will find that once you have run the image you will be able to see your new default objects and users.
 
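For example, the build and push step might look like the following, where the image name and tag are placeholders for your own registry and naming convention:

```sh
# Build the image from the directory containing the Dockerfile and config.mqsc,
# then push it to the registry your cluster pulls from
docker build -t <registry>/<namespace>/ibm-mqadvanced-server-custom:latest .
docker push <registry>/<namespace>/ibm-mqadvanced-server-custom:latest
```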
@@ -24,6 +24,8 @@ metadata:
 spec:
   serviceName: {{ .Values.service.name }}
   replicas: 1
+  updateStrategy:
+    type: RollingUpdate
   template:
     metadata:
       labels:
@@ -58,19 +60,19 @@ spec:
             exec:
               command:
               - chkmqhealthy
-            initialDelaySeconds: {{ .Values.livenessDelay }}
-            periodSeconds: 10
-            timeoutSeconds: 5
-            failureThreshold: 1
+            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
           # Set readiness probe to determine if the MQ listener is running
           readinessProbe:
             exec:
               command:
               - chkmqready
-            initialDelaySeconds: 10
-            periodSeconds: 5
-            timeoutSeconds: 3
-            failureThreshold: 1
+            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
           resources:
             limits:
{{ toYaml .Values.resources.limits | indent 14 }}
@@ -61,5 +61,17 @@ queueManager:
 # nameOverride can be set to partially override the name of the resources created by this chart
 nameOverride:
 
-# livenessDelay should be raised if your system cannot start the Queue Manager in 60 seconds
-livenessDelay: 60
+# livenessProbe section specifies settings for the MQ liveness probe, which checks for a running Queue Manager
+livenessProbe:
+  # initialDelaySeconds should be raised if your system cannot start the Queue Manager in 60 seconds
+  initialDelaySeconds: 60
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 1
+
+# readinessProbe section specifies settings for the MQ readiness probe, which checks when the MQ listener is running
+readinessProbe:
+  initialDelaySeconds: 10
+  periodSeconds: 5
+  timeoutSeconds: 3
+  failureThreshold: 1
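These defaults can also be overridden from a values file instead of `--set`; a small sketch, with an illustrative file name and timings:

```sh
# probes.yaml is a hypothetical override file; the keys mirror the values above
cat > probes.yaml <<EOF
livenessProbe:
  initialDelaySeconds: 120
readinessProbe:
  initialDelaySeconds: 30
EOF

helm install --name foo stable/ibm-mqadvanced-server-prod --set license=accept -f probes.yaml
```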