Merge remote-tracking branch 'upstream/master'

Arthur Barr
2018-06-26 10:26:40 +01:00
13 changed files with 519 additions and 254 deletions

View File

@@ -26,6 +26,7 @@ Note that in order to use the image, it is necessary to accept the terms of the
- **LANG** - Set this to the language you would like the license to be printed in.
- **MQ_QMGR_NAME** - Set this to the name you want your Queue Manager to be created with.
- **LOG_FORMAT** - Set this to change the format of the logs which are printed on the container's stdout. Set to "json" to use JSON format (JSON object per line); set to "basic" to use a simple human-readable format. Defaults to "basic".
- **MQ_ENABLE_METRICS** - Set this to `true` to generate Prometheus metrics for your Queue Manager.

See the [default developer configuration docs](docs/developer-config.md) for the extra environment variables supported by the MQ Advanced for Developers image.

View File

@@ -33,6 +33,72 @@ var fsTypes = map[int64]string{
0x9123683e: "btrfs",
0x01021994: "tmpfs",
0x794c7630: "overlayfs",
0x58465342: "xfs",
// less popular codes
0xadf5: "adfs",
0xadff: "affs",
0x5346414F: "afs",
0x0187: "autofs",
0x73757245: "coda",
0x28cd3d45: "cramfs",
0x453dcd28: "cramfs",
0x64626720: "debugfs",
0x73636673: "securityfs",
0xf97cff8c: "selinux",
0x43415d53: "smack",
0x858458f6: "ramfs",
0x958458f6: "hugetlbfs",
0x73717368: "squashfs",
0xf15f: "ecryptfs",
0x414A53: "efs",
0xabba1974: "xenfs",
0x3434: "nilfs",
0xF2F52010: "f2fs",
0xf995e849: "hpfs",
0x9660: "isofs",
0x72b6: "jffs2",
0x6165676C: "pstorefs",
0xde5e81e4: "efivarfs",
0x00c0ffee: "hostfs",
0x137F: "minix_14", // minix v1 fs, 14 char names
0x138F: "minix_30", // minix v1 fs, 30 char names
0x2468: "minix2_14", // minix v2 fs, 14 char names
0x2478: "minix2_30", // minix v2 fs, 30 char names
0x4d5a: "minix3_60", // minix v3 fs, 60 char names
0x4d44: "msdos",
0x564c: "ncp",
0x7461636f: "ocfs2",
0x9fa1: "openprom",
0x002f: "qnx4",
0x68191122: "qnx6",
0x6B414653: "afs_fs",
0x52654973: "reiserfs",
0x517B: "smb",
0x27e0eb: "cgroup",
0x63677270: "cgroup2",
0x7655821: "rdtgroup",
0x57AC6E9D: "stack_end",
0x74726163: "tracefs",
0x01021997: "v9fs",
0x62646576: "bdevfs",
0x64646178: "daxfs",
0x42494e4d: "binfmtfs",
0x1cd1: "devpts",
0xBAD1DEA: "futexfs",
0x50495045: "pipefs",
0x9fa0: "proc",
0x534F434B: "sockfs",
0x62656572: "sysfs",
0x9fa2: "usbdevice",
0x11307854: "mtd_inode",
0x09041934: "anon_inode",
0x73727279: "btrfs",
0x6e736673: "nsfs",
0xcafe4a11: "bpf",
0x5a3c69f0: "aafs",
0x15013346: "udf",
0x13661366: "balloon_kvm",
0x58295829: "zsmalloc",
}
func checkFS(path string) error {
@@ -43,7 +109,11 @@ func checkFS(path string) error {
return nil
}
// Use a type conversion to make type an int64. On s390x it's a uint32.
-t := fsTypes[int64(statfs.Type)]
+t, ok := fsTypes[int64(statfs.Type)]
if !ok {
log.Printf("WARNING: detected %v has unknown filesystem type %x", path, statfs.Type)
return nil
}
switch t {
case "aufs", "overlayfs", "tmpfs":
return fmt.Errorf("%v uses unsupported filesystem type: %v", path, t)
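For background on how a table like this gets used, here is a minimal, hedged sketch of the statfs-lookup pattern in isolation. It is Linux-only, uses `golang.org/x/sys/unix` and a hypothetical subset of magic numbers, and is not the code from this repository.

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// A small, illustrative subset of well-known filesystem magic numbers.
var fsTypes = map[int64]string{
	0x9123683e: "btrfs",
	0x01021994: "tmpfs",
	0x794c7630: "overlayfs",
	0x58465342: "xfs",
	0xef53:     "ext2/ext3/ext4",
}

func main() {
	var statfs unix.Statfs_t
	if err := unix.Statfs("/tmp", &statfs); err != nil {
		log.Fatal(err)
	}
	// Convert explicitly: on most platforms Type is int64, but on s390x it is a uint32.
	name, ok := fsTypes[int64(statfs.Type)]
	if !ok {
		fmt.Printf("unknown filesystem type %#x\n", statfs.Type)
		return
	}
	fmt.Printf("/tmp is on %s\n", name)
}
```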

View File

@@ -24,6 +24,7 @@ The `runmqserver` command has the following responsibilities:
- Works as PID 1, so is responsible for [reaping zombie processes](https://blog.phusion.nl/2015/01/20/docker-and-the-pid-1-zombie-reaping-problem/)
* Creating and starting a queue manager
* Configuring the queue manager, by running any MQSC scripts found under `/etc/mqm`
* Starting Prometheus metrics generation for the queue manager (if enabled)
* Indicates to the `chkmqready` command that configuration is complete, and that normal readiness checking can happen. This is done by writing a file into `/run/runmqserver`

In addition, for MQ Advanced for Developers only, the web server is started.
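The PID 1 point above amounts to collecting exited child processes so they do not linger as zombies. Below is a minimal, Linux-only sketch of such a reaper loop; it is illustrative only and makes no claim about how `runmqserver` actually implements it.

```go
package main

import (
	"log"
	"os"
	"os/exec"
	"os/signal"
	"syscall"
	"time"
)

// reapZombies collects exited children so they do not remain as zombies when
// this process runs as PID 1.
func reapZombies() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGCHLD)
	for range sigs {
		for {
			var status syscall.WaitStatus
			pid, err := syscall.Wait4(-1, &status, syscall.WNOHANG, nil)
			if pid <= 0 || err != nil {
				break // nothing left to reap right now
			}
			log.Printf("reaped child process %d", pid)
		}
	}
}

func main() {
	go reapZombies()
	// Start a short-lived child; once it exits, the reaper collects it.
	if err := exec.Command("true").Start(); err != nil {
		log.Fatal(err)
	}
	time.Sleep(time.Second)
}
```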
@@ -35,4 +36,17 @@ The `runmqdevserver` command is added to the MQ Advanced for Developers image on
2. Generates MQSC files to put in `/etc/mqm`, based on a template, which is updated with values based on supplied environment variables.
3. If requested, it creates TLS key stores under `/run/runmqdevserver`, and configures MQ and the web server to use them

A special version of `runmqserver` is used in the developer image, which performs extra actions like starting the web server. This is built using the `mqdev` [build constraint](https://golang.org/pkg/go/build/#hdr-Build_Constraints).
## Prometheus metrics
[Prometheus](https://prometheus.io) metrics are generated for the queue manager as follows:
1. A connection is established with the queue manager
2. Metrics are discovered by subscribing to topics that provide meta-data on metric classes, types and elements
3. Subscriptions are then created for each topic that provides this metric data
4. Metrics are initialised using Prometheus names mapped from their element descriptions
5. The metrics are then registered with the Prometheus registry as Prometheus Gauges
6. Publications are processed on a periodic basis to retrieve the metric data
7. An HTTP server is setup to listen for requests from Prometheus on `/metrics` port `9157`
8. Prometheus requests are handled by updating the Prometheus Gauges with the latest metric data
9. These updated Prometheus Gauges are then collected by the Prometheus registry
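As a rough illustration of steps 5 and 7 in the list above, the sketch below registers a gauge with the default Prometheus registry and serves it on `/metrics`, port `9157`. The metric name and value are invented for the example; the real exporter derives its gauges and counters from the queue manager publications described above.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A queue-manager-level gauge, labelled by queue manager name.
	gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "ibmmq",
		Name:      "qmgr_example_gauge",
		Help:      "Example gauge metric",
	}, []string{"qmgr"})
	prometheus.MustRegister(gauge)

	// Set a fixed value here; the real exporter updates its metrics from the
	// queue manager's publications whenever Prometheus scrapes it.
	gauge.WithLabelValues("QM1").Set(42)

	// Serve the registered metrics for Prometheus to scrape.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9157", nil))
}
```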

View File

@@ -37,6 +37,21 @@ docker run \
The Docker image always uses `/mnt/mqm` for MQ data, which is correctly linked for you under `/var/mqm` at runtime. This is to handle problems with file permissions on some platforms.
## Running with the default configuration and Prometheus metrics enabled
You can run a queue manager with [Prometheus](https://prometheus.io) metrics enabled. The following command will generate Prometheus metrics for your queue manager on `/metrics` port `9157`:
```
docker run \
--env LICENSE=accept \
--env MQ_QMGR_NAME=QM1 \
--env MQ_ENABLE_METRICS=true \
--publish 1414:1414 \
--publish 9443:9443 \
--publish 9157:9157 \
--detach \
ibmcom/mq
```
## Customizing the queue manager configuration

You can customize the configuration in several ways:
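If metrics were enabled as in the example above, the endpoint can be checked with any HTTP client. The sketch below does it from Go and assumes the container is running locally with port 9157 published.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Fetch the Prometheus exposition-format text from the running container.
	resp, err := http.Get("http://localhost:9157/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(body))
}
```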

View File

@@ -139,7 +139,7 @@ rm -rf ${DIR_EXTRACT}
# Apply any bug fixes not included in base Ubuntu or MQ image.
# Don't upgrade everything based on Docker best practices https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#run
-$UBUNTU && apt-get upgrade -y gpgv gnupg
+$UBUNTU && apt-get install -y gnupg gpgv libgcrypt20 perl-base --only-upgrade
# End of bug fixes
# Clean up cached files

View File

@@ -33,6 +33,7 @@ const (
type exporter struct {
qmName string
gaugeMap map[string]*prometheus.GaugeVec
counterMap map[string]*prometheus.CounterVec
firstCollect bool
log *logger.Logger
}
@@ -41,6 +42,7 @@ func newExporter(qmName string, log *logger.Logger) *exporter {
return &exporter{
qmName: qmName,
gaugeMap: make(map[string]*prometheus.GaugeVec),
counterMap: make(map[string]*prometheus.CounterVec),
firstCollect: true,
log: log,
}
@@ -54,12 +56,22 @@ func (e *exporter) Describe(ch chan<- *prometheus.Desc) {
for key, metric := range response {
-// Allocate a Prometheus Gauge for each available metric
-gaugeVec := createGaugeVec(metric.name, metric.description, metric.objectType)
-e.gaugeMap[key] = gaugeVec
-// Describe metric
-gaugeVec.Describe(ch)
+if metric.isDelta {
+// For delta type metrics - allocate a Prometheus Counter
+counterVec := createCounterVec(metric.name, metric.description, metric.objectType)
+e.counterMap[key] = counterVec
+// Describe metric
+counterVec.Describe(ch)
+} else {
+// For non-delta type metrics - allocate a Prometheus Gauge
+gaugeVec := createGaugeVec(metric.name, metric.description, metric.objectType)
+e.gaugeMap[key] = gaugeVec
+// Describe metric
+gaugeVec.Describe(ch)
+}
}
}
@@ -71,32 +83,61 @@ func (e *exporter) Collect(ch chan<- prometheus.Metric) {
for key, metric := range response {
-// Reset Prometheus Gauge
-gaugeVec := e.gaugeMap[key]
-gaugeVec.Reset()
-// Populate Prometheus Gauge with metric values
-// - Skip on first collect to avoid build-up of accumulated values
-if !e.firstCollect {
-for label, value := range metric.values {
-var err error
-var gauge prometheus.Gauge
-if label == qmgrLabelValue {
-gauge, err = gaugeVec.GetMetricWithLabelValues(e.qmName)
-} else {
-gauge, err = gaugeVec.GetMetricWithLabelValues(label, e.qmName)
-}
-if err == nil {
-gauge.Set(value)
-} else {
-e.log.Errorf("Metrics Error: %s", err.Error())
-}
-}
-}
-// Collect metric
-gaugeVec.Collect(ch)
+if metric.isDelta {
+// For delta type metrics - update their Prometheus Counter
+counterVec := e.counterMap[key]
+// Populate Prometheus Counter with metric values
+// - Skip on first collect to avoid build-up of accumulated values
+if !e.firstCollect {
+for label, value := range metric.values {
+var err error
+var counter prometheus.Counter
+if label == qmgrLabelValue {
+counter, err = counterVec.GetMetricWithLabelValues(e.qmName)
+} else {
+counter, err = counterVec.GetMetricWithLabelValues(label, e.qmName)
+}
+if err == nil {
+counter.Add(value)
+} else {
+e.log.Errorf("Metrics Error: %s", err.Error())
+}
+}
+}
+// Collect metric
+counterVec.Collect(ch)
+} else {
+// For non-delta type metrics - reset their Prometheus Gauge
+gaugeVec := e.gaugeMap[key]
+gaugeVec.Reset()
+// Populate Prometheus Gauge with metric values
+// - Skip on first collect to avoid build-up of accumulated values
+if !e.firstCollect {
+for label, value := range metric.values {
+var err error
+var gauge prometheus.Gauge
+if label == qmgrLabelValue {
+gauge, err = gaugeVec.GetMetricWithLabelValues(e.qmName)
+} else {
+gauge, err = gaugeVec.GetMetricWithLabelValues(label, e.qmName)
+}
+if err == nil {
+gauge.Set(value)
+} else {
+e.log.Errorf("Metrics Error: %s", err.Error())
+}
+}
+}
+// Collect metric
+gaugeVec.Collect(ch)
+}
}
if e.firstCollect {
@@ -104,16 +145,26 @@ func (e *exporter) Collect(ch chan<- prometheus.Metric) {
}
}
// createCounterVec returns a Prometheus CounterVec populated with metric details
func createCounterVec(name, description string, objectType bool) *prometheus.CounterVec {
prefix, labels := getVecDetails(objectType)
counterVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: namespace,
Name: prefix + "_" + name,
Help: description,
},
labels,
)
return counterVec
}
// createGaugeVec returns a Prometheus GaugeVec populated with metric details
func createGaugeVec(name, description string, objectType bool) *prometheus.GaugeVec {
-prefix := qmgrPrefix
-labels := []string{qmgrLabel}
-if objectType {
-prefix = objectPrefix
-labels = []string{objectLabel, qmgrLabel}
-}
+prefix, labels := getVecDetails(objectType)
gaugeVec := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
@@ -125,3 +176,16 @@ func createGaugeVec(name, description string, objectType bool) *prometheus.Gauge
)
return gaugeVec
}
// getVecDetails returns the required prefix and labels for a metric
func getVecDetails(objectType bool) (prefix string, labels []string) {
prefix = qmgrPrefix
labels = []string{qmgrLabel}
if objectType {
prefix = objectPrefix
labels = []string{objectLabel, qmgrLabel}
}
return prefix, labels
}
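To make the delta/non-delta split concrete, here is a self-contained sketch that feeds the same two sample values into a `CounterVec` and a `GaugeVec`: the counter accumulates across collections while the gauge is reset and only keeps the latest value. The metric names are invented for the example.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	labels := []string{"qmgr"}

	// Delta-type metrics accumulate, so they map naturally onto a Counter.
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "ibmmq", Name: "qmgr_example_total", Help: "Example delta metric",
	}, labels)

	// Point-in-time metrics map onto a Gauge that is repopulated each time.
	gauge := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "ibmmq", Name: "qmgr_example_percentage", Help: "Example non-delta metric",
	}, labels)

	// Two collection intervals' worth of published values.
	for _, v := range []float64{2, 3} {
		counter.WithLabelValues("QM1").Add(v) // accumulates: 2, then 5
		gauge.Reset()
		gauge.WithLabelValues("QM1").Set(v) // replaced: 2, then 3
	}

	var cm, gm dto.Metric
	counter.WithLabelValues("QM1").Write(&cm)
	gauge.WithLabelValues("QM1").Write(&gm)
	fmt.Println("counter:", cm.GetCounter().GetValue()) // 5
	fmt.Println("gauge:", gm.GetGauge().GetValue())     // 3
}
```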

View File

@@ -19,11 +19,21 @@ import (
"testing"
"time"
"github.com/ibm-messaging/mq-golang/ibmmq"
"github.com/ibm-messaging/mq-golang/mqmetric"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
-func TestDescribe(t *testing.T) {
+func TestDescribe_Counter(t *testing.T) {
testDescribe(t, true)
}
func TestDescribe_Gauge(t *testing.T) {
testDescribe(t, false)
}
func testDescribe(t *testing.T, isDelta bool) {
teardownTestCase := setupTestCase(false)
defer teardownTestCase()
@@ -40,6 +50,9 @@ func TestDescribe(t *testing.T) {
t.Errorf("Received unexpected collect request") t.Errorf("Received unexpected collect request")
} }
if isDelta {
mqmetric.Metrics.Classes[0].Types[0].Elements[0].Datatype = ibmmq.MQIAMO_MONITOR_DELTA
}
metrics, _ := initialiseMetrics(log)
responseChannel <- metrics
@@ -55,14 +68,26 @@ func TestDescribe(t *testing.T) {
}
}
-func TestCollect(t *testing.T) {
+func TestCollect_Counter(t *testing.T) {
testCollect(t, true)
}
func TestCollect_Gauge(t *testing.T) {
testCollect(t, false)
}
func testCollect(t *testing.T, isDelta bool) {
teardownTestCase := setupTestCase(false)
defer teardownTestCase()
log := getTestLogger()
exporter := newExporter("qmName", log)
-exporter.gaugeMap[testKey1] = createGaugeVec(testElement1Name, testElement1Description, false)
+if isDelta {
exporter.counterMap[testKey1] = createCounterVec(testElement1Name, testElement1Description, false)
} else {
exporter.gaugeMap[testKey1] = createGaugeVec(testElement1Name, testElement1Description, false)
}
for i := 1; i <= 3; i++ {
@@ -78,20 +103,33 @@ func TestCollect(t *testing.T) {
}
populateTestMetrics(i, false)
if isDelta {
mqmetric.Metrics.Classes[0].Types[0].Elements[0].Datatype = ibmmq.MQIAMO_MONITOR_DELTA
}
metrics, _ := initialiseMetrics(log)
updateMetrics(metrics)
responseChannel <- metrics
select {
case <-ch:
var actual float64
prometheusMetric := dto.Metric{}
-exporter.gaugeMap[testKey1].WithLabelValues("qmName").Write(&prometheusMetric)
-actual := prometheusMetric.GetGauge().GetValue()
+if isDelta {
+exporter.counterMap[testKey1].WithLabelValues("qmName").Write(&prometheusMetric)
actual = prometheusMetric.GetCounter().GetValue()
} else {
exporter.gaugeMap[testKey1].WithLabelValues("qmName").Write(&prometheusMetric)
actual = prometheusMetric.GetGauge().GetValue()
}
if i == 1 {
if actual != float64(0) {
t.Errorf("Expected values to be zero on first collect; actual %f", actual)
}
} else if isDelta && i != 2 {
if actual != float64(i+(i-1)) {
t.Errorf("Expected value=%f; actual %f", float64(i+(i-1)), actual)
}
} else if actual != float64(i) {
t.Errorf("Expected value=%f; actual %f", float64(i), actual)
}
@@ -101,6 +139,38 @@ func TestCollect(t *testing.T) {
}
}
func TestCreateCounterVec(t *testing.T) {
ch := make(chan *prometheus.Desc)
counterVec := createCounterVec("MetricName", "MetricDescription", false)
go func() {
counterVec.Describe(ch)
}()
description := <-ch
expected := "Desc{fqName: \"ibmmq_qmgr_MetricName\", help: \"MetricDescription\", constLabels: {}, variableLabels: [qmgr]}"
actual := description.String()
if actual != expected {
t.Errorf("Expected value=%s; actual %s", expected, actual)
}
}
func TestCreateCounterVec_ObjectLabel(t *testing.T) {
ch := make(chan *prometheus.Desc)
counterVec := createCounterVec("MetricName", "MetricDescription", true)
go func() {
counterVec.Describe(ch)
}()
description := <-ch
expected := "Desc{fqName: \"ibmmq_object_MetricName\", help: \"MetricDescription\", constLabels: {}, variableLabels: [object qmgr]}"
actual := description.String()
if actual != expected {
t.Errorf("Expected value=%s; actual %s", expected, actual)
}
}
func TestCreateGaugeVec(t *testing.T) {
ch := make(chan *prometheus.Desc)

View File

@@ -17,103 +17,108 @@ limitations under the License.
// Package metrics contains code to provide metrics for the queue manager
package metrics
-// generateMetricNamesMap generates metric names mapped from their description
-func generateMetricNamesMap() map[string]string {
-metricNamesMap := map[string]string{
-"CPU/SystemSummary/CPU load - five minute average": "cpu_load_five_minute_average_percentage",
-"CPU/SystemSummary/CPU load - fifteen minute average": "cpu_load_fifteen_minute_average_percentage",
-"CPU/SystemSummary/RAM free percentage": "ram_free_percentage",
-"CPU/SystemSummary/RAM total bytes": "ram_total_bytes",
-"CPU/SystemSummary/User CPU time percentage": "user_cpu_time_percentage",
-"CPU/SystemSummary/System CPU time percentage": "system_cpu_time_percentage",
-"CPU/SystemSummary/CPU load - one minute average": "cpu_load_one_minute_average_percentage",
-"CPU/QMgrSummary/System CPU time - percentage estimate for queue manager": "system_cpu_time_estimate_for_queue_manager_percentage",
-"CPU/QMgrSummary/RAM total bytes - estimate for queue manager": "ram_total_estimate_for_queue_manager_bytes",
-"CPU/QMgrSummary/User CPU time - percentage estimate for queue manager": "user_cpu_time_estimate_for_queue_manager_percentage",
-"DISK/SystemSummary/MQ trace file system - bytes in use": "mq_trace_file_system_in_use_bytes",
-"DISK/SystemSummary/MQ trace file system - free space": "mq_trace_file_system_free_space_percentage",
-"DISK/SystemSummary/MQ errors file system - bytes in use": "mq_errors_file_system_in_use_bytes",
-"DISK/SystemSummary/MQ errors file system - free space": "mq_errors_file_system_free_space_percentage",
-"DISK/SystemSummary/MQ FDC file count": "mq_fdc_file_count",
-"DISK/QMgrSummary/Queue Manager file system - bytes in use": "queue_manager_file_system_in_use_bytes",
-"DISK/QMgrSummary/Queue Manager file system - free space": "queue_manager_file_system_free_space_percentage",
-"DISK/Log/Log - bytes occupied by reusable extents": "log_occupied_by_reusable_extents_bytes",
-"DISK/Log/Log - write size": "log_write_size_bytes",
-"DISK/Log/Log - bytes in use": "log_in_use_bytes",
-"DISK/Log/Log - logical bytes written": "log_logical_written_bytes",
-"DISK/Log/Log - write latency": "log_write_latency_seconds",
-"DISK/Log/Log - bytes required for media recovery": "log_required_for_media_recovery_bytes",
-"DISK/Log/Log - current primary space in use": "log_current_primary_space_in_use_percentage",
-"DISK/Log/Log - workload primary space utilization": "log_workload_primary_space_utilization_percentage",
-"DISK/Log/Log - bytes occupied by extents waiting to be archived": "log_occupied_by_extents_waiting_to_be_archived_bytes",
-"DISK/Log/Log - bytes max": "log_max_bytes",
-"DISK/Log/Log file system - bytes in use": "log_file_system_in_use_bytes",
-"DISK/Log/Log file system - bytes max": "log_file_system_max_bytes",
-"DISK/Log/Log - physical bytes written": "log_physical_written_bytes",
-"STATMQI/SUBSCRIBE/Create durable subscription count": "create_durable_subscription_count",
-"STATMQI/SUBSCRIBE/Resume durable subscription count": "resume_durable_subscription_count",
-"STATMQI/SUBSCRIBE/Create non-durable subscription count": "create_non_durable_subscription_count",
-"STATMQI/SUBSCRIBE/Failed create/alter/resume subscription count": "failed_create_alter_resume_subscription_count",
-"STATMQI/SUBSCRIBE/Subscription delete failure count": "subscription_delete_failure_count",
-"STATMQI/SUBSCRIBE/MQSUBRQ count": "mqsubrq_count",
-"STATMQI/SUBSCRIBE/Failed MQSUBRQ count": "failed_mqsubrq_count",
-"STATMQI/SUBSCRIBE/Durable subscriber - high water mark": "durable_subscriber_high_water_mark_count",
-"STATMQI/SUBSCRIBE/Non-durable subscriber - high water mark": "non_durable_subscriber_high_water_mark_count",
-"STATMQI/SUBSCRIBE/Durable subscriber - low water mark": "durable_subscriber_low_water_mark_count",
-"STATMQI/SUBSCRIBE/Delete non-durable subscription count": "delete_non_durable_subscription_count",
-"STATMQI/SUBSCRIBE/Alter durable subscription count": "alter_durable_subscription_count",
-"STATMQI/SUBSCRIBE/Delete durable subscription count": "delete_durable_subscription_count",
-"STATMQI/SUBSCRIBE/Non-durable subscriber - low water mark": "non_durable_subscriber_low_water_mark_count",
-"STATMQI/PUBLISH/Interval total topic bytes put": "interval_total_topic_put_bytes",
-"STATMQI/PUBLISH/Published to subscribers - message count": "published_to_subscribers_message_count",
-"STATMQI/PUBLISH/Published to subscribers - byte count": "published_to_subscribers_bytes",
-"STATMQI/PUBLISH/Non-persistent - topic MQPUT/MQPUT1 count": "non_persistent_topic_mqput_mqput1_count",
-"STATMQI/PUBLISH/Persistent - topic MQPUT/MQPUT1 count": "persistent_topic_mqput_mqput1_count",
-"STATMQI/PUBLISH/Failed topic MQPUT/MQPUT1 count": "failed_topic_mqput_mqput1_count",
-"STATMQI/PUBLISH/Topic MQPUT/MQPUT1 interval total": "topic_mqput_mqput1_interval_count",
-"STATMQI/CONNDISC/MQCONN/MQCONNX count": "mqconn_mqconnx_count",
-"STATMQI/CONNDISC/Failed MQCONN/MQCONNX count": "failed_mqconn_mqconnx_count",
-"STATMQI/CONNDISC/Concurrent connections - high water mark": "concurrent_connections_high_water_mark_count",
-"STATMQI/CONNDISC/MQDISC count": "mqdisc_count",
-"STATMQI/OPENCLOSE/MQOPEN count": "mqopen_count",
-"STATMQI/OPENCLOSE/Failed MQOPEN count": "failed_mqopen_count",
-"STATMQI/OPENCLOSE/MQCLOSE count": "mqclose_count",
-"STATMQI/OPENCLOSE/Failed MQCLOSE count": "failed_mqclose_count",
-"STATMQI/INQSET/MQINQ count": "mqinq_count",
-"STATMQI/INQSET/Failed MQINQ count": "failed_mqinq_count",
-"STATMQI/INQSET/MQSET count": "mqset_count",
-"STATMQI/INQSET/Failed MQSET count": "failed_mqset_count",
-"STATMQI/PUT/Interval total MQPUT/MQPUT1 byte count": "interval_total_mqput_mqput1_bytes",
-"STATMQI/PUT/Persistent message MQPUT count": "persistent_message_mqput_count",
-"STATMQI/PUT/Failed MQPUT count": "failed_mqput_count",
-"STATMQI/PUT/Non-persistent message MQPUT1 count": "non_persistent_message_mqput1_count",
-"STATMQI/PUT/Persistent message MQPUT1 count": "persistent_message_mqput1_count",
-"STATMQI/PUT/Failed MQPUT1 count": "failed_mqput1_count",
-"STATMQI/PUT/Put non-persistent messages - byte count": "put_non_persistent_messages_bytes",
-"STATMQI/PUT/Interval total MQPUT/MQPUT1 count": "interval_total_mqput_mqput1_count",
-"STATMQI/PUT/Put persistent messages - byte count": "put_persistent_messages_bytes",
-"STATMQI/PUT/MQSTAT count": "mqstat_count",
-"STATMQI/PUT/Non-persistent message MQPUT count": "non_persistent_message_mqput_count",
-"STATMQI/GET/Interval total destructive get- count": "interval_total_destructive_get_count",
-"STATMQI/GET/MQCTL count": "mqctl_count",
-"STATMQI/GET/Failed MQGET - count": "failed_mqget_count",
-"STATMQI/GET/Got non-persistent messages - byte count": "got_non_persistent_messages_bytes",
-"STATMQI/GET/Persistent message browse - count": "persistent_message_browse_count",
-"STATMQI/GET/Expired message count": "expired_message_count",
-"STATMQI/GET/Purged queue count": "purged_queue_count",
-"STATMQI/GET/Interval total destructive get - byte count": "interval_total_destructive_get_bytes",
-"STATMQI/GET/Non-persistent message destructive get - count": "non_persistent_message_destructive_get_count",
-"STATMQI/GET/Got persistent messages - byte count": "got_persistent_messages_bytes",
-"STATMQI/GET/Non-persistent message browse - count": "non_persistent_message_browse_count",
-"STATMQI/GET/Failed browse count": "failed_browse_count",
-"STATMQI/GET/Persistent message destructive get - count": "persistent_message_destructive_get_count",
-"STATMQI/GET/Non-persistent message browse - byte count": "non_persistent_message_browse_bytes",
-"STATMQI/GET/Persistent message browse - byte count": "persistent_message_browse_bytes",
-"STATMQI/GET/MQCB count": "mqcb_count",
-"STATMQI/GET/Failed MQCB count": "failed_mqcb_count",
-"STATMQI/SYNCPOINT/Commit count": "commit_count",
-"STATMQI/SYNCPOINT/Rollback count": "rollback_count",
+type metricLookup struct {
+name string
+enabled bool
+}
+// generateMetricNamesMap generates metric names mapped from their description
+func generateMetricNamesMap() map[string]metricLookup {
+metricNamesMap := map[string]metricLookup{
+"CPU/SystemSummary/CPU load - one minute average": metricLookup{"cpu_load_one_minute_average_percentage", true},
+"CPU/SystemSummary/CPU load - five minute average": metricLookup{"cpu_load_five_minute_average_percentage", true},
+"CPU/SystemSummary/CPU load - fifteen minute average": metricLookup{"cpu_load_fifteen_minute_average_percentage", true},
+"CPU/SystemSummary/System CPU time percentage": metricLookup{"system_cpu_time_percentage", true},
+"CPU/SystemSummary/User CPU time percentage": metricLookup{"user_cpu_time_percentage", true},
+"CPU/SystemSummary/RAM free percentage": metricLookup{"ram_free_percentage", true},
+"CPU/SystemSummary/RAM total bytes": metricLookup{"system_ram_size_bytes", true},
+"CPU/QMgrSummary/System CPU time - percentage estimate for queue manager": metricLookup{"system_cpu_time_estimate_for_queue_manager_percentage", true},
+"CPU/QMgrSummary/User CPU time - percentage estimate for queue manager": metricLookup{"user_cpu_time_estimate_for_queue_manager_percentage", true},
+"CPU/QMgrSummary/RAM total bytes - estimate for queue manager": metricLookup{"ram_usage_estimate_for_queue_manager_bytes", true},
+"DISK/SystemSummary/MQ trace file system - free space": metricLookup{"trace_file_system_free_space_percentage", true},
+"DISK/SystemSummary/MQ trace file system - bytes in use": metricLookup{"trace_file_system_in_use_bytes", true},
+"DISK/SystemSummary/MQ errors file system - free space": metricLookup{"errors_file_system_free_space_percentage", true},
+"DISK/SystemSummary/MQ errors file system - bytes in use": metricLookup{"errors_file_system_in_use_bytes", true},
+"DISK/SystemSummary/MQ FDC file count": metricLookup{"fdc_files", true},
+"DISK/QMgrSummary/Queue Manager file system - free space": metricLookup{"queue_manager_file_system_free_space_percentage", true},
+"DISK/QMgrSummary/Queue Manager file system - bytes in use": metricLookup{"queue_manager_file_system_in_use_bytes", true},
+"DISK/Log/Log - logical bytes written": metricLookup{"log_logical_written_bytes_total", true},
+"DISK/Log/Log - physical bytes written": metricLookup{"log_physical_written_bytes_total", true},
+"DISK/Log/Log - current primary space in use": metricLookup{"log_primary_space_in_use_percentage", true},
+"DISK/Log/Log - workload primary space utilization": metricLookup{"log_workload_primary_space_utilization_percentage", true},
+"DISK/Log/Log - write latency": metricLookup{"log_write_latency_seconds", true},
+"DISK/Log/Log - bytes max": metricLookup{"log_max_bytes", true},
+"DISK/Log/Log - write size": metricLookup{"log_write_size_bytes", true},
+"DISK/Log/Log - bytes in use": metricLookup{"log_in_use_bytes", true},
+"DISK/Log/Log file system - bytes max": metricLookup{"log_file_system_max_bytes", true},
+"DISK/Log/Log file system - bytes in use": metricLookup{"log_file_system_in_use_bytes", true},
+"DISK/Log/Log - bytes occupied by reusable extents": metricLookup{"log_occupied_by_reusable_extents_bytes", true},
+"DISK/Log/Log - bytes occupied by extents waiting to be archived": metricLookup{"log_occupied_by_extents_waiting_to_be_archived_bytes", true},
+"DISK/Log/Log - bytes required for media recovery": metricLookup{"log_required_for_media_recovery_bytes", true},
+"STATMQI/SUBSCRIBE/Create durable subscription count": metricLookup{"durable_subscription_create_total", true},
+"STATMQI/SUBSCRIBE/Alter durable subscription count": metricLookup{"durable_subscription_alter_total", true},
+"STATMQI/SUBSCRIBE/Resume durable subscription count": metricLookup{"durable_subscription_resume_total", true},
+"STATMQI/SUBSCRIBE/Delete durable subscription count": metricLookup{"durable_subscription_delete_total", true},
+"STATMQI/SUBSCRIBE/Create non-durable subscription count": metricLookup{"non_durable_subscription_create_total", true},
+"STATMQI/SUBSCRIBE/Delete non-durable subscription count": metricLookup{"non_durable_subscription_delete_total", true},
+"STATMQI/SUBSCRIBE/Failed create/alter/resume subscription count": metricLookup{"failed_subscription_create_alter_resume_total", true},
+"STATMQI/SUBSCRIBE/Subscription delete failure count": metricLookup{"failed_subscription_delete_total", true},
+"STATMQI/SUBSCRIBE/MQSUBRQ count": metricLookup{"mqsubrq_total", true},
+"STATMQI/SUBSCRIBE/Failed MQSUBRQ count": metricLookup{"failed_mqsubrq_total", true},
+"STATMQI/SUBSCRIBE/Durable subscriber - high water mark": metricLookup{"durable_subscriber_high_water_mark", false},
+"STATMQI/SUBSCRIBE/Durable subscriber - low water mark": metricLookup{"durable_subscriber_low_water_mark", false},
+"STATMQI/SUBSCRIBE/Non-durable subscriber - high water mark": metricLookup{"non_durable_subscriber_high_water_mark", false},
+"STATMQI/SUBSCRIBE/Non-durable subscriber - low water mark": metricLookup{"non_durable_subscriber_low_water_mark", false},
+"STATMQI/PUBLISH/Topic MQPUT/MQPUT1 interval total": metricLookup{"topic_mqput_mqput1_total", true},
+"STATMQI/PUBLISH/Interval total topic bytes put": metricLookup{"topic_put_bytes_total", true},
+"STATMQI/PUBLISH/Failed topic MQPUT/MQPUT1 count": metricLookup{"failed_topic_mqput_mqput1_total", true},
+"STATMQI/PUBLISH/Persistent - topic MQPUT/MQPUT1 count": metricLookup{"persistent_topic_mqput_mqput1_total", true},
+"STATMQI/PUBLISH/Non-persistent - topic MQPUT/MQPUT1 count": metricLookup{"non_persistent_topic_mqput_mqput1_total", true},
+"STATMQI/PUBLISH/Published to subscribers - message count": metricLookup{"published_to_subscribers_message_total", true},
+"STATMQI/PUBLISH/Published to subscribers - byte count": metricLookup{"published_to_subscribers_bytes_total", true},
+"STATMQI/CONNDISC/MQCONN/MQCONNX count": metricLookup{"mqconn_mqconnx_total", true},
+"STATMQI/CONNDISC/Failed MQCONN/MQCONNX count": metricLookup{"failed_mqconn_mqconnx_total", true},
+"STATMQI/CONNDISC/MQDISC count": metricLookup{"mqdisc_total", true},
+"STATMQI/CONNDISC/Concurrent connections - high water mark": metricLookup{"concurrent_connections_high_water_mark", false},
+"STATMQI/OPENCLOSE/MQOPEN count": metricLookup{"mqopen_total", true},
+"STATMQI/OPENCLOSE/Failed MQOPEN count": metricLookup{"failed_mqopen_total", true},
+"STATMQI/OPENCLOSE/MQCLOSE count": metricLookup{"mqclose_total", true},
+"STATMQI/OPENCLOSE/Failed MQCLOSE count": metricLookup{"failed_mqclose_total", true},
+"STATMQI/INQSET/MQINQ count": metricLookup{"mqinq_total", true},
+"STATMQI/INQSET/Failed MQINQ count": metricLookup{"failed_mqinq_total", true},
+"STATMQI/INQSET/MQSET count": metricLookup{"mqset_total", true},
+"STATMQI/INQSET/Failed MQSET count": metricLookup{"failed_mqset_total", true},
+"STATMQI/PUT/Persistent message MQPUT count": metricLookup{"persistent_message_mqput_total", true},
+"STATMQI/PUT/Persistent message MQPUT1 count": metricLookup{"persistent_message_mqput1_total", true},
+"STATMQI/PUT/Put persistent messages - byte count": metricLookup{"persistent_message_put_bytes_total", true},
+"STATMQI/PUT/Non-persistent message MQPUT count": metricLookup{"non_persistent_message_mqput_total", true},
+"STATMQI/PUT/Non-persistent message MQPUT1 count": metricLookup{"non_persistent_message_mqput1_total", true},
+"STATMQI/PUT/Put non-persistent messages - byte count": metricLookup{"non_persistent_message_put_bytes_total", true},
+"STATMQI/PUT/Interval total MQPUT/MQPUT1 count": metricLookup{"mqput_mqput1_total", true},
+"STATMQI/PUT/Interval total MQPUT/MQPUT1 byte count": metricLookup{"mqput_mqput1_bytes_total", true},
+"STATMQI/PUT/Failed MQPUT count": metricLookup{"failed_mqput_total", true},
+"STATMQI/PUT/Failed MQPUT1 count": metricLookup{"failed_mqput1_total", true},
+"STATMQI/PUT/MQSTAT count": metricLookup{"mqstat_total", true},
+"STATMQI/GET/Persistent message destructive get - count": metricLookup{"persistent_message_destructive_get_total", true},
+"STATMQI/GET/Persistent message browse - count": metricLookup{"persistent_message_browse_total", true},
+"STATMQI/GET/Got persistent messages - byte count": metricLookup{"persistent_message_get_bytes_total", true},
+"STATMQI/GET/Persistent message browse - byte count": metricLookup{"persistent_message_browse_bytes_total", true},
+"STATMQI/GET/Non-persistent message destructive get - count": metricLookup{"non_persistent_message_destructive_get_total", true},
+"STATMQI/GET/Non-persistent message browse - count": metricLookup{"non_persistent_message_browse_total", true},
+"STATMQI/GET/Got non-persistent messages - byte count": metricLookup{"non_persistent_message_get_bytes_total", true},
+"STATMQI/GET/Non-persistent message browse - byte count": metricLookup{"non_persistent_message_browse_bytes_total", true},
+"STATMQI/GET/Interval total destructive get- count": metricLookup{"destructive_get_total", true},
+"STATMQI/GET/Interval total destructive get - byte count": metricLookup{"destructive_get_bytes_total", true},
+"STATMQI/GET/Failed MQGET - count": metricLookup{"failed_mqget_total", true},
+"STATMQI/GET/Failed browse count": metricLookup{"failed_browse_total", true},
+"STATMQI/GET/MQCTL count": metricLookup{"mqctl_total", true},
+"STATMQI/GET/Expired message count": metricLookup{"expired_message_total", true},
+"STATMQI/GET/Purged queue count": metricLookup{"purged_queue_total", true},
+"STATMQI/GET/MQCB count": metricLookup{"mqcb_total", true},
+"STATMQI/GET/Failed MQCB count": metricLookup{"failed_mqcb_total", true},
+"STATMQI/SYNCPOINT/Commit count": metricLookup{"commit_total", true},
+"STATMQI/SYNCPOINT/Rollback count": metricLookup{"rollback_total", true},
}
return metricNamesMap
}

View File

@@ -30,8 +30,8 @@ func TestGenerateMetricNamesMap(t *testing.T) {
if !ok {
t.Errorf("No metric name mapping found for %s", testKey1)
} else {
-if actual != testElement1Name {
+if actual.name != testElement1Name {
-t.Errorf("Expected metric name=%s; actual %s", testElement1Name, actual)
+t.Errorf("Expected metric name=%s; actual %s", testElement1Name, actual.name)
}
}
}

View File

@@ -23,6 +23,7 @@ import (
"time"
"github.com/ibm-messaging/mq-container/internal/logger"
"github.com/ibm-messaging/mq-golang/ibmmq"
"github.com/ibm-messaging/mq-golang/mqmetric"
)
@@ -43,6 +44,7 @@ type metricData struct {
description string
objectType bool
values map[string]float64
isDelta bool
}
// processMetrics processes publications of metric data and handles describe/collect/stop requests
@@ -143,23 +145,36 @@ func initialiseMetrics(log *logger.Logger) (map[string]*metricData, error) {
key := makeKey(metricElement)
// Get metric name from mapping
-if metricName, found := metricNamesMap[key]; found {
-// Set metric details
-metric := metricData{
-name: metricName,
-description: metricElement.Description,
-}
-// Add metric
-if _, exists := metrics[key]; !exists {
-metrics[key] = &metric
+if metricLookup, found := metricNamesMap[key]; found {
+// Check if metric is enabled
+if metricLookup.enabled {
+// Check if metric is a delta type
+isDelta := false
+if metricElement.Datatype == ibmmq.MQIAMO_MONITOR_DELTA {
+isDelta = true
+}
+// Set metric details
+metric := metricData{
+name: metricLookup.name,
+description: metricElement.Description,
+isDelta: isDelta,
+}
+// Add metric
+if _, exists := metrics[key]; !exists {
+metrics[key] = &metric
+} else {
+log.Errorf("Metrics Error: Found duplicate metric key [%s]", key)
+validMetrics = false
+}
} else {
-log.Errorf("Metrics Error: Found duplicate metric key %s", key)
-validMetrics = false
+log.Debugf("Metrics: Skipping metric, metric is not enabled for key [%s]", key)
}
} else {
-log.Errorf("Metrics Error: Skipping metric, unexpected key %s", key)
+log.Errorf("Metrics Error: Skipping metric, unexpected key [%s]", key)
validMetrics = false
}
}
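A cut-down sketch of the lookup pattern used above: each table entry carries the exported name plus an enabled flag, and delta-type elements are routed to counters rather than gauges. The types and entries below are illustrative only, not the package's actual API.

```go
package main

import "fmt"

// metricLookup mirrors the shape of the lookup entry in the change above.
type metricLookup struct {
	name    string
	enabled bool
}

// A tiny subset of the real mapping, for illustration.
var metricNames = map[string]metricLookup{
	"STATMQI/SYNCPOINT/Commit count":                         {"commit_total", true},
	"STATMQI/SUBSCRIBE/Durable subscriber - high water mark": {"durable_subscriber_high_water_mark", false},
}

// resolve returns the exported metric name for a published element key and
// whether it should be registered at all.
func resolve(key string, isDelta bool) (string, bool) {
	lookup, found := metricNames[key]
	if !found || !lookup.enabled {
		return "", false
	}
	kind := "gauge"
	if isDelta {
		kind = "counter" // delta metrics accumulate, so they become Counters
	}
	return lookup.name + " (" + kind + ")", true
}

func main() {
	fmt.Println(resolve("STATMQI/SYNCPOINT/Commit count", true))
	fmt.Println(resolve("STATMQI/SUBSCRIBE/Durable subscriber - high water mark", false))
}
```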

View File

@@ -329,9 +329,9 @@ func TestVolumeUnmount(t *testing.T) {
defer cleanContainer(t, cli, ctr.ID)
waitForReady(t, cli, ctr.ID)
// Unmount the volume as root
-rc, _ := execContainer(t, cli, ctr.ID, "root", []string{"umount", "-l", "-f", "/mnt/mqm"})
+rc, out := execContainer(t, cli, ctr.ID, "root", []string{"umount", "-l", "-f", "/mnt/mqm"})
if rc != 0 {
-t.Fatalf("Expected umount to work with rc=0, got %v", rc)
+t.Fatalf("Expected umount to work with rc=0, got %v. Output was: %s", rc, out)
}
time.Sleep(3 * time.Second)
rc, _ = execContainer(t, cli, ctr.ID, "mqm", []string{"chkmqhealthy"})
@@ -366,7 +366,9 @@ func TestZombies(t *testing.T) {
// will be adopted by PID 1, and should then be reaped when they die.
_, out := execContainer(t, cli, id, "mqm", []string{"pkill", "--signal", "kill", "-c", "amqzxma0"})
if out == "0" {
-t.Fatalf("Expected pkill to kill a process, got %v", out)
+t.Log("Failed to kill process 'amqzxma0'")
_, out := execContainer(t, cli, id, "root", []string{"ps", "-lA"})
t.Fatalf("Here is a list of currently running processes:\n%s", out)
}
time.Sleep(3 * time.Second)
_, out = execContainer(t, cli, id, "mqm", []string{"bash", "-c", "ps -lA | grep '^. Z'"})
@@ -635,6 +637,7 @@ func TestCorrectLicense(t *testing.T) {
}
id := runContainer(t, cli, &containerConfig)
defer cleanContainer(t, cli, id)
waitForReady(t, cli, id)
rc, license := execContainer(t, cli, id, "mqm", []string{"dspmqver", "-f", "8192", "-b"})
if rc != 0 {

View File

@@ -27,7 +27,6 @@ import (
"os"
"os/exec"
"path/filepath"
-"regexp"
"runtime"
"strconv"
"strings"
@@ -328,7 +327,6 @@ func waitForContainer(t *testing.T, cli *client.Client, ID string, timeout int64
// execContainer runs a command in a running container, and returns the exit code and output
func execContainer(t *testing.T, cli *client.Client, ID string, user string, cmd []string) (int, string) {
-rerun:
config := types.ExecConfig{
User: user,
Privileged: false,
@@ -348,7 +346,9 @@ rerun:
if err != nil {
t.Fatal(err)
}
-cli.ContainerExecStart(context.Background(), resp.ID, types.ExecStartCheck{
+defer hijack.Close()
+time.Sleep(time.Millisecond * 10)
+err = cli.ContainerExecStart(context.Background(), resp.ID, types.ExecStartCheck{
Detach: false,
Tty: false,
})
@@ -357,30 +357,38 @@ rerun:
}
// Wait for the command to finish
var exitcode int
var outputStr string
for {
inspect, err := cli.ContainerExecInspect(context.Background(), resp.ID)
if err != nil {
t.Fatal(err)
}
-if !inspect.Running {
-exitcode = inspect.ExitCode
-break
-}
-}
-buf := new(bytes.Buffer)
-// Each output line has a header, which needs to be removed
-_, err = stdcopy.StdCopy(buf, buf, hijack.Reader)
-if err != nil {
-t.Fatal(err)
-}
-outputStr := strings.TrimSpace(buf.String())
-// Before we go let's just double check it did actually run because sometimes we get a "Exec command already running error"
-alreadyRunningErr := regexp.MustCompile("Error: Exec command .* is already running")
-if alreadyRunningErr.MatchString(outputStr) {
-time.Sleep(1 * time.Second)
-goto rerun
-}
+if inspect.Running {
+continue
+}
+exitcode = inspect.ExitCode
+buf := new(bytes.Buffer)
+// Each output line has a header, which needs to be removed
+_, err = stdcopy.StdCopy(buf, buf, hijack.Reader)
+if err != nil {
+t.Fatal(err)
+}
+outputStr = strings.TrimSpace(buf.String())
+/* Commented out on 14/06/2018 as it might not be needed after adding
+ * pause between ContainerExecAttach and ContainerExecStart.
+ * TODO If intermittent failures do not occur, remove and refactor.
+ *
+ * // Before we go let's just double check it did actually finish running
+ * // because sometimes we get a "Exec command already running error"
+ * alreadyRunningErr := regexp.MustCompile("Error: Exec command .* is already running")
+ * if alreadyRunningErr.MatchString(outputStr) {
+ *	continue
+ * }
+ */
+break
+}
return exitcode, outputStr
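The `stdcopy.StdCopy` call above is needed because output from an exec attach arrives as a single multiplexed stream in which every chunk carries an 8-byte frame header. The sketch below builds two such frames by hand (the header layout is stated here as an assumption) and then demultiplexes them, which is essentially what the test helper relies on before trimming the output.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"log"
	"strings"

	"github.com/docker/docker/pkg/stdcopy"
)

// frame builds one multiplexed frame: 1 byte stream ID (1=stdout, 2=stderr),
// 3 zero bytes, a 4-byte big-endian payload length, then the payload.
func frame(stream byte, payload string) []byte {
	header := make([]byte, 8)
	header[0] = stream
	binary.BigEndian.PutUint32(header[4:], uint32(len(payload)))
	return append(header, payload...)
}

func main() {
	// Simulate a hijacked connection carrying one stdout and one stderr frame.
	var muxed bytes.Buffer
	muxed.Write(frame(1, "hello from stdout\n"))
	muxed.Write(frame(2, "hello from stderr\n"))

	// StdCopy strips the headers and splits the two streams apart.
	var stdout, stderr strings.Builder
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
		log.Fatal(err)
	}
	fmt.Print("stdout: ", stdout.String())
	fmt.Print("stderr: ", stderr.String())
}
```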

View File

@@ -164,96 +164,96 @@ func metricNames() []string {
// - log_required_for_media_recovery_bytes // - log_required_for_media_recovery_bytes
names := []string{
+"cpu_load_one_minute_average_percentage",
"cpu_load_five_minute_average_percentage",
"cpu_load_fifteen_minute_average_percentage",
-"ram_free_percentage",
-"ram_total_bytes",
-"user_cpu_time_percentage",
"system_cpu_time_percentage",
-"cpu_load_one_minute_average_percentage",
+"user_cpu_time_percentage",
+"ram_free_percentage",
+"system_ram_size_bytes",
"system_cpu_time_estimate_for_queue_manager_percentage",
-"ram_total_estimate_for_queue_manager_bytes",
"user_cpu_time_estimate_for_queue_manager_percentage",
-"mq_trace_file_system_in_use_bytes",
-"mq_trace_file_system_free_space_percentage",
-"mq_errors_file_system_in_use_bytes",
-"mq_errors_file_system_free_space_percentage",
-"mq_fdc_file_count",
-"queue_manager_file_system_in_use_bytes",
+"ram_usage_estimate_for_queue_manager_bytes",
+"trace_file_system_free_space_percentage",
+"trace_file_system_in_use_bytes",
+"errors_file_system_free_space_percentage",
+"errors_file_system_in_use_bytes",
+"fdc_files",
"queue_manager_file_system_free_space_percentage",
+"queue_manager_file_system_in_use_bytes",
+"log_logical_written_bytes_total",
+"log_physical_written_bytes_total",
+"log_primary_space_in_use_percentage",
+"log_workload_primary_space_utilization_percentage",
+"log_write_latency_seconds",
+"log_max_bytes",
"log_write_size_bytes",
"log_in_use_bytes",
-"log_logical_written_bytes",
-"log_write_latency_seconds",
-"log_current_primary_space_in_use_percentage",
-"log_workload_primary_space_utilization_percentage",
-"log_max_bytes",
-"log_file_system_in_use_bytes",
"log_file_system_max_bytes",
-"log_physical_written_bytes",
-"create_durable_subscription_count",
-"resume_durable_subscription_count",
-"create_non_durable_subscription_count",
-"failed_create_alter_resume_subscription_count",
-"subscription_delete_failure_count",
-"mqsubrq_count",
-"failed_mqsubrq_count",
-"durable_subscriber_high_water_mark_count",
-"non_durable_subscriber_high_water_mark_count",
-"durable_subscriber_low_water_mark_count",
-"delete_non_durable_subscription_count",
-"alter_durable_subscription_count",
-"delete_durable_subscription_count",
-"non_durable_subscriber_low_water_mark_count",
-"interval_total_topic_put_bytes",
-"published_to_subscribers_message_count",
-"published_to_subscribers_bytes",
-"non_persistent_topic_mqput_mqput1_count",
-"persistent_topic_mqput_mqput1_count",
-"failed_topic_mqput_mqput1_count",
-"topic_mqput_mqput1_interval_count",
-"mqconn_mqconnx_count",
-"failed_mqconn_mqconnx_count",
-"concurrent_connections_high_water_mark_count",
-"mqdisc_count",
-"mqopen_count",
-"failed_mqopen_count",
-"mqclose_count",
-"failed_mqclose_count",
-"mqinq_count",
-"failed_mqinq_count",
-"mqset_count",
-"failed_mqset_count",
-"interval_total_mqput_mqput1_bytes",
-"persistent_message_mqput_count",
-"failed_mqput_count",
-"non_persistent_message_mqput1_count",
-"persistent_message_mqput1_count",
-"failed_mqput1_count",
-"put_non_persistent_messages_bytes",
-"interval_total_mqput_mqput1_count",
-"put_persistent_messages_bytes",
-"mqstat_count",
-"non_persistent_message_mqput_count",
-"interval_total_destructive_get_count",
-"mqctl_count",
-"failed_mqget_count",
-"got_non_persistent_messages_bytes",
-"persistent_message_browse_count",
-"expired_message_count",
-"purged_queue_count",
-"interval_total_destructive_get_bytes",
-"non_persistent_message_destructive_get_count",
-"got_persistent_messages_bytes",
-"non_persistent_message_browse_count",
-"failed_browse_count",
-"persistent_message_destructive_get_count",
-"non_persistent_message_browse_bytes",
-"persistent_message_browse_bytes",
-"mqcb_count",
-"failed_mqcb_count",
-"commit_count",
-"rollback_count",
+"log_file_system_in_use_bytes",
+"durable_subscription_create_total",
+"durable_subscription_alter_total",
+"durable_subscription_resume_total",
+"durable_subscription_delete_total",
+"non_durable_subscription_create_total",
+"non_durable_subscription_delete_total",
+"failed_subscription_create_alter_resume_total",
+"failed_subscription_delete_total",
+"mqsubrq_total",
+"failed_mqsubrq_total",
+// disabled : "durable_subscriber_high_water_mark",
+// disabled : "durable_subscriber_low_water_mark",
+// disabled : "non_durable_subscriber_high_water_mark",
+// disabled : "non_durable_subscriber_low_water_mark",
+"topic_mqput_mqput1_total",
+"topic_put_bytes_total",
+"failed_topic_mqput_mqput1_total",
+"persistent_topic_mqput_mqput1_total",
+"non_persistent_topic_mqput_mqput1_total",
+"published_to_subscribers_message_total",
+"published_to_subscribers_bytes_total",
+"mqconn_mqconnx_total",
+"failed_mqconn_mqconnx_total",
+"mqdisc_total",
+// disabled : "concurrent_connections_high_water_mark",
+"mqopen_total",
+"failed_mqopen_total",
+"mqclose_total",
+"failed_mqclose_total",
+"mqinq_total",
+"failed_mqinq_total",
+"mqset_total",
+"failed_mqset_total",
+"persistent_message_mqput_total",
+"persistent_message_mqput1_total",
+"persistent_message_put_bytes_total",
+"non_persistent_message_mqput_total",
+"non_persistent_message_mqput1_total",
+"non_persistent_message_put_bytes_total",
+"mqput_mqput1_total",
+"mqput_mqput1_bytes_total",
+"failed_mqput_total",
+"failed_mqput1_total",
+"mqstat_total",
+"persistent_message_destructive_get_total",
+"persistent_message_browse_total",
+"persistent_message_get_bytes_total",
+"persistent_message_browse_bytes_total",
+"non_persistent_message_destructive_get_total",
+"non_persistent_message_browse_total",
+"non_persistent_message_get_bytes_total",
+"non_persistent_message_browse_bytes_total",
+"destructive_get_total",
+"destructive_get_bytes_total",
+"failed_mqget_total",
+"failed_browse_total",
+"mqctl_total",
+"expired_message_total",
+"purged_queue_total",
+"mqcb_total",
+"failed_mqcb_total",
+"commit_total",
+"rollback_total",
}
return names
}