diff --git a/install-mq.sh b/install-mq.sh
index ef71448..3e99b05 100644
--- a/install-mq.sh
+++ b/install-mq.sh
@@ -139,7 +139,7 @@ rm -rf ${DIR_EXTRACT}
 
 # Apply any bug fixes not included in base Ubuntu or MQ image.
 # Don't upgrade everything based on Docker best practices https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/#run
-$UBUNTU && apt-get upgrade -y libprocps4 procps
+$UBUNTU && apt-get upgrade -y gpgv gnupg
 # End of bug fixes
 
 # Clean up cached files
diff --git a/internal/metrics/exporter_test.go b/internal/metrics/exporter_test.go
index e24054c..e8db353 100644
--- a/internal/metrics/exporter_test.go
+++ b/internal/metrics/exporter_test.go
@@ -45,7 +45,7 @@ func TestDescribe(t *testing.T) {
 
     select {
     case prometheusDesc := <-ch:
-        expected := "Desc{fqName: \"ibmmq_qmgr_Element1Name\", help: \"Element1Description\", constLabels: {}, variableLabels: [qmgr]}"
+        expected := "Desc{fqName: \"ibmmq_qmgr_" + testElement1Name + "\", help: \"" + testElement1Description + "\", constLabels: {}, variableLabels: [qmgr]}"
         actual := prometheusDesc.String()
         if actual != expected {
             t.Errorf("Expected value=%s; actual %s", expected, actual)
@@ -62,7 +62,7 @@ func TestCollect(t *testing.T) {
 
     log := getTestLogger()
     exporter := newExporter("qmName", log)
-    exporter.gaugeMap["ClassName/Type1Name/Element1Name"] = createGaugeVec("Element1Name", "Element1Description", false)
+    exporter.gaugeMap[testKey1] = createGaugeVec(testElement1Name, testElement1Description, false)
 
     for i := 1; i <= 3; i++ {
 
@@ -85,7 +85,7 @@ func TestCollect(t *testing.T) {
         select {
         case <-ch:
             prometheusMetric := dto.Metric{}
-            exporter.gaugeMap["ClassName/Type1Name/Element1Name"].WithLabelValues("qmName").Write(&prometheusMetric)
+            exporter.gaugeMap[testKey1].WithLabelValues("qmName").Write(&prometheusMetric)
             actual := prometheusMetric.GetGauge().GetValue()
 
             if i == 1 {
diff --git a/internal/metrics/mapping.go b/internal/metrics/mapping.go
new file mode 100644
index 0000000..6edaf2e
--- /dev/null
+++ b/internal/metrics/mapping.go
@@ -0,0 +1,119 @@
+/*
+© Copyright IBM Corporation 2018
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package metrics contains code to provide metrics for the queue manager
+package metrics
+
+// generateMetricNamesMap generates metric names mapped from their description
+func generateMetricNamesMap() map[string]string {
+
+    metricNamesMap := map[string]string{
+        "CPU/SystemSummary/CPU load - five minute average": "cpu_load_five_minute_average_percentage",
+        "CPU/SystemSummary/CPU load - fifteen minute average": "cpu_load_fifteen_minute_average_percentage",
+        "CPU/SystemSummary/RAM free percentage": "ram_free_percentage",
+        "CPU/SystemSummary/RAM total bytes": "ram_total_bytes",
+        "CPU/SystemSummary/User CPU time percentage": "user_cpu_time_percentage",
+        "CPU/SystemSummary/System CPU time percentage": "system_cpu_time_percentage",
+        "CPU/SystemSummary/CPU load - one minute average": "cpu_load_one_minute_average_percentage",
+        "CPU/QMgrSummary/System CPU time - percentage estimate for queue manager": "system_cpu_time_estimate_for_queue_manager_percentage",
+        "CPU/QMgrSummary/RAM total bytes - estimate for queue manager": "ram_total_estimate_for_queue_manager_bytes",
+        "CPU/QMgrSummary/User CPU time - percentage estimate for queue manager": "user_cpu_time_estimate_for_queue_manager_percentage",
+        "DISK/SystemSummary/MQ trace file system - bytes in use": "mq_trace_file_system_in_use_bytes",
+        "DISK/SystemSummary/MQ trace file system - free space": "mq_trace_file_system_free_space_percentage",
+        "DISK/SystemSummary/MQ errors file system - bytes in use": "mq_errors_file_system_in_use_bytes",
+        "DISK/SystemSummary/MQ errors file system - free space": "mq_errors_file_system_free_space_percentage",
+        "DISK/SystemSummary/MQ FDC file count": "mq_fdc_file_count",
+        "DISK/QMgrSummary/Queue Manager file system - bytes in use": "queue_manager_file_system_in_use_bytes",
+        "DISK/QMgrSummary/Queue Manager file system - free space": "queue_manager_file_system_free_space_percentage",
+        "DISK/Log/Log - bytes occupied by reusable extents": "log_occupied_by_reusable_extents_bytes",
+        "DISK/Log/Log - write size": "log_write_size_bytes",
+        "DISK/Log/Log - bytes in use": "log_in_use_bytes",
+        "DISK/Log/Log - logical bytes written": "log_logical_written_bytes",
+        "DISK/Log/Log - write latency": "log_write_latency_seconds",
+        "DISK/Log/Log - bytes required for media recovery": "log_required_for_media_recovery_bytes",
+        "DISK/Log/Log - current primary space in use": "log_current_primary_space_in_use_percentage",
+        "DISK/Log/Log - workload primary space utilization": "log_workload_primary_space_utilization_percentage",
+        "DISK/Log/Log - bytes occupied by extents waiting to be archived": "log_occupied_by_extents_waiting_to_be_archived_bytes",
+        "DISK/Log/Log - bytes max": "log_max_bytes",
+        "DISK/Log/Log file system - bytes in use": "log_file_system_in_use_bytes",
+        "DISK/Log/Log file system - bytes max": "log_file_system_max_bytes",
+        "DISK/Log/Log - physical bytes written": "log_physical_written_bytes",
+        "STATMQI/SUBSCRIBE/Create durable subscription count": "create_durable_subscription_count",
+        "STATMQI/SUBSCRIBE/Resume durable subscription count": "resume_durable_subscription_count",
+        "STATMQI/SUBSCRIBE/Create non-durable subscription count": "create_non_durable_subscription_count",
+        "STATMQI/SUBSCRIBE/Failed create/alter/resume subscription count": "failed_create_alter_resume_subscription_count",
+        "STATMQI/SUBSCRIBE/Subscription delete failure count": "subscription_delete_failure_count",
+        "STATMQI/SUBSCRIBE/MQSUBRQ count": "mqsubrq_count",
+        "STATMQI/SUBSCRIBE/Failed MQSUBRQ count": "failed_mqsubrq_count",
+        "STATMQI/SUBSCRIBE/Durable subscriber - high water mark": "durable_subscriber_high_water_mark_count",
+        "STATMQI/SUBSCRIBE/Non-durable subscriber - high water mark": "non_durable_subscriber_high_water_mark_count",
+        "STATMQI/SUBSCRIBE/Durable subscriber - low water mark": "durable_subscriber_low_water_mark_count",
+        "STATMQI/SUBSCRIBE/Delete non-durable subscription count": "delete_non_durable_subscription_count",
+        "STATMQI/SUBSCRIBE/Alter durable subscription count": "alter_durable_subscription_count",
+        "STATMQI/SUBSCRIBE/Delete durable subscription count": "delete_durable_subscription_count",
+        "STATMQI/SUBSCRIBE/Non-durable subscriber - low water mark": "non_durable_subscriber_low_water_mark_count",
+        "STATMQI/PUBLISH/Interval total topic bytes put": "interval_total_topic_put_bytes",
+        "STATMQI/PUBLISH/Published to subscribers - message count": "published_to_subscribers_message_count",
+        "STATMQI/PUBLISH/Published to subscribers - byte count": "published_to_subscribers_bytes",
+        "STATMQI/PUBLISH/Non-persistent - topic MQPUT/MQPUT1 count": "non_persistent_topic_mqput_mqput1_count",
+        "STATMQI/PUBLISH/Persistent - topic MQPUT/MQPUT1 count": "persistent_topic_mqput_mqput1_count",
+        "STATMQI/PUBLISH/Failed topic MQPUT/MQPUT1 count": "failed_topic_mqput_mqput1_count",
+        "STATMQI/PUBLISH/Topic MQPUT/MQPUT1 interval total": "topic_mqput_mqput1_interval_count",
+        "STATMQI/CONNDISC/MQCONN/MQCONNX count": "mqconn_mqconnx_count",
+        "STATMQI/CONNDISC/Failed MQCONN/MQCONNX count": "failed_mqconn_mqconnx_count",
+        "STATMQI/CONNDISC/Concurrent connections - high water mark": "concurrent_connections_high_water_mark_count",
+        "STATMQI/CONNDISC/MQDISC count": "mqdisc_count",
+        "STATMQI/OPENCLOSE/MQOPEN count": "mqopen_count",
+        "STATMQI/OPENCLOSE/Failed MQOPEN count": "failed_mqopen_count",
+        "STATMQI/OPENCLOSE/MQCLOSE count": "mqclose_count",
+        "STATMQI/OPENCLOSE/Failed MQCLOSE count": "failed_mqclose_count",
+        "STATMQI/INQSET/MQINQ count": "mqinq_count",
+        "STATMQI/INQSET/Failed MQINQ count": "failed_mqinq_count",
+        "STATMQI/INQSET/MQSET count": "mqset_count",
+        "STATMQI/INQSET/Failed MQSET count": "failed_mqset_count",
+        "STATMQI/PUT/Interval total MQPUT/MQPUT1 byte count": "interval_total_mqput_mqput1_bytes",
+        "STATMQI/PUT/Persistent message MQPUT count": "persistent_message_mqput_count",
+        "STATMQI/PUT/Failed MQPUT count": "failed_mqput_count",
+        "STATMQI/PUT/Non-persistent message MQPUT1 count": "non_persistent_message_mqput1_count",
+        "STATMQI/PUT/Persistent message MQPUT1 count": "persistent_message_mqput1_count",
+        "STATMQI/PUT/Failed MQPUT1 count": "failed_mqput1_count",
+        "STATMQI/PUT/Put non-persistent messages - byte count": "put_non_persistent_messages_bytes",
+        "STATMQI/PUT/Interval total MQPUT/MQPUT1 count": "interval_total_mqput_mqput1_count",
+        "STATMQI/PUT/Put persistent messages - byte count": "put_persistent_messages_bytes",
+        "STATMQI/PUT/MQSTAT count": "mqstat_count",
+        "STATMQI/PUT/Non-persistent message MQPUT count": "non_persistent_message_mqput_count",
+        "STATMQI/GET/Interval total destructive get- count": "interval_total_destructive_get_count",
+        "STATMQI/GET/MQCTL count": "mqctl_count",
+        "STATMQI/GET/Failed MQGET - count": "failed_mqget_count",
+        "STATMQI/GET/Got non-persistent messages - byte count": "got_non_persistent_messages_bytes",
+        "STATMQI/GET/Persistent message browse - count": "persistent_message_browse_count",
+        "STATMQI/GET/Expired message count": "expired_message_count",
+        "STATMQI/GET/Purged queue count": "purged_queue_count",
+        "STATMQI/GET/Interval total destructive get - byte count": "interval_total_destructive_get_bytes",
+        "STATMQI/GET/Non-persistent message destructive get - count": "non_persistent_message_destructive_get_count",
+        "STATMQI/GET/Got persistent messages - byte count": "got_persistent_messages_bytes",
+        "STATMQI/GET/Non-persistent message browse - count": "non_persistent_message_browse_count",
+        "STATMQI/GET/Failed browse count": "failed_browse_count",
+        "STATMQI/GET/Persistent message destructive get - count": "persistent_message_destructive_get_count",
+        "STATMQI/GET/Non-persistent message browse - byte count": "non_persistent_message_browse_bytes",
+        "STATMQI/GET/Persistent message browse - byte count": "persistent_message_browse_bytes",
+        "STATMQI/GET/MQCB count": "mqcb_count",
+        "STATMQI/GET/Failed MQCB count": "failed_mqcb_count",
+        "STATMQI/SYNCPOINT/Commit count": "commit_count",
+        "STATMQI/SYNCPOINT/Rollback count": "rollback_count",
+    }
+    return metricNamesMap
+}
diff --git a/internal/metrics/mapping_test.go b/internal/metrics/mapping_test.go
new file mode 100644
index 0000000..1c4cafa
--- /dev/null
+++ b/internal/metrics/mapping_test.go
@@ -0,0 +1,37 @@
+/*
+© Copyright IBM Corporation 2018
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package metrics
+
+import "testing"
+
+func TestGenerateMetricNamesMap(t *testing.T) {
+
+    metricNamesMap := generateMetricNamesMap()
+
+    if len(metricNamesMap) != 93 {
+        t.Errorf("Expected mapping-size=%d; actual %d", 93, len(metricNamesMap))
+    }
+
+    actual, ok := metricNamesMap[testKey1]
+
+    if !ok {
+        t.Errorf("No metric name mapping found for %s", testKey1)
+    } else {
+        if actual != testElement1Name {
+            t.Errorf("Expected metric name=%s; actual %s", testElement1Name, actual)
+        }
+    }
+}
diff --git a/internal/metrics/update.go b/internal/metrics/update.go
index 121c5cd..0a443d2 100644
--- a/internal/metrics/update.go
+++ b/internal/metrics/update.go
@@ -132,20 +132,34 @@ func initialiseMetrics(log *logger.Logger) (map[string]*metricData, error) {
 
     metrics := make(map[string]*metricData)
     validMetrics := true
+    metricNamesMap := generateMetricNamesMap()
 
     for _, metricClass := range mqmetric.Metrics.Classes {
         for _, metricType := range metricClass.Types {
             if !strings.Contains(metricType.ObjectTopic, "%s") {
                 for _, metricElement := range metricType.Elements {
-                    metric := metricData{
-                        name:        metricElement.MetricName,
-                        description: metricElement.Description,
-                    }
+
+                    // Get unique metric key
                     key := makeKey(metricElement)
-                    if _, exists := metrics[key]; !exists {
-                        metrics[key] = &metric
+
+                    // Get metric name from mapping
+                    if metricName, found := metricNamesMap[key]; found {
+
+                        // Set metric details
+                        metric := metricData{
+                            name:        metricName,
+                            description: metricElement.Description,
+                        }
+
+                        // Add metric
+                        if _, exists := metrics[key]; !exists {
+                            metrics[key] = &metric
+                        } else {
+                            log.Errorf("Metrics Error: Found duplicate metric key %s", key)
+                            validMetrics = false
+                        }
                     } else {
-                        log.Errorf("Metrics Error: Found duplicate metric key %s", key)
+                        log.Errorf("Metrics Error: Skipping metric, unexpected key %s", key)
                         validMetrics = false
                     }
                 }
@@ -154,7 +168,7 @@
     }
 
     if !validMetrics {
-        return metrics, fmt.Errorf("Invalid metrics data - found duplicate metric keys")
+        return metrics, fmt.Errorf("Invalid metrics data")
     }
     return metrics, nil
 }
@@ -167,14 +181,20 @@ func updateMetrics(metrics map[string]*metricData) {
         if !strings.Contains(metricType.ObjectTopic, "%s") {
             for _, metricElement := range metricType.Elements {
 
-                // Clear existing metric values
-                metric := metrics[makeKey(metricElement)]
-                metric.values = make(map[string]float64)
+                // Unexpected metric elements (with no defined mapping) are handled in 'initialiseMetrics'
+                // - if any exist, they are logged as errors and skipped (they are not added to the metrics map)
+                // Therefore we can ignore handling any unexpected metric elements found here
+                // - this avoids us logging excessive errors, as this function is called frequently
+                metric, ok := metrics[makeKey(metricElement)]
+                if ok {
+                    // Clear existing metric values
+                    metric.values = make(map[string]float64)
 
-                // Update metric with cached values of publication data
-                for label, value := range metricElement.Values {
-                    normalisedValue := mqmetric.Normalise(metricElement, label, value)
-                    metric.values[label] = normalisedValue
+                    // Update metric with cached values of publication data
+                    for label, value := range metricElement.Values {
+                        normalisedValue := mqmetric.Normalise(metricElement, label, value)
+                        metric.values[label] = normalisedValue
+                    }
                 }
 
                 // Reset cached values of publication data for this metric
@@ -187,5 +207,5 @@
 
 // makeKey builds a unique key for each metric
 func makeKey(metricElement *mqmetric.MonElement) string {
-    return metricElement.Parent.Parent.Name + "/" + metricElement.Parent.Name + "/" + metricElement.MetricName
+    return metricElement.Parent.Parent.Name + "/" + metricElement.Parent.Name + "/" + metricElement.Description
 }
diff --git a/internal/metrics/update_test.go b/internal/metrics/update_test.go
index af38905..82e8874 100644
--- a/internal/metrics/update_test.go
+++ b/internal/metrics/update_test.go
@@ -23,13 +23,24 @@ import (
     "github.com/ibm-messaging/mq-golang/mqmetric"
 )
 
+const (
+    testClassName           = "CPU"
+    testTypeName            = "SystemSummary"
+    testElement1Name        = "cpu_load_five_minute_average_percentage"
+    testElement2Name        = "cpu_load_fifteen_minute_average_percentage"
+    testElement1Description = "CPU load - five minute average"
+    testElement2Description = "CPU load - fifteen minute average"
+    testKey1                = testClassName + "/" + testTypeName + "/" + testElement1Description
+    testKey2                = testClassName + "/" + testTypeName + "/" + testElement2Description
+)
+
 func TestInitialiseMetrics(t *testing.T) {
 
     teardownTestCase := setupTestCase(false)
     defer teardownTestCase()
 
     metrics, err := initialiseMetrics(getTestLogger())
-    metric, ok := metrics["ClassName/Type1Name/Element1Name"]
+    metric, ok := metrics[testKey1]
 
     if err != nil {
         t.Errorf("Unexpected error %s", err.Error())
@@ -37,11 +48,11 @@
     if !ok {
         t.Error("Expected metric not found in map")
     } else {
-        if metric.name != "Element1Name" {
-            t.Errorf("Expected name=%s; actual %s", "Element1Name", metric.name)
+        if metric.name != testElement1Name {
+            t.Errorf("Expected name=%s; actual %s", testElement1Name, metric.name)
         }
-        if metric.description != "Element1Description" {
-            t.Errorf("Expected description=%s; actual %s", "Element1Description", metric.description)
+        if metric.description != testElement1Description {
+            t.Errorf("Expected description=%s; actual %s", testElement1Description, metric.description)
         }
         if metric.objectType != false {
             t.Errorf("Expected objectType=%v; actual %v", false, metric.objectType)
@@ -50,7 +61,7 @@
             t.Errorf("Expected values-size=%d; actual %d", 0, len(metric.values))
         }
     }
-    _, ok = metrics["ClassName/Type2Name/Element2Name"]
+    _, ok = metrics[testKey2]
     if ok {
         t.Errorf("Unexpected metric found in map, %%s object topics should be ignored")
     }
@@ -60,6 +71,19 @@
     }
 }
 
+func TestInitialiseMetrics_UnexpectedKey(t *testing.T) {
+
+    teardownTestCase := setupTestCase(false)
+    defer teardownTestCase()
+
+    mqmetric.Metrics.Classes[0].Types[0].Elements[0].Description = "New Metric"
+    _, err := initialiseMetrics(getTestLogger())
+
+    if err == nil {
+        t.Error("Expected skipping metric error")
+    }
+}
+
 func TestInitialiseMetrics_DuplicateKeys(t *testing.T) {
 
     teardownTestCase := setupTestCase(true)
@@ -80,7 +104,7 @@ func TestUpdateMetrics(t *testing.T) {
 
     metrics, _ := initialiseMetrics(getTestLogger())
     updateMetrics(metrics)
-    metric, _ := metrics["ClassName/Type1Name/Element1Name"]
+    metric, _ := metrics[testKey1]
 
     actual, ok := metric.values[qmgrLabelValue]
     if !ok {
@@ -110,7 +134,7 @@ func TestMakeKey(t *testing.T) {
     teardownTestCase := setupTestCase(false)
     defer teardownTestCase()
 
-    expected := "ClassName/Type1Name/Element1Name"
+    expected := testKey1
     actual := makeKey(mqmetric.Metrics.Classes[0].Types[0].Elements[0])
     if actual != expected {
         t.Errorf("Expected value=%s; actual %s", expected, actual)
@@ -132,15 +156,15 @@
     metricElement1 := new(mqmetric.MonElement)
     metricElement2 := new(mqmetric.MonElement)
 
-    metricClass.Name = "ClassName"
-    metricType1.Name = "Type1Name"
-    metricType2.Name = "Type2Name"
+    metricClass.Name = testClassName
+    metricType1.Name = testTypeName
+    metricType2.Name = testTypeName
     metricElement1.MetricName = "Element1Name"
-    metricElement1.Description = "Element1Description"
+    metricElement1.Description = testElement1Description
     metricElement1.Values = make(map[string]int64)
     metricElement1.Values[qmgrLabelValue] = int64(testValue)
     metricElement2.MetricName = "Element2Name"
-    metricElement2.Description = "Element2Description"
+    metricElement2.Description = testElement2Description
     metricElement2.Values = make(map[string]int64)
     metricType1.ObjectTopic = "ObjectTopic"
     metricType2.ObjectTopic = "%s"
diff --git a/test/docker/mqmetric_test.go b/test/docker/mqmetric_test.go
index e8dab06..cb3d67e 100644
--- a/test/docker/mqmetric_test.go
+++ b/test/docker/mqmetric_test.go
@@ -19,7 +19,6 @@ import (
     "fmt"
     "net"
     "strconv"
-    "strings"
     "testing"
     "time"
 
@@ -54,7 +53,6 @@ func TestGoldenPathMetric(t *testing.T) {
 
 func TestMetricNames(t *testing.T) {
     t.Parallel()
-    approvedSuffixes := []string{"bytes", "seconds", "percentage", "count", "total"}
     cli, err := client.NewEnvClient()
     if err != nil {
         t.Fatal(err)
     }
@@ -72,22 +70,23 @@
 
     // Now actually get the metrics (after waiting for some to become available)
     metrics := getMetrics(t, port)
-    if len(metrics) <= 0 {
-        t.Error("Expected some metrics to be returned but had none...")
+    names := metricNames()
+    if len(metrics) != len(names) {
+        t.Errorf("Expected %d metrics to be returned, received %d", len(names), len(metrics))
     }
 
-    // Check all the metrics have approved suffixes
+    // Check all the metrics have the correct names
     for _, metric := range metrics {
         ok := false
-        for _, e := range approvedSuffixes {
-            if strings.HasSuffix(metric.Key, e) {
+        for _, name := range names {
+            if metric.Key == "ibmmq_qmgr_"+name {
                 ok = true
                 break
             }
         }
        if !ok {
-            t.Errorf("Metric '%s' does not have an approved suffix", metric.Key)
+            t.Errorf("Metric '%s' does not have the expected name", metric.Key)
         }
     }
 
@@ -387,7 +386,7 @@ func TestChangingValues(t *testing.T) {
     conn.Close()
 
     // Now actually get the metrics (after waiting for some to become available)
-    time.Sleep(15 * time.Second)
+    time.Sleep(25 * time.Second)
     metrics = getMetrics(t, port)
     if len(metrics) <= 0 {
         t.Fatal("Expected some metrics to be returned but had none...")
     }
diff --git a/test/docker/mqmetric_test_util.go b/test/docker/mqmetric_test_util.go
index b9b64dc..9a1af05 100644
--- a/test/docker/mqmetric_test_util.go
+++ b/test/docker/mqmetric_test_util.go
@@ -155,3 +155,105 @@ func metricsContainerConfig() *container.Config {
         },
     }
 }
+
+func metricNames() []string {
+
+    // NB: There are currently a total of 93 metrics, but the following 3 do not generate values (based on the queue manager configuration)
+    // - log_occupied_by_reusable_extents_bytes
+    // - log_occupied_by_extents_waiting_to_be_archived_bytes
+    // - log_required_for_media_recovery_bytes
+
+    names := []string{
+        "cpu_load_five_minute_average_percentage",
+        "cpu_load_fifteen_minute_average_percentage",
+        "ram_free_percentage",
+        "ram_total_bytes",
+        "user_cpu_time_percentage",
+        "system_cpu_time_percentage",
+        "cpu_load_one_minute_average_percentage",
+        "system_cpu_time_estimate_for_queue_manager_percentage",
+        "ram_total_estimate_for_queue_manager_bytes",
+        "user_cpu_time_estimate_for_queue_manager_percentage",
+        "mq_trace_file_system_in_use_bytes",
+        "mq_trace_file_system_free_space_percentage",
+        "mq_errors_file_system_in_use_bytes",
+        "mq_errors_file_system_free_space_percentage",
+        "mq_fdc_file_count",
+        "queue_manager_file_system_in_use_bytes",
+        "queue_manager_file_system_free_space_percentage",
+        "log_write_size_bytes",
+        "log_in_use_bytes",
+        "log_logical_written_bytes",
+        "log_write_latency_seconds",
+        "log_current_primary_space_in_use_percentage",
+        "log_workload_primary_space_utilization_percentage",
+        "log_max_bytes",
+        "log_file_system_in_use_bytes",
+        "log_file_system_max_bytes",
+        "log_physical_written_bytes",
+        "create_durable_subscription_count",
+        "resume_durable_subscription_count",
+        "create_non_durable_subscription_count",
+        "failed_create_alter_resume_subscription_count",
+        "subscription_delete_failure_count",
+        "mqsubrq_count",
+        "failed_mqsubrq_count",
+        "durable_subscriber_high_water_mark_count",
+        "non_durable_subscriber_high_water_mark_count",
+        "durable_subscriber_low_water_mark_count",
+        "delete_non_durable_subscription_count",
+        "alter_durable_subscription_count",
+        "delete_durable_subscription_count",
+        "non_durable_subscriber_low_water_mark_count",
+        "interval_total_topic_put_bytes",
+        "published_to_subscribers_message_count",
+        "published_to_subscribers_bytes",
+        "non_persistent_topic_mqput_mqput1_count",
+        "persistent_topic_mqput_mqput1_count",
+        "failed_topic_mqput_mqput1_count",
+        "topic_mqput_mqput1_interval_count",
+        "mqconn_mqconnx_count",
+        "failed_mqconn_mqconnx_count",
+        "concurrent_connections_high_water_mark_count",
+        "mqdisc_count",
+        "mqopen_count",
+        "failed_mqopen_count",
+        "mqclose_count",
+        "failed_mqclose_count",
+        "mqinq_count",
+        "failed_mqinq_count",
+        "mqset_count",
+        "failed_mqset_count",
+        "interval_total_mqput_mqput1_bytes",
+        "persistent_message_mqput_count",
+        "failed_mqput_count",
+        "non_persistent_message_mqput1_count",
+        "persistent_message_mqput1_count",
+        "failed_mqput1_count",
+        "put_non_persistent_messages_bytes",
+        "interval_total_mqput_mqput1_count",
+        "put_persistent_messages_bytes",
+        "mqstat_count",
+        "non_persistent_message_mqput_count",
+        "interval_total_destructive_get_count",
+        "mqctl_count",
+        "failed_mqget_count",
+        "got_non_persistent_messages_bytes",
+        "persistent_message_browse_count",
+        "expired_message_count",
+        "purged_queue_count",
+        "interval_total_destructive_get_bytes",
+        "non_persistent_message_destructive_get_count",
+        "got_persistent_messages_bytes",
+        "non_persistent_message_browse_count",
+        "failed_browse_count",
+        "persistent_message_destructive_get_count",
+        "non_persistent_message_browse_bytes",
+        "persistent_message_browse_bytes",
+        "mqcb_count",
+        "failed_mqcb_count",
+        "commit_count",
+        "rollback_count",
+    }
+    return names
+}