Compare commits

..

43 Commits

Author SHA1 Message Date
Tom Jefferson
a28c8a3acb Update version to 9.3.0.4-r2. Update go to 1.18 and re-vendor (#411)
* Update version to 9.3.0.4-r2. Update go to 1.18 and re-vendor dependencies
2023-03-02 13:37:17 +00:00
Tom Jefferson
4311c04634 Merge pull request #376 from mq-cloudpak/amf-1831-version-length-930
Handle multi-digit portions of VRMF
2023-02-24 12:05:51 +00:00
Tom Jefferson
5ff3c358ed Merge branch 'v9.3.0.x' into amf-1831-version-length-930 2023-02-24 09:56:21 +00:00
Tom Jefferson
880fa699ef Merge pull request #404 from mq-cloudpak/tadj-9.3.0.4-update
Update version to 9.3.0.4-r1 and update ubi
2023-02-08 16:26:23 +00:00
Tom Jefferson
bd7c7cd91d Update version to 9.3.0.4-r1 and update ubi 2023-02-08 13:05:07 +00:00
Tom Jefferson
fb2e7c71e1 Update gosec behaviour and version (#397)
* Update gosec behaviour and fix resulting gosec vulnerabilities (#399)

Co-authored-by: KIRAN DARBHA <kirandarbha@in.ibm.com>
2023-02-07 22:03:13 +00:00
Alex Mirski-Fitton
923407e77f Fix setting buildkit disable environment variable 2023-02-06 14:43:43 +00:00
Simon Hirst
2181258e38 Updating gosec to 2.14.0
Updating gosec to 2.14.0
2023-02-01 14:41:02 +00:00
Simon Hirst
015481bd3e Updating gosec to 2.14.0 2023-02-01 12:46:28 +00:00
Alex Mirski-Fitton
27dada90d6 Handle multi-digit portions of VRMF 2023-01-24 13:34:08 +00:00
Alec Painter
de2d068bb6 Merge pull request #368 from mq-cloudpak/sjh-dec-lts-update
Version updates for January release
2023-01-11 08:23:14 +00:00
Simon Hirst
fd34ea0a78 Updating UBI and changelog 2023-01-09 15:07:51 +00:00
Simon Hirst
ad02f98858 Updating versions for January LTS release 2023-01-09 15:02:22 +00:00
Tom Jefferson
27679a6c58 Merge pull request #355 from mq-cloudpak/sjh-dec-updates
Updates for December LTS release
2022-12-06 17:57:38 +00:00
Simon Hirst
592ebdf555 Updating .travis.yml and Dockerfile-server UBI version and RELEASE 2022-12-06 15:32:53 +00:00
Alec Painter
35101841cc Merge pull request #348 from mq-cloudpak/ahp-9301-ubi
updated go-version & ubi
2022-11-14 10:34:39 +00:00
Alec-Painter
9033fd67d7 updated go-version & ubi 2022-11-14 10:11:38 +00:00
Alec Painter
066db7bf04 Merge pull request #341 from mq-cloudpak/ahp-nov-ga-9301r3
update release version for 9.3.0.1-r3
2022-10-21 12:07:29 +01:00
Alec-Painter
c64c188ae5 update release version for 9.3.0.1-r3 2022-10-21 09:06:27 +01:00
Tom Jefferson
8889ced3c8 Merge pull request #338 from mq-cloudpak/tadj-ipgate-go-toolset-930x
Update go toolset
2022-10-10 15:57:40 +01:00
Tom Jefferson
c4bf74bdf5 Update go toolset 2022-10-10 15:08:27 +01:00
Tom Jefferson
090a761aef Merge pull request #332 from mq-cloudpak/tadj-create-9.3.0.1-r2
Update ubi/go/release for 9.3.0.1-r2
2022-09-28 20:05:17 +01:00
Tom Jefferson
11d6685c45 Update ubi/go/release for 9.3.0.1-r2 2022-09-28 19:44:37 +01:00
Alec Painter
45934d8a59 Merge pull request #324 from mq-cloudpak/ahp-9.3.0.1-r1
Updated changelog for 9.3.0.1
2022-09-14 11:25:10 +01:00
Alec Painter
c9de739331 Merge branch 'v9.3.0.x' into ahp-9.3.0.1-r1 2022-09-14 10:37:27 +01:00
Alec-Painter
c78bc602b0 Updated changelog for 9.3.0.1 2022-09-14 10:31:07 +01:00
Alec Painter
f78ac15820 Merge pull request #323 from mq-cloudpak/ahp-9.3.0.1-r1
* Added MQ 9.3.0.1-r1

* Updated ubi & go-toolset versions
2022-09-12 12:15:27 +01:00
Alec-Painter
0e05030047 updated ubi & go version 2022-09-12 11:20:04 +01:00
Alec-Painter
46869e4df4 updated to version 9.3.0.1-r1 2022-09-12 11:18:12 +01:00
Alex Mirski-Fitton
8ba8ae4f6c Merge pull request #313 from mq-cloudpak/amf-credential-helper-go-version-930
Pin docker-credential-helpers for old go installs
2022-08-22 13:25:11 +01:00
Alex Mirski-Fitton
03e939a49b Pin docker-credential-helpers for old go installs 2022-08-22 11:49:54 +01:00
Tom Jefferson
f60281688e Merge pull request #310 from mq-cloudpak/tadj-update-ubi-930x
Update ubi and go version
2022-08-09 14:02:43 +01:00
Tom Jefferson
c3a23b7a1b Update ubi and go version 2022-08-08 21:34:09 +01:00
Prerna Srivastava
1941fc1675 Merge pull request #300 from mq-cloudpak/WS_fixes
Ws fixes
2022-08-05 14:24:32 +05:30
Prerna Srivastava
167319022f WS fix 2022-08-05 13:46:47 +05:30
Prerna Srivastava
fb1d17764a ws fixes 2022-08-05 12:04:04 +05:30
Tom Jefferson
5a20b85759 Merge pull request #294 from mq-cloudpak/tadj-add-august-cd-version
Update release for 9.3.0.0-r3 release
2022-08-01 15:59:29 +01:00
Tom Jefferson
d4668af82f Update release for 9.3.0.0-r3 release 2022-08-01 15:22:09 +01:00
Simon Hirst
ce8a34b1bb Push fake master to different namespace 2022-08-01 14:29:29 +01:00
Simon Hirst
f9852fb553 Push fake master to different namespace 2022-08-01 14:03:32 +01:00
Alex Mirski-Fitton
3957d487c3 Merge pull request #270 from mq-cloudpak/amf-9300r2
Update base images for 9.3.0.0-r2 release
2022-07-01 14:36:45 +01:00
Alex Mirski-Fitton
6427e0d84b Update base images for 9.3.0.0-r2 release 2022-07-01 11:54:21 +01:00
Alex Mirski-Fitton
9f12fc5a04 [ci skip]: Setting up v9.3.0.x branch 2022-06-28 15:46:05 +01:00
499 changed files with 33361 additions and 21517 deletions

View File

@@ -18,16 +18,16 @@ sudo: required
 language: go
 go:
-- "1.17.12"
+- "1.18.9"
 services:
 - docker
 env:
 global:
-- MAIN_BRANCH=v9.3.1
+- MAIN_BRANCH=v9.3.0.x
 - TAGCACHE_FILE=tagcache
-- RELEASE=r1
+- RELEASE=r2
 go_import_path: "github.com/ibm-messaging/mq-container"
@@ -38,51 +38,51 @@ go_import_path: "github.com/ibm-messaging/mq-container"
 jobs:
 include:
 - stage: basic-build
-if: branch != v9.3.1 AND tag IS blank
+if: branch != v9.3.0.x AND tag IS blank
 name: "Basic AMD64 build"
 os: linux
 env:
-- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_931_ARCHIVE_REPOSITORY_DEV_AMD64
+- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_930_ARCHIVE_REPOSITORY_DEV_AMD64
 script: bash -e travis-build-scripts/run.sh
 # CD Build
 - stage: global-tag
-if: branch = v9.3.1 AND type != pull_request OR tag =~ ^release-candidate*
+if: branch = v9.3.0.x AND type != pull_request OR tag =~ ^release-candidate*
 name: "Generate Global Tag"
 os: linux
 script: bash -e travis-build-scripts/global-tag.sh
 - stage: build
-if: branch = v9.3.1 OR tag =~ ^release-candidate*
+if: branch = v9.3.0.x OR tag =~ ^release-candidate*
 name: "Multi-Arch AMD64 build"
 os: linux
 env:
 - BUILD_ALL=true
-- MQ_ARCHIVE_REPOSITORY=$MQ_931_ARCHIVE_REPOSITORY_AMD64
+- MQ_ARCHIVE_REPOSITORY=$MQ_930_ARCHIVE_REPOSITORY_AMD64
-- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_931_ARCHIVE_REPOSITORY_DEV_AMD64
+- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_930_ARCHIVE_REPOSITORY_DEV_AMD64
 script: bash -e travis-build-scripts/run.sh
 - stage: build
-if: branch = v9.3.1 OR tag =~ ^release-candidate*
+if: branch = v9.3.0.x OR tag =~ ^release-candidate*
 name: "Multi-Arch S390X build"
 os: linux-s390
 env:
 - BUILD_ALL=true
 - TEST_OPTS_DOCKER="-run TestGoldenPathWithMetrics"
-- MQ_ARCHIVE_REPOSITORY=$MQ_931_ARCHIVE_REPOSITORY_S390X
+- MQ_ARCHIVE_REPOSITORY=$MQ_930_ARCHIVE_REPOSITORY_S390X
-- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_931_ARCHIVE_REPOSITORY_DEV_S390X
+- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_930_ARCHIVE_REPOSITORY_DEV_S390X
 script: bash -e travis-build-scripts/run.sh
 - stage: build
-if: branch = v9.3.1 OR tag =~ ^release-candidate*
+if: branch = v9.3.0.x OR tag =~ ^release-candidate*
 name: "Multi-Arch PPC64LE build"
 os: linux-ppc64le
 env:
 - BUILD_ALL=true
 - TEST_OPTS_DOCKER="-run TestGoldenPathWithMetrics"
-- MQ_ARCHIVE_REPOSITORY=$MQ_931_ARCHIVE_REPOSITORY_PPC64LE
+- MQ_ARCHIVE_REPOSITORY=$MQ_930_ARCHIVE_REPOSITORY_PPC64LE
-- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_931_ARCHIVE_REPOSITORY_DEV_PPC64LE
+- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_930_ARCHIVE_REPOSITORY_DEV_PPC64LE
 script: bash -e travis-build-scripts/run.sh
 - stage: push-manifest
-if: branch = v9.3.1 AND type != pull_request OR tag =~ ^release-candidate*
+if: branch = v9.3.0.x AND type != pull_request OR tag =~ ^release-candidate*
 name: "Push Manifest-list to registry"
 env:
 - PUSH_MANIFEST_ONLY=true

View File

@@ -1,9 +1,6 @@
 {
 "settingsInheritedFrom": "whitesource-config/whitesource-config@master",
 "scanSettings": {
-"baseBranches": ["private-master", "v9.2.0.x-eus", "v9.3.0.x"]
+"baseBranches": ["private-master", "v9.2.0.x-eus", "v9.2.5"]
-},
+}
-"issueSettings": {
-"issueRepoName": "whitesource-scan-issues"
-}
 }

View File

@@ -1,16 +1,12 @@
 # Change log
-## 9.3.1.1 (2023-01)
+## 9.3.0.3-LTS (2023-01)
-* Updated to MQ version 9.3.1.1
+* Updated to MQ version 9.3.0.3
-## 9.3.1.0-r2 (2022-11)
+## 9.3.0.1-LTS (2022-09)
-* Queue manager attribute SSLKEYR is now set to blank instead of '/run/runmqserver/tls/key' if key and certificate are not supplied.
+* Updated to MQ version 9.3.0.1
-## 9.3.1.0 (2022-10)
-* Updated to MQ version 9.3.1.0
 ## 9.3.0.0 (2022-06)

View File

@@ -13,11 +13,11 @@
 # limitations under the License.
 ARG BASE_IMAGE=registry.access.redhat.com/ubi8/ubi-minimal
-ARG BASE_TAG=8.7-1031
+ARG BASE_TAG=8.7-1085
 ARG BUILDER_IMAGE=registry.access.redhat.com/ubi8/go-toolset
-ARG BUILDER_TAG=1.17.12-11
+ARG BUILDER_TAG=1.18.9-13
 ARG GO_WORKDIR=/opt/app-root/src/go/src/github.com/ibm-messaging/mq-container
-ARG MQ_URL="https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqadv/9.3.1.1-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxX64.tar.gz"
+ARG MQ_URL="https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqadv/9.3.0.4-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxX64.tar.gz"
 ###############################################################################
 # Build stage to build Go code
 ###############################################################################

View File

@@ -1,4 +1,4 @@
-# © Copyright IBM Corporation 2017, 2022
+# © Copyright IBM Corporation 2017, 2023
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,13 +20,6 @@
 include config.env
 include source-branch.env
-# arch_uname is the platform architecture according to the uname program. Can be differ by OS, e.g. `arm64` on macOS, but `aarch64` on Linux.
-arch_uname := $(shell uname -m)
-# arch_go is the platform architecture in Go-style (e.g. amd64, ppc64le, s390x or arm64).
-arch_go := $(if $(findstring x86_64,$(arch_uname)),amd64,$(if $(findstring aarch64,$(arch_uname)),arm64,$(arch_uname)))
-# ARCH is the platform architecture in Go-style (e.g. amd64, ppc64le, s390x or arm64).
-# Override this to build an image for a different architecture. Note that RUN instructions will not be able to succeed without the help of emulation provided by packages like qemu-user-static.
-ARCH ?= $(arch_go)
 # RELEASE shows what release of the container code has been built
 RELEASE ?=
 # MQ_ARCHIVE_REPOSITORY is a remote repository from which to pull the MQ_ARCHIVE (if required)
@@ -67,6 +60,8 @@ MQ_DELIVERY_REGISTRY_NAMESPACE ?=
 MQ_DELIVERY_REGISTRY_USER ?=
 # MQ_DELIVERY_REGISTRY_CREDENTIAL is the password/API key for the remote registry (if required)
 MQ_DELIVERY_REGISTRY_CREDENTIAL ?=
+# ARCH is the platform architecture (e.g. amd64, ppc64le or s390x)
+ARCH ?= $(if $(findstring x86_64,$(shell uname -m)),amd64,$(shell uname -m))
 # LTS is a boolean value to enable/disable LTS container build
 LTS ?= false
 # VOLUME_MOUNT_OPTIONS is used when bind-mounting files from the "downloads" directory into the container. By default, SELinux labels are automatically re-written, but this doesn't work on some filesystems with extended attributes (xattrs). You can turn off the label re-writing by setting this variable to be blank.
@@ -75,8 +70,6 @@ VOLUME_MOUNT_OPTIONS ?= :Z
 ###############################################################################
 # Other variables
 ###############################################################################
-# Build doesn't work if BuildKit is enabled
-DOCKER_BUILDKIT=0
 # Lock Docker API version for compatibility with Podman and with the Docker version in Travis' Ubuntu Bionic
 DOCKER_API_VERSION=1.40
 GO_PKG_DIRS = ./cmd ./internal ./test
@@ -113,17 +106,12 @@ endif
 # Try to figure out which archive to use from the architecture
 ifeq "$(ARCH)" "amd64"
-MQ_ARCHIVE_ARCH:=X86-64
+MQ_ARCHIVE_ARCH=X86-64
-MQ_ARCHIVE_DEV_ARCH:=X64
+MQ_ARCHIVE_DEV_ARCH=X64
 else ifeq "$(ARCH)" "ppc64le"
-MQ_ARCHIVE_ARCH:=PPC64LE
+MQ_ARCHIVE_ARCH=PPC64LE
-MQ_ARCHIVE_DEV_ARCH:=PPC64LE
 else ifeq "$(ARCH)" "s390x"
-MQ_ARCHIVE_ARCH:=S390X
+MQ_ARCHIVE_ARCH=S390X
-MQ_ARCHIVE_DEV_ARCH:=S390X
-else ifeq "$(ARCH)" "arm64"
-MQ_ARCHIVE_ARCH:=ARM64
-MQ_ARCHIVE_DEV_ARCH:=ARM64
 endif
 # If this is a fake master build, push images to alternative location (pipeline wont consider these images GA candidates)
@@ -178,13 +166,6 @@ ifeq ($(shell [ ! -z $(TRAVIS) ] && [ "$(TRAVIS_PULL_REQUEST)" = "false" ] && [
 MQ_MANIFEST_TAG_SUFFIX=.$(TIMESTAMPFLAT).$(GIT_COMMIT)
 endif
-# Make sure we don't use VOLUME_MOUNT_OPTIONS for Podman on macOS
-ifeq "$(COMMAND)" "podman"
-ifeq "$(shell uname -s)" "Darwin"
-VOLUME_MOUNT_OPTIONS:=
-endif
-endif
 PATH_TO_MQ_TAG_CACHE=$(TRAVIS_BUILD_DIR)/.tagcache
 ifneq "$(TRAVIS)" "$(EMPTY)"
 ifneq ("$(wildcard $(PATH_TO_MQ_TAG_CACHE))","")
@@ -304,7 +285,7 @@ build-devjmstest:
 test-devserver: test/docker/vendor
 $(info $(SPACER)$(shell printf $(TITLE)"Test $(MQ_IMAGE_DEVSERVER):$(MQ_TAG) on $(shell $(COMMAND) --version)"$(END)))
 $(COMMAND) inspect $(MQ_IMAGE_DEVSERVER):$(MQ_TAG)
-cd test/docker && TEST_IMAGE=$(MQ_IMAGE_DEVSERVER):$(MQ_TAG) EXPECTED_LICENSE=Developer DEV_JMS_IMAGE=$(DEV_JMS_IMAGE) IBMJRE=false DOCKER_API_VERSION=$(DOCKER_API_VERSION) go test -parallel $(NUM_CPU) -timeout $(TEST_TIMEOUT_DOCKER) -tags mqdev $(TEST_OPTS_DOCKER)
+cd test/docker && TEST_IMAGE=$(MQ_IMAGE_DEVSERVER):$(MQ_TAG) EXPECTED_LICENSE=Developer DEV_JMS_IMAGE=$(DEV_JMS_IMAGE) IBMJRE=true DOCKER_API_VERSION=$(DOCKER_API_VERSION) go test -parallel $(NUM_CPU) -timeout $(TEST_TIMEOUT_DOCKER) -tags mqdev $(TEST_OPTS_DOCKER)
 .PHONY: coverage
 coverage:
@@ -339,7 +320,6 @@ test-advancedserver-cover: test/docker/vendor coverage
 # Command to build the image
 # Args: imageName, imageTag, dockerfile, extraArgs, dockerfileTarget
-# If the ARCH variable has been changed from the default value (arch_go variable), then the `--platform` parameter is added
 define build-mq-command
 $(COMMAND) build \
 --tag $1:$2 \
@@ -356,7 +336,6 @@ define build-mq-command
 --label vcs-ref=$(IMAGE_REVISION) \
 --label vcs-type=git \
 --label vcs-url=$(IMAGE_SOURCE) \
-$(if $(findstring $(arch_go),$(ARCH)),,--platform=linux/$(ARCH)) \
 $(EXTRA_LABELS) \
 --target $5 \
 .
@@ -364,7 +343,7 @@ endef
 # Build using a separate container to host the MQ download files.
 # To minimize the layers in the resulting image, the download files can't be part of the build context.
 # The "docker build" command (and "podman build" on macOS) don't allow you to mount a directory into the build, so a
 # separate container is used to host a web server.
 # Note that for Podman, this means that you need to be using the "rootful" mode, because the rootless mode doesn't allow
 # much control of networking, so the containers can't talk to each other.
@@ -378,7 +357,7 @@ define build-mq-using-web-server
 --detach \
 registry.access.redhat.com/ubi8/nginx-120 nginx -g "daemon off;" || ($(COMMAND) network rm $(BUILD_SERVER_NETWORK) && exit 1)
 BUILD_SERVER_IP=$$($(COMMAND) inspect -f '{{ .NetworkSettings.Networks.$(BUILD_SERVER_NETWORK).IPAddress }}' $(BUILD_SERVER_CONTAINER)); \
-$(call build-mq-command,$1,$2,$3,--network build --build-arg MQ_URL=http://$$BUILD_SERVER_IP:8080/$4,$5) || ($(COMMAND) rm -f $(BUILD_SERVER_CONTAINER) && $(COMMAND) network rm $(BUILD_SERVER_NETWORK) && exit 1)
+DOCKER_BUILDKIT=0 $(call build-mq-command,$1,$2,$3,--network build --build-arg MQ_URL=http://$$BUILD_SERVER_IP:8080/$4,$5) || ($(COMMAND) rm -f $(BUILD_SERVER_CONTAINER) && $(COMMAND) network rm $(BUILD_SERVER_NETWORK) && exit 1)
 $(COMMAND) rm -f $(BUILD_SERVER_CONTAINER)
 $(COMMAND) network rm $(BUILD_SERVER_NETWORK)
 endef
@@ -389,6 +368,13 @@ define build-mq-docker
 $(call build-mq-using-web-server,$1,$2,$3,$4,$5)
 endef
+# Make sure we don't use VOLUME_MOUNT_OPTIONS for Podman on macOS
+ifeq "$(COMMAND)" "podman"
+ifeq "$(shell uname -s)" "Darwin"
+VOLUME_MOUNT_OPTIONS:=
+endif
+endif
 # When building with Podman on macOS (Darwin), use the web server build because you can't use bind-mounted volumes with `podman build` on macOS
 # Args: imageName, imageTag, dockerfile, mqArchive, dockerfileTarget
 define build-mq-podman-Darwin
@@ -451,12 +437,9 @@ build-sdk: downloads/$(MQ_ARCHIVE_DEV)
 .PHONY: log-build-env
 log-build-vars:
 $(info $(SPACER)$(shell printf $(TITLE)"Build environment"$(END)))
-@echo arch_uname=$(arch_uname)
+@echo ARCH=$(ARCH)
-@echo arch_go=$(arch_go)
+@echo MQ_VERSION=$(MQ_VERSION)
-@echo "ARCH=$(ARCH) (origin:$(origin ARCH))"
+@echo MQ_ARCHIVE=$(MQ_ARCHIVE)
-@echo MQ_VERSION="$(MQ_VERSION) (origin:$(origin MQ_VERSION))"
-@echo MQ_ARCHIVE="$(MQ_ARCHIVE) (origin:$(origin MQ_ARCHIVE))"
-@echo MQ_ARCHIVE_DEV_ARCH=$(MQ_ARCHIVE_DEV_ARCH)
 @echo MQ_ARCHIVE_DEV=$(MQ_ARCHIVE_DEV)
 @echo MQ_IMAGE_DEVSERVER=$(MQ_IMAGE_DEVSERVER)
 @echo MQ_IMAGE_ADVANCEDSERVER=$(MQ_IMAGE_ADVANCEDSERVER)
@@ -584,29 +567,16 @@ lint: $(addsuffix /$(wildcard *.go), $(GO_PKG_DIRS))
 .PHONY: gosec
 gosec:
 $(info $(SPACER)$(shell printf "Running gosec test"$(END)))
-@gosec -fmt=json -out=gosec_results.json cmd/... internal/... 2> /dev/null ;\
+@gosecrc=0; gosec -fmt=json -out=gosec_results.json cmd/... internal/... 2> /dev/null || gosecrc=$$?; \
-cat "gosec_results.json" ;\
+cat gosec_results.json | jq '{"GolangErrors": (.["Golang errors"]|length>0),"Issues":(.Issues|length>0)}' | grep 'true' >/dev/null ;\
-cat gosec_results.json | grep HIGH | grep severity > /dev/null ;\
+if [ $$? -eq 0 ] || [ $$gosecrc -ne 0 ]; then \
-if [ $$? -eq 0 ]; then \
+printf "FAILURE: Issues found running gosec - see gosec_results.json\n" ;\
-printf "\nFAILURE: gosec found files containing HIGH severity issues - see results.json\n" ;\
+cat "gosec_results.json" ;\
 exit 1 ;\
 else \
-printf "\ngosec found no HIGH severity issues\n" ;\
+printf "gosec found no issues\n" ;\
-fi ;\
+cat "gosec_results.json" ;\
-cat gosec_results.json | grep MEDIUM | grep severity > /dev/null ;\
+fi
-if [ $$? -eq 0 ]; then \
-printf "\nFAILURE: gosec found files containing MEDIUM severity issues - see results.json\n" ;\
-exit 1 ;\
-else \
-printf "\ngosec found no MEDIUM severity issues\n" ;\
-fi ;\
-cat gosec_results.json | grep LOW | grep severity > /dev/null;\
-if [ $$? -eq 0 ]; then \
-printf "\nFAILURE: gosec found files containing LOW severity issues - see results.json\n" ;\
-exit 1;\
-else \
-printf "\ngosec found no LOW severity issues\n" ;\
-fi ;\
 .PHONY: update-release-information
 update-release-information:

View File

@@ -1,4 +1,4 @@
-# © Copyright IBM Corporation 2017, 2022
+# © Copyright IBM Corporation 2017, 2020
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,31 +20,19 @@
 SRC_DIR = src
 BUILD_DIR = ./build
-ARCH ?= $(if $(findstring x86_64,$(shell uname -m)),amd64,$(if $(findstring aarch64,$(shell uname -m)),aarch64,$(shell uname -m)))
 # Flags passed to the C compiler. Need to use gnu11 to get POSIX functions needed for file locking.
-CFLAGS.amd64 := -m64
+CFLAGS += -std=gnu11 -fPIC -Wall -m64
-CFLAGS.ppc64le := -m64
-CFLAGS.s390x := -m64
-# -m64 is not a valid compiler option on aarch64/arm64 (ARM)
-CFLAGS.arm64 :=
-CFLAGS += -std=gnu11 -fPIC -Wall ${CFLAGS.${ARCH}}
 LIB_APR = -L/usr/lib64 -lapr-1 -laprutil-1
 LIB_MQ = -L/opt/mqm/lib64 -lmqm_r
-all: $(BUILD_DIR)/mqhtpass.so $(BUILD_DIR)/htpass_test $(BUILD_DIR)/log_test
+all: $(BUILD_DIR)/mqhtpass.so $(BUILD_DIR)/htpass_test
 $(BUILD_DIR)/log.o : $(SRC_DIR)/log.c $(SRC_DIR)/log.h
 mkdir -p ${dir $@}
 gcc $(CFLAGS) -c $(SRC_DIR)/log.c -o $@
-$(BUILD_DIR)/log_test : $(BUILD_DIR)/log.o
-mkdir -p ${dir $@}
-gcc $(CFLAGS) $(SRC_DIR)/log_test.c $^ -o $@
-# Run Logging tests, and print log if they fail
-$@ || (cat log_test*.log && exit 1)
 $(BUILD_DIR)/htpass.o : $(SRC_DIR)/htpass.c $(SRC_DIR)/htpass.h
 mkdir -p ${dir $@}
 gcc $(CFLAGS) -c $(SRC_DIR)/htpass.c -I /usr/include/apr-1 -o $@

View File

@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2021, 2022
+© Copyright IBM Corporation 2021
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -124,7 +124,7 @@ void log_printf(const char *source_file, int source_line, const char *level, con
 if (strftime(date_buf, sizeof date_buf, "%FT%T", utc))
 {
 // Round microseconds down to milliseconds, for consistency
-cur += snprintf(cur, end-cur, ", \"ibm_datetime\":\"%s.%03ldZ\"", date_buf, now.tv_usec / (long)1000);
+cur += snprintf(cur, end-cur, ", \"ibm_datetime\":\"%s.%03ldZ\"", date_buf, now.tv_usec / 1000);
 }
 cur += snprintf(cur, end-cur, ", \"ibm_processId\":\"%d\"", pid);
 cur += snprintf(cur, end-cur, ", \"host\":\"%s\"", hostname);
@@ -146,17 +146,7 @@
 // Important: Just do one file write, to prevent problems with multi-threading.
 // This only works if the log message is not too long for the buffer.
-fprintf(fp, "%s", buf);
+fprintf(fp, buf);
 }
 }
-int trimmed_len(char *s, int max_len)
-{
-int i;
-for (i = max_len - 1; i >= 0; i--)
-{
-if (s[i] != ' ')
-break;
-}
-return i+1;
-}

View File

@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2021, 2022
+© Copyright IBM Corporation 2021
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -59,12 +59,5 @@ void log_close();
 */
 #define log_debugf(format,...) log_printf(__FILE__, __LINE__, "DEBUG", format, ##__VA_ARGS__)
-/**
-* Return the length of the string when trimmed of trailing spaces.
-* IBM MQ uses fixed length strings, so this function can be used to print
-* a trimmed version of a string using the "%.*s" printf format string.
-* For example, `log_printf("%.*s", trimmed_len(fw_str, 48), fw_str)`
-*/
-int trimmed_len(char *s, int);
 #endif

View File

@@ -1,120 +0,0 @@
/*
© Copyright IBM Corporation 2022
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "log.h"
// Headers for multi-threaded tests
#include <pthread.h>
// Start a test and log the function name
#define test_start() printf("=== RUN: %s\n", __func__)
// Indicate test has passed
#define test_pass() printf("--- PASS: %s\n", __func__)
// The length of strings used in the tests
#define STR_LEN 5
// Indicate test has failed
void test_fail(const char *test_name)
{
printf("--- FAIL: %s\n", test_name);
exit(1);
}
// Print a fixed-width string in hexadecimal
void print_hex(char fw_string[STR_LEN])
{
printf("[");
for (int i=0; i<STR_LEN; i++)
{
printf("%02x", fw_string[i]);
if (i < STR_LEN-1)
printf(",");
}
printf("]");
}
// ----------------------------------------------------------------------------
// Tests for string manipulation
// ----------------------------------------------------------------------------
void test_trimmed_len(const char *test_name, char fw_string[STR_LEN], int expected_len)
{
printf("=== RUN: %s\n", test_name);
int len;
// Create a copy of the fixed-width string
char fw_string2[STR_LEN];
memcpy(fw_string2, fw_string, STR_LEN * sizeof(char));
// Call the function under test
len = trimmed_len(fw_string, STR_LEN);
// Check the result is correct
if (len != expected_len)
{
printf("%s: Expected result to be %d; got %d\n", __func__, expected_len, len);
test_fail(test_name);
}
// Check that the original string has not been changed
for (int i=0; i<STR_LEN; i++)
{
if (fw_string[i] != fw_string2[i])
{
printf("%c-%c\n", fw_string[i], fw_string2[i]);
printf("%s: Expected string to be identical to input hex ", __func__);
print_hex(fw_string2);
printf("; got hex ");
print_hex(fw_string);
printf("\n");
test_fail(test_name);
}
}
printf("--- PASS: %s\n", test_name);
}
void test_trimmed_len_normal()
{
char fw_string[STR_LEN] = {'a','b','c',' ',' '};
test_trimmed_len(__func__, fw_string, 3);
}
void test_trimmed_len_full()
{
char fw_string[STR_LEN] = {'a','b','c','d','e'};
test_trimmed_len(__func__, fw_string, 5);
}
void test_trimmed_len_empty()
{
char fw_string[STR_LEN] = {' ',' ',' ',' ',' '};
test_trimmed_len(__func__, fw_string, 0);
}
// ----------------------------------------------------------------------------
int main()
{
// Turn on debugging for the tests
setenv("DEBUG", "true", true);
log_init("log_test.log");
test_trimmed_len_normal();
test_trimmed_len_full();
test_trimmed_len_empty();
log_close();
}

View File

@@ -34,6 +34,8 @@ static MQZ_TERM_AUTHORITY mqhtpass_terminate;
 #define HTPASSWD_FILE "/etc/mqm/mq.htpasswd"
 #define NAME "MQ Advanced for Developers custom authentication service"
+static char *trim(char *s);
 /**
 * Initialization and entrypoint for the dynamically loaded
 * authorization installable service. It registers the addresses of the
@@ -78,7 +80,7 @@ void MQENTRY MQStart(
 {
 log_infof("Initializing %s", NAME);
 }
-log_debugf("MQStart options=%s qmgr=%.*s", ((Options == MQZIO_SECONDARY) ? "Secondary" : "Primary"), trimmed_len(QMgrName, MQ_Q_MGR_NAME_LENGTH), QMgrName);
+log_debugf("MQStart options=%s qmgr=%s", ((Options == MQZIO_SECONDARY) ? "Secondary" : "Primary"), trim(QMgrName));
 if (!htpass_valid_file(HTPASSWD_FILE))
 {
@@ -174,14 +176,11 @@
 // Tell the queue manager to continue trying other authorization services, as they might have the user.
 *pContinuation = MQZCI_CONTINUE;
 log_debugf(
-"User authentication failed due to invalid user. user=%.*s effuser=%.*s applname=%.*s csp_user=%s cc=%d reason=%d",
+"User authentication failed due to invalid user. user=%s effuser=%s applname=%s csp_user=%s cc=%d reason=%d",
-trimmed_len(pIdentityContext->UserIdentifier, MQ_USER_ID_LENGTH),
+trim(pIdentityContext->UserIdentifier),
-pIdentityContext->UserIdentifier,
+trim(pApplicationContext->EffectiveUserID),
-trimmed_len(pApplicationContext->EffectiveUserID, MQ_USER_ID_LENGTH),
+trim(pApplicationContext->ApplName),
-pApplicationContext->EffectiveUserID,
+trim(csp_user),
-trimmed_len(pApplicationContext->ApplName, MQ_APPL_NAME_LENGTH),
-pApplicationContext->ApplName,
-csp_user,
 *pCompCode,
 *pReason);
 }
@@ -193,14 +192,11 @@
 // Tell the queue manager to stop trying other authorization services.
 *pContinuation = MQZCI_STOP;
 log_debugf(
-"User authentication failed due to invalid password. user=%.*s effuser=%.*s applname=%.*s csp_user=%s cc=%d reason=%d",
+"User authentication failed due to invalid password. user=%s effuser=%s applname=%s csp_user=%s cc=%d reason=%d",
-trimmed_len(pIdentityContext->UserIdentifier, MQ_USER_ID_LENGTH),
+trim(pIdentityContext->UserIdentifier),
-pIdentityContext->UserIdentifier,
+trim(pApplicationContext->EffectiveUserID),
-trimmed_len(pApplicationContext->EffectiveUserID, MQ_USER_ID_LENGTH),
+trim(pApplicationContext->ApplName),
-pApplicationContext->EffectiveUserID,
+trim(csp_user),
-trimmed_len(pApplicationContext->ApplName, MQ_APPL_NAME_LENGTH),
-pApplicationContext->ApplName,
-csp_user,
 *pCompCode,
 *pReason);
 }
@@ -279,14 +275,11 @@
 else
 {
 log_debugf(
-"User authentication failed user=%.*s effuser=%.*s applname=%.*s cspuser=%s cc=%d reason=%d",
+"User authentication failed user=%s effuser=%s applname=%s cspuser=%s cc=%d reason=%d",
-trimmed_len(pIdentityContext->UserIdentifier, MQ_USER_ID_LENGTH),
+trim(pIdentityContext->UserIdentifier),
-pIdentityContext->UserIdentifier,
+trim(pApplicationContext->EffectiveUserID),
-trimmed_len(pApplicationContext->EffectiveUserID, MQ_USER_ID_LENGTH),
+trim(pApplicationContext->ApplName),
-pApplicationContext->EffectiveUserID,
+trim(spuser),
-trimmed_len(pApplicationContext->ApplName, MQ_APPL_NAME_LENGTH),
-pApplicationContext->ApplName,
-spuser,
 *pCompCode,
 *pReason);
 }
@@ -340,3 +333,18 @@
 *pReason = MQRC_NONE;
 }
+/**
+* Remove trailing spaces from a string.
+*/
+static char *trim(char *s)
+{
+int i;
+for (i = strlen(s) - 1; i >= 0; i--)
+{
+if (s[i] == ' ')
+s[i] = 0;
+else
+break;
+}
+return s;
+}
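The two sides above handle MQ's fixed-length, space-padded fields differently: the removed code computes a trimmed length and prints a prefix with `%.*s`, leaving the buffer untouched, while the added `trim` shortens the string in place. Purely as an illustration of the non-mutating idea, here is a hedged Go sketch (not the repository's C code; `trimmedLen` and the sample value are invented for this example):

```go
package main

import (
	"fmt"
	"strings"
)

// trimmedLen returns the length of s ignoring trailing spaces, so a prefix of
// the original, space-padded value can be printed without modifying it.
func trimmedLen(s string) int {
	return len(strings.TrimRight(s, " "))
}

func main() {
	qmName := "QM1     " // stand-in for a fixed-length, space-padded MQ field
	fmt.Printf("qmgr=%.*s\n", trimmedLen(qmName), qmName) // prints "qmgr=QM1"
}
```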

View File

@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2017, 2022
+© Copyright IBM Corporation 2017, 2020
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -18,24 +18,22 @@ limitations under the License.
 package main
 import (
-"context"
 "fmt"
 "os"
 "os/exec"
-"os/signal"
 "strings"
 "github.com/ibm-messaging/mq-container/pkg/name"
 )
-func queueManagerHealthy(ctx context.Context) (bool, error) {
+func queueManagerHealthy() (bool, error) {
 name, err := name.GetQueueManagerName()
 if err != nil {
 return false, err
 }
 // Specify the queue manager name, just in case someone's created a second queue manager
 // #nosec G204
-cmd := exec.CommandContext(ctx, "dspmq", "-n", "-m", name)
+cmd := exec.Command("dspmq", "-n", "-m", name)
 // Run the command and wait for completion
 out, err := cmd.CombinedOutput()
 fmt.Printf("%s", out)
@@ -49,20 +47,13 @@ func queueManagerHealthy(ctx context.Context) (bool, error) {
 return true, nil
 }
-func doMain() int {
+func main() {
-ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
+healthy, err := queueManagerHealthy()
-defer cancel()
-healthy, err := queueManagerHealthy(ctx)
 if err != nil {
-return 2
+os.Exit(2)
 }
 if !healthy {
-return 1
+os.Exit(1)
 }
-return 0
+os.Exit(0)
-}
-func main() {
-os.Exit(doMain())
 }
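For background on the pattern visible on the removed side of this hunk: funnelling the work through a `doMain` that returns an exit code, and calling `os.Exit` only in `main`, lets deferred calls such as the context cancel run before the process exits, and `exec.CommandContext` lets the `dspmq` probe be interrupted on a signal. A minimal, self-contained sketch of that pattern, assuming a placeholder queue manager name ("QM1") rather than the repository's actual name lookup:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"os/signal"
)

// doMain returns an exit code instead of calling os.Exit directly, so that
// deferred functions (such as cancel) run before the process terminates.
func doMain() int {
	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
	defer cancel()

	// Illustrative probe: the command is killed if ctx is cancelled.
	// "QM1" is a placeholder queue manager name.
	out, err := exec.CommandContext(ctx, "dspmq", "-n", "-m", "QM1").CombinedOutput()
	fmt.Printf("%s", out)
	if err != nil {
		return 2
	}
	return 0
}

func main() {
	os.Exit(doMain())
}
```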

View File

@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2017, 2022
+© Copyright IBM Corporation 2017, 2019
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -18,54 +18,44 @@ limitations under the License.
 package main
 import (
-"context"
 "fmt"
 "net"
 "os"
-"os/signal"
 "github.com/ibm-messaging/mq-container/internal/ready"
 "github.com/ibm-messaging/mq-container/pkg/name"
 )
-func doMain() int {
+func main() {
-ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
-defer cancel()
 // Check if runmqserver has indicated that it's finished configuration
 r, err := ready.Check()
 if !r || err != nil {
-return 1
+os.Exit(1)
 }
 name, err := name.GetQueueManagerName()
 if err != nil {
 fmt.Println(err)
-return 1
+os.Exit(1)
 }
 // Check if the queue manager has a running listener
-if active, _ := ready.IsRunningAsActiveQM(ctx, name); active {
+if active, _ := ready.IsRunningAsActiveQM(name); active {
 conn, err := net.Dial("tcp", "127.0.0.1:1414")
 if err != nil {
 fmt.Println(err)
-return 1
+os.Exit(1)
 }
 err = conn.Close()
 if err != nil {
 fmt.Println(err)
 }
-} else if standby, _ := ready.IsRunningAsStandbyQM(ctx, name); standby {
+} else if standby, _ := ready.IsRunningAsStandbyQM(name); standby {
 fmt.Printf("Detected queue manager running in standby mode")
-return 10
+os.Exit(10)
-} else if replica, _ := ready.IsRunningAsReplicaQM(ctx, name); replica {
+} else if replica, _ := ready.IsRunningAsReplicaQM(name); replica {
 fmt.Printf("Detected queue manager running in replica mode")
-return 20
+os.Exit(20)
 } else {
-return 1
+os.Exit(1)
 }
-return 0
-}
-func main() {
-os.Exit(doMain())
 }

View File

@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2021, 2022
+© Copyright IBM Corporation 2021
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -18,24 +18,22 @@ limitations under the License.
 package main
 import (
-"context"
 "fmt"
 "os"
 "os/exec"
-"os/signal"
 "strings"
 "github.com/ibm-messaging/mq-container/pkg/name"
 )
-func queueManagerStarted(ctx context.Context) (bool, error) {
+func queueManagerStarted() (bool, error) {
 name, err := name.GetQueueManagerName()
 if err != nil {
 return false, err
 }
 // Specify the queue manager name, just in case someone's created a second queue manager
 // #nosec G204
-cmd := exec.CommandContext(ctx, "dspmq", "-n", "-m", name)
+cmd := exec.Command("dspmq", "-n", "-m", name)
 // Run the command and wait for completion
 out, err := cmd.CombinedOutput()
 if err != nil {
@@ -48,20 +46,13 @@ func queueManagerStarted(ctx context.Context) (bool, error) {
 return true, nil
 }
-func doMain() int {
+func main() {
-ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
+started, err := queueManagerStarted()
-defer cancel()
-started, err := queueManagerStarted(ctx)
 if err != nil {
-return 2
+os.Exit(2)
 }
 if !started {
-return 1
+os.Exit(1)
 }
-return 0
+os.Exit(0)
-}
-func main() {
-os.Exit(doMain())
 }

View File

@@ -77,6 +77,7 @@ func logTermination(args ...interface{}) {
 // Write the message to the termination log. This is not the default place
 // that Kubernetes will look for termination information.
 log.Debugf("Writing termination message: %v", msg)
+// #nosec G306 - its a read by owner/s group, and pose no harm.
 err := ioutil.WriteFile("/run/termination-log", []byte(msg), 0660)
 if err != nil {
 log.Debug(err)

View File

@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2017, 2022
+© Copyright IBM Corporation 2017, 2021
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -46,6 +46,7 @@ func logTermination(args ...interface{}) {
 // Write the message to the termination log. This is not the default place
 // that Kubernetes will look for termination information.
 log.Debugf("Writing termination message: %v", msg)
+// #nosec G306 - its a read by owner/s group, and pose no harm.
 err := ioutil.WriteFile("/run/termination-log", []byte(msg), 0660)
 if err != nil {
 log.Debug(err)
@@ -165,7 +166,7 @@ func configureLogger(name string) (mirrorFunc, error) {
 if err != nil {
 log.Printf("Failed to unmarshall JSON in log message - %v", err)
 } else {
-fmt.Print(formatBasic(obj))
+fmt.Printf(formatBasic(obj))
 }
 } else {
 // The log being mirrored isn't JSON, so just print it.

View File

@@ -95,6 +95,7 @@ func mirrorLog(ctx context.Context, wg *sync.WaitGroup, path string, fromStart b
 // the file is open before the queue manager is created or started.
 // Otherwise, there would be the potential for a nearly-full file to
 // rotate before the goroutine had a chance to open it.
+// #nosec G304 - no harm, we open readonly and check error.
 f, err = os.OpenFile(path, os.O_RDONLY, 0)
 if err != nil {
 return nil, err
@@ -122,6 +123,7 @@
 return
 }
 log.Debugf("File exists: %v, %v", path, fi.Size())
+// #nosec G304 - no harm, we open readonly and check error.
 f, err = os.OpenFile(path, os.O_RDONLY, 0)
 if err != nil {
 log.Error(err)
@@ -169,6 +171,7 @@
 }
 // Re-open file
 log.Debugf("Re-opening error log file %v", path)
+// #nosec G304 - no harm, we open readonly and check error.
 f, err = os.OpenFile(path, os.O_RDONLY, 0)
 if err != nil {
 log.Error(err)

View File

@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2017, 2022
+© Copyright IBM Corporation 2017, 2020
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@ limitations under the License.
 package main
 import (
-"context"
 "fmt"
 "io/ioutil"
 "os"
@@ -132,7 +131,7 @@ func startQueueManager(name string) error {
 func stopQueueManager(name string) error {
 log.Println("Stopping queue manager")
 qmGracePeriod := os.Getenv("MQ_GRACE_PERIOD")
-isStandby, err := ready.IsRunningAsStandbyQM(context.Background(), name)
+isStandby, err := ready.IsRunningAsStandbyQM(name)
 if err != nil {
 log.Printf("Error getting status for queue manager %v: %v", name, err.Error())
 return err
@@ -142,7 +141,7 @@
 if isStandby {
 args = []string{"-x", name}
 } else {
-args = []string{"-s", "-w", "-tp", qmGracePeriod, name}
+args = []string{"-s", "-w", "-r", "-tp", qmGracePeriod, name}
 }
 }
 out, rc, err := command.Run("endmqm", args...)
@@ -287,7 +286,8 @@ func updateQMini(qmname string) error {
 if strings.Contains(qminiConfigStr, "ServiceComponent:") {
 var re = regexp.MustCompile(`(?m)^.*ServiceComponent.*$\s^.*Service.*$\s^.*Name.*$\s^.*Module.*$\s^.*ComponentDataSize.*$`)
 curFile := re.ReplaceAllString(qminiConfigStr, "")
-// #nosec G304 - qmgrDir filepath is derived from dspmqinf
+// #nosec G304 G306 - qmgrDir filepath is derived from dspmqinf and
+// its a read by owner/s group, and pose no harm.
 err := ioutil.WriteFile(qmgrDir, []byte(curFile), 0660)
 if err != nil {
 return err

View File

@@ -1,6 +1,6 @@
 ###########################################################################################################################################################
 # MQ_VERSION is the fully qualified MQ version number to build
-MQ_VERSION ?= 9.3.1.1
+MQ_VERSION ?= 9.3.0.4
 ###########################################################################################################################################################

docs/building.md (3 changes) Executable file → Normal file
View File

@@ -34,7 +34,8 @@ MQ_ARCHIVE=mq-1.2.3.4.tar.gz MQ_VERSION=1.2.3.4 make build-advancedserver
 **Note**: MQ 9.3 is the latest MQ version with MQ Long Term Support (LTS), as well as being the latest Continuous Delivery (CD) version. Therefore, to build build 9.3.0.X, follow the [instructions above for MQ 9.3](#building-mq-93-long-term-support-lts-and-continuous-delivery-cd).
-However, if you wish to build the previous MQ LTS, use the [instructions](https://github.ibm.com/mq-cloudpak/mq-container/blob/v9.2.0.x-eus/docs/building.md#mq-long-term-support-lts) in the `v9.2.0.x-eus` branch.
+However, if you wish to build the previous MQ LTS, use the [instructions](/../9.2.0.x/docs/building.md#mq-long-term-support-lts) in the `v9.2.0.x-eus` branch.
 ## Building a developer image

View File

@@ -34,7 +34,7 @@ Two channels are created, one for administration, the other for normal messaging
 ## Web Console
-By default the MQ Advanced for Developers image will start the IBM MQ Web Console that allows you to administer your Queue Manager running on your container. When the web console has been started, you can access it by opening a web browser and navigating to `https://<Container IP>:9443/ibmmq/console`. Where `<Container IP>` is replaced by the IP address of your running container.
+By default the MQ Advanced for Developers image will start the IBM MQ Web Console that allows you to administer your Queue Manager running on your container. When the web console has been started, you can access it by opening a web browser and navigating to https://<Container IP>:9443/ibmmq/console. Where <Container IP> is replaced by the IP address of your running container.
 When you navigate to this page you may be presented with a security exception warning. This happens because, by default, the web console creates a self-signed certificate to use for the HTTPS operations. This certificate is not trusted by your browser and has an incorrect distinguished name.

View File

@@ -16,5 +16,5 @@ docker run \
 --env LICENSE=accept \
 --env MQ_QMGR_NAME=QM1 \
 --detach \
-ibm-mqadvanced-server:9.3.1.1-amd64
+ibm-mqadvanced-server:9.3.0.4-amd64
 ```

View File

@@ -17,14 +17,14 @@ There are two main sets of tests:
 The Docker tests can be run locally on a machine with Docker. For example:
 ```
-make test-devserver
+make devserver
-make test-advancedserver
+make advancedserver
 ```
 You can specify the image to use directly by using the `MQ_IMAGE_ADVANCEDSERVER` or `MQ_IMAGE_DEVSERVER` variables, for example:
 ```
-MQ_IMAGE_ADVANCEDSERVER=ibm-mqadvanced-server:9.3.1.1-amd64 make test-advancedserver
+MQ_IMAGE_ADVANCEDSERVER=ibm-mqadvanced-server:9.3.0.4-amd64 make test-advancedserver
 ```
 You can pass parameters to `go test` with an environment variable. For example, to run the "TestGoldenPath" test, run the following command:

go.mod (14 changes)
View File

@@ -1,14 +1,24 @@
 module github.com/ibm-messaging/mq-container
-go 1.15
+go 1.18
 require (
 github.com/genuinetools/amicontained v0.4.3
 github.com/ibm-messaging/mq-golang v2.0.0+incompatible
 github.com/prometheus/client_golang v1.11.1
 github.com/prometheus/client_model v0.2.0
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1
 software.sslmate.com/src/go-pkcs12 v0.0.0-20200830195227-52f69702a001
 )
+require (
+github.com/beorn7/perks v1.0.1 // indirect
+github.com/cespare/xxhash/v2 v2.1.1 // indirect
+github.com/golang/protobuf v1.4.3 // indirect
+github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+github.com/prometheus/common v0.26.0 // indirect
+github.com/prometheus/procfs v0.6.0 // indirect
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
+google.golang.org/protobuf v1.26.0-rc.1 // indirect
+)

go.sum (5 changes)
View File

@@ -105,7 +105,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -120,16 +119,12 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@@ -25,4 +25,4 @@ sudo apt-get update || :
sudo apt-get install -y jq sudo apt-get install -y jq
go install golang.org/x/lint/golint@latest go install golang.org/x/lint/golint@latest
curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $GOPATH/bin 2.0.0 || echo "Gosec not installed. Platform may not be supported." curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $GOPATH/bin v2.14.0 || echo "Gosec not installed. Platform may not be supported."

View File

@@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
# -*- mode: sh -*- # -*- mode: sh -*-
# © Copyright IBM Corporation 2015, 2022 # © Copyright IBM Corporation 2015, 2021
# #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,7 +22,6 @@ test -f /usr/bin/yum && YUM=true || YUM=false
test -f /usr/bin/microdnf && MICRODNF=true || MICRODNF=false test -f /usr/bin/microdnf && MICRODNF=true || MICRODNF=false
test -f /usr/bin/rpm && RPM=true || RPM=false test -f /usr/bin/rpm && RPM=true || RPM=false
test -f /usr/bin/apt-get && UBUNTU=true || UBUNTU=false test -f /usr/bin/apt-get && UBUNTU=true || UBUNTU=false
CPU_ARCH=$(uname -m)
if ($UBUNTU); then if ($UBUNTU); then
export DEBIAN_FRONTEND=noninteractive export DEBIAN_FRONTEND=noninteractive
@@ -30,7 +29,8 @@ if ($UBUNTU); then
# This ensures no unsupported code gets installed, and makes the build faster # This ensures no unsupported code gets installed, and makes the build faster
source /etc/os-release source /etc/os-release
# Figure out the correct apt URL based on the CPU architecture # Figure out the correct apt URL based on the CPU architecture
if [ "${CPU_ARCH}" == "x86_64" ]; then CPU_ARCH=$(uname -p)
if [ ${CPU_ARCH} == "x86_64" ]; then
APT_URL="http://archive.ubuntu.com/ubuntu/" APT_URL="http://archive.ubuntu.com/ubuntu/"
else else
APT_URL="http://ports.ubuntu.com/ubuntu-ports/" APT_URL="http://ports.ubuntu.com/ubuntu-ports/"
@@ -41,22 +41,29 @@ if ($UBUNTU); then
echo "deb ${APT_URL} ${UBUNTU_CODENAME}-updates main restricted" >> /etc/apt/sources.list echo "deb ${APT_URL} ${UBUNTU_CODENAME}-updates main restricted" >> /etc/apt/sources.list
echo "deb ${APT_URL} ${UBUNTU_CODENAME}-security main restricted" >> /etc/apt/sources.list echo "deb ${APT_URL} ${UBUNTU_CODENAME}-security main restricted" >> /etc/apt/sources.list
# Install additional packages required by MQ, this install process and the runtime scripts # Install additional packages required by MQ, this install process and the runtime scripts
EXTRA_DEBS="bash bc ca-certificates coreutils curl debianutils file findutils gawk grep libc-bin mount passwd procps sed tar util-linux"
# On ARM CPUs, there is no IBM JRE, so install another one
if [ "${CPU_ARCH}" == "aarch64" ]; then
EXTRA_DEBS="${EXTRA_DEBS} openjdk-8-jre"
fi
apt-get update apt-get update
apt-get install -y --no-install-recommends ${EXTRA_DEBS} apt-get install -y --no-install-recommends \
bash \
bc \
ca-certificates \
coreutils \
curl \
debianutils \
file \
findutils \
gawk \
grep \
libc-bin \
mount \
passwd \
procps \
sed \
tar \
util-linux
fi fi
if ($RPM); then if ($RPM); then
EXTRA_RPMS="bash bc ca-certificates file findutils gawk glibc-common grep ncurses-compat-libs passwd procps-ng sed shadow-utils tar util-linux which" EXTRA_RPMS="bash bc ca-certificates file findutils gawk glibc-common grep ncurses-compat-libs passwd procps-ng sed shadow-utils tar util-linux which"
# On ARM CPUs, there is no IBM JRE, so install another one
if [ "${CPU_ARCH}" == "aarch64" ]; then
EXTRA_RPMS="${EXTRA_RPMS} java-1.8.0-openjdk-headless"
fi
# Install additional packages required by MQ, this install process and the runtime scripts # Install additional packages required by MQ, this install process and the runtime scripts
$YUM && yum -y install --setopt install_weak_deps=false ${EXTRA_RPMS} $YUM && yum -y install --setopt install_weak_deps=false ${EXTRA_RPMS}
$MICRODNF && microdnf --disableplugin=subscription-manager install ${EXTRA_RPMS} $MICRODNF && microdnf --disableplugin=subscription-manager install ${EXTRA_RPMS}

View File

@@ -1,5 +1,5 @@
/* /*
© Copyright IBM Corporation 2017, 2022 © Copyright IBM Corporation 2017, 2020
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@ limitations under the License.
package command package command
import ( import (
"context"
"fmt" "fmt"
"os/exec" "os/exec"
) )
@@ -28,13 +27,9 @@ import (
// Do not use this function to run shell built-ins (like "cd"), because // Do not use this function to run shell built-ins (like "cd"), because
// the error handling works differently // the error handling works differently
func Run(name string, arg ...string) (string, int, error) { func Run(name string, arg ...string) (string, int, error) {
return RunContext(context.Background(), name, arg...)
}
func RunContext(ctx context.Context, name string, arg ...string) (string, int, error) {
// Run the command and wait for completion // Run the command and wait for completion
// #nosec G204 // #nosec G204
cmd := exec.CommandContext(ctx, name, arg...) cmd := exec.Command(name, arg...)
out, err := cmd.CombinedOutput() out, err := cmd.CombinedOutput()
rc := cmd.ProcessState.ExitCode() rc := cmd.ProcessState.ExitCode()
if err != nil { if err != nil {
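The context-aware variant shown in this hunk lets a caller bound how long an external command may run; exec.CommandContext kills the child process once the context is done. A minimal sketch of a caller, assuming only the RunContext signature above and the repository's internal/command import path (the dspmq arguments and the timeout are illustrative):

package main

import (
	"context"
	"log"
	"time"

	"github.com/ibm-messaging/mq-container/internal/command"
)

func main() {
	// Give dspmq at most ten seconds before RunContext kills it via the context.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	out, rc, err := command.RunContext(ctx, "dspmq", "-n", "-m", "QM1")
	if err != nil {
		log.Fatalf("dspmq failed (rc=%d): %v", rc, err)
	}
	log.Printf("dspmq output: %s", out)
}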

View File

@@ -36,12 +36,15 @@ func CopyFileMode(src, dest string, perm os.FileMode) error {
if err != nil { if err != nil {
return fmt.Errorf("failed to open %s for copy: %v", src, err) return fmt.Errorf("failed to open %s for copy: %v", src, err)
} }
// #nosec G307 - local to this function; poses no harm.
defer in.Close() defer in.Close()
// #nosec G304 - this function creates the file based on the input file mode.
out, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY, perm) out, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY, perm)
if err != nil { if err != nil {
return fmt.Errorf("failed to open %s for copy: %v", dest, err) return fmt.Errorf("failed to open %s for copy: %v", dest, err)
} }
// #nosec G307 - local to this function; poses no harm.
defer out.Close() defer out.Close()
_, err = io.Copy(out, in) _, err = io.Copy(out, in)

View File

@@ -108,5 +108,6 @@ func (htpfile mapHtPasswd) updateHtPasswordFile(isTest bool) error {
if isTest { if isTest {
file = "my.htpasswd" file = "my.htpasswd"
} }
// #nosec G306 - it's readable only by the owner and group, and poses no harm.
return ioutil.WriteFile(file, htpfile.GetBytes(), 0660) return ioutil.WriteFile(file, htpfile.GetBytes(), 0660)
} }

View File

@@ -1,5 +1,5 @@
/* /*
© Copyright IBM Corporation 2018, 2022 © Copyright IBM Corporation 2018, 2020
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -171,8 +171,8 @@ func (ks *KeyStore) GetCertificateLabels() ([]string, error) {
var labels []string var labels []string
for scanner.Scan() { for scanner.Scan() {
s := scanner.Text() s := scanner.Text()
if strings.HasPrefix(s, "-") || strings.HasPrefix(s, "*-") || strings.HasPrefix(s, "!") { if strings.HasPrefix(s, "-") || strings.HasPrefix(s, "*-") {
s := strings.TrimLeft(s, "-*!") s := strings.TrimLeft(s, "-*")
labels = append(labels, strings.TrimSpace(s)) labels = append(labels, strings.TrimSpace(s))
} }
} }

View File

@@ -1,5 +1,5 @@
/* /*
© Copyright IBM Corporation 2018, 2022 © Copyright IBM Corporation 2018, 2019
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -35,6 +35,8 @@ const (
var ( var (
metricsEnabled = false metricsEnabled = false
// #nosec G112 - this needs investigation to find a reasonable timeout;
// git issue 233 covers this.
metricsServer = &http.Server{Addr: ":" + defaultPort} metricsServer = &http.Server{Addr: ":" + defaultPort}
) )
@@ -43,7 +45,7 @@ func GatherMetrics(qmName string, log *logger.Logger) {
// If running in standby mode - wait until the queue manager becomes active // If running in standby mode - wait until the queue manager becomes active
for { for {
active, _ := ready.IsRunningAsActiveQM(context.Background(), qmName) active, _ := ready.IsRunningAsActiveQM(qmName)
if active { if active {
break break
} }

View File

@@ -48,8 +48,10 @@ func ProcessTemplateFile(templateFile, destFile string, data interface{}, log *l
return err return err
} }
} }
// #nosec G302
// #nosec G302 G304 G306 - it's readable only by the owner and group, and poses no harm.
f, err := os.OpenFile(destFile, os.O_CREATE|os.O_WRONLY, 0660) f, err := os.OpenFile(destFile, os.O_CREATE|os.O_WRONLY, 0660)
// #nosec G307 - local to this function; poses no harm.
defer f.Close() defer f.Close()
err = t.Execute(f, data) err = t.Execute(f, data)
if err != nil { if err != nil {

View File

@@ -18,6 +18,7 @@ package mqversion
import ( import (
"fmt" "fmt"
"strconv"
"strings" "strings"
"github.com/ibm-messaging/mq-container/internal/command" "github.com/ibm-messaging/mq-container/internal/command"
@@ -38,14 +39,59 @@ func Compare(checkVersion string) (int, error) {
if err != nil { if err != nil {
return 0, err return 0, err
} }
// trim any suffix from MQ version x.x.x.x
currentVersion = currentVersion[0:7] currentVRMF, err := parseVRMF(currentVersion)
if currentVersion < checkVersion { if err != nil {
return -1, nil return 0, err
} else if currentVersion == checkVersion {
return 0, nil
} else if currentVersion > checkVersion {
return 1, nil
} }
return 0, fmt.Errorf("Failed to compare MQ versions") compareVRMF, err := parseVRMF(checkVersion)
if err != nil {
return 0, fmt.Errorf("failed to parse compare version: %w", err)
}
return currentVRMF.compare(*compareVRMF), nil
}
type vrmf [4]int
func (v vrmf) String() string {
return fmt.Sprintf("%d.%d.%d.%d", v[0], v[1], v[2], v[3])
}
func (v vrmf) compare(to vrmf) int {
for idx := 0; idx < 4; idx++ {
if v[idx] < to[idx] {
return -1
}
if v[idx] > to[idx] {
return 1
}
}
return 0
}
func parseVRMF(vrmfString string) (*vrmf, error) {
versionParts := strings.Split(vrmfString, ".")
if len(versionParts) != 4 {
return nil, fmt.Errorf("incorrect number of parts to version string: expected 4, got %d", len(versionParts))
}
vmrfPartNames := []string{"version", "release", "minor", "fix"}
parsed := vrmf{}
for idx, value := range versionParts {
partName := vmrfPartNames[idx]
if value == "" {
return nil, fmt.Errorf("empty %s found in VRMF", partName)
}
val, err := strconv.Atoi(value)
if err != nil {
return nil, fmt.Errorf("non-numeric %s found in VRMF", partName)
}
if val < 0 {
return nil, fmt.Errorf("negative %s found in VRMF", partName)
}
if idx == 0 && val == 0 {
return nil, fmt.Errorf("zero value for version not allowed")
}
parsed[idx] = val
}
return &parsed, nil
} }
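The rewrite above replaces a fixed-width string comparison (which truncated the version to seven characters) with a field-by-field numeric compare, because lexicographic ordering breaks as soon as any VRMF part has more than one digit. A minimal illustration of the failure mode, assuming nothing beyond the standard library (the version strings are examples only):

package main

import "fmt"

func main() {
	// As strings, "9.3.0.10" sorts before "9.3.0.4" because '1' < '4',
	// even though 10 > 4 numerically.
	fmt.Println("9.3.0.10" < "9.3.0.4") // prints true, which is wrong for versions
	// Parsing each of the four VRMF parts with strconv.Atoi and comparing them
	// as integers, as parseVRMF and vrmf.compare do above, yields the expected order.
}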

View File

@@ -16,10 +16,13 @@ limitations under the License.
package mqversion package mqversion
import "testing" import (
"fmt"
"testing"
)
func TestCompareLower(t *testing.T) { func TestCompareLower(t *testing.T) {
checkVersion := "9.9.9.9" checkVersion := "99.99.99.99"
mqVersionCheck, err := Compare(checkVersion) mqVersionCheck, err := Compare(checkVersion)
if err != nil { if err != nil {
t.Fatalf("Failed to compare MQ versions: %v", err) t.Fatalf("Failed to compare MQ versions: %v", err)
@@ -53,3 +56,92 @@ func TestCompareEqual(t *testing.T) {
t.Errorf("MQ version compare result failed. Expected 0, Got %v", mqVersionCheck) t.Errorf("MQ version compare result failed. Expected 0, Got %v", mqVersionCheck)
} }
} }
func TestVersionValid(t *testing.T) {
checkVersion, err := Get()
if err != nil {
t.Fatalf("Failed to get current MQ version: %v", err)
}
_, err = parseVRMF(checkVersion)
if err != nil {
t.Fatalf("Validation of MQ version failed: %v", err)
}
}
func TestValidVRMF(t *testing.T) {
validVRMFs := map[string]vrmf{
"1.0.0.0": {1, 0, 0, 0},
"10.0.0.0": {10, 0, 0, 0},
"1.10.0.0": {1, 10, 0, 0},
"1.0.10.0": {1, 0, 10, 0},
"1.0.0.10": {1, 0, 0, 10},
"999.998.997.996": {999, 998, 997, 996},
}
for test, expect := range validVRMFs {
t.Run(test, func(t *testing.T) {
parsed, err := parseVRMF(test)
if err != nil {
t.Fatalf("Unexpectedly failed to parse VRMF '%s': %s", test, err.Error())
}
if *parsed != expect {
t.Fatalf("VRMF not parsed as expected. Expected '%v', got '%v'", parsed, expect)
}
})
}
}
func TestInvalidVRMF(t *testing.T) {
invalidVRMFs := []string{
"not-a-number",
"9.8.7.string",
"0.1.2.3",
"1.0.0.-10",
}
for _, test := range invalidVRMFs {
t.Run(test, func(t *testing.T) {
parsed, err := parseVRMF(test)
if err == nil {
t.Fatalf("Expected error when parsing VRMF '%s', but got none. VRMF returned: %v", test, parsed)
}
})
}
}
func TestCompare(t *testing.T) {
tests := []struct {
current string
compare string
expect int
}{
{"1.0.0.1", "1.0.0.1", 0},
{"1.0.0.1", "1.0.0.0", 1},
{"1.0.0.1", "1.0.0.2", -1},
{"9.9.9.9", "10.0.0.0", -1},
{"9.9.9.9", "9.10.0.0", -1},
{"9.9.9.9", "9.9.10.0", -1},
{"9.9.9.9", "9.9.9.10", -1},
}
for _, test := range tests {
t.Run(fmt.Sprintf("%s-%s", test.current, test.compare), func(t *testing.T) {
baseVRMF, err := parseVRMF(test.current)
if err != nil {
t.Fatalf("Could not parse base version '%s': %s", test.current, err.Error())
}
compareVRMF, err := parseVRMF(test.compare)
if err != nil {
t.Fatalf("Could not parse current version '%s': %s", test.current, err.Error())
}
result := baseVRMF.compare(*compareVRMF)
if result != test.expect {
t.Fatalf("Expected %d but got %d when comparing '%s' with '%s'", test.expect, result, test.current, test.compare)
}
if test.expect == 0 {
return
}
resultReversed := compareVRMF.compare(*baseVRMF)
if resultReversed != test.expect*-1 {
t.Fatalf("Expected %d but got %d when comparing '%s' with '%s'", test.expect*-1, resultReversed, test.compare, test.current)
}
})
}
}

View File

@@ -1,5 +1,5 @@
/* /*
© Copyright IBM Corporation 2018, 2022 © Copyright IBM Corporation 2018, 2019
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@ limitations under the License.
package ready package ready
import ( import (
"context"
"io/ioutil" "io/ioutil"
"os" "os"
"strings" "strings"
@@ -54,6 +53,7 @@ func Clear() error {
// Set lets any subsequent calls to `CheckReady` know that the queue // Set lets any subsequent calls to `CheckReady` know that the queue
// manager has finished its configuration step // manager has finished its configuration step
func Set() error { func Set() error {
// #nosec G306 - this gives permissions to the owner and group only.
return ioutil.WriteFile(fileName, []byte("1"), 0770) return ioutil.WriteFile(fileName, []byte("1"), 0770)
} }
@@ -68,22 +68,22 @@ func Check() (bool, error) {
} }
// IsRunningAsActiveQM returns true if the queue manager is running in active mode // IsRunningAsActiveQM returns true if the queue manager is running in active mode
func IsRunningAsActiveQM(ctx context.Context, name string) (bool, error) { func IsRunningAsActiveQM(name string) (bool, error) {
return isRunningQM(ctx, name, "(RUNNING)") return isRunningQM(name, "(RUNNING)")
} }
// IsRunningAsStandbyQM returns true if the queue manager is running in standby mode // IsRunningAsStandbyQM returns true if the queue manager is running in standby mode
func IsRunningAsStandbyQM(ctx context.Context, name string) (bool, error) { func IsRunningAsStandbyQM(name string) (bool, error) {
return isRunningQM(ctx, name, "(RUNNING AS STANDBY)") return isRunningQM(name, "(RUNNING AS STANDBY)")
} }
// IsRunningAsReplicaQM returns true if the queue manager is running in replica mode // IsRunningAsReplicaQM returns true if the queue manager is running in replica mode
func IsRunningAsReplicaQM(ctx context.Context, name string) (bool, error) { func IsRunningAsReplicaQM(name string) (bool, error) {
return isRunningQM(ctx, name, "(REPLICA)") return isRunningQM(name, "(REPLICA)")
} }
func isRunningQM(ctx context.Context, name string, status string) (bool, error) { func isRunningQM(name string, status string) (bool, error) {
out, _, err := command.RunContext(ctx, "dspmq", "-n", "-m", name) out, _, err := command.Run("dspmq", "-n", "-m", name)
if err != nil { if err != nil {
return false, err return false, err
} }
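With the context-aware signatures shown on one side of this hunk, callers that poll dspmq (such as the metrics start-up loop earlier in this diff) can bound how long they wait for a queue manager to become active. A minimal sketch, assuming the IsRunningAsActiveQM(ctx, name) form and the repository's internal/ready import path; the queue manager name, poll interval and timeout are illustrative:

package main

import (
	"context"
	"log"
	"time"

	"github.com/ibm-messaging/mq-container/internal/ready"
)

func main() {
	// Poll once a second, but stop waiting after two minutes.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	for {
		active, err := ready.IsRunningAsActiveQM(ctx, "QM1")
		if err != nil {
			log.Fatalf("dspmq check failed: %v", err)
		}
		if active {
			log.Println("QM1 is running as the active queue manager")
			return
		}
		select {
		case <-time.After(time.Second):
		case <-ctx.Done():
			log.Fatal("timed out waiting for QM1 to become active")
		}
	}
}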

View File

@@ -1,5 +1,5 @@
/* /*
© Copyright IBM Corporation 2019, 2022 © Copyright IBM Corporation 2019, 2023
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -76,20 +76,18 @@ type TLSStore struct {
Truststore KeyStoreData Truststore KeyStoreData
} }
func configureTLSKeystores(keystoreDir, keyDir, trustDir string, p12TruststoreRequired bool, nativeTLSHA bool) (string, KeyStoreData, KeyStoreData, error) { func configureTLSKeystores(keystoreDir, keyDir, trustDir string, p12TruststoreRequired bool) (string, KeyStoreData, KeyStoreData, error) {
var keyLabel string
// Create the CMS Keystore & PKCS#12 Truststore (if required) // Create the CMS Keystore & PKCS#12 Truststore (if required)
tlsStore, err := generateAllKeystores(keystoreDir, p12TruststoreRequired, nativeTLSHA) tlsStore, err := generateAllKeystores(keystoreDir, p12TruststoreRequired)
if err != nil { if err != nil {
return "", tlsStore.Keystore, tlsStore.Truststore, err return "", tlsStore.Keystore, tlsStore.Truststore, err
} }
if tlsStore.Keystore.Keystore != nil { // Process all keys - add them to the CMS KeyStore
// Process all keys - add them to the CMS KeyStore keyLabel, err := processKeys(&tlsStore, keystoreDir, keyDir)
keyLabel, err = processKeys(&tlsStore, keystoreDir, keyDir) if err != nil {
if err != nil { return "", tlsStore.Keystore, tlsStore.Truststore, err
return "", tlsStore.Keystore, tlsStore.Truststore, err
}
} }
// Process all trust certificates - add them to the CMS KeyStore & PKCS#12 Truststore (if required) // Process all trust certificates - add them to the CMS KeyStore & PKCS#12 Truststore (if required)
@@ -103,13 +101,13 @@ func configureTLSKeystores(keystoreDir, keyDir, trustDir string, p12TruststoreRe
// ConfigureDefaultTLSKeystores configures the CMS Keystore & PKCS#12 Truststore // ConfigureDefaultTLSKeystores configures the CMS Keystore & PKCS#12 Truststore
func ConfigureDefaultTLSKeystores() (string, KeyStoreData, KeyStoreData, error) { func ConfigureDefaultTLSKeystores() (string, KeyStoreData, KeyStoreData, error) {
return configureTLSKeystores(keystoreDirDefault, keyDirDefault, trustDirDefault, true, false) return configureTLSKeystores(keystoreDirDefault, keyDirDefault, trustDirDefault, true)
} }
// ConfigureHATLSKeystore configures the CMS Keystore & PKCS#12 Truststore // ConfigureHATLSKeystore configures the CMS Keystore & PKCS#12 Truststore
func ConfigureHATLSKeystore() (string, KeyStoreData, KeyStoreData, error) { func ConfigureHATLSKeystore() (string, KeyStoreData, KeyStoreData, error) {
// *.crt files mounted to the HA TLS dir keyDirHA will be processed as trusted in the CMS keystore // *.crt files mounted to the HA TLS dir keyDirHA will be processed as trusted in the CMS keystore
return configureTLSKeystores(keystoreDirHA, keyDirHA, keyDirHA, false, true) return configureTLSKeystores(keystoreDirHA, keyDirHA, keyDirHA, false)
} }
// ConfigureTLS configures TLS for the queue manager // ConfigureTLS configures TLS for the queue manager
@@ -117,18 +115,9 @@ func ConfigureTLS(keyLabel string, cmsKeystore KeyStoreData, devMode bool, log *
const mqsc string = "/etc/mqm/15-tls.mqsc" const mqsc string = "/etc/mqm/15-tls.mqsc"
const mqscTemplate string = mqsc + ".tpl" const mqscTemplate string = mqsc + ".tpl"
sslKeyRing := ""
// Don't set SSLKEYR if no keys or certs are supplied.
// The key label will be blank if no certs were added while processing keys and certs.
if cmsKeystore.Keystore != nil {
certList, _ := cmsKeystore.Keystore.ListAllCertificates()
if len(certList) > 0 {
sslKeyRing = strings.TrimSuffix(cmsKeystore.Keystore.Filename, ".kdb")
}
}
err := mqtemplate.ProcessTemplateFile(mqscTemplate, mqsc, map[string]string{ err := mqtemplate.ProcessTemplateFile(mqscTemplate, mqsc, map[string]string{
"SSLKeyR": sslKeyRing, "SSLKeyR": strings.TrimSuffix(cmsKeystore.Keystore.Filename, ".kdb"),
"CertificateLabel": keyLabel, "CertificateLabel": keyLabel,
}, log) }, log)
if err != nil { if err != nil {
@@ -170,7 +159,7 @@ func configureTLSDev(log *logger.Logger) error {
} }
// generateAllKeystores creates the CMS Keystore & PKCS#12 Truststore (if required) // generateAllKeystores creates the CMS Keystore & PKCS#12 Truststore (if required)
func generateAllKeystores(keystoreDir string, p12TruststoreRequired bool, nativeTLSHA bool) (TLSStore, error) { func generateAllKeystores(keystoreDir string, p12TruststoreRequired bool) (TLSStore, error) {
var cmsKeystore, p12Truststore KeyStoreData var cmsKeystore, p12Truststore KeyStoreData
@@ -186,19 +175,11 @@ func generateAllKeystores(keystoreDir string, p12TruststoreRequired bool, native
return TLSStore{cmsKeystore, p12Truststore}, fmt.Errorf("Failed to create Keystore directory: %v", err) return TLSStore{cmsKeystore, p12Truststore}, fmt.Errorf("Failed to create Keystore directory: %v", err)
} }
// Search the default keys directory for any keys/certs. // Create the CMS Keystore
keysDirectory := keyDirDefault cmsKeystore.Keystore = keystore.NewCMSKeyStore(filepath.Join(keystoreDir, cmsKeystoreName), cmsKeystore.Password)
// Change to default native HA TLS directory if we are configuring nativeHA err = cmsKeystore.Keystore.Create()
if nativeTLSHA { if err != nil {
keysDirectory = keyDirHA return TLSStore{cmsKeystore, p12Truststore}, fmt.Errorf("Failed to create CMS Keystore: %v", err)
}
// Create the CMS Keystore if we have been provided keys and certificates
if haveKeysAndCerts(keysDirectory) || haveKeysAndCerts(trustDirDefault) {
cmsKeystore.Keystore = keystore.NewCMSKeyStore(filepath.Join(keystoreDir, cmsKeystoreName), cmsKeystore.Password)
err = cmsKeystore.Keystore.Create()
if err != nil {
return TLSStore{cmsKeystore, p12Truststore}, fmt.Errorf("Failed to create CMS Keystore: %v", err)
}
} }
// Create the PKCS#12 Truststore (if required) // Create the PKCS#12 Truststore (if required)
@@ -222,6 +203,7 @@ func processKeys(tlsStore *TLSStore, keystoreDir string, keyDir string) (string,
// Process all keys // Process all keys
keyList, err := ioutil.ReadDir(keyDir) keyList, err := ioutil.ReadDir(keyDir)
if err == nil && len(keyList) > 0 { if err == nil && len(keyList) > 0 {
// Process each set of keys - each set should contain files: *.key & *.crt // Process each set of keys - each set should contain files: *.key & *.crt
for _, keySet := range keyList { for _, keySet := range keyList {
keys, _ := ioutil.ReadDir(filepath.Join(keyDir, keySet.Name())) keys, _ := ioutil.ReadDir(filepath.Join(keyDir, keySet.Name()))
@@ -253,6 +235,7 @@ func processKeys(tlsStore *TLSStore, keystoreDir string, keyDir string) (string,
if err != nil { if err != nil {
return "", fmt.Errorf("Failed to encode PKCS#12 Keystore %s: %v", keySet.Name()+".p12", err) return "", fmt.Errorf("Failed to encode PKCS#12 Keystore %s: %v", keySet.Name()+".p12", err)
} }
// #nosec G306 - this gives permissions to the owner and group only.
err = ioutil.WriteFile(filepath.Join(keystoreDir, keySet.Name()+".p12"), file, 0644) err = ioutil.WriteFile(filepath.Join(keystoreDir, keySet.Name()+".p12"), file, 0644)
if err != nil { if err != nil {
return "", fmt.Errorf("Failed to write PKCS#12 Keystore %s: %v", filepath.Join(keystoreDir, keySet.Name()+".p12"), err) return "", fmt.Errorf("Failed to write PKCS#12 Keystore %s: %v", filepath.Join(keystoreDir, keySet.Name()+".p12"), err)
@@ -556,6 +539,7 @@ func generateRandomPassword() string {
validcharArray := []byte(validChars) validcharArray := []byte(validChars)
password := "" password := ""
for i := 0; i < 12; i++ { for i := 0; i < 12; i++ {
// #nosec G404 - this is only for an internal keystore, so using math/rand poses no harm.
password = password + string(validcharArray[pwr.Intn(len(validcharArray))]) password = password + string(validcharArray[pwr.Intn(len(validcharArray))])
} }
@@ -600,10 +584,13 @@ func getCertificateFingerprint(block *pem.Block) (string, error) {
// writeCertificatesToFile writes a list of certificates to a file // writeCertificatesToFile writes a list of certificates to a file
func writeCertificatesToFile(file string, certificates []*pem.Block) error { func writeCertificatesToFile(file string, certificates []*pem.Block) error {
// #nosec G304 - this is a temporary pem file to write certs.
f, err := os.Create(file) f, err := os.Create(file)
if err != nil { if err != nil {
return fmt.Errorf("Failed to create file %s: %v", file, err) return fmt.Errorf("Failed to create file %s: %v", file, err)
} }
// #nosec G307 - local to this function; poses no harm.
defer f.Close() defer f.Close()
w := bufio.NewWriter(f) w := bufio.NewWriter(f)
@@ -620,23 +607,3 @@ func writeCertificatesToFile(file string, certificates []*pem.Block) error {
} }
return nil return nil
} }
// Search the specified directory for .key and .crt files.
// Return true if at least one .key or .crt file is found else false
func haveKeysAndCerts(keyDir string) bool {
fileList, err := os.ReadDir(keyDir)
if err == nil && len(fileList) > 0 {
for _, fileInfo := range fileList {
// Keys and certs will be supplied in a user-defined subdirectory.
// List the subdirectory and then search for .key and .crt files
keys, _ := ioutil.ReadDir(filepath.Join(keyDir, fileInfo.Name()))
for _, key := range keys {
if strings.Contains(key.Name(), ".key") || strings.Contains(key.Name(), ".crt") {
// We found at least one key/crt file.
return true
}
}
}
}
return false
}

View File

@@ -65,6 +65,7 @@ func ConfigureWebKeystore(p12Truststore KeyStoreData, webKeystore string) (strin
// Check if a new self-signed certificate should be generated // Check if a new self-signed certificate should be generated
genHostName := os.Getenv("MQ_GENERATE_CERTIFICATE_HOSTNAME") genHostName := os.Getenv("MQ_GENERATE_CERTIFICATE_HOSTNAME")
if genHostName != "" { if genHostName != "" {
// Create the Web Keystore // Create the Web Keystore
newWebKeystore := keystore.NewPKCS12KeyStore(webKeystoreFile, p12Truststore.Password) newWebKeystore := keystore.NewPKCS12KeyStore(webKeystoreFile, p12Truststore.Password)
err := newWebKeystore.Create() err := newWebKeystore.Create()

View File

@@ -2,6 +2,6 @@
# SOURCE_BRANCH is the repository branch name for this release stream. # SOURCE_BRANCH is the repository branch name for this release stream.
# It should be updated when a new release fork is created but not for testing of personal builds or pre-fork updates. # It should be updated when a new release fork is created but not for testing of personal builds or pre-fork updates.
SOURCE_BRANCH ?= v9.3.1 SOURCE_BRANCH ?= v9.3.0.x
########################################################################################################################################################### ###########################################################################################################################################################

View File

@@ -1,4 +1,3 @@
//go:build mqdev
// +build mqdev // +build mqdev
/* /*
@@ -52,10 +51,8 @@ func TestDevGoldenPath(t *testing.T) {
waitForReady(t, cli, id) waitForReady(t, cli, id)
waitForWebReady(t, cli, id, insecureTLSConfig) waitForWebReady(t, cli, id, insecureTLSConfig)
t.Run("JMS", func(t *testing.T) { t.Run("JMS", func(t *testing.T) {
// Run the JMS tests, with no password specified. // Run the JMS tests, with no password specified
// Use OpenJDK JRE for running testing, pass false for 7th parameter. runJMSTests(t, cli, id, false, "app", defaultAppPasswordOS)
// Last parameter is blank as the test doesn't use TLS.
runJMSTests(t, cli, id, false, "app", defaultAppPasswordOS, "false", "")
}) })
t.Run("REST admin", func(t *testing.T) { t.Run("REST admin", func(t *testing.T) {
testRESTAdmin(t, cli, id, insecureTLSConfig) testRESTAdmin(t, cli, id, insecureTLSConfig)
@@ -118,9 +115,7 @@ func TestDevSecure(t *testing.T) {
waitForWebReady(t, cli, ctr.ID, createTLSConfig(t, cert, tlsPassPhrase)) waitForWebReady(t, cli, ctr.ID, createTLSConfig(t, cert, tlsPassPhrase))
t.Run("JMS", func(t *testing.T) { t.Run("JMS", func(t *testing.T) {
// OpenJDK is used for running tests, hence pass "false" for 7th parameter. runJMSTests(t, cli, ctr.ID, true, "app", appPassword)
// Cipher name specified is compliant with non-IBM JRE naming.
runJMSTests(t, cli, ctr.ID, true, "app", appPassword, "false", "TLS_RSA_WITH_AES_256_CBC_SHA256")
}) })
t.Run("REST admin", func(t *testing.T) { t.Run("REST admin", func(t *testing.T) {
testRESTAdmin(t, cli, ctr.ID, insecureTLSConfig) testRESTAdmin(t, cli, ctr.ID, insecureTLSConfig)
@@ -158,9 +153,7 @@ func TestDevWebDisabled(t *testing.T) {
}) })
t.Run("JMS", func(t *testing.T) { t.Run("JMS", func(t *testing.T) {
// Run the JMS tests, with no password specified // Run the JMS tests, with no password specified
// OpenJDK is used for running tests, hence pass "false" for 7th parameter. runJMSTests(t, cli, id, false, "app", defaultAppPasswordOS)
// Last parameter is blank as the test doesn't use TLS.
runJMSTests(t, cli, id, false, "app", defaultAppPasswordOS, "false", "")
}) })
// Stop the container cleanly // Stop the container cleanly
stopContainer(t, cli, id) stopContainer(t, cli, id)
@@ -191,131 +184,3 @@ func TestDevConfigDisabled(t *testing.T) {
// Stop the container cleanly // Stop the container cleanly
stopContainer(t, cli, id) stopContainer(t, cli, id)
} }
// Test if SSLKEYR and CERTLABL attributes are not set when key and certificate
// are not supplied.
func TestSSLKEYRBlank(t *testing.T) {
t.Parallel()
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
t.Fatal(err)
}
containerConfig := container.Config{
Env: []string{
"LICENSE=accept",
"MQ_QMGR_NAME=qm1",
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
},
}
id := runContainerWithPorts(t, cli, &containerConfig, []int{9443})
defer cleanContainer(t, cli, id)
waitForReady(t, cli, id)
// execute runmqsc to display qmgr SSLKEYR and CERTLABL attributes.
// Search the console output for expected values
_, sslkeyROutput := execContainer(t, cli, id, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
if !strings.Contains(sslkeyROutput, "SSLKEYR( )") && !strings.Contains(sslkeyROutput, "CERTLABL( )") {
t.Errorf("Expected SSLKEYR to be blank but it is not; got \"%v\"", sslkeyROutput)
}
// Stop the container cleanly
stopContainer(t, cli, id)
}
// Test if SSLKEYR and CERTLABL attributes are set when key and certificate
// are supplied.
func TestSSLKEYRWithSuppliedKeyAndCert(t *testing.T) {
t.Parallel()
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
t.Fatal(err)
}
containerConfig := container.Config{
Env: []string{
"LICENSE=accept",
"MQ_QMGR_NAME=QM1",
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
},
Image: imageName(),
}
hostConfig := container.HostConfig{
Binds: []string{
coverageBind(t),
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
},
}
networkingConfig := network.NetworkingConfig{}
ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanContainer(t, cli, ctr.ID)
startContainer(t, cli, ctr.ID)
waitForReady(t, cli, ctr.ID)
// execute runmqsc to display qmgr SSLKEYR and CERTLABL attributes.
// Search the console output for expected values
_, sslkeyROutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
if !strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") && !strings.Contains(sslkeyROutput, "CERTLABL(default)") {
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslkeyROutput)
}
// Stop the container cleanly
stopContainer(t, cli, ctr.ID)
}
// Test with CA cert
func TestSSLKEYRWithCACert(t *testing.T) {
t.Parallel()
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
t.Fatal(err)
}
containerConfig := container.Config{
Env: []string{
"LICENSE=accept",
"MQ_QMGR_NAME=QM1",
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
},
Image: imageName(),
}
hostConfig := container.HostConfig{
Binds: []string{
coverageBind(t),
tlsDirWithCA(t, false) + ":/etc/mqm/pki/keys/QM1CA",
},
// Assign a random port for the web server on the host
PortBindings: nat.PortMap{
"9443/tcp": []nat.PortBinding{
{
HostIP: "0.0.0.0",
},
},
},
}
networkingConfig := network.NetworkingConfig{}
ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
if err != nil {
t.Fatal(err)
}
defer cleanContainer(t, cli, ctr.ID)
startContainer(t, cli, ctr.ID)
waitForReady(t, cli, ctr.ID)
// execute runmqsc to display qmgr SSLKEYR and CERTLABL attributes.
// Search the console output for expected values
_, sslkeyROutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
if !strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") {
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslkeyROutput)
}
if !strings.Contains(sslkeyROutput, "CERTLABL(QM1CA)") {
t.Errorf("Expected CERTLABL to be 'QM1CA' but it is not; got \"%v\"", sslkeyROutput)
}
// Stop the container cleanly
stopContainer(t, cli, ctr.ID)
}

View File

@@ -1,8 +1,7 @@
//go:build mqdev
// +build mqdev // +build mqdev
/* /*
© Copyright IBM Corporation 2018, 2022 © Copyright IBM Corporation 2018, 2021
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -19,7 +18,6 @@ limitations under the License.
package main package main
import ( import (
"bufio"
"bytes" "bytes"
"context" "context"
"crypto/tls" "crypto/tls"
@@ -28,8 +26,8 @@ import (
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httputil" "net/http/httputil"
"os"
"path/filepath" "path/filepath"
"strconv"
"strings" "strings"
"testing" "testing"
"time" "time"
@@ -82,19 +80,15 @@ func tlsDir(t *testing.T, unixPath bool) string {
return filepath.Join(getCwd(t, unixPath), "../tls") return filepath.Join(getCwd(t, unixPath), "../tls")
} }
func tlsDirWithCA(t *testing.T, unixPath bool) string {
return filepath.Join(getCwd(t, unixPath), "../tlscacert")
}
// runJMSTests runs a container with a JMS client, which connects to the queue manager container with the specified ID // runJMSTests runs a container with a JMS client, which connects to the queue manager container with the specified ID
func runJMSTests(t *testing.T, cli *client.Client, ID string, tls bool, user, password string, ibmjre string, cipherName string) { func runJMSTests(t *testing.T, cli *client.Client, ID string, tls bool, user, password string) {
containerConfig := container.Config{ containerConfig := container.Config{
// -e MQ_PORT_1414_TCP_ADDR=9.145.14.173 -e MQ_USERNAME=app -e MQ_PASSWORD=passw0rd -e MQ_CHANNEL=DEV.APP.SVRCONN -e MQ_TLS_TRUSTSTORE=/tls/test.p12 -e MQ_TLS_PASSPHRASE=passw0rd -v /Users/arthurbarr/go/src/github.com/ibm-messaging/mq-container/test/tls:/tls msgtest // -e MQ_PORT_1414_TCP_ADDR=9.145.14.173 -e MQ_USERNAME=app -e MQ_PASSWORD=passw0rd -e MQ_CHANNEL=DEV.APP.SVRCONN -e MQ_TLS_TRUSTSTORE=/tls/test.p12 -e MQ_TLS_PASSPHRASE=passw0rd -v /Users/arthurbarr/go/src/github.com/ibm-messaging/mq-container/test/tls:/tls msgtest
Env: []string{ Env: []string{
"MQ_PORT_1414_TCP_ADDR=" + getIPAddress(t, cli, ID), "MQ_PORT_1414_TCP_ADDR=" + getIPAddress(t, cli, ID),
"MQ_USERNAME=" + user, "MQ_USERNAME=" + user,
"MQ_CHANNEL=DEV.APP.SVRCONN", "MQ_CHANNEL=DEV.APP.SVRCONN",
"IBMJRE=" + ibmjre, "IBMJRE=" + os.Getenv("IBMJRE"),
}, },
Image: imageNameDevJMS(), Image: imageNameDevJMS(),
} }
@@ -107,7 +101,6 @@ func runJMSTests(t *testing.T, cli *client.Client, ID string, tls bool, user, pa
containerConfig.Env = append(containerConfig.Env, []string{ containerConfig.Env = append(containerConfig.Env, []string{
"MQ_TLS_TRUSTSTORE=/var/tls/client-trust.jks", "MQ_TLS_TRUSTSTORE=/var/tls/client-trust.jks",
"MQ_TLS_PASSPHRASE=passw0rd", "MQ_TLS_PASSPHRASE=passw0rd",
"MQ_TLS_CIPHER=" + cipherName,
}...) }...)
} }
hostConfig := container.HostConfig{ hostConfig := container.HostConfig{
@@ -126,57 +119,9 @@ func runJMSTests(t *testing.T, cli *client.Client, ID string, tls bool, user, pa
if rc != 0 { if rc != 0 {
t.Errorf("JUnit container failed with rc=%v", rc) t.Errorf("JUnit container failed with rc=%v", rc)
} }
// Get console output of the container and process the lines
// to see if we have any failures
scanner := bufio.NewScanner(strings.NewReader(inspectLogs(t, cli, ctr.ID)))
for scanner.Scan() {
s := scanner.Text()
if processJunitLogLine(s) {
t.Errorf("JUnit container tests failed. Reason: %s", s)
}
}
defer cleanContainer(t, cli, ctr.ID) defer cleanContainer(t, cli, ctr.ID)
} }
// Parse JUnit log line and return true if line contains failed or aborted tests
func processJunitLogLine(outputLine string) bool {
var failedLine bool
// Sample JUnit test run output
//[ 2 containers found ]
//[ 0 containers skipped ]
//[ 2 containers started ]
//[ 0 containers aborted ]
//[ 2 containers successful ]
//[ 0 containers failed ]
//[ 0 tests found ]
//[ 0 tests skipped ]
//[ 0 tests started ]
//[ 0 tests aborted ]
//[ 0 tests successful ]
//[ 0 tests failed ]
// Consider only those lines that begin with '[' and end with ']'
if strings.HasPrefix(outputLine, "[") && strings.HasSuffix(outputLine, "]") {
// Strip off [] and whitespaces
trimmed := strings.Trim(outputLine, "[] ")
if strings.Contains(trimmed, "aborted") || strings.Contains(trimmed, "failed") {
// Tokenize on whitespace
tokens := strings.Split(trimmed, " ")
// Determine the count of aborted or failed tests
count, err := strconv.Atoi(tokens[0])
if err == nil {
if count > 0 {
failedLine = true
}
}
}
}
return failedLine
}
// createTLSConfig creates a tls.Config which trusts the specified certificate // createTLSConfig creates a tls.Config which trusts the specified certificate
func createTLSConfig(t *testing.T, certFile, password string) *tls.Config { func createTLSConfig(t *testing.T, certFile, password string) *tls.Config {
// Get the SystemCertPool, continue with an empty pool on error // Get the SystemCertPool, continue with an empty pool on error

View File

@@ -52,8 +52,6 @@ func TestLicenseNotSet(t *testing.T) {
expectTerminationMessage(t, cli, id) expectTerminationMessage(t, cli, id)
} }
// Start a container with the LICENSE environment variable set to "view".
// Check that the container starts and displays the license text
func TestLicenseView(t *testing.T) { func TestLicenseView(t *testing.T) {
t.Parallel() t.Parallel()
@@ -134,6 +132,42 @@ func goldenPath(t *testing.T, metric bool) {
stopContainer(t, cli, id) stopContainer(t, cli, id)
} }
// TestSecurityVulnerabilities checks for any vulnerabilities in the image, as reported
// by Red Hat
func TestSecurityVulnerabilities(t *testing.T) {
t.Parallel()
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
t.Fatal(err)
}
rc, _ := runContainerOneShot(t, cli, "bash", "-c", "command -v microdnf && test -e /etc/yum.repos.d/ubi.repo")
if rc != 0 {
t.Skip("Skipping test because container is based on ubi-minimal, which doesn't include yum")
}
// id, _, err := command.Run("sudo", "buildah", "from", imageName())
// if err != nil {
// t.Log(id)
// t.Fatal(err)
// }
// id = strings.TrimSpace(id)
// defer command.Run("buildah", "rm", id)
// mnt, _, err := command.Run("sudo", "buildah", "mount", id)
// if err != nil {
// t.Log(mnt)
// t.Fatal(err)
// }
// mnt = strings.TrimSpace(mnt)
// out, _, err := command.Run("bash", "-c", "sudo cp /etc/yum.repos.d/* "+filepath.Join(mnt, "/etc/yum.repos.d/"))
// if err != nil {
// t.Log(out)
// t.Fatal(err)
// }
// out, ret, _ := command.Run("bash", "-c", "yum --installroot="+mnt+" updateinfo list sec | grep /Sec")
// if ret != 1 {
// t.Errorf("Expected no vulnerabilities, found the following:\n%v", out)
// }
}
func utilTestNoQueueManagerName(t *testing.T, hostName string, expectedName string) { func utilTestNoQueueManagerName(t *testing.T, hostName string, expectedName string) {
search := "QMNAME(" + expectedName + ")" search := "QMNAME(" + expectedName + ")"
@@ -153,7 +187,6 @@ func utilTestNoQueueManagerName(t *testing.T, hostName string, expectedName stri
t.Errorf("Expected result of running dspmq to contain name=%v, got name=%v", search, out) t.Errorf("Expected result of running dspmq to contain name=%v, got name=%v", search, out)
} }
} }
func TestNoQueueManagerName(t *testing.T) { func TestNoQueueManagerName(t *testing.T) {
t.Parallel() t.Parallel()

View File

@@ -1,10 +1,8 @@
module github.com/ibm-messaging/mq-container/test/docker module github.com/ibm-messaging/mq-container/test/docker
go 1.16 go 1.18
require ( require (
github.com/containerd/containerd v1.6.6 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
// Note: This is not actually Docker v17.12! // Note: This is not actually Docker v17.12!
// Go modules require the use of semver, but Docker does not use semver and has not // Go modules require the use of semver, but Docker does not use semver and has not
// [opted-in to use Go modules](https://github.com/golang/go/wiki/Modules#can-a-module-consume-a-package-that-has-not-opted-in-to-modules) // [opted-in to use Go modules](https://github.com/golang/go/wiki/Modules#can-a-module-consume-a-package-that-has-not-opted-in-to-modules)
@@ -17,6 +15,26 @@ require (
// version 1.41 on the server, which is currently too new for the version of Docker in Travis (Ubuntu Bionic) // version 1.41 on the server, which is currently too new for the version of Docker in Travis (Ubuntu Bionic)
github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible
github.com/docker/go-connections v0.4.0 github.com/docker/go-connections v0.4.0
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect )
google.golang.org/grpc v1.46.0 // indirect
require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.5.1 // indirect
github.com/containerd/containerd v1.6.3 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/grpc v1.46.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gotest.tools v2.2.0+incompatible // indirect
) )

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
/* /*
© Copyright IBM Corporation 2019, 2022 © Copyright IBM Corporation 2019, 2020
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -16,7 +16,6 @@ limitations under the License.
package main package main
import ( import (
"context"
"strings" "strings"
"testing" "testing"
"time" "time"
@@ -93,28 +92,15 @@ func TestMultiInstanceContainerStop(t *testing.T) {
waitForReady(t, cli, qm1aId) waitForReady(t, cli, qm1aId)
waitForReady(t, cli, qm1bId) waitForReady(t, cli, qm1bId)
err, originalActive, originalStandby := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId) err, active, standby := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) stopContainer(t, cli, active)
defer cancel()
stopContainer(t, cli, originalActive)
for { if status := getQueueManagerStatus(t, cli, standby, "QM1"); strings.Compare(status, "Running") != 0 {
status := getQueueManagerStatus(t, cli, originalStandby, "QM1") t.Fatalf("Expected QM1 to be running as active queue manager, dspmq returned status of %v", status)
select {
case <-time.After(1 * time.Second):
if status == "Running" {
t.Logf("Original standby is now the active")
return
} else if status == "Starting" {
t.Logf("Original standby is starting")
}
case <-ctx.Done():
t.Fatalf("%s Timed out waiting for standby to become the active. Status=%v", time.Now().Format(time.RFC3339), status)
}
} }
} }

View File

@@ -1,5 +1,5 @@
/* /*
© Copyright IBM Corporation 2019, 2022 © Copyright IBM Corporation 2019
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -77,7 +77,6 @@ func getActiveStandbyQueueManager(t *testing.T, cli *client.Client, qm1aId strin
func getQueueManagerStatus(t *testing.T, cli *client.Client, containerID string, queueManagerName string) string { func getQueueManagerStatus(t *testing.T, cli *client.Client, containerID string, queueManagerName string) string {
_, dspmqOut := execContainer(t, cli, containerID, "", []string{"bash", "-c", "dspmq", "-m", queueManagerName}) _, dspmqOut := execContainer(t, cli, containerID, "", []string{"bash", "-c", "dspmq", "-m", queueManagerName})
t.Logf("dspmq for %v (%v) returned: %v", containerID, queueManagerName, dspmqOut)
regex := regexp.MustCompile(`STATUS\(.*\)`) regex := regexp.MustCompile(`STATUS\(.*\)`)
status := regex.FindString(dspmqOut) status := regex.FindString(dspmqOut)
status = strings.TrimSuffix(strings.TrimPrefix(status, "STATUS("), ")") status = strings.TrimSuffix(strings.TrimPrefix(status, "STATUS("), ")")

View File

@@ -16,16 +16,16 @@
# Application build environment (Maven) # Application build environment (Maven)
############################################################################### ###############################################################################
FROM registry.access.redhat.com/ubi8/openjdk-8 as builder FROM registry.access.redhat.com/ubi8/openjdk-8 as builder
COPY pom.xml ./ COPY pom.xml .
#WORKDIR /usr/src/mymaven #WORKDIR /usr/src/mymaven
# Download dependencies separately, so Docker caches them # Download dependencies separately, so Docker caches them
RUN mvn dependency:go-offline install RUN mvn dependency:go-offline install
# Copy source # Copy source
COPY src ./src COPY src .
# Run the main build # Run the main build
RUN mvn --offline install RUN mvn --offline install
# Print a list of all the files (useful for debugging) # Print a list of all the files (useful for debugging)
RUN find ./ RUN find .
############################################################################### ###############################################################################
# Application runtime (JRE only, no build environment) # Application runtime (JRE only, no build environment)
@@ -35,4 +35,4 @@ FROM registry.access.redhat.com/ubi8/openjdk-8-runtime
COPY --from=builder /home/jboss/target/*.jar /opt/app/ COPY --from=builder /home/jboss/target/*.jar /opt/app/
COPY --from=builder /home/jboss/target/lib/*.jar /opt/app/ COPY --from=builder /home/jboss/target/lib/*.jar /opt/app/
USER 1001 USER 1001
ENTRYPOINT ["java", "-classpath", "/opt/app/*", "org.junit.platform.console.ConsoleLauncher", "--fail-if-no-tests", "-p", "com.ibm.mqcontainer.test", "--details", "verbose"] ENTRYPOINT ["java", "-classpath", "/opt/app/*", "org.junit.platform.console.ConsoleLauncher", "-p", "com.ibm.mqcontainer.test", "--details", "verbose"]

View File

@@ -1,5 +1,5 @@
/* /*
© Copyright IBM Corporation 2018, 2022 © Copyright IBM Corporation 2018, 2021
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@@ -82,10 +82,11 @@ class JMSTests {
boolean ibmjre = System.getenv("IBMJRE").equals("true"); boolean ibmjre = System.getenv("IBMJRE").equals("true");
if (ibmjre){ if (ibmjre){
System.setProperty("com.ibm.mq.cfg.useIBMCipherMappings", "true"); System.setProperty("com.ibm.mq.cfg.useIBMCipherMappings", "true");
factory.setSSLCipherSuite("SSL_RSA_WITH_AES_128_CBC_SHA256");
} else { } else {
System.setProperty("com.ibm.mq.cfg.useIBMCipherMappings", "false"); System.setProperty("com.ibm.mq.cfg.useIBMCipherMappings", "false");
factory.setSSLCipherSuite("TLS_RSA_WITH_AES_128_CBC_SHA256");
} }
factory.setSSLCipherSuite(System.getenv("MQ_TLS_CIPHER"));
} }
return factory; return factory;
} }

View File

@@ -1,23 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDxTCCAq2gAwIBAgIUc5EKoPi8cg2M2n+SqCPn44LFjoAwDQYJKoZIhvcNAQEL
BQAwcjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMREwDwYDVQQHDAhOZXcgWW9y
azEMMAoGA1UECgwDSUJNMQwwCgYDVQQLDANJQk0xDDAKBgNVBAMMA0lCTTEZMBcG
CSqGSIb3DQEJARYKbXFAaWJtLmNvbTAeFw0yMjEwMDYxMzA2NTVaFw0zMjEwMDMx
MzA2NTVaMHIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOWTERMA8GA1UEBwwITmV3
IFlvcmsxDDAKBgNVBAoMA0lCTTEMMAoGA1UECwwDSUJNMQwwCgYDVQQDDANJQk0x
GTAXBgkqhkiG9w0BCQEWCm1xQGlibS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQCls3oNIDxzKct0NXVsoz1Hng3BcaDPcBRYCNgAEwDOVe3rEEbZ
d2KFliDgCG3hCHMM1Yaabx3iTVsKklubBxr1JFmyDtgb4z9mJpMVYXS+gsKsZOs/
vNSmzpt5VlbEadHKJ/aFf/EWxvoOP80UiEeUJt36aWFUTyjjyArd2xS8fD1DATFB
U2bteaWfkpuLeFiTtwftZhsLv1s5T35+Ex087eX1tkm/TArxZsNl/9RrSWsbJh/t
bjiRKn+fCZdirFsurP3Si5Jd9laCW0RBKAKYEh40XYDgjLhvcazDPTBueTHXQPG5
S0hCOhCJiCWpPCsh8rIOCz0D9YIByZADR1WvAgMBAAGjUzBRMB0GA1UdDgQWBBS5
OsiPqZXlMwpMqGKczUg3qVvy0zAfBgNVHSMEGDAWgBS5OsiPqZXlMwpMqGKczUg3
qVvy0zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBfwYRcckke
/NzDHlFb8TBlUDqERmlT/qTWamVZO2Zuo4Y0BFOYFEA23F5sQU2s2MFSEZcAKe5v
mJroFE2rr4aY4bJ4Z0UXlOAYyqNxVOTI4MIxwbg3GVr8c8oWBnAmgqI9W9OpgZ52
/bN24XL9s6I3TeOTtYI9z5O70Kl/E3nG8GcfMw0EtNIy0UPUWvJH8FgEsotsRO9v
tPtlZklEK/D+Keozbs2shdNhKgVnDatpdTBqvwLztb1+te5AckuOnJsnG+iIrG2D
Ehoq2O3gktIVdAk4sv2BoONzegLWB+GSxGVZsemfYF4PkN9/w+znz0LK/ATAtabK
rikk0yC+Xg8z
-----END CERTIFICATE-----

View File

@@ -1,34 +0,0 @@
#!/bin/bash -ex
# -*- mode: sh -*-
# © Copyright IBM Corporation 2018, 2022
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KEY=server.key
CERT=server.crt
CACERT=cacert.crt
CAPEM=rootcakey.pem
# Create a private key and certificate in PEM format, for the server to use
openssl req \
-newkey rsa:2048 -nodes -keyout ${KEY} \
-subj "/CN=localhost" \
-addext "subjectAltName = DNS:localhost" \
-x509 -days 3650 -out ${CERT}
# Generate the private key of the root CA
openssl genrsa -out ${CAPEM} 2048
# Generate the self-signed root CA certificate. Manual input is required when prompted
openssl req -x509 -sha256 -new -nodes -key ${CAPEM} -days 3650 -out ${CACERT}

View File

@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEApbN6DSA8cynLdDV1bKM9R54NwXGgz3AUWAjYABMAzlXt6xBG
2XdihZYg4Aht4QhzDNWGmm8d4k1bCpJbmwca9SRZsg7YG+M/ZiaTFWF0voLCrGTr
P7zUps6beVZWxGnRyif2hX/xFsb6Dj/NFIhHlCbd+mlhVE8o48gK3dsUvHw9QwEx
QVNm7Xmln5Kbi3hYk7cH7WYbC79bOU9+fhMdPO3l9bZJv0wK8WbDZf/Ua0lrGyYf
7W44kSp/nwmXYqxbLqz90ouSXfZWgltEQSgCmBIeNF2A4Iy4b3Gswz0wbnkx10Dx
uUtIQjoQiYglqTwrIfKyDgs9A/WCAcmQA0dVrwIDAQABAoIBAQCcL9ZltPMF4mlh
+lnasuu6K+LvafmYTh7+9CcVutPRqfF+1nLR3NRC8sW+JnPb36kCeepMe1yByUR9
bINoV4QzebYKPi+56bQCx21wg9IVGRACi4WrKISRTsIB1z4mGVCj6pNWNsi7HYbq
E31tUx+VKCWoOdiCLbNvMUn84Npk5npK9P9F86qypSJqJv3HORgOa58x7qZiD2fk
TroLuGHKFWGtSiK1vvgax8gBwMi9JvWoPhwHagINh0WwT820+3/4KbqcsvRNSIu8
qA+ltk/Vt0ftwPMpxPYnvRFrSvzYIRE04fbWqA3mxhPr/oP3xXrwyd1hnX6GzPIR
KXeX1i7BAoGBANGV6XtL8cq8tu/4emOYDn4tncMRICQ8uMWZqnIQAvX8PBx1w9E2
Wbkl0oBHJ/gDtU+feDvbHI0JBvXerce2cxj4+793TGLUl980dgq776x2fcxHjvYZ
uZjJd4M95Lh+IhtWGZQ1FviiylDg62w+mrNydX8WiFjLGYPydQqCIAAxAoGBAMpl
m/MDqpgPxiDU1O9DAq8C/0MQUOc/p+67aGsYxmPDdCouBLA/zckQh6Cp9Wo3n7MF
X5UHOqn72q/4ahNEx+3YQoaLqRKTjUHl3r3zj+MsM0hIDp1uOxVzbANxazuLuqqA
C+yJTmRU7uvNPH1AMFJBKRSmhd3MJwoHF/KZAhvfAoGAFaGPU3ZnIjGP//x5RUYw
WL2EhtmBo7vQpjRR7yvP4muCGL3e0/z0DbPloe+2JFbdo7Ylxqe6rqO74Cx3ayFd
h7pK4VwCukCO3C6h8EGtXvNr0GWiT6wgB7DjcNw2ewQpqQCd6zn/gPHsR6SvJ6De
fp7VmaRNtjxgCcpAYjFD9EECgYAhEPaofjnZvAH/jSX4rPb8Rr4TY9AD58d03lNR
4+tNkzogRgJoFRR2u+ecnQfGQa4qnj8eZt7ztHzm8OvLmBodxo4f0yNdMJQMZxS7
7dXdJHSAY51XpRGsEH5eFaKSSOLHRkIsc8ZF6AZcqdwvDlSWq6SdhhMqyFa8cao8
7TiF+wKBgADNZ4HoZDfnuH5jUvf7y+YlxDX3jxWR+BUTLCJmt082uT+8Xg5SALec
B8GP5s6VKglD5Wzj8IhxvpQ5yzH9DRHwEeu3vFLBinIUlWdBiXwtnbmY0E9r3PSb
pZQH5RZ5PyrJicIVBJSqdFu2HDl4heeLJE0LGh7SQnFaexxXn397
-----END RSA PRIVATE KEY-----

View File

@@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDHzCCAgegAwIBAgIUUFCo8fUglrbfDY8ZUDnzAfWeq54wDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTIyMTAwNjEzMDYwMloXDTMyMTAw
MzEzMDYwMlowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAxcja4TbshPj4tWgbRP73eDs2382j6Km5TNej6To13PJq
Wyezg081ctmgFEMlgbRiowZmecpYOKjDKuVDtfLE6nZMmN+PjXXuOMGIPu67fx/4
tnaMDYw96WIBEFNVZ7dC/pceaTIRbnjma89o1/mTudTAYPLAvKpeBqpJJFWPMDhz
nK3NKeydTdUYc9jmEJWiFCI4bUdyvyUjp+7QrDbdODXo27/nVAV0Ih+OuU4ZnxT5
cf1fzVV1ZqHd8jbLm25ZoAmkk+9DSXFNA2hbSepf70mRVD/Qyn8U6b5A2v+mWIfs
B1+iAlPl7IX88W1Q9q1yu0uT8YWGWpeTbeOnJ4WJ8wIDAQABo2kwZzAdBgNVHQ4E
FgQUEjp6AtPmpuLQyBPeiW4pW+VGb2wwHwYDVR0jBBgwFoAUEjp6AtPmpuLQyBPe
iW4pW+VGb2wwDwYDVR0TAQH/BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3Qw
DQYJKoZIhvcNAQELBQADggEBAL2bTWfTqxfN0YbBPjG05sR4nO8mhbNSGHDuGeiO
OP0wPxkgAueScTpyhHWEAJmMQOMUM9KhByZj7LnqW8XY9BBS3zPAyzAdia8/o6Vl
7El+M2JCfqz7hSupRK8M+r+XUq3hyEFjPLt+KO6D5VNzXiTM+36UueeQD3aaxxyo
LpHSPeXFBkOrT/wt6FHi4NHvWls95PllncWZVYjxPMUUF/o30tOxSmgXwjUknrI8
29ADKM1IbFuXd4vKYG9V+ukI6n5F86PYrN2ajPBKIidvTqU8tPzMHuJZ3YiIiv8p
TARE2b5YLWuu+aF2z/V71MmIWr0uyOk6pZVGOCw7fwHx/wg=
-----END CERTIFICATE-----

View File

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDFyNrhNuyE+Pi1
aBtE/vd4OzbfzaPoqblM16PpOjXc8mpbJ7ODTzVy2aAUQyWBtGKjBmZ5ylg4qMMq
5UO18sTqdkyY34+Nde44wYg+7rt/H/i2dowNjD3pYgEQU1Vnt0L+lx5pMhFueOZr
z2jX+ZO51MBg8sC8ql4GqkkkVY8wOHOcrc0p7J1N1Rhz2OYQlaIUIjhtR3K/JSOn
7tCsNt04Nejbv+dUBXQiH465ThmfFPlx/V/NVXVmod3yNsubblmgCaST70NJcU0D
aFtJ6l/vSZFUP9DKfxTpvkDa/6ZYh+wHX6ICU+XshfzxbVD2rXK7S5PxhYZal5Nt
46cnhYnzAgMBAAECggEBAKLRsZZbf6QLzbqRBHntJ04b+RWOlVOQfRHMJ4x1Nig4
i+OUsEv1pftxOj3T9QlstRKdzziNociq7VffurkLLJ4TWwUybVu37K9easncABAs
ArQ6rRruC32YB2YoJBOoowcw4oEZDY6TCqVP7nB1be46PVDSJmZqHdOA1YuKv8Ci
FbzLZEKYy6QGmHp9xMzc3usQ+KRNIFcR3NJb0eCbfAXb0tP3F12i4ygnxifkOVQS
hukTJlZVbAO3W9uUEzLh5bkLoPfob6Vrwv1tGQ48uFgzgPXc4bWOUDFXHW5+vQLD
1MKFboozrNhRR+Q5xvbRnaWEv4hMHlUNggc5ErRj6CkCgYEA5m5f1VfhfqSvEF2c
XcIfUDiCzREpllY2ZdBSfUlz/GA6f0QUyFJBCdd4ypipQcggn60de9DoKDcNcq32
rfVfANpsciJq9s4+xLL8MGtUuoi4HK8LHP3tc8aJaAcCVjBFbz0orKXDUOcue6A5
Z5riDjiXOE56XSLSSNSRjWh4psUCgYEA27sfaM4J0YkdFuth/Qu+X9PeroUZyC0T
3glMN/7PU4jZg+2v4Psfe61gj8qOt0catuWvsD0wQTy3jt+svY/KfkbspK6/7CEG
fKx1AB1xeMr4JuQp9POFVhKRn4sBUMbHOkbjzlNpGmUI2arlLRTwT8YpuMDjCK4l
ZuUYB/IHOVcCgYAqexqryCHIKTAlAjz7g/gl3+UtTQavsoEg0AEFG++IDW17XN+/
9noLCHA6WV6KxAxPo6iV1POXxl5yT+P0OhIjpCDuAa5ahbdIp/6aJo9ePCpFD3gr
Bh0qhOV8Ch7CKPAEC/Bds8mINrZ5EBbFJOab3I70UHN6jBrcVmPm/+WOSQKBgQCW
AbBWt1qCnu2qCPWzcAH+n8DFOf645vVKPuS20ZEuwR1l8K2ClU4P+/QRFkLKIpO9
Sx7e3VcFInNZ6Z+fJfwiqz7AysAhbwZjtMSHWJJv2XkB7AAsxtc/RJv/5ED4qUu3
oE/DOrRlHZamKwIb/dB1VZ6ED8Ku2VyVW09FlViTLwKBgEU21xqvP1+TXzsrZNGm
/Hj/RAaA8B6tyo5Dj9glV80oakMSaxBsLP9xHkoZjkHaJnoFosKBQSnCcPnEY4gP
22WEyGshu8sujLibLKWhARqjeubatXv+XBxiDdMbgcd/XTwbI4HTjXy5LF0o47UI
W6itMOg9uCfBJM/i2jrAkmQR
-----END PRIVATE KEY-----

View File

@@ -35,10 +35,13 @@ go_minor="${go_version_parts[1]}"
if [[ "$go_major" -eq 1 && "$go_minor" -lt 18 ]]; then if [[ "$go_major" -eq 1 && "$go_minor" -lt 18 ]]; then
echo "Go version ${go_major}.${go_minor} < 1.18... Pinning credential-helper commit" echo "Go version ${go_major}.${go_minor} < 1.18... Pinning credential-helper commit"
git checkout ab7fd12c67d83193072fa91e5648b036547f6323 git checkout ab7fd12c67d83193072fa91e5648b036547f6323
make pass
cp bin/docker-credential-pass $GOPATH/bin/docker-credential-pass
else
make pass
cp bin/build/docker-credential-pass $GOPATH/bin/docker-credential-pass
fi fi
make pass
cp bin/docker-credential-pass $GOPATH/bin/docker-credential-pass
mkdir -p /home/travis/.docker mkdir -p /home/travis/.docker
echo '{ "credsStore": "pass" }' | tee /home/travis/.docker/config.json echo '{ "credsStore": "pass" }' | tee /home/travis/.docker/config.json
gpg2 --batch --gen-key <<-EOF gpg2 --batch --gen-key <<-EOF

View File

@@ -1,3 +0,0 @@
module github.com/cespare/xxhash/v2
go 1.11

View File

@@ -765,7 +765,7 @@ func unescape(s string) (ch string, tail string, err error) {
if i > utf8.MaxRune { if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
} }
return string(i), s, nil return string(rune(i)), s, nil
} }
return "", "", fmt.Errorf(`unknown escape \%c`, r) return "", "", fmt.Errorf(`unknown escape \%c`, r)
} }
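
The one-line change above swaps string(i) for string(rune(i)): converting an integer to string yields the UTF-8 encoding of that code point, and go vet in Go 1.15+ flags the untyped-integer form. A small illustrative snippet (not taken from the vendored package) showing the behaviour the explicit rune conversion preserves:

package main

import "fmt"

func main() {
    i := 0x1F600 // an arbitrary Unicode code point (😀), chosen only as an example
    // string(rune(i)) encodes the code point as UTF-8; it does not render the
    // decimal number, which is the usual source of confusion with string(int).
    fmt.Println(string(rune(i)))      // 😀
    fmt.Println(len(string(rune(i)))) // 4 UTF-8 bytes, not the numeric value
}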

View File

@@ -163,7 +163,7 @@ func (c *counter) updateExemplar(v float64, l Labels) {
// (e.g. number of HTTP requests, partitioned by response code and // (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec. // method). Create instances with NewCounterVec.
type CounterVec struct { type CounterVec struct {
*metricVec *MetricVec
} }
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
@@ -176,11 +176,11 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &CounterVec{ return &CounterVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) { if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
} }
result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection. result.init(result) // Init self-collection.
return result return result
}), }),
@@ -188,7 +188,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
} }
// GetMetricWithLabelValues returns the Counter for the given slice of label // GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of // values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created. // label values is accessed for the first time, a new Counter is created.
// //
// It is possible to call this method without using the returned Counter to only // It is possible to call this method without using the returned Counter to only
@@ -202,7 +202,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// Counter with the same label values is created later. // Counter with the same label values is created later.
// //
// An error is returned if the number of label values is not the same as the // An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels). // number of variable labels in Desc (minus any curried labels).
// //
// Note that for more than one label value, this method is prone to mistakes // Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -211,7 +211,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example. // See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Counter), err return metric.(Counter), err
} }
@@ -219,19 +219,19 @@ func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
} }
// GetMetricWith returns the Counter for the given Labels map (the label names // GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is // must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of // accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are // creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues. // the same as for GetMetricWithLabelValues.
// //
// An error is returned if the number and names of the Labels are inconsistent // An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels). // with those of the variable labels in Desc (minus any curried labels).
// //
// This method is used for the same purpose as // This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two // GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods. // methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := v.metricVec.getMetricWith(labels) metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Counter), err return metric.(Counter), err
} }
@@ -275,7 +275,7 @@ func (v *CounterVec) With(labels Labels) Counter {
// registered with a given registry (usually the uncurried version). The Reset // registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector. // method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
vec, err := v.curryWith(labels) vec, err := v.MetricVec.CurryWith(labels)
if vec != nil { if vec != nil {
return &CounterVec{vec}, err return &CounterVec{vec}, err
} }
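
The counter.go changes above export MetricVec (formerly the unexported metricVec), so CounterVec now embeds a public type while the CounterVec API itself stays the same. A hedged usage sketch of that unchanged surface, with metric and label names invented for illustration:

package main

import (
    "log"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // A counter partitioned by two variable labels.
    requests := prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "example_requests_total", Help: "Example request counter."},
        []string{"code", "method"},
    )
    prometheus.MustRegister(requests)

    // Increment one child; label values follow the order of the label names.
    requests.WithLabelValues("200", "get").Inc()

    // CurryWith (now delegating to the exported MetricVec.CurryWith) fixes one
    // label in advance and returns a narrower vector.
    getOnly, err := requests.CurryWith(prometheus.Labels{"method": "get"})
    if err != nil {
        log.Fatal(err)
    }
    getOnly.WithLabelValues("500").Inc()
}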

View File

@@ -20,7 +20,7 @@ import (
"strings" "strings"
"github.com/cespare/xxhash/v2" "github.com/cespare/xxhash/v2"
//lint:ignore SA1019 Need to keep deprecated package for compatibility. //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@@ -51,7 +51,7 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on // constLabelPairs contains precalculated DTO label pairs based on
// the constant labels. // the constant labels.
constLabelPairs []*dto.LabelPair constLabelPairs []*dto.LabelPair
// VariableLabels contains names of labels for which the metric // variableLabels contains names of labels for which the metric
// maintains variable values. // maintains variable values.
variableLabels []string variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This // id is a hash of the values of the ConstLabels and fqName. This

View File

@@ -22,43 +22,10 @@ type expvarCollector struct {
exports map[string]*Desc exports map[string]*Desc
} }
// NewExpvarCollector returns a newly allocated expvar Collector that still has // NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
// to be registered with a Prometheus registry. // See there for documentation.
// //
// An expvar Collector collects metrics from the expvar interface. It provides a // Deprecated: Use collectors.NewExpvarCollector instead.
// quick way to expose numeric values that are already exported via expvar as
// Prometheus metrics. Note that the data models of expvar and Prometheus are
// fundamentally different, and that the expvar Collector is inherently slower
// than native Prometheus metrics. Thus, the expvar Collector is probably great
// for experiments and prototying, but you should seriously consider a more
// direct implementation of Prometheus metrics for monitoring production
// systems.
//
// The exports map has the following meaning:
//
// The keys in the map correspond to expvar keys, i.e. for every expvar key you
// want to export as Prometheus metric, you need an entry in the exports
// map. The descriptor mapped to each key describes how to export the expvar
// value. It defines the name and the help string of the Prometheus metric
// proxying the expvar value. The type will always be Untyped.
//
// For descriptors without variable labels, the expvar value must be a number or
// a bool. The number is then directly exported as the Prometheus sample
// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
// that are not numbers or bools are silently ignored.
//
// If the descriptor has one variable label, the expvar value must be an expvar
// map. The keys in the expvar map become the various values of the one
// Prometheus label. The values in the expvar map must be numbers or bools again
// as above.
//
// For descriptors with more than one variable label, the expvar must be a
// nested expvar map, i.e. where the values of the topmost map are maps again
// etc. until a depth is reached that corresponds to the number of labels. The
// leaves of that structure must be numbers or bools as above to serve as the
// sample values.
//
// Anything that does not fit into the scheme above is silently ignored.
func NewExpvarCollector(exports map[string]*Desc) Collector { func NewExpvarCollector(exports map[string]*Desc) Collector {
return &expvarCollector{ return &expvarCollector{
exports: exports, exports: exports,
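
NewExpvarCollector in the prometheus package is now a deprecated stub; the documented replacement lives in the collectors sub-package. A minimal migration sketch, with the expvar key and metric names chosen only for illustration:

package main

import (
    "expvar"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
    // Publish an expvar value (the name is illustrative).
    hits := expvar.NewInt("example_hits")
    hits.Add(1)

    reg := prometheus.NewRegistry()
    // Preferred replacement for the deprecated prometheus.NewExpvarCollector.
    reg.MustRegister(collectors.NewExpvarCollector(map[string]*prometheus.Desc{
        "example_hits": prometheus.NewDesc("example_hits", "Hits exported via expvar.", nil, nil),
    }))
}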

View File

@@ -132,7 +132,7 @@ func (g *gauge) Write(out *dto.Metric) error {
// (e.g. number of operations queued, partitioned by user and operation // (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec. // type). Create instances with NewGaugeVec.
type GaugeVec struct { type GaugeVec struct {
*metricVec *MetricVec
} }
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
@@ -145,11 +145,11 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &GaugeVec{ return &GaugeVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) { if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
} }
result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection. result.init(result) // Init self-collection.
return result return result
}), }),
@@ -157,7 +157,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
} }
// GetMetricWithLabelValues returns the Gauge for the given slice of label // GetMetricWithLabelValues returns the Gauge for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of // values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Gauge is created. // label values is accessed for the first time, a new Gauge is created.
// //
// It is possible to call this method without using the returned Gauge to only // It is possible to call this method without using the returned Gauge to only
@@ -172,7 +172,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// example. // example.
// //
// An error is returned if the number of label values is not the same as the // An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels). // number of variable labels in Desc (minus any curried labels).
// //
// Note that for more than one label value, this method is prone to mistakes // Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -180,7 +180,7 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes // latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Gauge), err return metric.(Gauge), err
} }
@@ -188,19 +188,19 @@ func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
} }
// GetMetricWith returns the Gauge for the given Labels map (the label names // GetMetricWith returns the Gauge for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is // must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Gauge is created. Implications of // accessed for the first time, a new Gauge is created. Implications of
// creating a Gauge without using it and keeping the Gauge for later use are // creating a Gauge without using it and keeping the Gauge for later use are
// the same as for GetMetricWithLabelValues. // the same as for GetMetricWithLabelValues.
// //
// An error is returned if the number and names of the Labels are inconsistent // An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels). // with those of the variable labels in Desc (minus any curried labels).
// //
// This method is used for the same purpose as // This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two // GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods. // methods.
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := v.metricVec.getMetricWith(labels) metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Gauge), err return metric.(Gauge), err
} }
@@ -244,7 +244,7 @@ func (v *GaugeVec) With(labels Labels) Gauge {
// registered with a given registry (usually the uncurried version). The Reset // registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector. // method deletes all metrics, even if called on a curried vector.
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
vec, err := v.curryWith(labels) vec, err := v.MetricVec.CurryWith(labels)
if vec != nil { if vec != nil {
return &GaugeVec{vec}, err return &GaugeVec{vec}, err
} }

View File

@@ -36,31 +36,10 @@ type goCollector struct {
msMaxAge time.Duration // Maximum allowed age of old memstats. msMaxAge time.Duration // Maximum allowed age of old memstats.
} }
// NewGoCollector returns a collector that exports metrics about the current Go // NewGoCollector is the obsolete version of collectors.NewGoCollector.
// process. This includes memory stats. To collect those, runtime.ReadMemStats // See there for documentation.
// is called. This requires to “stop the world”, which usually only happens for
// garbage collection (GC). Take the following implications into account when
// deciding whether to use the Go collector:
// //
// 1. The performance impact of stopping the world is the more relevant the more // Deprecated: Use collectors.NewGoCollector instead.
// frequently metrics are collected. However, with Go1.9 or later the
// stop-the-world time per metrics collection is very short (~25µs) so that the
// performance impact will only matter in rare cases. However, with older Go
// versions, the stop-the-world duration depends on the heap size and can be
// quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
//
// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
// metrics collection happens to coincide with GC, it will only complete after
// GC has finished. Usually, GC is fast enough to not cause problems. However,
// with a very large heap, GC might take multiple seconds, which is enough to
// cause scrape timeouts in common setups. To avoid this problem, the Go
// collector will use the memstats from a previous collection if
// runtime.ReadMemStats takes more than 1s. However, if there are no previously
// collected memstats, or their collection is more than 5m ago, the collection
// will block until runtime.ReadMemStats succeeds. (The problem might be solved
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
// issue.)
func NewGoCollector() Collector { func NewGoCollector() Collector {
return &goCollector{ return &goCollector{
goroutinesDesc: NewDesc( goroutinesDesc: NewDesc(
@@ -365,25 +344,17 @@ type memStatsMetrics []struct {
valType ValueType valType ValueType
} }
// NewBuildInfoCollector returns a collector collecting a single metric // NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// "go_build_info" with the constant value 1 and three labels "path", "version", // See there for documentation.
// and "checksum". Their label values contain the main module path, version, and
// checksum, respectively. The labels will only have meaningful values if the
// binary is built with Go module support and from source code retrieved from
// the source repository (rather than the local file system). This is usually
// accomplished by building from outside of GOPATH, specifying the full address
// of the main package, e.g. "GO111MODULE=on go run
// github.com/prometheus/client_golang/examples/random". If built without Go
// module support, all label values will be "unknown". If built with Go module
// support but using the source code from the local file system, the "path" will
// be set appropriately, but "checksum" will be empty and "version" will be
// "(devel)".
// //
// This collector uses only the build information for the main module. See // Deprecated: Use collectors.NewBuildInfoCollector instead.
// https://github.com/povilasv/prommod for an example of a collector for the
// module dependencies.
func NewBuildInfoCollector() Collector { func NewBuildInfoCollector() Collector {
path, version, sum := readBuildInfo() path, version, sum := "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
}
c := &selfCollector{MustNewConstMetric( c := &selfCollector{MustNewConstMetric(
NewDesc( NewDesc(
"go_build_info", "go_build_info",

View File

@@ -22,7 +22,7 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
//lint:ignore SA1019 Need to keep deprecated package for compatibility. //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
@@ -47,7 +47,12 @@ type Histogram interface {
Metric Metric
Collector Collector
// Observe adds a single observation to the histogram. // Observe adds a single observation to the histogram. Observations are
// usually positive or zero. Negative observations are accepted but
// prevent current versions of Prometheus from properly detecting
// counter resets in the sum of observations. See
// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
// for details.
Observe(float64) Observe(float64)
} }
@@ -192,7 +197,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h := &histogram{ h := &histogram{
desc: desc, desc: desc,
upperBounds: opts.Buckets, upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{{}, {}}, counts: [2]*histogramCounts{{}, {}},
now: time.Now, now: time.Now,
} }
@@ -409,7 +414,7 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
// (e.g. HTTP request latencies, partitioned by status code and method). Create // (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec. // instances with NewHistogramVec.
type HistogramVec struct { type HistogramVec struct {
*metricVec *MetricVec
} }
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
@@ -422,14 +427,14 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &HistogramVec{ return &HistogramVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...) return newHistogram(desc, opts, lvs...)
}), }),
} }
} }
// GetMetricWithLabelValues returns the Histogram for the given slice of label // GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of // values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created. // label values is accessed for the first time, a new Histogram is created.
// //
// It is possible to call this method without using the returned Histogram to only // It is possible to call this method without using the returned Histogram to only
@@ -444,7 +449,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// example. // example.
// //
// An error is returned if the number of label values is not the same as the // An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels). // number of variable labels in Desc (minus any curried labels).
// //
// Note that for more than one label value, this method is prone to mistakes // Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -453,7 +458,7 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example. // See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...) metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil { if metric != nil {
return metric.(Observer), err return metric.(Observer), err
} }
@@ -461,19 +466,19 @@ func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error)
} }
// GetMetricWith returns the Histogram for the given Labels map (the label names // GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is // must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of // accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use // creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues. // are the same as for GetMetricWithLabelValues.
// //
// An error is returned if the number and names of the Labels are inconsistent // An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels). // with those of the variable labels in Desc (minus any curried labels).
// //
// This method is used for the same purpose as // This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two // GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods. // methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels) metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil { if metric != nil {
return metric.(Observer), err return metric.(Observer), err
} }
@@ -517,7 +522,7 @@ func (v *HistogramVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset // registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector. // method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels) vec, err := v.MetricVec.CurryWith(labels)
if vec != nil { if vec != nil {
return &HistogramVec{vec}, err return &HistogramVec{vec}, err
} }
@@ -602,7 +607,7 @@ func NewConstHistogram(
count: count, count: count,
sum: sum, sum: sum,
buckets: buckets, buckets: buckets,
labelPairs: makeLabelPairs(desc, labelValues), labelPairs: MakeLabelPairs(desc, labelValues),
}, nil }, nil
} }
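
For reference, a hedged sketch of the HistogramVec surface touched above; the metric name, label and bucket choice are illustrative, not anything this change prescribes.

package main

import (
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // A latency histogram partitioned by method; DefBuckets is an example choice.
    latency := prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Name:    "example_request_duration_seconds",
            Help:    "Example request latency.",
            Buckets: prometheus.DefBuckets,
        },
        []string{"method"},
    )
    prometheus.MustRegister(latency)

    start := time.Now()
    // ... handle a request ...
    // Observations should normally be >= 0; negative values break counter-reset
    // detection on the _sum series, as the updated Observe documentation notes.
    latency.WithLabelValues("get").Observe(time.Since(start).Seconds())
}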

View File

@@ -17,7 +17,7 @@ import (
"strings" "strings"
"time" "time"
//lint:ignore SA1019 Need to keep deprecated package for compatibility. //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
@@ -58,7 +58,7 @@ type Metric interface {
} }
// Opts bundles the options for creating most Metric types. Each metric // Opts bundles the options for creating most Metric types. Each metric
// implementation XXX has its own XXXOpts type, but in most cases, it is just be // implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.) // an alias of this type (which might change when the requirement arises.)
// //
// It is mandatory to set Name to a non-empty string. All other fields are // It is mandatory to set Name to a non-empty string. All other fields are
@@ -89,7 +89,7 @@ type Opts struct {
// better covered by target labels set by the scraping Prometheus // better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a // server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also // machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels ConstLabels Labels
} }

View File

@@ -15,7 +15,11 @@ package prometheus
import ( import (
"errors" "errors"
"fmt"
"io/ioutil"
"os" "os"
"strconv"
"strings"
) )
type processCollector struct { type processCollector struct {
@@ -50,16 +54,10 @@ type ProcessCollectorOpts struct {
ReportErrors bool ReportErrors bool
} }
// NewProcessCollector returns a collector which exports the current state of // NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
// process metrics including CPU, memory and file descriptor usage as well as // See there for documentation.
// the process start time. The detailed behavior is defined by the provided
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
// collector for the current process with an empty namespace string and no error
// reporting.
// //
// The collector only works on operating systems with a Linux-style proc // Deprecated: Use collectors.NewProcessCollector instead.
// filesystem and on Microsoft Windows. On other operating systems, it will not
// collect any metrics.
func NewProcessCollector(opts ProcessCollectorOpts) Collector { func NewProcessCollector(opts ProcessCollectorOpts) Collector {
ns := "" ns := ""
if len(opts.Namespace) > 0 { if len(opts.Namespace) > 0 {
@@ -149,3 +147,20 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error)
} }
ch <- NewInvalidMetric(desc, err) ch <- NewInvalidMetric(desc, err)
} }
// NewPidFileFn returns a function that retrieves a pid from the specified file.
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
return func() (int, error) {
content, err := ioutil.ReadFile(pidFilePath)
if err != nil {
return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
}
return pid, nil
}
}
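
NewPidFileFn, added above, is meant to feed ProcessCollectorOpts.PidFn. A minimal sketch of that wiring; the pid-file path and namespace are assumptions made for illustration:

package main

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
    reg := prometheus.NewRegistry()
    // Collect process metrics for whichever pid is written to the (illustrative)
    // pid file, instead of defaulting to the current process.
    reg.MustRegister(collectors.NewProcessCollector(prometheus.ProcessCollectorOpts{
        PidFn:     prometheus.NewPidFileFn("/var/run/example.pid"),
        Namespace: "example",
    }))
}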

View File

@@ -83,8 +83,7 @@ type readerFromDelegator struct{ *responseWriterDelegator }
type pusherDelegator struct{ *responseWriterDelegator } type pusherDelegator struct{ *responseWriterDelegator }
func (d closeNotifierDelegator) CloseNotify() <-chan bool { func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
//remove support from client_golang yet.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify() return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
} }
func (d flusherDelegator) Flush() { func (d flusherDelegator) Flush() {
@@ -348,8 +347,7 @@ func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) deleg
} }
id := 0 id := 0
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
//remove support from client_golang yet.
if _, ok := w.(http.CloseNotifier); ok { if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier id += closeNotifier
} }

View File

@@ -99,7 +99,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
} }
if opts.Registry != nil { if opts.Registry != nil {
// Initialize all possibilites that can occur below. // Initialize all possibilities that can occur below.
errCnt.WithLabelValues("gathering") errCnt.WithLabelValues("gathering")
errCnt.WithLabelValues("encoding") errCnt.WithLabelValues("encoding")
if err := opts.Registry.Register(errCnt); err != nil { if err := opts.Registry.Register(errCnt); err != nil {
@@ -303,8 +303,12 @@ type Logger interface {
// HandlerOpts specifies options how to serve metrics via an http.Handler. The // HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default. // zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct { type HandlerOpts struct {
// ErrorLog specifies an optional logger for errors collecting and // ErrorLog specifies an optional Logger for errors collecting and
// serving metrics. If nil, errors are not logged at all. // serving metrics. If nil, errors are not logged at all. Note that the
// type of a reported error is often prometheus.MultiError, which
// formats into a multi-line error string. If you want to avoid the
// latter, create a Logger implementation that detects a
// prometheus.MultiError and formats the contained errors into one line.
ErrorLog Logger ErrorLog Logger
// ErrorHandling defines how errors are handled. Note that errors are // ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling provided ErrorLog // logged regardless of the configured ErrorHandling provided ErrorLog
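
A short sketch of wiring the HandlerOpts described above. The standard library logger is shown only as one possible Logger implementation, and the listen address is illustrative:

package main

import (
    "log"
    "net/http"
    "os"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()
    handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
        // *log.Logger satisfies promhttp.Logger; gathering/encoding errors
        // (often a prometheus.MultiError) are reported through it.
        ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
        ErrorHandling: promhttp.ContinueOnError,
    })
    http.Handle("/metrics", handler)
    log.Fatal(http.ListenAndServe(":2112", nil))
}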

View File

@@ -49,7 +49,10 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// http.RoundTripper to observe the request result with the provided CounterVec. // http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two non-const non-curried labels. For // The CounterVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function // those, the only allowed label names are "code" and "method". The function
// panics otherwise. Partitioning of the CounterVec happens by HTTP status code // panics otherwise. For the "method" label a predefined default label value set
// is used to filter given values. Values besides predefined values will count
// as `unknown` method.`WithExtraMethods` can be used to add more
// methods to the set. Partitioning of the CounterVec happens by HTTP status code
// and/or HTTP method if the respective instance label names are present in the // and/or HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. // CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
// //
@@ -57,13 +60,18 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// is not incremented. // is not incremented.
// //
// See the example for ExampleInstrumentRoundTripperDuration for example usage. // See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
rtOpts := &option{}
for _, o := range opts {
o(rtOpts)
}
code, method := checkLabels(counter) code, method := checkLabels(counter)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r) resp, err := next.RoundTrip(r)
if err == nil { if err == nil {
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
} }
return resp, err return resp, err
}) })
@@ -73,7 +81,10 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
// http.RoundTripper to observe the request duration with the provided // http.RoundTripper to observe the request duration with the provided
// ObserverVec. The ObserverVec must have zero, one, or two non-const // ObserverVec. The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and // non-curried labels. For those, the only allowed label names are "code" and
// "method". The function panics otherwise. The Observe method of the Observer // "method". The function panics otherwise. For the "method" label a predefined
// default label value set is used to filter given values. Values besides
// predefined values will count as `unknown` method. `WithExtraMethods`
// can be used to add more methods to the set. The Observe method of the Observer
// in the ObserverVec is called with the request duration in // in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the // seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For // respective instance label names are present in the ObserverVec. For
@@ -85,14 +96,19 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
// //
// Note that this method is only guaranteed to never observe negative durations // Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+. // if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
rtOpts := &option{}
for _, o := range opts {
o(rtOpts)
}
code, method := checkLabels(obs) code, method := checkLabels(obs)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now() start := time.Now()
resp, err := next.RoundTrip(r) resp, err := next.RoundTrip(r)
if err == nil { if err == nil {
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
} }
return resp, err return resp, err
}) })
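
The client instrumentation above gains a variadic Option parameter, and the doc comments point at WithExtraMethods for extending the recognised method set. A hedged usage sketch; the metric name, the PURGE method and the target URL are illustrative only:

package main

import (
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    counter := prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "example_client_requests_total", Help: "Outgoing requests."},
        []string{"code", "method"},
    )
    prometheus.MustRegister(counter)

    // Wrap the default transport; without WithExtraMethods, PURGE requests
    // would be counted under the "unknown" method label.
    client := &http.Client{
        Transport: promhttp.InstrumentRoundTripperCounter(
            counter,
            http.DefaultTransport,
            promhttp.WithExtraMethods("PURGE"),
        ),
    }
    _, _ = client.Get("http://localhost:8080/healthz") // illustrative target
}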

View File

@@ -43,14 +43,17 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// InstrumentHandlerDuration is a middleware that wraps the provided // InstrumentHandlerDuration is a middleware that wraps the provided
// http.Handler to observe the request duration with the provided ObserverVec. // http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two non-const non-curried labels. For // The ObserverVec must have valid metric and label names and must have zero,
// those, the only allowed label names are "code" and "method". The function // one, or two non-const non-curried labels. For those, the only allowed label
// panics otherwise. The Observe method of the Observer in the ObserverVec is // names are "code" and "method". The function panics otherwise. For the "method"
// called with the request duration in seconds. Partitioning happens by HTTP // label a predefined default label value set is used to filter given values.
// status code and/or HTTP method if the respective instance label names are // Values besides predefined values will count as `unknown` method.
// present in the ObserverVec. For unpartitioned observations, use an //`WithExtraMethods` can be used to add more methods to the set. The Observe
// ObserverVec with zero labels. Note that partitioning of Histograms is // method of the Observer in the ObserverVec is called with the request duration
// expensive and should be used judiciously. // in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
// //
// If the wrapped Handler does not set a status code, a status code of 200 is assumed. // If the wrapped Handler does not set a status code, a status code of 200 is assumed.
// //
@@ -58,7 +61,12 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// //
// Note that this method is only guaranteed to never observe negative durations // Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+. // if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs) code, method := checkLabels(obs)
if code { if code {
@@ -67,57 +75,70 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
d := newDelegator(w, nil) d := newDelegator(w, nil)
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
}) })
} }
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now() now := time.Now()
next.ServeHTTP(w, r) next.ServeHTTP(w, r)
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
}) })
} }
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
// to observe the request result with the provided CounterVec. The CounterVec // to observe the request result with the provided CounterVec. The CounterVec
// must have zero, one, or two non-const non-curried labels. For those, the only // must have valid metric and label names and must have zero, one, or two
// allowed label names are "code" and "method". The function panics // non-const non-curried labels. For those, the only allowed label names are
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or // "code" and "method". The function panics otherwise. For the "method"
// HTTP method if the respective instance label names are present in the // label a predefined default label value set is used to filter given values.
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. // Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
// CounterVec happens by HTTP status code and/or HTTP method if the respective
// instance label names are present in the CounterVec. For unpartitioned
// counting, use a CounterVec with zero labels.
// //
// If the wrapped Handler does not set a status code, a status code of 200 is assumed. // If the wrapped Handler does not set a status code, a status code of 200 is assumed.
// //
// If the wrapped Handler panics, the Counter is not incremented. // If the wrapped Handler panics, the Counter is not incremented.
// //
// See the example for InstrumentHandlerDuration for example usage. // See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(counter) code, method := checkLabels(counter)
if code { if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil) d := newDelegator(w, nil)
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
counter.With(labels(code, method, r.Method, d.Status())).Inc() counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
}) })
} }
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r) next.ServeHTTP(w, r)
counter.With(labels(code, method, r.Method, 0)).Inc() counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
}) })
} }
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
// http.Handler to observe with the provided ObserverVec the request duration // http.Handler to observe with the provided ObserverVec the request duration
// until the response headers are written. The ObserverVec must have zero, one, // until the response headers are written. The ObserverVec must have valid
// or two non-const non-curried labels. For those, the only allowed label names // metric and label names and must have zero, one, or two non-const non-curried
// are "code" and "method". The function panics otherwise. The Observe method of // labels. For those, the only allowed label names are "code" and "method". The
// the Observer in the ObserverVec is called with the request duration in // function panics otherwise. For the "method" label a predefined default label
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the // value set is used to filter given values. Values besides predefined values
// respective instance label names are present in the ObserverVec. For // will count as `unknown` method.`WithExtraMethods` can be used to add more
// unpartitioned observations, use an ObserverVec with zero labels. Note that // methods to the set. The Observe method of the Observer in the
// partitioning of Histograms is expensive and should be used judiciously. // ObserverVec is called with the request duration in seconds. Partitioning
// happens by HTTP status code and/or HTTP method if the respective instance
// label names are present in the ObserverVec. For unpartitioned observations,
// use an ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
// //
// If the wrapped Handler panics before calling WriteHeader, no value is // If the wrapped Handler panics before calling WriteHeader, no value is
// reported. // reported.
@@ -126,35 +147,48 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
// if used with Go1.9+. // if used with Go1.9+.
// //
// See the example for InstrumentHandlerDuration for example usage. // See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs) code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now() now := time.Now()
d := newDelegator(w, func(status int) { d := newDelegator(w, func(status int) {
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
}) })
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
}) })
} }
// InstrumentHandlerRequestSize is a middleware that wraps the provided // InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec. The // http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For // ObserverVec must have valid metric and label names and must have zero, one,
// those, the only allowed label names are "code" and "method". The function // or two non-const non-curried labels. For those, the only allowed label names
// panics otherwise. The Observe method of the Observer in the ObserverVec is // are "code" and "method". The function panics otherwise. For the "method"
// called with the request size in bytes. Partitioning happens by HTTP status // label a predefined default label value set is used to filter given values.
// code and/or HTTP method if the respective instance label names are present in // Values besides predefined values will count as `unknown` method.
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero // `WithExtraMethods` can be used to add more methods to the set. The Observe
// labels. Note that partitioning of Histograms is expensive and should be used // method of the Observer in the ObserverVec is called with the request size in
// judiciously. // bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
// //
// If the wrapped Handler does not set a status code, a status code of 200 is assumed. // If the wrapped Handler does not set a status code, a status code of 200 is assumed.
// //
// If the wrapped Handler panics, no values are reported. // If the wrapped Handler panics, no values are reported.
// //
// See the example for InstrumentHandlerDuration for example usage. // See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
mwOpts := &option{}
for _, o := range opts {
o(mwOpts)
}
code, method := checkLabels(obs) code, method := checkLabels(obs)
if code { if code {
@@ -162,42 +196,56 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
d := newDelegator(w, nil) d := newDelegator(w, nil)
next.ServeHTTP(d, r) next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r) size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
}) })
} }
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r) next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r) size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
}) })
} }
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec. The
-// ObserverVec must have zero, one, or two non-const non-curried labels. For
-// those, the only allowed label names are "code" and "method". The function
-// panics otherwise. The Observe method of the Observer in the ObserverVec is
-// called with the response size in bytes. Partitioning happens by HTTP status
-// code and/or HTTP method if the respective instance label names are present in
-// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
-// labels. Note that partitioning of Histograms is expensive and should be used
-// judiciously.
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the response size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
	mwOpts := &option{}
	for _, o := range opts {
		o(mwOpts)
	}
	code, method := checkLabels(obs)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		d := newDelegator(w, nil)
		next.ServeHTTP(d, r)
-		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
		obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
	})
}
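Usage note: a minimal sketch of wiring this middleware into a server; the metric name, bucket layout, and handler are invented for illustration and not part of the diff above.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// HistogramVec satisfies ObserverVec; "code" and "method" are the only allowed label names.
	respSize := promauto.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_response_size_bytes",
		Help:    "Response sizes in bytes.",
		Buckets: prometheus.ExponentialBuckets(100, 10, 6),
	}, []string{"code", "method"})

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	http.Handle("/api", promhttp.InstrumentHandlerResponseSize(respSize, api))
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}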
// checkLabels returns whether the provided Collector has a non-const,
// non-curried label named "code" and/or "method". It panics if the provided
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
func checkLabels(c prometheus.Collector) (code bool, method bool) {
	// TODO(beorn7): Remove this hacky way to check for instance labels
	// once Descriptors can have their dimensionality queried.
@@ -225,6 +273,10 @@ func checkLabels(c prometheus.Collector) (code bool, method bool) {
	close(descc)
	// Make sure the Collector has a valid Desc by registering it with a
	// temporary registry.
	prometheus.NewRegistry().MustRegister(c)
	// Create a ConstMetric with the Desc. Since we don't know how many
	// variable labels there are, try for as long as it needs.
	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
@@ -279,7 +331,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
-func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
	if !(code || method) {
		return emptyLabels
	}
@@ -289,7 +341,7 @@ func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
		labels["code"] = sanitizeCode(status)
	}
	if method {
-		labels["method"] = sanitizeMethod(reqMethod)
		labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
	}
	return labels
@@ -319,7 +371,12 @@ func computeApproximateRequestSize(r *http.Request) int {
	return s
}
-func sanitizeMethod(m string) string {
// If the wrapped http.Handler has a known method, it will be sanitized and returned.
// Otherwise, "unknown" will be returned. The known method list can be extended
// as needed by using extraMethods parameter.
func sanitizeMethod(m string, extraMethods ...string) string {
	// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
	// the methods chosen as default.
	switch m {
	case "GET", "get":
		return "get"
@@ -337,15 +394,25 @@ func sanitizeMethod(m string) string {
		return "options"
	case "NOTIFY", "notify":
		return "notify"
	case "TRACE", "trace":
		return "trace"
	case "PATCH", "patch":
		return "patch"
	default:
-		return strings.ToLower(m)
		for _, method := range extraMethods {
			if strings.EqualFold(m, method) {
				return strings.ToLower(m)
			}
		}
		return "unknown"
	}
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
-// currently 0, santizeCode will return 200, for consistency with behavior in
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
	// See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
	switch s {
	case 100:
		return "100"
@@ -442,6 +509,9 @@ func sanitizeCode(s int) string {
		return "511"
	default:
-		return strconv.Itoa(s)
		if s >= 100 && s <= 599 {
			return strconv.Itoa(s)
		}
		return "unknown"
	}
}


@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -11,19 +11,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build go1.12
-package prometheus
-import "runtime/debug"
-// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
-func readBuildInfo() (path, version, sum string) {
-	path, version, sum = "unknown", "unknown", "unknown"
-	if bi, ok := debug.ReadBuildInfo(); ok {
-		path = bi.Main.Path
-		version = bi.Main.Version
-		sum = bi.Main.Sum
-	}
-	return
-}
package promhttp
// Option are used to configure a middleware or round tripper..
type Option func(*option)
type option struct {
	extraMethods []string
}
// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
//
// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
func WithExtraMethods(methods ...string) Option {
	return func(o *option) {
		o.extraMethods = methods
	}
}
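Usage note: a minimal sketch of the new WithExtraMethods option; the metric name, path, and extra methods are illustrative only, not taken from the diff.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reqs := promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "webdav_requests_total",
		Help: "WebDAV requests by code and method.",
	}, []string{"code", "method"})

	dav := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Without this option, non-default methods such as MKCOL would be labelled "unknown".
	http.Handle("/dav/", promhttp.InstrumentHandlerCounter(reqs, dav,
		promhttp.WithExtraMethods("MKCOL", "COPY", "MOVE")))
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}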


@@ -26,7 +26,7 @@ import (
	"unicode/utf8"
	"github.com/cespare/xxhash/v2"
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
	"github.com/golang/protobuf/proto"
	"github.com/prometheus/common/expfmt"
@@ -215,6 +215,8 @@ func (err AlreadyRegisteredError) Error() string {
// by a Gatherer to report multiple errors during MetricFamily gathering.
type MultiError []error
// Error formats the contained errors as a bullet point list, preceded by the
// total number of errors. Note that this results in a multi-line string.
func (errs MultiError) Error() string {
	if len(errs) == 0 {
		return ""


@@ -23,7 +23,7 @@ import (
	"time"
	"github.com/beorn7/perks/quantile"
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
@@ -55,7 +55,12 @@ type Summary interface {
	Metric
	Collector
-	// Observe adds a single observation to the summary.
	// Observe adds a single observation to the summary. Observations are
	// usually positive or zero. Negative observations are accepted but
	// prevent current versions of Prometheus from properly detecting
	// counter resets in the sum of observations. See
	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
	// for details.
	Observe(float64)
}
@@ -110,7 +115,7 @@ type SummaryOpts struct {
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
-	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
	ConstLabels Labels
	// Objectives defines the quantile rank estimates with their respective
@@ -121,7 +126,9 @@ type SummaryOpts struct {
	Objectives map[float64]float64
	// MaxAge defines the duration for which an observation stays relevant
-	// for the summary. Must be positive. The default value is DefMaxAge.
	// for the summary. Only applies to pre-calculated quantiles, does not
	// apply to _sum and _count. Must be positive. The default value is
	// DefMaxAge.
	MaxAge time.Duration
	// AgeBuckets is the number of buckets used to exclude observations that
@@ -208,7 +215,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
		// Use the lock-free implementation of a Summary without objectives.
		s := &noObjectivesSummary{
			desc: desc,
-			labelPairs: makeLabelPairs(desc, labelValues),
			labelPairs: MakeLabelPairs(desc, labelValues),
			counts: [2]*summaryCounts{{}, {}},
		}
		s.init(s) // Init self-collection.
@@ -221,7 +228,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
		objectives: opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
-		labelPairs: makeLabelPairs(desc, labelValues),
		labelPairs: MakeLabelPairs(desc, labelValues),
		hotBuf: make([]float64, 0, opts.BufCap),
		coldBuf: make([]float64, 0, opts.BufCap),
@@ -513,7 +520,7 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
-	*metricVec
	*MetricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
@@ -535,14 +542,14 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
		opts.ConstLabels,
	)
	return &SummaryVec{
-		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
			return newSummary(desc, opts, lvs...)
		}),
	}
}
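Usage note: a minimal sketch of the SummaryOpts fields touched above (Objectives, MaxAge); the metric and label names are invented for illustration.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:       "rpc_duration_seconds",
		Help:       "RPC latency distribution.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		MaxAge:     5 * time.Minute, // per the updated comment, affects only the quantiles, not _sum/_count
	}, []string{"method"})
	prometheus.MustRegister(latency)

	latency.WithLabelValues("get_user").Observe(0.042)
}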
// GetMetricWithLabelValues returns the Summary for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Summary is created.
//
// It is possible to call this method without using the returned Summary to only
@@ -557,7 +564,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// example.
//
// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc (minus any curried labels).
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
@@ -566,7 +573,7 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
-	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
	if metric != nil {
		return metric.(Observer), err
	}
@@ -574,19 +581,19 @@ func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
}
// GetMetricWith returns the Summary for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Summary is created. Implications of
// creating a Summary without using it and keeping the Summary for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc (minus any curried labels).
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
-	metric, err := v.metricVec.getMetricWith(labels)
	metric, err := v.MetricVec.GetMetricWith(labels)
	if metric != nil {
		return metric.(Observer), err
	}
@@ -630,7 +637,7 @@ func (v *SummaryVec) With(labels Labels) Observer {
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
-	vec, err := v.curryWith(labels)
	vec, err := v.MetricVec.CurryWith(labels)
	if vec != nil {
		return &SummaryVec{vec}, err
	}
@@ -716,7 +723,7 @@ func NewConstSummary(
		count: count,
		sum: sum,
		quantiles: quantiles,
-		labelPairs: makeLabelPairs(desc, labelValues),
		labelPairs: MakeLabelPairs(desc, labelValues),
	}, nil
}


@@ -19,7 +19,7 @@ import (
	"time"
	"unicode/utf8"
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
@@ -63,7 +63,7 @@ func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *val
		desc: desc,
		valType: valueType,
		function: function,
-		labelPairs: makeLabelPairs(desc, nil),
		labelPairs: MakeLabelPairs(desc, nil),
	}
	result.init(result)
	return result
@@ -95,7 +95,7 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
		desc: desc,
		valType: valueType,
		val: value,
-		labelPairs: makeLabelPairs(desc, labelValues),
		labelPairs: MakeLabelPairs(desc, labelValues),
	}, nil
}
@@ -145,7 +145,14 @@ func populateMetric(
	return nil
}
-func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
// variable and constant labels in the provided Desc. The values for the
// variable labels are defined by the labelValues slice, which must be in the
// same order as the corresponding variable labels in the Desc.
//
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
	totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
	if totalLen == 0 {
		// Super fast path.
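Usage note: a short sketch of calling the now-exported MakeLabelPairs directly; the Desc, metric name, and label values are made up for illustration.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"queue_depth",                      // fully-qualified metric name
		"Current depth of a work queue.",   // help text
		[]string{"queue"},                  // variable labels
		prometheus.Labels{"shard": "eu-1"}, // const labels
	)
	// Values must be given in the same order as the variable labels above.
	pairs := prometheus.MakeLabelPairs(desc, []string{"ingest"})
	fmt.Println(len(pairs)) // one variable plus one const label pair
}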


@@ -20,12 +20,20 @@ import (
	"github.com/prometheus/common/model"
)
-// metricVec is a Collector to bundle metrics of the same name that differ in
-// their label values. metricVec is not used directly (and therefore
-// unexported). It is used as a building block for implementations of vectors of
-// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
-// It also handles label currying.
-type metricVec struct {
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
// used for custom Metric implementations.
//
// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
// FooVec and initialize it with NewMetricVec. Implement wrappers for
// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
// add the convenience methods WithLabelValues, With, and MustCurryWith, which
// panic instead of returning errors. See also the MetricVec example.
type MetricVec struct {
	*metricMap
	curry []curriedLabelValue
@@ -35,9 +43,9 @@ type metricVec struct {
	hashAddByte func(h uint64, b byte) uint64
}
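Usage note: a hypothetical FooVec sketch making the embedding pattern described in the comment above concrete. Foo, FooVec, and the metric name are invented; a real implementation would build a proper custom Metric in the callback rather than wrapping a const metric.

package foovec

import "github.com/prometheus/client_golang/prometheus"

// Foo is a stand-in custom Metric; it simply wraps an existing Metric so the sketch stays short.
type Foo struct{ prometheus.Metric }

// FooVec bundles Foo metrics that differ only in their label values.
type FooVec struct {
	*prometheus.MetricVec
}

func NewFooVec(labelNames []string) *FooVec {
	desc := prometheus.NewDesc("foo_total", "A custom metric.", labelNames, nil)
	return &FooVec{
		MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric {
			return &Foo{prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 1, lvs...)}
		}),
	}
}

// GetMetricWithLabelValues narrows the return type to *Foo, as the comment recommends.
func (v *FooVec) GetMetricWithLabelValues(lvs ...string) (*Foo, error) {
	m, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
	if m != nil {
		return m.(*Foo), err
	}
	return nil, err
}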
-// newMetricVec returns an initialized metricVec.
// NewMetricVec returns an initialized metricVec.
-func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
-	return &metricVec{
func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
	return &MetricVec{
		metricMap: &metricMap{
			metrics: map[uint64][]metricWithLabelValues{},
			desc: desc,
@@ -63,7 +71,7 @@ func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
-func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
	h, err := m.hashLabelValues(lvs)
	if err != nil {
		return false
@@ -82,7 +90,7 @@ func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
-func (m *metricVec) Delete(labels Labels) bool {
func (m *MetricVec) Delete(labels Labels) bool {
	h, err := m.hashLabels(labels)
	if err != nil {
		return false
@@ -95,15 +103,32 @@ func (m *metricVec) Delete(labels Labels) bool {
// show up in GoDoc.
// Describe implements Collector.
-func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
// Collect implements Collector.
-func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
// Reset deletes all metrics in this vector.
-func (m *metricVec) Reset() { m.metricMap.Reset() }
func (m *MetricVec) Reset() { m.metricMap.Reset() }
-func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the MetricVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
//
// Note that CurryWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
	var (
		newCurry []curriedLabelValue
		oldCurry = m.curry
@@ -128,7 +153,7 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
	}
-	return &metricVec{
	return &MetricVec{
		metricMap: m.metricMap,
		curry: newCurry,
		hashAdd: m.hashAdd,
@@ -136,7 +161,34 @@ func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
	}, nil
}
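Usage note: a small sketch of the currying behaviour documented above, via one of the concrete vector types; the metric and label names are illustrative.

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reqs := promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total HTTP requests.",
	}, []string{"handler", "code", "method"})

	// Pre-set the "handler" label; the curried vector shares its metrics with reqs.
	apiReqs := reqs.MustCurryWith(prometheus.Labels{"handler": "api"})
	apiReqs.WithLabelValues("200", "get").Inc() // remaining labels keep their order: code, method
}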
-func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
// GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created (by
// calling the newMetric function provided during construction of the
// MetricVec).
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it in its initial state.
//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the
// Metric will still exist, but it will not be exported anymore, even if a
// Metric with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
//
// Note that GetMetricWithLabelValues is usually not called directly but through
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
	h, err := m.hashLabelValues(lvs)
	if err != nil {
		return nil, err
@@ -145,7 +197,23 @@ func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
	return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
-func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
// GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of
// creating a Metric without using it and keeping the Metric for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
//
// Note that GetMetricWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
	h, err := m.hashLabels(labels)
	if err != nil {
		return nil, err
@@ -154,7 +222,7 @@ func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
	return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
-func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
	if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
		return 0, err
	}
@@ -177,7 +245,7 @@ func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
	return h, nil
}
-func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
	if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
		return 0, err
	}
@@ -276,7 +344,9 @@ func (m *metricMap) deleteByHashWithLabelValues(
	}
	if len(metrics) > 1 {
		old := metrics
		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
		old[len(old)-1] = metricWithLabelValues{}
	} else {
		delete(m.metrics, h)
	}
@@ -302,7 +372,9 @@ func (m *metricMap) deleteByHashWithLabels(
	}
	if len(metrics) > 1 {
		old := metrics
		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
		old[len(old)-1] = metricWithLabelValues{}
	} else {
		delete(m.metrics, h)
	}


@@ -17,7 +17,7 @@ import (
	"fmt"
	"sort"
-	//lint:ignore SA1019 Need to keep deprecated package for compatibility.
	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
@@ -32,7 +32,9 @@ import (
// in a no-op Registerer.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
-// Collectors. It should not be used to add fixed labels to all metrics exposed.
// Collectors. It should not be used to add fixed labels to all metrics
// exposed. See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
//
// Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be


@@ -299,6 +299,17 @@ func (p *TextParser) startLabelName() stateFn {
		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
		return nil
	}
	// Check for duplicate label names.
	labels := make(map[string]struct{})
	for _, l := range p.currentMetric.Label {
		lName := l.GetName()
		if _, exists := labels[lName]; !exists {
			labels[lName] = struct{}{}
		} else {
			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
			return nil
		}
	}
	return p.startLabelValue
}
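Usage note: a small sketch showing the new duplicate-label check from the exposition-format parser's point of view; the metric text is made up and the exact error wording may differ.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	var parser expfmt.TextParser
	_, err := parser.TextToMetricFamilies(strings.NewReader(
		"requests_total{path=\"/a\",path=\"/b\"} 1\n"))
	fmt.Println(err) // the duplicate "path" label is now rejected instead of silently accepted
}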


@@ -45,6 +45,14 @@ const (
	// scrape a target.
	MetricsPathLabel = "__metrics_path__"
	// ScrapeIntervalLabel is the name of the label that holds the scrape interval
	// used to scrape a target.
	ScrapeIntervalLabel = "__scrape_interval__"
	// ScrapeTimeoutLabel is the name of the label that holds the scrape
	// timeout used to scrape a target.
	ScrapeTimeoutLabel = "__scrape_timeout__"
	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
	// label names.
	ReservedLabelPrefix = "__"


@@ -14,6 +14,8 @@
package model
import (
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"regexp"
@@ -201,13 +203,23 @@ func ParseDuration(durationStr string) (Duration, error) {
	// Parse the match at pos `pos` in the regex and use `mult` to turn that
	// into ms, then add that value to the total parsed duration.
	var overflowErr error
	m := func(pos int, mult time.Duration) {
		if matches[pos] == "" {
			return
		}
		n, _ := strconv.Atoi(matches[pos])
		// Check if the provided duration overflows time.Duration (> ~ 290years).
		if n > int((1<<63-1)/mult/time.Millisecond) {
			overflowErr = errors.New("duration out of range")
		}
		d := time.Duration(n) * time.Millisecond
		dur += d * mult
		if dur < 0 {
			overflowErr = errors.New("duration out of range")
		}
	}
	m(2, 1000*60*60*24*365) // y
@@ -218,7 +230,7 @@ func ParseDuration(durationStr string) (Duration, error) {
	m(12, 1000) // s
	m(14, 1) // ms
-	return Duration(dur), nil
	return Duration(dur), overflowErr
}
func (d Duration) String() string {
@@ -254,6 +266,37 @@ func (d Duration) String() string {
	return r
}
// MarshalJSON implements the json.Marshaler interface.
func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(d.String())
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (d *Duration) UnmarshalJSON(bytes []byte) error {
	var s string
	if err := json.Unmarshal(bytes, &s); err != nil {
		return err
	}
	dur, err := ParseDuration(s)
	if err != nil {
		return err
	}
	*d = dur
	return nil
}
// MarshalText implements the encoding.TextMarshaler interface.
func (d *Duration) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
func (d *Duration) UnmarshalText(text []byte) error {
	var err error
	*d, err = ParseDuration(string(text))
	return err
}
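Usage note: with the JSON (un)marshalers above, model.Duration can be used directly in JSON configuration structs; a minimal sketch, with the struct and field names invented.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

type scrapeConfig struct {
	Interval model.Duration `json:"interval"`
}

func main() {
	var cfg scrapeConfig
	if err := json.Unmarshal([]byte(`{"interval":"1h30m"}`), &cfg); err != nil {
		fmt.Println(err)
		return
	}
	out, _ := json.Marshal(cfg)
	fmt.Println(string(out)) // re-serialized via Duration.String()
}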
// MarshalYAML implements the yaml.Marshaler interface.
func (d Duration) MarshalYAML() (interface{}, error) {
	return d.String(), nil


@@ -78,7 +78,7 @@ ifneq ($(shell which gotestsum),)
endif
endif
-PROMU_VERSION ?= 0.5.0
PROMU_VERSION ?= 0.7.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
GOLANGCI_LINT :=
@@ -245,10 +245,12 @@ common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:

vendor/github.com/prometheus/procfs/SECURITY.md generated vendored Normal file

@@ -0,0 +1,6 @@
# Reporting a security issue
The Prometheus security policy, including how to report vulnerabilities, can be
found here:
https://prometheus.io/docs/operating/security/


@@ -36,7 +36,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
	data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
	if err != nil {
-		return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err)
		return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
	}
	return parseARPEntries(data)
@@ -59,7 +59,7 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
	} else if width == expectedDataWidth {
		entry, err := parseARPEntry(columns)
		if err != nil {
-			return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err)
			return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
		}
		entries = append(entries, entry)
	} else {


@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
	for i := 0; i < arraySize; i++ {
		sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
		if err != nil {
-			return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
			return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
		}
	}
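Usage note: because these procfs errors are now wrapped with %w instead of formatted with %s, callers can inspect the underlying cause; a minimal sketch (the mount point and the specific check are illustrative).

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		fmt.Println(err)
		return
	}
	_, err = fs.GatherARPEntries()
	// The wrapped cause is reachable through errors.Is / errors.As.
	if errors.Is(err, os.ErrNotExist) {
		fmt.Println("net/arp is not available on this system")
	}
}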


@@ -19,6 +19,7 @@ import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"
@@ -77,7 +78,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
	// find the first "processor" line
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	v, err := strconv.ParseUint(field[1], 0, 32)
@@ -192,7 +193,7 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
	firstLine := firstNonEmptyLine(scanner)
	match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
	if !match || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	cpuinfo := []CPUInfo{}
@@ -256,7 +257,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	cpuinfo := []CPUInfo{}
@@ -281,7 +282,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
	if strings.HasPrefix(line, "processor") {
		match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
		if len(match) < 2 {
-			return nil, errors.New("Invalid line found in cpuinfo: " + line)
			return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
		}
		cpu := commonCPUInfo
		v, err := strconv.ParseUint(match[1], 0, 32)
@@ -313,6 +314,22 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
			return nil, err
		}
		cpuinfo[i].CPUMHz = v
	case "physical id":
		cpuinfo[i].PhysicalID = field[1]
	case "core id":
		cpuinfo[i].CoreID = field[1]
	case "cpu cores":
		v, err := strconv.ParseUint(field[1], 0, 32)
		if err != nil {
			return nil, err
		}
		cpuinfo[i].CPUCores = uint(v)
	case "siblings":
		v, err := strconv.ParseUint(field[1], 0, 32)
		if err != nil {
			return nil, err
		}
		cpuinfo[i].Siblings = uint(v)
	}
}
@@ -325,7 +342,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
	// find the first "processor" line
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	cpuinfo := []CPUInfo{}
@@ -367,7 +384,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	v, err := strconv.ParseUint(field[1], 0, 32)
@@ -412,7 +429,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
	firstLine := firstNonEmptyLine(scanner)
	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
-		return nil, errors.New("invalid cpuinfo file: " + firstLine)
		return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
	}
	field := strings.SplitN(firstLine, ": ", 2)
	v, err := strconv.ParseUint(field[1], 0, 32)


@@ -1,4 +1,4 @@
-// Copyright 2019 The Prometheus Authors
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -11,12 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build !go1.12
-package prometheus
-// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
-// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
-func readBuildInfo() (path, version, sum string) {
-	return "unknown", "unknown", "unknown"
-}
// +build linux
// +build riscv riscv64
package procfs
var parseCPUInfo = parseCPUInfoRISCV


@@ -55,12 +55,12 @@ func (fs FS) Crypto() ([]Crypto, error) {
	path := fs.proc.Path("crypto")
	b, err := util.ReadFileNoStat(path)
	if err != nil {
-		return nil, fmt.Errorf("error reading crypto %s: %s", path, err)
		return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
	}
	crypto, err := parseCrypto(bytes.NewReader(b))
	if err != nil {
-		return nil, fmt.Errorf("error parsing crypto %s: %s", path, err)
		return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
	}
	return crypto, nil


@@ -111,7 +111,7 @@ Max core file size 0 unlimited bytes
Max resident set unlimited unlimited bytes
Max processes 62898 62898 processes
Max open files 2048 4096 files
-Max locked memory 65536 65536 bytes
Max locked memory 18446744073708503040 18446744073708503040 bytes
Max address space 8589934592 unlimited bytes
Max file locks unlimited unlimited locks
Max pending signals 62898 62898 signals
@@ -1080,7 +1080,6 @@ internal : yes
type : skcipher
async : yes
blocksize : 1
-min keysize : 16
max keysize : 32
ivsize : 16
chunksize : 16
@@ -1839,6 +1838,7 @@ min keysize : 16
max keysize : 32
Mode: 444
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/diskstats
Lines: 52
@@ -2129,6 +2129,24 @@ Lines: 6
4 1FB3C 0 1282A8F 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/protocols
Lines: 14
protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em
PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y
UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n
UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y
NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
Mode: 444
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/proc/net/rpc
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
@@ -2186,10 +2204,25 @@ Lines: 1
00015c73 00020e76 F0000769 00000000
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/tcp
Lines: 4
sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/tcp6
Lines: 3
sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/net/udp
Lines: 4
sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
-0: 0A000005:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
Mode: 644
@@ -2292,6 +2325,312 @@ Mode: 644
Path: fixtures/proc/self
SymlinkTo: 26231
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/slabinfo
Lines: 302
slabinfo - version: 2.1
# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0
pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0
nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0
kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0
kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0
pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0
x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0
iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0
bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0
bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0
fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0
fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0
squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0
xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0
xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0
xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0
nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0
nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0
reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0
btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0
ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0
ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0
ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0
ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0
ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0
ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0
ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0
jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0
jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0
jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0
jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0
jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0
mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0
dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0
kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0
io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0
dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0
scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0
virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0
RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0
UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0
UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0
tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0
TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0
uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0
sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0
bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0
mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0
isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0
io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0
aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0
dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0
posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0
iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0
iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0
UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0
ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0
ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0
ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0
ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0
PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0
UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0
tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0
request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0
TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0
hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0
dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0
eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0
inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0
scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0
request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0
blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0
bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0
biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0
biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0
biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0
biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0
ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0
uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0
audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0
sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0
skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0
skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0
configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0
file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0
file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0
fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0
net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0
task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0
taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0
proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0
pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0
proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0
seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0
sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0
kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0
kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0
mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0
inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0
dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0
names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0
hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0
lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0
key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0
uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0
mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0
fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0
files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0
signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0
sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0
task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0
cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0
anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0
anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0
pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0
Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0
Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0
Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0
Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0
trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0
ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0
pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0
radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0
task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0
dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0
kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0
kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0
kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0
kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0
kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0
kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0
kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0
kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0
kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0
kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0
kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0
kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0
kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0
kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0
kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0
kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/proc/stat
Lines: 16
cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
@@ -4639,6 +4978,35 @@ Mode: 644
Directory: fixtures/sys/devices/system
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/node
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/node/node1
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/devices/system/node/node1/vmstat
Lines: 6
nr_free_pages 1
nr_zone_inactive_anon 2
nr_zone_active_anon 3
nr_zone_inactive_file 4
nr_zone_active_file 5
nr_zone_unevictable 6
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/node/node2
Mode: 755
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Path: fixtures/sys/devices/system/node/node2/vmstat
Lines: 6
nr_free_pages 7
nr_zone_inactive_anon 8
nr_zone_active_anon 9
nr_zone_inactive_file 10
nr_zone_active_file 11
nr_zone_unevictable 12
Mode: 644
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Directory: fixtures/sys/devices/system/clocksource
Mode: 775
# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -


@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
-return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %v", err)
+return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
}
return *m, nil


@@ -1,9 +0,0 @@
module github.com/prometheus/procfs
go 1.12
require (
github.com/google/go-cmp v0.3.1
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e
)


@@ -1,6 +0,0 @@
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e h1:LwyF2AFISC9nVbS6MgzsaQNSUsRXI49GS+YQ5KX/QH0=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=


@@ -39,10 +39,10 @@ type FS string
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
-return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
}
if !info.IsDir() {
-return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
}
return FS(mountPoint), nil


@@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
-return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %s", string(loadavgBytes))
+return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
-return nil, fmt.Errorf("could not parse load '%s': %s", load, err)
+return nil, fmt.Errorf("could not parse load %q: %w", load, err)
}
}
return &LoadAvg{


@@ -22,8 +22,9 @@ import (
)
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
)
// MDStat holds info parsed from /proc/mdstat.
@@ -44,6 +45,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
// Name of md component devices
Devices []string
}
// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
@@ -56,7 +59,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
-return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
+return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -82,10 +85,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
-return nil, fmt.Errorf(
-"error parsing %s: too few lines for md device",
-mdName,
-)
+return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
@@ -94,7 +94,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
-return nil, fmt.Errorf("error parsing md device lines: %s", err)
+return nil, fmt.Errorf("error parsing md device lines: %w", err)
}
syncLineIdx := i + 2
@@ -126,7 +126,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
-return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
+return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
}
}
}
@@ -140,6 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
Devices: evalComponentDevices(deviceFields),
})
}
@@ -151,7 +152,7 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e
sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
-return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -171,12 +172,12 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, e
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
-return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
-return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
+return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
}
return active, total, size, nil
@@ -190,8 +191,23 @@ func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
-return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
+return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
}
return syncedBlocks, nil
}
func evalComponentDevices(deviceFields []string) []string {
mdComponentDevices := make([]string, 0)
if len(deviceFields) > 3 {
for _, field := range deviceFields[4:] {
match := componentDeviceRE.FindStringSubmatch(field)
if match == nil {
continue
}
mdComponentDevices = append(mdComponentDevices, match[1])
}
}
return mdComponentDevices
}
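Note on the mdstat change above: each parsed MDStat entry now also reports its component devices via the new Devices field and the evalComponentDevices helper. A minimal, hedged usage sketch, assuming the public procfs API (procfs.NewFS and FS.MDStat, both visible in this diff) and a host /proc/mdstat; the Name field used below is part of the existing struct, not this change:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    // Sketch only: NewFS and MDStat are the entry points shown in this diff.
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    mdstats, err := fs.MDStat()
    if err != nil {
        log.Fatal(err)
    }
    for _, md := range mdstats {
        // Devices is the field added here: names of the md component devices.
        fmt.Printf("%s: %d/%d blocks synced, devices: %v\n",
            md.Name, md.BlocksSynced, md.BlocksTotal, md.Devices)
    }
}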


@@ -28,9 +28,9 @@ import (
type Meminfo struct {
// Total usable ram (i.e. physical ram minus a few reserved
// bits and the kernel binary code)
-MemTotal uint64
+MemTotal *uint64
// The sum of LowFree+HighFree
-MemFree uint64
+MemFree *uint64
// An estimate of how much memory is available for starting
// new applications, without swapping. Calculated from
// MemFree, SReclaimable, the size of the file LRU lists, and
@@ -39,59 +39,59 @@ type Meminfo struct {
// well, and that not all reclaimable slab will be
// reclaimable, due to items being in use. The impact of those
// factors will vary from system to system.
-MemAvailable uint64
+MemAvailable *uint64
// Relatively temporary storage for raw disk blocks shouldn't
// get tremendously large (20MB or so)
-Buffers uint64
+Buffers *uint64
-Cached uint64
+Cached *uint64
// Memory that once was swapped out, is swapped back in but
// still also is in the swapfile (if memory is needed it
// doesn't need to be swapped out AGAIN because it is already
// in the swapfile. This saves I/O)
-SwapCached uint64
+SwapCached *uint64
// Memory that has been used more recently and usually not
// reclaimed unless absolutely necessary.
-Active uint64
+Active *uint64
// Memory which has been less recently used. It is more
// eligible to be reclaimed for other purposes
-Inactive uint64
+Inactive *uint64
-ActiveAnon uint64
+ActiveAnon *uint64
-InactiveAnon uint64
+InactiveAnon *uint64
-ActiveFile uint64
+ActiveFile *uint64
-InactiveFile uint64
+InactiveFile *uint64
-Unevictable uint64
+Unevictable *uint64
-Mlocked uint64
+Mlocked *uint64
// total amount of swap space available
-SwapTotal uint64
+SwapTotal *uint64
// Memory which has been evicted from RAM, and is temporarily
// on the disk
-SwapFree uint64
+SwapFree *uint64
// Memory which is waiting to get written back to the disk
-Dirty uint64
+Dirty *uint64
// Memory which is actively being written back to the disk
-Writeback uint64
+Writeback *uint64
// Non-file backed pages mapped into userspace page tables
-AnonPages uint64
+AnonPages *uint64
// files which have been mapped, such as libraries
-Mapped uint64
+Mapped *uint64
-Shmem uint64
+Shmem *uint64
// in-kernel data structures cache
-Slab uint64
+Slab *uint64
// Part of Slab, that might be reclaimed, such as caches
-SReclaimable uint64
+SReclaimable *uint64
// Part of Slab, that cannot be reclaimed on memory pressure
-SUnreclaim uint64
+SUnreclaim *uint64
-KernelStack uint64
+KernelStack *uint64
// amount of memory dedicated to the lowest level of page
// tables.
-PageTables uint64
+PageTables *uint64
// NFS pages sent to the server, but not yet committed to
// stable storage
-NFSUnstable uint64
+NFSUnstable *uint64
// Memory used for block device "bounce buffers"
-Bounce uint64
+Bounce *uint64
// Memory used by FUSE for temporary writeback buffers
-WritebackTmp uint64
+WritebackTmp *uint64
// Based on the overcommit ratio ('vm.overcommit_ratio'),
// this is the total amount of memory currently available to
// be allocated on the system. This limit is only adhered to
@@ -105,7 +105,7 @@ type Meminfo struct {
// yield a CommitLimit of 7.3G.
// For more details, see the memory overcommit documentation
// in vm/overcommit-accounting.
-CommitLimit uint64
+CommitLimit *uint64
// The amount of memory presently allocated on the system.
// The committed memory is a sum of all of the memory which
// has been allocated by processes, even if it has not been
@@ -119,27 +119,27 @@ type Meminfo struct {
// This is useful if one needs to guarantee that processes will
// not fail due to lack of memory once that memory has been
// successfully allocated.
-CommittedAS uint64
+CommittedAS *uint64
// total size of vmalloc memory area
-VmallocTotal uint64
+VmallocTotal *uint64
// amount of vmalloc area which is used
-VmallocUsed uint64
+VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
-VmallocChunk uint64
+VmallocChunk *uint64
-HardwareCorrupted uint64
+HardwareCorrupted *uint64
-AnonHugePages uint64
+AnonHugePages *uint64
-ShmemHugePages uint64
+ShmemHugePages *uint64
-ShmemPmdMapped uint64
+ShmemPmdMapped *uint64
-CmaTotal uint64
+CmaTotal *uint64
-CmaFree uint64
+CmaFree *uint64
-HugePagesTotal uint64
+HugePagesTotal *uint64
-HugePagesFree uint64
+HugePagesFree *uint64
-HugePagesRsvd uint64
+HugePagesRsvd *uint64
-HugePagesSurp uint64
+HugePagesSurp *uint64
-Hugepagesize uint64
+Hugepagesize *uint64
-DirectMap4k uint64
+DirectMap4k *uint64
-DirectMap2M uint64
+DirectMap2M *uint64
-DirectMap1G uint64
+DirectMap1G *uint64
}
// Meminfo returns an information about current kernel/system memory statistics.
@@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
-return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err)
+return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
}
return *m, nil
@@ -175,101 +175,101 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
switch fields[0] {
case "MemTotal:":
-m.MemTotal = v
+m.MemTotal = &v
case "MemFree:":
-m.MemFree = v
+m.MemFree = &v
case "MemAvailable:":
-m.MemAvailable = v
+m.MemAvailable = &v
case "Buffers:":
-m.Buffers = v
+m.Buffers = &v
case "Cached:":
-m.Cached = v
+m.Cached = &v
case "SwapCached:":
-m.SwapCached = v
+m.SwapCached = &v
case "Active:":
-m.Active = v
+m.Active = &v
case "Inactive:":
-m.Inactive = v
+m.Inactive = &v
case "Active(anon):":
-m.ActiveAnon = v
+m.ActiveAnon = &v
case "Inactive(anon):":
-m.InactiveAnon = v
+m.InactiveAnon = &v
case "Active(file):":
-m.ActiveFile = v
+m.ActiveFile = &v
case "Inactive(file):":
-m.InactiveFile = v
+m.InactiveFile = &v
case "Unevictable:":
-m.Unevictable = v
+m.Unevictable = &v
case "Mlocked:":
-m.Mlocked = v
+m.Mlocked = &v
case "SwapTotal:":
-m.SwapTotal = v
+m.SwapTotal = &v
case "SwapFree:":
-m.SwapFree = v
+m.SwapFree = &v
case "Dirty:":
-m.Dirty = v
+m.Dirty = &v
case "Writeback:":
-m.Writeback = v
+m.Writeback = &v
case "AnonPages:":
-m.AnonPages = v
+m.AnonPages = &v
case "Mapped:":
-m.Mapped = v
+m.Mapped = &v
case "Shmem:":
-m.Shmem = v
+m.Shmem = &v
case "Slab:":
-m.Slab = v
+m.Slab = &v
case "SReclaimable:":
-m.SReclaimable = v
+m.SReclaimable = &v
case "SUnreclaim:":
-m.SUnreclaim = v
+m.SUnreclaim = &v
case "KernelStack:":
-m.KernelStack = v
+m.KernelStack = &v
case "PageTables:":
-m.PageTables = v
+m.PageTables = &v
case "NFS_Unstable:":
-m.NFSUnstable = v
+m.NFSUnstable = &v
case "Bounce:":
-m.Bounce = v
+m.Bounce = &v
case "WritebackTmp:":
-m.WritebackTmp = v
+m.WritebackTmp = &v
case "CommitLimit:":
-m.CommitLimit = v
+m.CommitLimit = &v
case "Committed_AS:":
-m.CommittedAS = v
+m.CommittedAS = &v
case "VmallocTotal:":
-m.VmallocTotal = v
+m.VmallocTotal = &v
case "VmallocUsed:":
-m.VmallocUsed = v
+m.VmallocUsed = &v
case "VmallocChunk:":
-m.VmallocChunk = v
+m.VmallocChunk = &v
case "HardwareCorrupted:":
-m.HardwareCorrupted = v
+m.HardwareCorrupted = &v
case "AnonHugePages:":
-m.AnonHugePages = v
+m.AnonHugePages = &v
case "ShmemHugePages:":
-m.ShmemHugePages = v
+m.ShmemHugePages = &v
case "ShmemPmdMapped:":
-m.ShmemPmdMapped = v
+m.ShmemPmdMapped = &v
case "CmaTotal:":
-m.CmaTotal = v
+m.CmaTotal = &v
case "CmaFree:":
-m.CmaFree = v
+m.CmaFree = &v
case "HugePages_Total:":
-m.HugePagesTotal = v
+m.HugePagesTotal = &v
case "HugePages_Free:":
-m.HugePagesFree = v
+m.HugePagesFree = &v
case "HugePages_Rsvd:":
-m.HugePagesRsvd = v
+m.HugePagesRsvd = &v
case "HugePages_Surp:":
-m.HugePagesSurp = v
+m.HugePagesSurp = &v
case "Hugepagesize:":
-m.Hugepagesize = v
+m.Hugepagesize = &v
case "DirectMap4k:":
-m.DirectMap4k = v
+m.DirectMap4k = &v
case "DirectMap2M:":
-m.DirectMap2M = v
+m.DirectMap2M = &v
case "DirectMap1G:":
-m.DirectMap1G = v
+m.DirectMap1G = &v
}
}
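Note on the Meminfo change above: every field switches from uint64 to *uint64, so a nil pointer now distinguishes "this line is absent from /proc/meminfo on this kernel" from a genuine zero value. Callers therefore need a nil check before dereferencing. A minimal sketch under that assumption, using the procfs.NewFS and FS.Meminfo entry points visible in this diff:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    mi, err := fs.Meminfo()
    if err != nil {
        log.Fatal(err)
    }
    // After this change a nil pointer means the line was missing from
    // /proc/meminfo, so dereference only after a nil check.
    if mi.MemTotal != nil && mi.MemAvailable != nil {
        fmt.Printf("MemTotal: %d kB, MemAvailable: %d kB\n", *mi.MemTotal, *mi.MemAvailable)
    } else {
        fmt.Println("MemTotal or MemAvailable not reported by this kernel")
    }
}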


@@ -338,12 +338,12 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
if len(ss) == 0 {
break
}
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
switch ss[0] {
case fieldOpts:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
if stats.Opts == nil {
stats.Opts = map[string]string{}
}
@@ -356,6 +356,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
}
case fieldAge:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
if err != nil {
@@ -364,6 +367,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d
case fieldBytes:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
return nil, err
@@ -371,6 +377,9 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats
case fieldEvents:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
}
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
return nil, err


@@ -55,7 +55,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
-return nil, fmt.Errorf("failed to read conntrack stats from %q: %v", path, err)
+return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
}
return stat, nil
@@ -147,7 +147,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
-return 0, fmt.Errorf("couldn't parse \"%s\" field: %s", field, err)
+return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
}
return val, err
}

220
vendor/github.com/prometheus/procfs/net_ip_socket.go generated vendored Normal file

@@ -0,0 +1,220 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
)
const (
// readLimit is used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
// With e.g. 150 Byte per line and the maximum number of 65535,
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
readLimit = 4294967296 // Byte -> 4 GiB
)
// this contains generic data structures for both udp and tcp sockets
type (
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
NetIPSocket []*netIPSocketLine
// NetIPSocketSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetIPSocket it does not collect
// the parsed lines into a slice.
NetIPSocketSummary struct {
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
TxQueueLength uint64
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
RxQueueLength uint64
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
LocalAddr net.IP
LocalPort uint64
RemAddr net.IP
RemPort uint64
St uint64
TxQueue uint64
RxQueue uint64
UID uint64
}
)
func newNetIPSocket(file string) (NetIPSocket, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var netIPSocket NetIPSocket
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
if err != nil {
return nil, err
}
netIPSocket = append(netIPSocket, line)
}
if err := s.Err(); err != nil {
return nil, err
}
return netIPSocket, nil
}
// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file.
func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetIPSocketLine(fields)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
}
if err := s.Err(); err != nil {
return nil, err
}
return &netIPSocketSummary, nil
}
// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order.
func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
}
switch len(byteIP) {
case 4:
return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
case 16:
i := net.IP{
byteIP[3], byteIP[2], byteIP[1], byteIP[0],
byteIP[7], byteIP[6], byteIP[5], byteIP[4],
byteIP[11], byteIP[10], byteIP[9], byteIP[8],
byteIP[15], byteIP[14], byteIP[13], byteIP[12],
}
return i, nil
default:
return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 8 {
return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 8 columns %q",
strings.Join(fields, " "),
)
}
var err error // parse error
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
"cannot parse tx/rx queues in socket line as it has a missing colon %q",
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
}
return line, nil
}
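Note on parseIP above: the kernel renders socket addresses in /proc/net/{tcp,udp}{,6} as hex in host byte order, so on the usual little-endian machines the bytes of an IPv4 address come out reversed. The helper below, decodeIPv4, is a hypothetical standalone illustration (not part of the vendored file) that mirrors the 4-byte branch of parseIP:

package main

import (
    "encoding/hex"
    "fmt"
    "log"
    "net"
)

// decodeIPv4 mirrors the len(byteIP) == 4 branch of parseIP: decode the hex
// string, then reverse the bytes to undo the host-endian rendering.
func decodeIPv4(hexIP string) (net.IP, error) {
    b, err := hex.DecodeString(hexIP)
    if err != nil || len(b) != 4 {
        return nil, fmt.Errorf("cannot decode %q as an IPv4 address", hexIP)
    }
    return net.IP{b[3], b[2], b[1], b[0]}, nil
}

func main() {
    // "0100007F" is how 127.0.0.1 typically appears in /proc/net/tcp on x86.
    ip, err := decodeIPv4("0100007F")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(ip) // prints 127.0.0.1
}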

180
vendor/github.com/prometheus/procfs/net_protocols.go generated vendored Normal file

@@ -0,0 +1,180 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// NetProtocolStats stores the contents from /proc/net/protocols
type NetProtocolStats map[string]NetProtocolStatLine
// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
// only care about the first six columns as the rest are not likely to change
// and only serve to provide a set of capabilities for each protocol.
type NetProtocolStatLine struct {
Name string // 0 The name of the protocol
Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
Sockets int64 // 2 Number of sockets in use by this protocol
Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol
Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure.
MaxHeader uint64 // 5 Protocol specific max header size
Slab bool // 6 Indicates whether or not memory is allocated from the SLAB
ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
Capabilities NetProtocolCapabilities
}
// NetProtocolCapabilities contains a list of capabilities for each protocol
type NetProtocolCapabilities struct {
Close bool // 8
Connect bool // 9
Disconnect bool // 10
Accept bool // 11
IoCtl bool // 12
Init bool // 13
Destroy bool // 14
Shutdown bool // 15
SetSockOpt bool // 16
GetSockOpt bool // 17
SendMsg bool // 18
RecvMsg bool // 19
SendPage bool // 20
Bind bool // 21
BacklogRcv bool // 22
Hash bool // 23
UnHash bool // 24
GetPort bool // 25
EnterMemoryPressure bool // 26
}
// NetProtocols reads stats from /proc/net/protocols and returns a map of
// PortocolStatLine entries. As of this writing no official Linux Documentation
// exists, however the source is fairly self-explanatory and the format seems
// stable since its introduction in 2.6.12-rc2
// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
func (fs FS) NetProtocols() (NetProtocolStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
if err != nil {
return NetProtocolStats{}, err
}
return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
}
func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
nps := NetProtocolStats{}
// Skip the header line
s.Scan()
for s.Scan() {
line, err := nps.parseLine(s.Text())
if err != nil {
return NetProtocolStats{}, err
}
nps[line.Name] = *line
}
return nps, nil
}
func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
var err error
const enabled = "yes"
const disabled = "no"
fields := strings.Fields(rawLine)
line.Name = fields[0]
line.Size, err = strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return nil, err
}
line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
if err != nil {
return nil, err
}
line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
if err != nil {
return nil, err
}
if fields[4] == enabled {
line.Pressure = 1
} else if fields[4] == disabled {
line.Pressure = 0
} else {
line.Pressure = -1
}
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
if fields[6] == enabled {
line.Slab = true
} else if fields[6] == disabled {
line.Slab = false
} else {
return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
}
line.ModuleName = fields[7]
err = line.Capabilities.parseCapabilities(fields[8:])
if err != nil {
return nil, err
}
return line, nil
}
func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
// The capabilities are all bools so we can loop over to map them
capabilityFields := [...]*bool{
&pc.Close,
&pc.Connect,
&pc.Disconnect,
&pc.Accept,
&pc.IoCtl,
&pc.Init,
&pc.Destroy,
&pc.Shutdown,
&pc.SetSockOpt,
&pc.GetSockOpt,
&pc.SendMsg,
&pc.RecvMsg,
&pc.SendPage,
&pc.Bind,
&pc.BacklogRcv,
&pc.Hash,
&pc.UnHash,
&pc.GetPort,
&pc.EnterMemoryPressure,
}
for i := 0; i < len(capabilities); i++ {
if capabilities[i] == "y" {
*capabilityFields[i] = true
} else if capabilities[i] == "n" {
*capabilityFields[i] = false
} else {
return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
}
}
return nil
}
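Note on the new net_protocols.go above: a short, hedged sketch of consuming FS.NetProtocols directly (assuming the public procfs package is used on its own rather than through the exporter wiring):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    // NetProtocols returns a map keyed by protocol name (TCP, UDP, UNIX, ...).
    protos, err := fs.NetProtocols()
    if err != nil {
        log.Fatal(err)
    }
    for name, line := range protos {
        // Memory is counted in 4KB pages, per the field comment above.
        fmt.Printf("%-12s sockets=%d memory=%d pages slab=%t\n",
            name, line.Sockets, line.Memory, line.Slab)
    }
}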


@@ -70,7 +70,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
-return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err)
+return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
}
return stat, nil
@@ -90,7 +90,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
-return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err)
+return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.


@@ -51,7 +51,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
-return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %v", err)
+return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
}
return entries, nil

64
vendor/github.com/prometheus/procfs/net_tcp.go generated vendored Normal file

@@ -0,0 +1,64 @@
// Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
type (
// NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
NetTCP []*netIPSocketLine
// NetTCPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetTCP it does not collect
// the parsed lines into a slice.
NetTCPSummary NetIPSocketSummary
)
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
func newNetTCP(file string) (NetTCP, error) {
n, err := newNetIPSocket(file)
n1 := NetTCP(n)
return n1, err
}
func newNetTCPSummary(file string) (*NetTCPSummary, error) {
n, err := newNetIPSocketSummary(file)
if n == nil {
return nil, err
}
n1 := NetTCPSummary(*n)
return &n1, err
}
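Note on the new net_tcp.go above: NetTCP and NetTCPSummary are thin wrappers over the shared netIPSocketLine parser, so a caller only ever sees per-line or aggregate results. A hedged usage sketch of the summary path:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        log.Fatal(err)
    }
    // NetTCPSummary aggregates /proc/net/tcp instead of returning every socket line.
    summary, err := fs.NetTCPSummary()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("used sockets: %d, tx queued: %d, rx queued: %d\n",
        summary.UsedSockets, summary.TxQueueLength, summary.RxQueueLength)
}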


@@ -13,58 +13,14 @@
package procfs
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"strings"
)
const (
// readLimit is used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
// With e.g. 150 Byte per line and the maximum number of 65535,
// the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP.
readLimit = 4294967296 // Byte -> 4 GiB
)
type (
// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
-NetUDP []*netUDPLine
+NetUDP []*netIPSocketLine
// NetUDPSummary provides already computed values like the total queue lengths or
// the total number of used sockets. In contrast to NetUDP it does not collect
// the parsed lines into a slice.
-NetUDPSummary struct {
+NetUDPSummary NetIPSocketSummary
// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
TxQueueLength uint64
// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
RxQueueLength uint64
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
}
// netUDPLine represents the fields parsed from a single line
// in /proc/net/udp{,6}. Fields which are not used by UDP are skipped.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netUDPLine struct {
Sl uint64
LocalAddr net.IP
LocalPort uint64
RemAddr net.IP
RemPort uint64
St uint64
TxQueue uint64
RxQueue uint64
UID uint64
}
)
// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
@@ -93,137 +49,16 @@ func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
func newNetUDP(file string) (NetUDP, error) {
-f, err := os.Open(file)
-if err != nil {
-return nil, err
+n, err := newNetIPSocket(file)
+n1 := NetUDP(n)
+return n1, err
}
defer f.Close()
netUDP := NetUDP{}
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetUDPLine(fields)
if err != nil {
return nil, err
}
netUDP = append(netUDP, line)
}
if err := s.Err(); err != nil {
return nil, err
}
return netUDP, nil
}
// newNetUDPSummary creates a new NetUDP{,6} from the contents of the given file.
func newNetUDPSummary(file string) (*NetUDPSummary, error) {
-f, err := os.Open(file)
-if err != nil {
+n, err := newNetIPSocketSummary(file)
+if n == nil {
return nil, err
}
-defer f.Close()
+n1 := NetUDPSummary(*n)
return &n1, err
netUDPSummary := &NetUDPSummary{}
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
line, err := parseNetUDPLine(fields)
if err != nil {
return nil, err
}
netUDPSummary.TxQueueLength += line.TxQueue
netUDPSummary.RxQueueLength += line.RxQueue
netUDPSummary.UsedSockets++
}
if err := s.Err(); err != nil {
return nil, err
}
return netUDPSummary, nil
}
// parseNetUDPLine parses a single line, represented by a list of fields.
func parseNetUDPLine(fields []string) (*netUDPLine, error) {
line := &netUDPLine{}
if len(fields) < 8 {
return nil, fmt.Errorf(
"cannot parse net udp socket line as it has less then 8 columns: %s",
strings.Join(fields, " "),
)
}
var err error // parse error
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
return nil, fmt.Errorf(
"cannot parse sl field in udp socket line: %s", fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in udp socket line: %s", err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
return nil, fmt.Errorf(
"cannot parse local_address field in udp socket line: %s", fields[1])
}
if line.LocalAddr, err = hex.DecodeString(l[0]); err != nil {
return nil, fmt.Errorf(
"cannot parse local_address value in udp socket line: %s", err)
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse local_address port value in udp socket line: %s", err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
return nil, fmt.Errorf(
"cannot parse rem_address field in udp socket line: %s", fields[1])
}
if line.RemAddr, err = hex.DecodeString(r[0]); err != nil {
return nil, fmt.Errorf(
"cannot parse rem_address value in udp socket line: %s", err)
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse rem_address port value in udp socket line: %s", err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse st value in udp socket line: %s", err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
"cannot parse tx/rx queues in udp socket line as it has a missing colon: %s",
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in udp socket line: %s", err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in udp socket line: %s", err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf(
"cannot parse uid value in udp socket line: %s", err)
}
return line, nil
}

Some files were not shown because too many files have changed in this diff.