Compare commits: 9.3.2.1-r2...master (48 commits)
| SHA1 |
|---|
| a587f0165b |
| 80c7ff0b11 |
| 1a635bc659 |
| 1811d74890 |
| 0cab6ed1c7 |
| e9b417c311 |
| 7f03dbbc14 |
| a06485ed93 |
| 674a66e710 |
| 6017357b9d |
| ae282e19ef |
| 19f0dd286d |
| 50ea9a9f85 |
| feaf18fcf1 |
| 2174531486 |
| e0081f3dbf |
| 4a86315749 |
| 882544647e |
| fbe8435884 |
| 3b5bce2a65 |
| d2eb9e8498 |
| d8ee2d131a |
| bbb056f38c |
| e657c5eb56 |
| 3599852fc1 |
| 05fe51d96d |
| 1f7334e3d1 |
| dfa8e1ba41 |
| 9a8582a7a9 |
| 57ffe4011d |
| 1d60dd7ce5 |
| dc71321648 |
| bc4f246a75 |
| cc619bdd86 |
| 784f03875e |
| 1b7dd14555 |
| 91603a08b0 |
| 1766663a78 |
| 8ca5f31853 |
| 447e1c57ce |
| 0e95c1ca9e |
| 7d093b4340 |
| 33566bed16 |
| e14ffb261a |
| 1d239647f4 |
| f10e2facf8 |
| bad1cfaa96 |
| bddb9bfd3a |
**.gitignore** (vendored; 2 lines changed)

```diff
@@ -6,7 +6,7 @@ test/docker/vendor
 test/kubernetes/vendor
 build
 coverage
-downloads
+#downloads
 incubating/mqipt/ms81*
 vendor/github.com/prometheus/client_model/bin/
 vendor/github.com/prometheus/client_model/.classpath
```
**.travis.yml** (16 lines changed)

```diff
@@ -18,7 +18,7 @@ sudo: required
 language: go
 
 go:
-- "1.18.9"
+- "1.19.9"
 
 services:
 - docker
@@ -42,7 +42,7 @@ jobs:
 name: "Basic AMD64 build"
 os: linux
 env:
-- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_932_ARCHIVE_REPOSITORY_DEV_AMD64
+- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_933_ARCHIVE_REPOSITORY_DEV_AMD64
 script: bash -e travis-build-scripts/run.sh
 
 # CD Build
@@ -58,8 +58,8 @@ jobs:
 os: linux
 env:
 - BUILD_ALL=true
-- MQ_ARCHIVE_REPOSITORY=$MQ_932_ARCHIVE_REPOSITORY_AMD64
-- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_932_ARCHIVE_REPOSITORY_DEV_AMD64
+- MQ_ARCHIVE_REPOSITORY=$MQ_933_ARCHIVE_REPOSITORY_AMD64
+- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_933_ARCHIVE_REPOSITORY_DEV_AMD64
 script: bash -e travis-build-scripts/run.sh
 - stage: build
 if: branch = private-master OR tag =~ ^release-candidate*
@@ -68,8 +68,8 @@ jobs:
 env:
 - BUILD_ALL=true
 - TEST_OPTS_DOCKER="-run TestGoldenPathWithMetrics"
-- MQ_ARCHIVE_REPOSITORY=$MQ_932_ARCHIVE_REPOSITORY_S390X
-- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_932_ARCHIVE_REPOSITORY_DEV_S390X
+- MQ_ARCHIVE_REPOSITORY=$MQ_933_ARCHIVE_REPOSITORY_S390X
+- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_933_ARCHIVE_REPOSITORY_DEV_S390X
 script: bash -e travis-build-scripts/run.sh
 - stage: build
 if: branch = private-master OR tag =~ ^release-candidate*
@@ -78,8 +78,8 @@ jobs:
 env:
 - BUILD_ALL=true
 - TEST_OPTS_DOCKER="-run TestGoldenPathWithMetrics"
-- MQ_ARCHIVE_REPOSITORY=$MQ_932_ARCHIVE_REPOSITORY_PPC64LE
-- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_932_ARCHIVE_REPOSITORY_DEV_PPC64LE
+- MQ_ARCHIVE_REPOSITORY=$MQ_933_ARCHIVE_REPOSITORY_PPC64LE
+- MQ_ARCHIVE_REPOSITORY_DEV=$MQ_933_ARCHIVE_REPOSITORY_DEV_PPC64LE
 script: bash -e travis-build-scripts/run.sh
 - stage: push-manifest
 if: branch = private-master AND type != pull_request OR tag =~ ^release-candidate*
```
**CBO.md** (new file, 39 lines)

````markdown
# MQ in Docker

## Download new version
Download IBM MQ Advanced for Developers here: https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqadv/
Place the file in the downloads folder of this repo.

```bash
mkdir -p downloads
wget -P downloads https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqadv/9.3.3.1-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxX64.tar.gz
wget -P downloads https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqadv/9.3.3.1-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxARM64.tar.gz
```

# Build Docker image
Update ```MQ_ARCHIVE``` with the new filename in the two Dockerfiles
```bash
nano Dockerfile-server-arm
nano Dockerfile-server-x64
```

Build the new images
```bash
docker buildx build -t <tag> -f <dockerfile> .

# Build for various architechtures
docker buildx build --platform linux/amd64 -t git.cbo.dk/academy/mq:9.3.3.1-amd64 -f Dockerfile-server-x64 .
docker buildx build --platform linux/arm64 -t git.cbo.dk/academy/mq:9.3.3.1-arm64 -f Dockerfile-server-arm .
docker image tag git.cbo.dk/academy/mq:9.3.3.1-amd64 git.cbo.dk/academy/mq:latest

# Push to registry (Optional)
docker login git.cbo.dk
docker push --all-tags git.cbo.dk/academy/mq
```

# Deploy MQ
```bash
docker stop ibmmq
docker rm ibmmq
docker run --name "ibmmq" -d -p 1414:1414 -p 9157:9157 -p 9443:9443 -e LICENSE=accept -e MQ_ADMIN_PASSWORD=passw0rd -e MQ_QMGR_NAME=MQDOCKER -e MQ_ENABLE_METRICS=true --name ibmmq git.cbo.dk/academy/mq:latest
```
````
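The deploy step in CBO.md starts the queue manager but does not show how to confirm it came up. A minimal check, assuming the container name `ibmmq`, the queue manager name `MQDOCKER` and the ports published by the `docker run` command above (the web console path may vary by MQ version), could look like this; `dspmq` is the standard MQ command for listing queue manager status:

```bash
# Check that the queue manager MQDOCKER is running inside the container
docker exec ibmmq dspmq -m MQDOCKER

# Tail the container logs to watch startup messages
docker logs --tail 20 ibmmq

# The embedded web console is published on port 9443; expect an HTTP status code back
curl -k https://localhost:9443/ibmmq/console/ -o /dev/null -w "%{http_code}\n"
```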
```diff
@@ -1,5 +1,9 @@
 # Change log
 
+## 9.3.3.0 (2023-06)
+
+* Updated to MQ version 9.3.3.0
+
 ## 9.3.2.0 (2023-02)
 
 * Updated to MQ version 9.3.2.0
```
**Dockerfile-server-arm** (new file, 210 lines)

```dockerfile
# © Copyright IBM Corporation 2015, 2023
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Download IBM MQ Advanced for Developers here: https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqadv/
# mkdir downloads
# Copy 9.3.3.0-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxARM64.tar.gz into downloads.
# Update .dockerignore
# docker build -t mqserver -f Dockerfile-server-arm .
# docker run -d -p 1414:1414 -p 9157:9157 -p 9443:9443 -e LICENSE=accept --name mqserver mqserver

ARG BASE_IMAGE=registry.access.redhat.com/ubi8/ubi-minimal
ARG BASE_TAG=8.10-896.1716497715
ARG BUILDER_IMAGE=registry.access.redhat.com/ubi8/go-toolset
ARG BUILDER_TAG=1.21.9-3.1716505664
ARG GO_WORKDIR=/opt/app-root/src/go/src/github.com/ibm-messaging/mq-container
ARG MQ_ARCHIVE="downloads/9.3.3.1-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxARM64.tar.gz"

###############################################################################
# Build stage to build Go code
###############################################################################
FROM $BUILDER_IMAGE:$BUILDER_TAG as builder
ARG IMAGE_REVISION="Not specified"
ARG IMAGE_SOURCE="Not specified"
ARG IMAGE_TAG="Not specified"
ARG GO_WORKDIR
ARG MQ_ARCHIVE
USER 0
WORKDIR $GO_WORKDIR/
ADD $MQ_ARCHIVE /opt/mqm
ENV CGO_CFLAGS="-I/opt/mqm/inc/" \
    CGO_LDFLAGS_ALLOW="-Wl,-rpath.*" \
    PATH="${PATH}:/opt/mqm/bin"
COPY go.mod go.sum ./
COPY cmd/ ./cmd
COPY internal/ ./internal
COPY pkg/ ./pkg
COPY vendor/ ./vendor
RUN go build -ldflags "-X \"main.ImageCreated=$(date --iso-8601=seconds)\" -X \"main.ImageRevision=$IMAGE_REVISION\" -X \"main.ImageSource=$IMAGE_SOURCE\" -X \"main.ImageTag=$IMAGE_TAG\"" ./cmd/runmqserver/ \
    && go build ./cmd/chkmqready/ \
    && go build ./cmd/chkmqhealthy/ \
    && go build ./cmd/chkmqstarted/ \
    && go build ./cmd/runmqdevserver/ \
    && go test -v ./cmd/runmqdevserver/... \
    && go test -v ./cmd/runmqserver/ \
    && go test -v ./cmd/chkmqready/ \
    && go test -v ./cmd/chkmqhealthy/ \
    && go test -v ./cmd/chkmqstarted/ \
    && go test -v ./pkg/... \
    && go test -v ./internal/... \
    && go vet ./cmd/... ./internal/...

###############################################################################
# Build stage to reduce MQ packages included using genmqpkg
###############################################################################
FROM $BASE_IMAGE:$BASE_TAG AS mq-redux
ARG BASE_IMAGE
ARG BASE_TAG
ARG MQ_ARCHIVE
WORKDIR /tmp/mq
ENV genmqpkg_inc32=0 \
    genmqpkg_incadm=1 \
    genmqpkg_incamqp=0 \
    genmqpkg_incams=1 \
    genmqpkg_inccbl=0 \
    genmqpkg_inccics=0 \
    genmqpkg_inccpp=0 \
    genmqpkg_incdnet=0 \
    genmqpkg_incjava=1 \
    genmqpkg_incjre=1 \
    genmqpkg_incman=0 \
    genmqpkg_incmqbc=0 \
    genmqpkg_incmqft=0 \
    genmqpkg_incmqsf=0 \
    genmqpkg_incmqxr=0 \
    genmqpkg_incnls=1 \
    genmqpkg_incras=1 \
    genmqpkg_incsamp=1 \
    genmqpkg_incsdk=0 \
    genmqpkg_inctls=1 \
    genmqpkg_incunthrd=0 \
    genmqpkg_incweb=1
ADD $MQ_ARCHIVE /opt/mqm-noinstall
# Run genmqpkg to reduce the MQ packages included
RUN /opt/mqm-noinstall/bin/genmqpkg.sh -b /opt/mqm-redux

###############################################################################
# Main build stage, to build MQ image
###############################################################################
FROM $BASE_IMAGE:$BASE_TAG AS mq-server
ARG MQ_URL
ARG BASE_IMAGE
ARG BASE_TAG
ARG GO_WORKDIR
LABEL summary="IBM MQ Advanced Server" \
    description="Simplify, accelerate and facilitate the reliable exchange of data with a security-rich messaging solution — trusted by the world’s most successful enterprises" \
    vendor="IBM" \
    maintainer="IBM" \
    distribution-scope="private" \
    authoritative-source-url="https://www.ibm.com/software/passportadvantage/" \
    url="https://www.ibm.com/products/mq/advanced" \
    io.openshift.tags="mq messaging" \
    io.k8s.display-name="IBM MQ Advanced Server" \
    io.k8s.description="Simplify, accelerate and facilitate the reliable exchange of data with a security-rich messaging solution — trusted by the world’s most successful enterprises" \
    base-image=$BASE_IMAGE \
    base-image-release=$BASE_TAG
COPY --from=mq-redux /opt/mqm-redux/ /opt/mqm/
COPY setup-image.sh /usr/local/bin/
COPY install-mq-server-prereqs.sh /usr/local/bin/
RUN env \
    && chmod u+x /usr/local/bin/install-*.sh \
    && chmod u+x /usr/local/bin/setup-image.sh \
    && install-mq-server-prereqs.sh \
    && setup-image.sh \
    && /opt/mqm/bin/security/amqpamcf \
    && chown -R 1001:root /opt/mqm/*
COPY --from=builder $GO_WORKDIR/runmqserver /usr/local/bin/
COPY --from=builder $GO_WORKDIR/chkmq* /usr/local/bin/
COPY NOTICES.txt /opt/mqm/licenses/notices-container.txt
COPY ha/native-ha.ini.tpl /etc/mqm/native-ha.ini.tpl
# Copy web XML files
COPY web /etc/mqm/web
COPY etc/mqm/*.tpl /etc/mqm/
RUN chmod ug+x /usr/local/bin/runmqserver \
    && chown 1001:root /usr/local/bin/*mq* \
    && chmod ug+x /usr/local/bin/chkmq* \
    && chown -R 1001:root /etc/mqm/* \
    && install --directory --mode 2775 --owner 1001 --group root /run/runmqserver \
    && touch /run/termination-log \
    && chown 1001:root /run/termination-log \
    && chmod 0660 /run/termination-log \
    && chmod -R g+w /etc/mqm/web
# Always use port 1414 for MQ & 9157 for the metrics
EXPOSE 1414 9157 9443
ENV MQ_OVERRIDE_DATA_PATH=/mnt/mqm/data MQ_OVERRIDE_INSTALLATION_NAME=Installation1 MQ_USER_NAME="mqm" PATH="${PATH}:/opt/mqm/bin"
ENV MQ_GRACE_PERIOD=30
ENV LANG=en_US.UTF-8 AMQ_DIAGNOSTIC_MSG_SEVERITY=1 AMQ_ADDITIONAL_JSON_LOG=1
ENV MQ_LOGGING_CONSOLE_EXCLUDE_ID=AMQ5041I,AMQ5052I,AMQ5051I,AMQ5037I,AMQ5975I
ENV WLP_LOGGING_MESSAGE_FORMAT=json
# We can run as any UID
USER 1001
ENV MQ_CONNAUTH_USE_HTP=false
ENTRYPOINT ["runmqserver"]

###############################################################################
# Build stage to build C code for custom authorization service (developer-only)
###############################################################################
# Use the Go toolset image, which already includes gcc and the MQ SDK
FROM builder as cbuilder
USER 0
# Install the Apache Portable Runtime code (used for htpasswd hash checking)
RUN yum --assumeyes --disableplugin=subscription-manager install apr-devel apr-util-openssl apr-util-devel
COPY authservice/ /opt/app-root/src/authservice/
WORKDIR /opt/app-root/src/authservice/mqhtpass
RUN make all

###############################################################################
# Add default developer config
###############################################################################
FROM mq-server AS mq-dev-server
ARG BASE_IMAGE
ARG BASE_TAG
ARG GO_WORKDIR
LABEL summary="IBM MQ Advanced for Developers Server" \
    description="Simplify, accelerate and facilitate the reliable exchange of data with a security-rich messaging solution — trusted by the world’s most successful enterprises" \
    vendor="IBM" \
    distribution-scope="private" \
    authoritative-source-url="https://www.ibm.com/software/passportadvantage/" \
    url="https://www.ibm.com/products/mq/advanced" \
    io.openshift.tags="mq messaging" \
    io.k8s.display-name="IBM MQ Advanced for Developers Server" \
    io.k8s.description="Simplify, accelerate and facilitate the reliable exchange of data with a security-rich messaging solution — trusted by the world’s most successful enterprises" \
    base-image=$BASE_IMAGE \
    base-image-release=$BASE_TAG
USER 0
COPY --from=cbuilder /opt/app-root/src/authservice/mqhtpass/build/mqhtpass.so /opt/mqm/lib64/
COPY etc/mqm/*.ini /etc/mqm/
COPY etc/mqm/mq.htpasswd /etc/mqm/
COPY incubating/mqadvanced-server-dev/install-extra-packages.sh /usr/local/bin/
RUN chmod u+x /usr/local/bin/install-extra-packages.sh \
    && sleep 1 \
    && install-extra-packages.sh
COPY --from=builder $GO_WORKDIR/runmqdevserver /usr/local/bin/
# Copy template files
COPY incubating/mqadvanced-server-dev/*.tpl /etc/mqm/
# Copy web XML files for default developer configuration
COPY incubating/mqadvanced-server-dev/web /etc/mqm/web
RUN chown -R 1001:root /etc/mqm/* \
    && chmod -R g+w /etc/mqm/web \
    && chmod +x /usr/local/bin/runmq* \
    && chmod 0660 /etc/mqm/mq.htpasswd \
    && install --directory --mode 2775 --owner 1001 --group root /run/runmqdevserver
ENV MQ_DEV=true \
    MQ_ENABLE_EMBEDDED_WEB_SERVER=1 \
    MQ_GENERATE_CERTIFICATE_HOSTNAME=localhost \
    LD_LIBRARY_PATH=/opt/mqm/lib64 \
    MQ_CONNAUTH_USE_HTP=true \
    MQS_PERMIT_UNKNOWN_ID=true
USER 1001
ENTRYPOINT ["runmqdevserver"]
```
```diff
@@ -12,12 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+# Download IBM MQ Advanced for Developers here: https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqadv/
+# mkdir downloads
+# Copy 9.3.3.0-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxX64.tar.gz into downloads.
+# Update .dockerignore
+# docker build -t mqserver -f Dockerfile-server-x64 .
+# docker run -d -p 1414:1414 -p 9157:9157 -p 9443:9443 -e LICENSE=accept --name mqserver mqserver
 
 ARG BASE_IMAGE=registry.access.redhat.com/ubi8/ubi-minimal
-ARG BASE_TAG=8.7-1049.1675784874
+ARG BASE_TAG=8.10-896.1716497715
 ARG BUILDER_IMAGE=registry.access.redhat.com/ubi8/go-toolset
-ARG BUILDER_TAG=1.18.9-8.1675807488
+ARG BUILDER_TAG=1.21.9-3.1716505664
 ARG GO_WORKDIR=/opt/app-root/src/go/src/github.com/ibm-messaging/mq-container
-ARG MQ_ARCHIVE="downloads/9.3.2.0-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxX64.tar.gz"
+ARG MQ_ARCHIVE="downloads/9.3.3.0-IBM-MQ-Advanced-for-Developers-Non-Install-LinuxX64.tar.gz"
 
 ###############################################################################
 # Build stage to build Go code
```
```diff
@@ -15,7 +15,7 @@
 ARG BASE_IMAGE
 
 # Build stage to build Go code
-FROM golang:1.10 as builder
+FROM golang:1.22 as builder
 WORKDIR /go/src/github.com/ibm-messaging/mq-container/
 COPY cmd/ ./cmd
 COPY internal/ ./internal
```
**Makefile** (42 lines changed)

```diff
@@ -45,10 +45,10 @@ MQ_ARCHIVE ?= IBM_MQ_$(MQ_VERSION_VRM)_$(MQ_ARCHIVE_TYPE)_$(MQ_ARCHIVE_ARCH)_NOI
 MQ_ARCHIVE_DEV ?= $(MQ_VERSION)-IBM-MQ-Advanced-for-Developers-Non-Install-$(MQ_ARCHIVE_DEV_TYPE)$(MQ_ARCHIVE_DEV_ARCH).tar.gz
 # MQ_SDK_ARCHIVE specifies the archive to use for building the golang programs. Defaults vary on developer or advanced.
 MQ_SDK_ARCHIVE ?= $(MQ_ARCHIVE_DEV_$(MQ_VERSION))
-# Options to `go test` for the Docker tests
-TEST_OPTS_DOCKER ?=
-# Timeout for the Docker tests
-TEST_TIMEOUT_DOCKER ?= 45m
+# Options to `go test` for the Container tests
+TEST_OPTS_CONTAINER ?=
+# Timeout for the tests
+TEST_TIMEOUT_CONTAINER ?= 45m
 # MQ_IMAGE_ADVANCEDSERVER is the name of the built MQ Advanced image
 MQ_IMAGE_ADVANCEDSERVER ?=ibm-mqadvanced-server
 # MQ_IMAGE_DEVSERVER is the name of the built MQ Advanced for Developers image
@@ -278,9 +278,9 @@ cache-mq-tag:
 # Test targets
 ###############################################################################
 
-# Vendor Go dependencies for the Docker tests
-test/docker/vendor:
-    cd test/docker && go mod vendor
+# Vendor Go dependencies for the Container tests
+test/container/vendor:
+    cd test/container && go mod vendor
 
 # Shortcut to just run the unit tests
 .PHONY: test-unit
@@ -288,28 +288,28 @@ test-unit:
     $(COMMAND) build --target builder --file Dockerfile-server .
 
 .PHONY: test-advancedserver
-test-advancedserver: test/docker/vendor
+test-advancedserver: test/container/vendor
     $(info $(SPACER)$(shell printf $(TITLE)"Test $(MQ_IMAGE_ADVANCEDSERVER):$(MQ_TAG) on $(shell $(COMMAND) --version)"$(END)))
     $(COMMAND) inspect $(MQ_IMAGE_ADVANCEDSERVER):$(MQ_TAG)
-    cd test/docker && TEST_IMAGE=$(MQ_IMAGE_ADVANCEDSERVER):$(MQ_TAG) EXPECTED_LICENSE=Production DOCKER_API_VERSION=$(DOCKER_API_VERSION) go test -parallel $(NUM_CPU) -timeout $(TEST_TIMEOUT_DOCKER) $(TEST_OPTS_DOCKER)
+    cd test/container && TEST_IMAGE=$(MQ_IMAGE_ADVANCEDSERVER):$(MQ_TAG) EXPECTED_LICENSE=Production DOCKER_API_VERSION=$(DOCKER_API_VERSION) COMMAND=$(COMMAND) go test -parallel $(NUM_CPU) -timeout $(TEST_TIMEOUT_CONTAINER) $(TEST_OPTS_CONTAINER)
 
 .PHONY: build-devjmstest
 build-devjmstest:
     $(info $(SPACER)$(shell printf $(TITLE)"Build JMS tests for developer config"$(END)))
-    cd test/messaging && docker build --tag $(DEV_JMS_IMAGE) .
+    cd test/messaging && $(COMMAND) build --tag $(DEV_JMS_IMAGE) .
 
 .PHONY: test-devserver
-test-devserver: test/docker/vendor
+test-devserver: test/container/vendor
     $(info $(SPACER)$(shell printf $(TITLE)"Test $(MQ_IMAGE_DEVSERVER):$(MQ_TAG) on $(shell $(COMMAND) --version)"$(END)))
     $(COMMAND) inspect $(MQ_IMAGE_DEVSERVER):$(MQ_TAG)
-    cd test/docker && TEST_IMAGE=$(MQ_IMAGE_DEVSERVER):$(MQ_TAG) EXPECTED_LICENSE=Developer DEV_JMS_IMAGE=$(DEV_JMS_IMAGE) IBMJRE=false DOCKER_API_VERSION=$(DOCKER_API_VERSION) go test -parallel $(NUM_CPU) -timeout $(TEST_TIMEOUT_DOCKER) -tags mqdev $(TEST_OPTS_DOCKER)
+    cd test/container && TEST_IMAGE=$(MQ_IMAGE_DEVSERVER):$(MQ_TAG) EXPECTED_LICENSE=Developer DEV_JMS_IMAGE=$(DEV_JMS_IMAGE) IBMJRE=false DOCKER_API_VERSION=$(DOCKER_API_VERSION) COMMAND=$(COMMAND) go test -parallel $(NUM_CPU) -timeout $(TEST_TIMEOUT_CONTAINER) -tags mqdev $(TEST_OPTS_CONTAINER)
 
 .PHONY: coverage
 coverage:
     mkdir coverage
 
 .PHONY: test-advancedserver-cover
-test-advancedserver-cover: test/docker/vendor coverage
+test-advancedserver-cover: test/container/vendor coverage
     $(info $(SPACER)$(shell printf $(TITLE)"Test $(MQ_IMAGE_ADVANCEDSERVER):$(MQ_TAG) with code coverage on $(shell $(COMMAND) --version)"$(END)))
     rm -f ./coverage/unit*.cov
     # Run unit tests with coverage, for each package under 'internal'
@@ -319,16 +319,16 @@ test-advancedserver-cover: test/docker/vendor coverage
     tail -q -n +2 ./coverage/unit-*.cov >> ./coverage/unit.cov
     go tool cover -html=./coverage/unit.cov -o ./coverage/unit.html
 
-    rm -f ./test/docker/coverage/*.cov
-    rm -f ./coverage/docker.*
-    mkdir -p ./test/docker/coverage/
-    cd test/docker && TEST_IMAGE=$(MQ_IMAGE_ADVANCEDSERVER):$(MQ_TAG)-cover TEST_COVER=true DOCKER_API_VERSION=$(DOCKER_API_VERSION) go test $(TEST_OPTS_DOCKER)
-    echo 'mode: count' > ./coverage/docker.cov
-    tail -q -n +2 ./test/docker/coverage/*.cov >> ./coverage/docker.cov
-    go tool cover -html=./coverage/docker.cov -o ./coverage/docker.html
+    rm -f ./test/container/coverage/*.cov
+    rm -f ./coverage/container.*
+    mkdir -p ./test/container/coverage/
+    cd test/container && TEST_IMAGE=$(MQ_IMAGE_ADVANCEDSERVER):$(MQ_TAG)-cover TEST_COVER=true DOCKER_API_VERSION=$(DOCKER_API_VERSION) go test $(TEST_OPTS_CONTAINER)
+    echo 'mode: count' > ./coverage/container.cov
+    tail -q -n +2 ./test/container/coverage/*.cov >> ./coverage/container.cov
+    go tool cover -html=./coverage/container.cov -o ./coverage/container.html
 
     echo 'mode: count' > ./coverage/combined.cov
-    tail -q -n +2 ./coverage/unit.cov ./coverage/docker.cov >> ./coverage/combined.cov
+    tail -q -n +2 ./coverage/unit.cov ./coverage/container.cov >> ./coverage/combined.cov
     go tool cover -html=./coverage/combined.cov -o ./coverage/combined.html
 
 ###############################################################################
```
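The renamed variables above (`TEST_OPTS_CONTAINER`, `TEST_TIMEOUT_CONTAINER`, the `test/container` path) and the `COMMAND=$(COMMAND)` environment now passed to `go test` suggest the test targets can be driven by either Docker or Podman. A hedged usage sketch, assuming `COMMAND` is the Makefile variable that selects the container CLI (its default is defined elsewhere in the Makefile and is not shown in this hunk):

```bash
# Run the developer-image tests with Podman instead of Docker
make test-devserver COMMAND=podman

# The coverage wrapper shown above writes its output under ./coverage/
# as container.cov, container.html and combined.cov
make test-advancedserver-cover
```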
```diff
@@ -48,8 +48,8 @@ For issues relating specifically to the container image or Helm chart, please us
 The Dockerfiles and associated code and scripts are licensed under the [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0.html).
 Licenses for the products installed within the images are as follows:
 
-- [IBM MQ Advanced for Developers](http://www14.software.ibm.com/cgi-bin/weblap/lap.pl?la_formnum=Z125-3301-14&li_formnum=L-APIG-CAUEQC) (International License Agreement for Non-Warranted Programs). This license may be viewed from an image using the `LICENSE=view` environment variable as described above or by following the link above.
-- [IBM MQ Advanced](http://www14.software.ibm.com/cgi-bin/weblap/lap.pl?la_formnum=Z125-3301-14&li_formnum=L-UPFX-8MW49T) (International Program License Agreement). This license may be viewed from an image using the `LICENSE=view` environment variable as described above or by following the link above.
+- [IBM MQ Advanced for Developers](http://www14.software.ibm.com/cgi-bin/weblap/lap.pl?la_formnum=Z125-3301-14&li_formnum=L-AXAF-JLZ53A) (International License Agreement for Non-Warranted Programs). This license may be viewed from an image using the `LICENSE=view` environment variable as described above or by following the link above.
+- [IBM MQ Advanced](http://www14.software.ibm.com/cgi-bin/weblap/lap.pl?la_formnum=Z125-3301-14&li_formnum=L-AMRD-XH6P3Q) (International Program License Agreement). This license may be viewed from an image using the `LICENSE=view` environment variable as described above or by following the link above.
 
 Note: The IBM MQ Advanced for Developers license does not permit further distribution and the terms restrict usage to a developer machine.
 
```
```diff
@@ -211,7 +211,7 @@ func mirrorHTPasswdLogs(ctx context.Context, wg *sync.WaitGroup, name string, fr
 
 // mirrorWebServerLogs starts a goroutine to mirror the contents of the Liberty web server messages.log
 func mirrorWebServerLogs(ctx context.Context, wg *sync.WaitGroup, name string, fromStart bool, mf mirrorFunc) (chan error, error) {
-	return mirrorLog(ctx, wg, "/var/mqm/web/installations/Installation1/servers/mqweb/logs/messages.log", false, mf, true)
+	return mirrorLog(ctx, wg, "/var/mqm/web/installations/Installation1/servers/mqweb/logs/messages.log", fromStart, mf, true)
 }
 
 func getDebug() bool {
```
```diff
@@ -165,6 +165,27 @@ func doMain() error {
 		log.Println("One or more invalid value is provided for MQ_LOGGING_CONSOLE_SOURCE. Allowed values are 'qmgr' & 'web' in csv format")
 	}
 
+	var wg sync.WaitGroup
+	defer func() {
+		log.Debug("Waiting for log mirroring to complete")
+		wg.Wait()
+	}()
+	ctx, cancelMirror := context.WithCancel(context.Background())
+	defer func() {
+		log.Debug("Cancel log mirroring")
+		cancelMirror()
+	}()
+
+	//For mirroring web server logs if source variable is set
+	if checkLogSourceForMirroring("web") {
+		// Always log from the end of the web server messages.log, because the log rotation should happen as soon as the web server starts
+		_, err = mirrorWebServerLogs(ctx, &wg, name, false, mf)
+		if err != nil {
+			logTermination(err)
+			return err
+		}
+	}
+
 	err = postInit(name, keyLabel, defaultP12Truststore)
 	if err != nil {
 		logTermination(err)
@@ -205,17 +226,6 @@ func doMain() error {
 		}
 	}
 
-	var wg sync.WaitGroup
-	defer func() {
-		log.Debug("Waiting for log mirroring to complete")
-		wg.Wait()
-	}()
-	ctx, cancelMirror := context.WithCancel(context.Background())
-	defer func() {
-		log.Debug("Cancel log mirroring")
-		cancelMirror()
-	}()
-
 	//For mirroring mq system logs and qm logs, if environment variable is set
 	if checkLogSourceForMirroring("qmgr") {
 		//Mirror MQ system logs
@@ -241,17 +251,6 @@ func doMain() error {
 		}
 	}
 
-	//For mirroring web server logs if source variable is set
-	if checkLogSourceForMirroring("web") {
-		// Always log from the start of the web server messages.log, as
-		// Liberty resets it.
-		_, err = mirrorWebServerLogs(ctx, &wg, name, true, mf)
-		if err != nil {
-			logTermination(err)
-			return err
-		}
-	}
-
 	err = updateCommandLevel()
 	if err != nil {
 		logTermination(err)
```
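The hunks above move the `sync.WaitGroup`/`context` setup and the web-server log mirroring earlier in `doMain`, and switch web mirroring to start from the end of `messages.log` as soon as the web server is up. The context lines show that the mirrored sources are selected with the `MQ_LOGGING_CONSOLE_SOURCE` variable ('qmgr' and 'web', comma separated). A hedged run-time example of enabling both sources, reusing the image and ports from the CBO.md instructions above (those names are this repo's, not part of this hunk):

```bash
# Mirror both queue manager and Liberty web server logs to the container console
docker run -d --name ibmmq \
  -p 1414:1414 -p 9443:9443 \
  -e LICENSE=accept \
  -e MQ_QMGR_NAME=MQDOCKER \
  -e MQ_LOGGING_CONSOLE_SOURCE="qmgr,web" \
  git.cbo.dk/academy/mq:latest
```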
```diff
@@ -1,6 +1,6 @@
 ###########################################################################################################################################################
 
 # MQ_VERSION is the fully qualified MQ version number to build
-MQ_VERSION ?= 9.3.2.0
+MQ_VERSION ?= 9.3.3.0
 
 ###########################################################################################################################################################
```
```diff
@@ -12,6 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM fedora:32
+FROM fedora:41
 RUN yum install skopeo -y -qq
 ENTRYPOINT [ "skopeo" ]
```
```diff
@@ -38,7 +38,7 @@ However, if you wish to build the previous MQ LTS, use the [instructions](https:
 
 ## Building a developer image
 
-Run `make build-devserver`, which will download the latest version of MQ Advanced for Developers from IBM developerWorks. This is currently only available on the `amd64` architecture.
+Run `make build-devserver`, which will download the latest version of MQ Advanced for Developers. This is available on the `amd64` and `arm64` (Apple Silicon) architectures.
 
 You can use the environment variable `MQ_ARCHIVE_DEV` to specify an alternative local file to install from (which must be in the `downloads` directory).
 
```
````diff
@@ -16,5 +16,5 @@ docker run \
 --env LICENSE=accept \
 --env MQ_QMGR_NAME=QM1 \
 --detach \
-ibm-mqadvanced-server:9.3.2.0-amd64
+ibm-mqadvanced-server:9.3.3.0-amd64
 ```
````
````diff
@@ -24,7 +24,7 @@ make test-advancedserver
 You can specify the image to use directly by using the `MQ_IMAGE_ADVANCEDSERVER` or `MQ_IMAGE_DEVSERVER` variables, for example:
 
 ```
-MQ_IMAGE_ADVANCEDSERVER=ibm-mqadvanced-server:9.3.2.0-amd64 make test-advancedserver
+MQ_IMAGE_ADVANCEDSERVER=ibm-mqadvanced-server:9.3.3.0-amd64 make test-advancedserver
 ```
 
 You can pass parameters to `go test` with an environment variable. For example, to run the "TestGoldenPath" test, run the following command:
````
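The hunk above stops just before the command the documentation goes on to show. Based on the Makefile changes earlier in this comparison (not the documentation's literal text), a plausible form of that command with the renamed variable would be:

```bash
# Select the image and run only the TestGoldenPath test
# (TEST_OPTS_CONTAINER replaces the old TEST_OPTS_DOCKER used in .travis.yml)
MQ_IMAGE_ADVANCEDSERVER=ibm-mqadvanced-server:9.3.3.0-amd64 \
TEST_OPTS_CONTAINER="-run TestGoldenPath" \
make test-advancedserver
```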
6 binary files changed; contents not shown.
**go.mod** (22 lines changed)

```diff
@@ -1,24 +1,24 @@
 module github.com/ibm-messaging/mq-container
 
-go 1.18
+go 1.19
 
 require (
 	github.com/genuinetools/amicontained v0.4.3
 	github.com/ibm-messaging/mq-golang v2.0.0+incompatible
-	github.com/prometheus/client_golang v1.11.1
-	github.com/prometheus/client_model v0.2.0
-	golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d
-	golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1
-	software.sslmate.com/src/go-pkcs12 v0.0.0-20200830195227-52f69702a001
+	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/client_model v0.6.1
+	golang.org/x/crypto v0.24.0
+	golang.org/x/sys v0.21.0
+	software.sslmate.com/src/go-pkcs12 v0.4.0
 )
 
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.1.1 // indirect
-	github.com/golang/protobuf v1.4.3 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-	github.com/prometheus/common v0.26.0 // indirect
-	github.com/prometheus/procfs v0.6.0 // indirect
+	github.com/prometheus/common v0.48.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
-	google.golang.org/protobuf v1.26.0-rc.1 // indirect
+	google.golang.org/protobuf v1.33.0 // indirect
 )
```
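These bumps (the Prometheus client libraries, x/crypto, x/sys, go-pkcs12 and the `go 1.19` directive) have matching new entries in go.sum below. A hedged sketch of how such a bump is typically reproduced locally, given that the build vendors dependencies with `go mod vendor`:

```bash
# Bump the direct dependencies named in go.mod, then refresh go.sum and vendor/
go get github.com/prometheus/client_golang@v1.19.1 \
       github.com/prometheus/client_model@v0.6.1 \
       golang.org/x/crypto@v0.24.0 \
       golang.org/x/sys@v0.21.0 \
       software.sslmate.com/src/go-pkcs12@v0.4.0
go mod tidy
go mod vendor
```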
**go.sum** (22 lines changed)

```diff
@@ -10,6 +10,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/genuinetools/amicontained v0.4.3 h1:cqq9XiAHfWWY3dk8VU8bSJFu9yh8Il5coEdeTAPq72o=
@@ -33,6 +35,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -71,19 +76,27 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
 github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -99,6 +112,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -123,6 +138,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -137,6 +154,9 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -147,3 +167,5 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 software.sslmate.com/src/go-pkcs12 v0.0.0-20200830195227-52f69702a001 h1:AVd6O+azYjVQYW1l55IqkbL8/JxjrLtO6q4FCmV8N5c=
 software.sslmate.com/src/go-pkcs12 v0.0.0-20200830195227-52f69702a001/go.mod h1:/xvNRWUqm0+/ZMiF4EX00vrSCMsE4/NHb+Pt3freEeQ=
+software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
+software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
```
```diff
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM ubuntu:16.04
+FROM ubuntu:24.04
 
 # The URL to download the MQ installer from in tar.gz format
 ARG MQ_URL=https://public.dhe.ibm.com/ibmdl/export/pub/software/websphere/messaging/mqadv/mqadv_dev911_ubuntu_x86-64.tar.gz
```
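Because `MQ_URL` is declared as an `ARG` in the hunk above, the installer location can be overridden at build time instead of editing the file. A small sketch; the mirror host, Dockerfile path and output tag below are placeholders, not values from this comparison:

```bash
# Point the build at a mirrored copy of the installer archive
docker build \
  --build-arg MQ_URL=https://<mirror-host>/mqadv_dev911_ubuntu_x86-64.tar.gz \
  -t mq-ubuntu-incubating -f <path-to-this-dockerfile> .
```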
```diff
@@ -1,6 +1,6 @@
 #!/bin/bash
 # -*- mode: sh -*-
-# © Copyright IBM Corporation 2015, 2022
+# © Copyright IBM Corporation 2015, 2023
 #
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -42,21 +42,12 @@ if ($UBUNTU); then
     echo "deb ${APT_URL} ${UBUNTU_CODENAME}-security main restricted" >> /etc/apt/sources.list
     # Install additional packages required by MQ, this install process and the runtime scripts
     EXTRA_DEBS="bash bc ca-certificates coreutils curl debianutils file findutils gawk grep libc-bin mount passwd procps sed tar util-linux"
-    # On ARM CPUs, there is no IBM JRE, so install another one
-    if [ "${CPU_ARCH}" == "aarch64" ]; then
-        EXTRA_DEBS="${EXTRA_DEBS} openjdk-8-jre"
-    fi
     apt-get update
     apt-get install -y --no-install-recommends ${EXTRA_DEBS}
 fi
 
 if ($RPM); then
     EXTRA_RPMS="bash bc ca-certificates file findutils gawk glibc-common grep ncurses-compat-libs passwd procps-ng sed shadow-utils tar util-linux which"
-    # On ARM CPUs, there is no IBM JRE, so install another one
-    if [ "${CPU_ARCH}" == "aarch64" ]; then
-        EXTRA_RPMS="${EXTRA_RPMS} java-1.8.0-openjdk-headless"
-    fi
-
     # Install additional packages required by MQ, this install process and the runtime scripts
     $YUM && yum -y install --setopt install_weak_deps=false ${EXTRA_RPMS}
     $MICRODNF && microdnf --disableplugin=subscription-manager install ${EXTRA_RPMS}
```
**renovate.json** (new file, 3 lines)

```json
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json"
}
```
**test/container/containerengine/containerengine.go** (new file, 669 lines; only the beginning is shown below)

```go
/*
© Copyright IBM Corporation 2017, 2023

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containerengine

import (
	"context"
	"encoding/json"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"time"
)

type ContainerInterface interface {
	ContainerCreate(config *ContainerConfig, hostConfig *ContainerHostConfig, networkingConfig *ContainerNetworkSettings, containerName string) (string, error)
	ContainerStop(container string, timeout *time.Duration) error
	ContainerKill(container string, signal string) error
	ContainerRemove(container string, options ContainerRemoveOptions) error
	ContainerStart(container string, options ContainerStartOptions) error
	ContainerWait(ctx context.Context, container string, condition string) (<-chan int64, <-chan error)
	GetContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (string, error)
	CopyFromContainer(container, srcPath string) ([]byte, error)

	GetContainerPort(ID string, hostPort int) (string, error)
	GetContainerIPAddress(ID string) (string, error)
	ContainerInspectWithFormat(format string, ID string) (string, error)
	ExecContainer(ID string, user string, cmd []string) (int, string)
	GetMQVersion(image string) (string, error)
	ContainerInspect(containerID string) (ContainerDetails, error)

	NetworkCreate(name string, options NetworkCreateOptions) (string, error)
	NetworkRemove(network string) error

	VolumeCreate(options VolumeCreateOptions) (string, error)
	VolumeRemove(volumeID string, force bool) error

	ImageBuild(context io.Reader, tag string, dockerfilename string) (string, error)
	ImageRemove(image string, options ImageRemoveOptions) (bool, error)
	ImageInspectWithFormat(format string, ID string) (string, error)
}

type ContainerClient struct {
	ContainerTool string
	Version       string
}

// objects
var objVolume = "volume"
var objImage = "image"
var objPort = "port"
var objNetwork = "network"

// verbs
var listContainers = "ps"
var listImages = "images"
var create = "create"
var startContainer = "start"
var waitContainer = "wait"
var execContainer = "exec"
var getLogs = "logs"
var stopContainer = "stop"
var remove = "rm"
var inspect = "inspect"
var copyFile = "cp"
var build = "build"
var killContainer = "kill"

// args
var argEntrypoint = "--entrypoint"
var argUser = "--user"
var argExpose = "--expose"
var argVolume = "--volume"
var argPublish = "--publish"
var argPrivileged = "--privileged"
```
||||||
|
var argAddCapability = "--cap-add"
|
||||||
|
var argDropCapability = "--cap-drop"
|
||||||
|
var argName = "--name"
|
||||||
|
var argCondition = "--condition"
|
||||||
|
var argEnvironmentVariable = "--env"
|
||||||
|
var argTail = "--tail"
|
||||||
|
var argForce = "--force"
|
||||||
|
var argVolumes = "--volumes"
|
||||||
|
var argHostname = "--hostname"
|
||||||
|
var argDriver = "--driver"
|
||||||
|
var argFile = "--file"
|
||||||
|
var argQuiet = "--quiet"
|
||||||
|
var argTag = "--tag"
|
||||||
|
var argFormat = "--format"
|
||||||
|
var argNetwork = "--network"
|
||||||
|
var argSecurityOptions = "--security-opt"
|
||||||
|
var argSignal = "--signal"
|
||||||
|
|
||||||
|
// generic
|
||||||
|
var toolVersion = "version"
|
||||||
|
var ContainerStateNotRunning = "not-running"
|
||||||
|
var ContainerStateStopped = "stopped"
|
||||||
|
|
||||||
|
type ContainerConfig struct {
|
||||||
|
Image string
|
||||||
|
Hostname string
|
||||||
|
User string
|
||||||
|
Entrypoint []string
|
||||||
|
Env []string
|
||||||
|
ExposedPorts []string
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerDetails struct {
|
||||||
|
ID string
|
||||||
|
Name string
|
||||||
|
Image string
|
||||||
|
Path string
|
||||||
|
Args []string
|
||||||
|
Config ContainerConfig
|
||||||
|
HostConfig ContainerHostConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerDetailsLogging struct {
|
||||||
|
ID string
|
||||||
|
Name string
|
||||||
|
Image string
|
||||||
|
Path string
|
||||||
|
Args []string
|
||||||
|
CapAdd []string
|
||||||
|
CapDrop []string
|
||||||
|
User string
|
||||||
|
Env []string
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerHostConfig struct {
|
||||||
|
Binds []string // Bindings onto a volume
|
||||||
|
PortBindings []PortBinding //Bindings from a container port to a port on the host
|
||||||
|
Privileged bool // Give extended privileges to container
|
||||||
|
CapAdd []string // Linux capabilities to add to the container
|
||||||
|
CapDrop []string // Linux capabilities to drop from the container
|
||||||
|
SecurityOpt []string
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerNetworkSettings struct {
|
||||||
|
Networks []string // A list of networks to connect the container to
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerRemoveOptions struct {
|
||||||
|
Force bool
|
||||||
|
RemoveVolumes bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerStartOptions struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
type NetworkCreateOptions struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerLogsOptions struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
type ImageRemoveOptions struct {
|
||||||
|
Force bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type VolumeCreateOptions struct {
|
||||||
|
Name string
|
||||||
|
Driver string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Binding from a container port to a port on the host
|
||||||
|
type PortBinding struct {
|
||||||
|
HostIP string
|
||||||
|
HostPort string //Port to map to on the host
|
||||||
|
ContainerPort string //Exposed port on the container
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewContainerClient returns a new container client
|
||||||
|
// Defaults to using podman
|
||||||
|
func NewContainerClient() ContainerClient {
|
||||||
|
tool, set := os.LookupEnv("COMMAND")
|
||||||
|
if !set {
|
||||||
|
tool = "podman"
|
||||||
|
}
|
||||||
|
return ContainerClient{
|
||||||
|
ContainerTool: tool,
|
||||||
|
Version: GetContainerToolVersion(tool),
|
||||||
|
}
|
||||||
|
}
|
||||||
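For context, the client shells out to whichever tool the COMMAND environment variable names, defaulting to podman. A minimal usage sketch follows; the docker value and the printed format are illustrative, not part of this change.

```go
// Sketch only: choosing the container tool for the test client.
// Assumes the selected tool (podman by default, or docker) is on the PATH.
package main

import (
	"fmt"
	"os"

	ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
)

func main() {
	// Point the client at docker instead of the podman default.
	os.Setenv("COMMAND", "docker")
	cli := ce.NewContainerClient()
	fmt.Printf("using %s, client version %s\n", cli.ContainerTool, cli.Version)
}
```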
|
|
||||||
|
// GetContainerToolVersion returns the version of the container tool being used
|
||||||
|
func GetContainerToolVersion(containerTool string) string {
|
||||||
|
if containerTool == "docker" {
|
||||||
|
args := []string{"version", "--format", "'{{.Client.Version}}'"}
|
||||||
|
v, err := exec.Command("docker", args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "0.0.0"
|
||||||
|
}
|
||||||
|
return string(v)
|
||||||
|
} else if containerTool == "podman" {
|
||||||
|
//Default to checking the version of podman
|
||||||
|
args := []string{"version", "--format", "'{{.Version}}'"}
|
||||||
|
v, err := exec.Command("podman", args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "0.0.0"
|
||||||
|
}
|
||||||
|
return string(v)
|
||||||
|
}
|
||||||
|
return "0.0.0"
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetMQVersion returns the MQ version of a given container image
|
||||||
|
func (cli ContainerClient) GetMQVersion(image string) (string, error) {
|
||||||
|
v, err := cli.ImageInspectWithFormat("{{.Config.Labels.version}}", image)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ImageInspectWithFormat inspects an image with a given formatting string
|
||||||
|
func (cli ContainerClient) ImageInspectWithFormat(format string, ID string) (string, error) {
|
||||||
|
args := []string{
|
||||||
|
objImage,
|
||||||
|
inspect,
|
||||||
|
ID,
|
||||||
|
}
|
||||||
|
if format != "" {
|
||||||
|
args = append(args, []string{argFormat, format}...)
|
||||||
|
}
|
||||||
|
output, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(output), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainerInspectWithFormat inspects a container with a given formatting string
|
||||||
|
func (cli ContainerClient) ContainerInspectWithFormat(format string, ID string) (string, error) {
|
||||||
|
args := []string{
|
||||||
|
inspect,
|
||||||
|
ID,
|
||||||
|
}
|
||||||
|
if format != "" {
|
||||||
|
args = append(args, []string{argFormat, format}...)
|
||||||
|
}
|
||||||
|
output, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(output), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetContainerPort gets the ports on a container
|
||||||
|
func (cli ContainerClient) GetContainerPort(ID string, hostPort int) (string, error) {
|
||||||
|
args := []string{
|
||||||
|
objPort,
|
||||||
|
ID,
|
||||||
|
strconv.Itoa(hostPort),
|
||||||
|
}
|
||||||
|
output, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
o := SanitizeString(string(output))
|
||||||
|
return strings.Split((o), ":")[1], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetContainerIPAddress gets the IP address of a container
|
||||||
|
func (cli ContainerClient) GetContainerIPAddress(ID string) (string, error) {
|
||||||
|
v, err := cli.ContainerInspectWithFormat("{{.NetworkSettings.IPAddress}}", ID)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopyFromContainer copies a file from a container and returns its contents
|
||||||
|
func (cli ContainerClient) CopyFromContainer(container, srcPath string) ([]byte, error) {
|
||||||
|
tmpDir, err := os.MkdirTemp("", "tmp")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tmpDir)
|
||||||
|
args := []string{
|
||||||
|
copyFile,
|
||||||
|
container + ":" + srcPath,
|
||||||
|
tmpDir + "/.",
|
||||||
|
}
|
||||||
|
_, err = exec.Command(cli.ContainerTool, args...).CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//Get file name
|
||||||
|
fname := filepath.Base(srcPath)
|
||||||
|
data, err := os.ReadFile(filepath.Join(tmpDir, fname))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
//Remove the file
|
||||||
|
err = os.Remove(filepath.Join(tmpDir, fname))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ContainerInspect(containerID string) (ContainerDetails, error) {
|
||||||
|
args := []string{
|
||||||
|
inspect,
|
||||||
|
containerID,
|
||||||
|
}
|
||||||
|
output, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return ContainerDetails{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var container ContainerDetails
|
||||||
|
err = json.Unmarshal(output, &container)
|
||||||
|
if err != nil {
|
||||||
|
return ContainerDetails{}, err
|
||||||
|
}
|
||||||
|
return container, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ContainerStop(container string, timeout *time.Duration) error {
|
||||||
|
args := []string{
|
||||||
|
stopContainer,
|
||||||
|
container,
|
||||||
|
}
|
||||||
|
_, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ContainerKill(container string, signal string) error {
|
||||||
|
args := []string{
|
||||||
|
killContainer,
|
||||||
|
container,
|
||||||
|
}
|
||||||
|
if signal != "" {
|
||||||
|
args = append(args, []string{argSignal, signal}...)
|
||||||
|
}
|
||||||
|
_, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ContainerRemove(container string, options ContainerRemoveOptions) error {
|
||||||
|
args := []string{
|
||||||
|
remove,
|
||||||
|
container,
|
||||||
|
}
|
||||||
|
if options.Force {
|
||||||
|
args = append(args, argForce)
|
||||||
|
}
|
||||||
|
if options.RemoveVolumes {
|
||||||
|
args = append(args, argVolumes)
|
||||||
|
}
|
||||||
|
_, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
//Silently ignore the error, as exit code 125 is present on successful deletion
|
||||||
|
if strings.Contains(err.Error(), "125") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ExecContainer(ID string, user string, cmd []string) (int, string) {
|
||||||
|
args := []string{
|
||||||
|
execContainer,
|
||||||
|
}
|
||||||
|
if user != "" {
|
||||||
|
args = append(args, []string{argUser, user}...)
|
||||||
|
}
|
||||||
|
args = append(args, ID)
|
||||||
|
args = append(args, cmd...)
|
||||||
|
ctx := context.Background()
|
||||||
|
output, err := exec.CommandContext(ctx, cli.ContainerTool, args...).CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||||
|
return exitErr.ExitCode(), string(output)
|
||||||
|
} else {
|
||||||
|
return 9897, string(output)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0, string(output)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ContainerStart(container string, options ContainerStartOptions) error {
|
||||||
|
args := []string{
|
||||||
|
startContainer,
|
||||||
|
container,
|
||||||
|
}
|
||||||
|
_, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainerWait starts waiting for a container. It returns an int64 channel for receiving an exit code and an error channel for receiving errors.
|
||||||
|
// The channels returned from this function should be used to receive the results from the wait command.
|
||||||
|
func (cli ContainerClient) ContainerWait(ctx context.Context, container string, condition string) (<-chan int64, <-chan error) {
|
||||||
|
args := []string{
|
||||||
|
waitContainer,
|
||||||
|
container,
|
||||||
|
}
|
||||||
|
if cli.ContainerTool == "podman" {
|
||||||
|
if condition == ContainerStateNotRunning {
|
||||||
|
condition = ContainerStateStopped
|
||||||
|
}
|
||||||
|
args = append(args, []string{argCondition, string(condition)}...)
|
||||||
|
}
|
||||||
|
|
||||||
|
resultC := make(chan int64)
|
||||||
|
errC := make(chan error, 1)
|
||||||
|
|
||||||
|
output, err := exec.CommandContext(ctx, cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
errC <- err
|
||||||
|
return resultC, errC
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
out := strings.TrimSuffix(string(output), "\n")
|
||||||
|
exitCode, err := strconv.Atoi(out)
|
||||||
|
if err != nil {
|
||||||
|
errC <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resultC <- int64(exitCode)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return resultC, errC
|
||||||
|
}
|
||||||
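ContainerWait deliberately mirrors the channel contract of the Docker SDK call it replaces, so callers select on both channels. A same-package sketch of how a caller might consume them; the helper name and timeout are hypothetical.

```go
// Sketch only: a hypothetical same-package helper that consumes the
// channels returned by ContainerWait. cli and id come from elsewhere.
func waitForExit(cli ContainerClient, id string) (int64, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	exitC, errC := cli.ContainerWait(ctx, id, ContainerStateNotRunning)
	select {
	case code := <-exitC:
		return code, nil // exit code reported by the container tool
	case err := <-errC:
		return -1, err // the wait command itself failed
	}
}
```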
|
|
||||||
|
func (cli ContainerClient) GetContainerLogs(ctx context.Context, container string, options ContainerLogsOptions) (string, error) {
|
||||||
|
args := []string{
|
||||||
|
getLogs,
|
||||||
|
container,
|
||||||
|
}
|
||||||
|
output, err := exec.Command(cli.ContainerTool, args...).CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return string(output), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) NetworkCreate(name string, options NetworkCreateOptions) (string, error) {
|
||||||
|
args := []string{
|
||||||
|
objNetwork,
|
||||||
|
create,
name,
|
||||||
|
}
|
||||||
|
netID, err := exec.Command(cli.ContainerTool, args...).CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
networkID := SanitizeString(string(netID))
|
||||||
|
|
||||||
|
return networkID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) NetworkRemove(network string) error {
|
||||||
|
args := []string{
|
||||||
|
objNetwork,
|
||||||
|
remove,
network,
|
||||||
|
}
|
||||||
|
_, err := exec.Command(cli.ContainerTool, args...).CombinedOutput()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) VolumeCreate(options VolumeCreateOptions) (string, error) {
|
||||||
|
args := []string{
|
||||||
|
objVolume,
|
||||||
|
create,
|
||||||
|
options.Name,
|
||||||
|
}
|
||||||
|
if options.Driver != "" {
|
||||||
|
args = append(args, []string{argDriver, options.Driver}...)
|
||||||
|
}
|
||||||
|
output, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
name := SanitizeString(string(output))
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) VolumeRemove(volumeID string, force bool) error {
|
||||||
|
args := []string{
|
||||||
|
objVolume,
|
||||||
|
remove,
|
||||||
|
volumeID,
|
||||||
|
}
|
||||||
|
if force {
|
||||||
|
args = append(args, argForce)
|
||||||
|
}
|
||||||
|
_, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ImageBuild(context io.Reader, tag string, dockerfilename string) (string, error) {
|
||||||
|
args := []string{
|
||||||
|
objImage,
|
||||||
|
build,
|
||||||
|
}
|
||||||
|
//dockerfilename includes the path to the dockerfile
|
||||||
|
//When using podman use the full path including the name of the Dockerfile
|
||||||
|
if cli.ContainerTool == "podman" {
|
||||||
|
args = append(args, []string{argFile, dockerfilename}...)
|
||||||
|
}
|
||||||
|
if tag != "" {
|
||||||
|
args = append(args, []string{argTag, tag}...)
|
||||||
|
}
|
||||||
|
args = append(args, argQuiet)
|
||||||
|
//When using docker, remove the name 'Dockerfile' from the string
|
||||||
|
if cli.ContainerTool == "docker" {
|
||||||
|
dfn := strings.ReplaceAll(dockerfilename, "Dockerfile", "")
|
||||||
|
args = append(args, dfn)
|
||||||
|
}
|
||||||
|
output, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
sha := SanitizeString(string(output))
|
||||||
|
return sha, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ImageRemove(image string, options ImageRemoveOptions) (bool, error) {
|
||||||
|
args := []string{
|
||||||
|
objImage,
|
||||||
|
remove,
|
||||||
|
image,
|
||||||
|
}
|
||||||
|
if options.Force {
|
||||||
|
args = append(args, argForce)
|
||||||
|
}
|
||||||
|
_, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cli ContainerClient) ContainerCreate(config *ContainerConfig, hostConfig *ContainerHostConfig, networkingConfig *ContainerNetworkSettings, containerName string) (string, error) {
|
||||||
|
args := []string{
|
||||||
|
create,
|
||||||
|
argName,
|
||||||
|
containerName,
|
||||||
|
}
|
||||||
|
args = getHostConfigArgs(args, hostConfig)
|
||||||
|
args = getNetworkConfigArgs(args, networkingConfig)
|
||||||
|
args = getContainerConfigArgs(args, config, cli.ContainerTool)
|
||||||
|
output, err := exec.Command(cli.ContainerTool, args...).Output()
|
||||||
|
lines := strings.Split(strings.ReplaceAll(string(output), "\r\n", "\n"), "\n")
|
||||||
|
if err != nil {
|
||||||
|
return lines[0], err
|
||||||
|
}
|
||||||
|
return lines[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getContainerConfigArgs converts a ContainerConfig into a set of cli arguments
|
||||||
|
func getContainerConfigArgs(args []string, config *ContainerConfig, toolName string) []string {
|
||||||
|
argList := []string{}
|
||||||
|
if config.Entrypoint != nil && toolName == "podman" {
|
||||||
|
entrypoint := "[\""
|
||||||
|
for i, commandPart := range config.Entrypoint {
|
||||||
|
if i != len(config.Entrypoint)-1 {
|
||||||
|
entrypoint += commandPart + "\",\""
|
||||||
|
} else {
|
||||||
|
//terminate list
|
||||||
|
entrypoint += commandPart + "\"]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
args = append(args, []string{argEntrypoint, entrypoint}...)
|
||||||
|
}
|
||||||
|
if config.Entrypoint != nil && toolName == "docker" {
|
||||||
|
ep1 := ""
|
||||||
|
for i, commandPart := range config.Entrypoint {
|
||||||
|
if i == 0 {
|
||||||
|
ep1 = commandPart
|
||||||
|
} else {
|
||||||
|
argList = append(argList, commandPart)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
args = append(args, []string{argEntrypoint, ep1}...)
|
||||||
|
}
|
||||||
|
if config.User != "" {
|
||||||
|
args = append(args, []string{argUser, config.User}...)
|
||||||
|
}
|
||||||
|
if config.ExposedPorts != nil {
|
||||||
|
for _, port := range config.ExposedPorts {
|
||||||
|
args = append(args, []string{argExpose, port}...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if config.Hostname != "" {
|
||||||
|
args = append(args, []string{argHostname, config.Hostname}...)
|
||||||
|
}
|
||||||
|
for _, env := range config.Env {
|
||||||
|
args = append(args, []string{argEnvironmentVariable, env}...)
|
||||||
|
}
|
||||||
|
if config.Image != "" {
|
||||||
|
args = append(args, config.Image)
|
||||||
|
}
|
||||||
|
if config.Entrypoint != nil && toolName == "docker" {
|
||||||
|
args = append(args, argList...)
|
||||||
|
}
|
||||||
|
return args
|
||||||
|
}
|
||||||
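The two entrypoint branches above exist because podman accepts a JSON-array style value for --entrypoint, while docker only takes the first element there and expects the remaining elements to follow the image as the command. A same-package sketch of the resulting argument lists; the image and command values are made up.

```go
// Sketch only: how the same Entrypoint is translated for each tool.
// The image name and entrypoint values are illustrative.
func exampleEntrypointArgs() (podmanArgs, dockerArgs []string) {
	cfg := &ContainerConfig{
		Image:      "example-mq-image:latest",
		Entrypoint: []string{"echo", "hello", "world"},
	}
	// podman: create --entrypoint ["echo","hello","world"] example-mq-image:latest
	podmanArgs = getContainerConfigArgs([]string{create}, cfg, "podman")
	// docker: create --entrypoint echo example-mq-image:latest hello world
	dockerArgs = getContainerConfigArgs([]string{create}, cfg, "docker")
	return podmanArgs, dockerArgs
}
```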
|
|
||||||
|
// getHostConfigArgs converts a ContainerHostConfig into a set of cli arguments
|
||||||
|
func getHostConfigArgs(args []string, hostConfig *ContainerHostConfig) []string {
|
||||||
|
if hostConfig.Binds != nil {
|
||||||
|
for _, volume := range hostConfig.Binds {
|
||||||
|
args = append(args, []string{argVolume, volume}...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if hostConfig.PortBindings != nil {
|
||||||
|
for _, binding := range hostConfig.PortBindings {
|
||||||
|
pub := binding.HostIP + ":" + binding.HostPort + ":" + binding.ContainerPort
|
||||||
|
args = append(args, []string{argPublish, pub}...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if hostConfig.Privileged {
|
||||||
|
args = append(args, []string{argPrivileged}...)
|
||||||
|
}
|
||||||
|
if hostConfig.CapAdd != nil {
|
||||||
|
for _, capability := range hostConfig.CapAdd {
|
||||||
|
args = append(args, []string{argAddCapability, string(capability)}...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if hostConfig.CapDrop != nil {
|
||||||
|
for _, capability := range hostConfig.CapDrop {
|
||||||
|
args = append(args, []string{argDropCapability, string(capability)}...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if hostConfig.SecurityOpt != nil {
|
||||||
|
for _, securityOption := range hostConfig.SecurityOpt {
|
||||||
|
args = append(args, []string{argSecurityOptions, string(securityOption)}...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return args
|
||||||
|
}
|
||||||
|
|
||||||
|
// getNetworkConfigArgs converts a set of ContainerNetworkSettings into a set of cli arguments
|
||||||
|
func getNetworkConfigArgs(args []string, networkingConfig *ContainerNetworkSettings) []string {
|
||||||
|
if networkingConfig.Networks != nil {
|
||||||
|
for _, netID := range networkingConfig.Networks {
|
||||||
|
args = append(args, []string{argNetwork, netID}...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return args
|
||||||
|
}
|
||||||
|
|
||||||
|
func SanitizeString(s string) string {
|
||||||
|
s = strings.Replace(s, " ", "", -1)
|
||||||
|
s = strings.Replace(s, "\t", "", -1)
|
||||||
|
s = strings.Replace(s, "\n", "", -1)
|
||||||
|
return s
|
||||||
|
}
|
||||||
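Taken together, the new package gives the tests a thin, CLI-backed stand-in for the Docker SDK client. A minimal end-to-end sketch of the intended flow; the image name, command and port values are placeholders, and error handling is reduced to panics.

```go
// Sketch only: driving a queue manager container through the new client.
// The image name, exec command and port values are placeholders.
package main

import (
	"fmt"
	"time"

	ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
)

func main() {
	cli := ce.NewContainerClient() // podman unless COMMAND=docker

	config := &ce.ContainerConfig{
		Image: "example-mq-image:latest",
		Env:   []string{"LICENSE=accept", "MQ_QMGR_NAME=QM1"},
	}
	hostConfig := &ce.ContainerHostConfig{
		PortBindings: []ce.PortBinding{
			{HostIP: "0.0.0.0", ContainerPort: "1414/tcp"},
		},
	}
	network := &ce.ContainerNetworkSettings{}

	id, err := cli.ContainerCreate(config, hostConfig, network, "example-qm")
	if err != nil {
		panic(err)
	}
	defer cli.ContainerRemove(id, ce.ContainerRemoveOptions{Force: true, RemoveVolumes: true})

	if err := cli.ContainerStart(id, ce.ContainerStartOptions{}); err != nil {
		panic(err)
	}

	rc, out := cli.ExecContainer(id, "", []string{"dspmq"})
	fmt.Println(rc, out)

	timeout := 10 * time.Second
	_ = cli.ContainerStop(id, &timeout)
}
```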
@@ -2,7 +2,7 @@
|
|||||||
// +build mqdev
|
// +build mqdev
|
||||||
|
|
||||||
/*
|
/*
|
||||||
© Copyright IBM Corporation 2018, 2022
|
© Copyright IBM Corporation 2018, 2023
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
@@ -19,37 +19,30 @@ limitations under the License.
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
"crypto/tls"
|
|
||||||
|
|
||||||
"github.com/docker/docker/api/types/container"
|
ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
|
||||||
"github.com/docker/docker/api/types/network"
|
|
||||||
"github.com/docker/docker/client"
|
|
||||||
"github.com/docker/go-connections/nat"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestDevGoldenPath tests using the default values for the default developer config.
|
// TestDevGoldenPath tests using the default values for the default developer config.
|
||||||
// Note: This test requires a separate container image to be available for the JMS tests.
|
// Note: This test requires a separate container image to be available for the JMS tests.
|
||||||
func TestDevGoldenPath(t *testing.T) {
|
func TestDevGoldenPath(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
cli := ce.NewContainerClient()
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
qm := "qm1"
|
qm := "qm1"
|
||||||
containerConfig := container.Config{
|
containerConfig := ce.ContainerConfig{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=" + qm,
|
"MQ_QMGR_NAME=" + qm,
|
||||||
"DEBUG=true",
|
"DEBUG=true",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
id := runContainerWithPorts(t, cli, &containerConfig, []int{9443})
|
id := runContainerWithPorts(t, cli, &containerConfig, []int{9443, 1414})
|
||||||
defer cleanContainer(t, cli, id)
|
defer cleanContainer(t, cli, id)
|
||||||
waitForReady(t, cli, id)
|
waitForReady(t, cli, id)
|
||||||
waitForWebReady(t, cli, id, insecureTLSConfig)
|
waitForWebReady(t, cli, id, insecureTLSConfig)
|
||||||
@@ -74,15 +67,12 @@ func TestDevGoldenPath(t *testing.T) {
|
|||||||
func TestDevSecure(t *testing.T) {
|
func TestDevSecure(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
const tlsPassPhrase string = "passw0rd"
|
const tlsPassPhrase string = "passw0rd"
|
||||||
qm := "qm1"
|
qm := "qm1"
|
||||||
appPassword := "differentPassw0rd"
|
appPassword := "differentPassw0rd"
|
||||||
containerConfig := container.Config{
|
containerConfig := ce.ContainerConfig{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=" + qm,
|
"MQ_QMGR_NAME=" + qm,
|
||||||
@@ -93,67 +83,67 @@ func TestDevSecure(t *testing.T) {
|
|||||||
},
|
},
|
||||||
Image: imageName(),
|
Image: imageName(),
|
||||||
}
|
}
|
||||||
hostConfig := container.HostConfig{
|
hostConfig := ce.ContainerHostConfig{
|
||||||
Binds: []string{
|
Binds: []string{
|
||||||
coverageBind(t),
|
coverageBind(t),
|
||||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||||
},
|
},
|
||||||
|
}
|
||||||
// Assign a random port for the web server on the host
|
// Assign a random port for the web server on the host
|
||||||
// TODO: Don't do this for all tests
|
// TODO: Don't do this for all tests
|
||||||
PortBindings: nat.PortMap{
|
var binding ce.PortBinding
|
||||||
"9443/tcp": []nat.PortBinding{
|
ports := []int{9443, 1414}
|
||||||
{
|
for _, p := range ports {
|
||||||
|
port := fmt.Sprintf("%v/tcp", p)
|
||||||
|
binding = ce.PortBinding{
|
||||||
|
ContainerPort: port,
|
||||||
HostIP: "0.0.0.0",
|
HostIP: "0.0.0.0",
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
networkingConfig := network.NetworkingConfig{}
|
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||||
ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
|
}
|
||||||
|
networkingConfig := ce.ContainerNetworkSettings{}
|
||||||
|
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer cleanContainer(t, cli, ctr.ID)
|
defer cleanContainer(t, cli, ID)
|
||||||
startContainer(t, cli, ctr.ID)
|
startContainer(t, cli, ID)
|
||||||
waitForReady(t, cli, ctr.ID)
|
waitForReady(t, cli, ID)
|
||||||
cert := filepath.Join(tlsDir(t, true), "server.crt")
|
cert := filepath.Join(tlsDir(t, true), "server.crt")
|
||||||
waitForWebReady(t, cli, ctr.ID, createTLSConfig(t, cert, tlsPassPhrase))
|
waitForWebReady(t, cli, ID, createTLSConfig(t, cert, tlsPassPhrase))
|
||||||
|
|
||||||
t.Run("JMS", func(t *testing.T) {
|
t.Run("JMS", func(t *testing.T) {
|
||||||
// OpenJDK is used for running tests, hence pass "false" for 7th parameter.
|
// OpenJDK is used for running tests, hence pass "false" for 7th parameter.
|
||||||
// Cipher name specified is compliant with non-IBM JRE naming.
|
// Cipher name specified is compliant with non-IBM JRE naming.
|
||||||
runJMSTests(t, cli, ctr.ID, true, "app", appPassword, "false", "TLS_RSA_WITH_AES_256_CBC_SHA256")
|
runJMSTests(t, cli, ID, true, "app", appPassword, "false", "TLS_RSA_WITH_AES_256_CBC_SHA256")
|
||||||
})
|
})
|
||||||
t.Run("REST admin", func(t *testing.T) {
|
t.Run("REST admin", func(t *testing.T) {
|
||||||
testRESTAdmin(t, cli, ctr.ID, insecureTLSConfig, "")
|
testRESTAdmin(t, cli, ID, insecureTLSConfig, "")
|
||||||
})
|
})
|
||||||
t.Run("REST messaging", func(t *testing.T) {
|
t.Run("REST messaging", func(t *testing.T) {
|
||||||
testRESTMessaging(t, cli, ctr.ID, insecureTLSConfig, qm, "app", appPassword, "")
|
testRESTMessaging(t, cli, ID, insecureTLSConfig, qm, "app", appPassword, "")
|
||||||
})
|
})
|
||||||
|
|
||||||
// Stop the container cleanly
|
// Stop the container cleanly
|
||||||
stopContainer(t, cli, ctr.ID)
|
stopContainer(t, cli, ID)
|
||||||
}
|
}
|
||||||
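The per-port loop introduced here reappears in several of the converted tests below; a hypothetical helper in the test package could collapse the repetition. This is a suggestion only, not part of the change.

```go
// Sketch only: a hypothetical helper, equivalent to the loops added in
// the converted tests, for building host port bindings.
func portBindings(ports ...int) []ce.PortBinding {
	bindings := make([]ce.PortBinding, 0, len(ports))
	for _, p := range ports {
		bindings = append(bindings, ce.PortBinding{
			ContainerPort: fmt.Sprintf("%v/tcp", p),
			HostIP:        "0.0.0.0",
		})
	}
	return bindings
}

// Usage in a test: hostConfig.PortBindings = portBindings(9443, 1414)
```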
|
|
||||||
func TestDevWebDisabled(t *testing.T) {
|
func TestDevWebDisabled(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
containerConfig := ce.ContainerConfig{
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
containerConfig := container.Config{
|
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=qm1",
|
"MQ_QMGR_NAME=qm1",
|
||||||
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
"MQ_ENABLE_EMBEDDED_WEB_SERVER=false",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
id := runContainer(t, cli, &containerConfig)
|
id := runContainerWithPorts(t, cli, &containerConfig, []int{1414})
|
||||||
defer cleanContainer(t, cli, id)
|
defer cleanContainer(t, cli, id)
|
||||||
waitForReady(t, cli, id)
|
waitForReady(t, cli, id)
|
||||||
t.Run("Web", func(t *testing.T) {
|
t.Run("Web", func(t *testing.T) {
|
||||||
_, dspmqweb := execContainer(t, cli, id, "", []string{"dspmqweb"})
|
_, dspmqweb := cli.ExecContainer(id, "", []string{"dspmqweb"})
|
||||||
if !strings.Contains(dspmqweb, "Server mqweb is not running.") && !strings.Contains(dspmqweb, "MQWB1125I") {
|
if !strings.Contains(dspmqweb, "Server mqweb is not running.") && !strings.Contains(dspmqweb, "MQWB1125I") {
|
||||||
t.Errorf("Expected dspmqweb to say 'Server is not running' or 'MQWB1125I'; got \"%v\"", dspmqweb)
|
t.Errorf("Expected dspmqweb to say 'Server is not running' or 'MQWB1125I'; got \"%v\"", dspmqweb)
|
||||||
}
|
}
|
||||||
@@ -171,11 +161,8 @@ func TestDevWebDisabled(t *testing.T) {
|
|||||||
func TestDevConfigDisabled(t *testing.T) {
|
func TestDevConfigDisabled(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
containerConfig := ce.ContainerConfig{
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
containerConfig := container.Config{
|
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=qm1",
|
"MQ_QMGR_NAME=qm1",
|
||||||
@@ -199,11 +186,8 @@ func TestDevConfigDisabled(t *testing.T) {
|
|||||||
func TestSSLKEYRBlank(t *testing.T) {
|
func TestSSLKEYRBlank(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
containerConfig := ce.ContainerConfig{
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
containerConfig := container.Config{
|
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=QM1",
|
"MQ_QMGR_NAME=QM1",
|
||||||
@@ -246,12 +230,9 @@ func TestSSLKEYRBlank(t *testing.T) {
|
|||||||
func TestSSLKEYRWithSuppliedKeyAndCert(t *testing.T) {
|
func TestSSLKEYRWithSuppliedKeyAndCert(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
containerConfig := container.Config{
|
containerConfig := ce.ContainerConfig{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=QM1",
|
"MQ_QMGR_NAME=QM1",
|
||||||
@@ -259,24 +240,24 @@ func TestSSLKEYRWithSuppliedKeyAndCert(t *testing.T) {
|
|||||||
},
|
},
|
||||||
Image: imageName(),
|
Image: imageName(),
|
||||||
}
|
}
|
||||||
hostConfig := container.HostConfig{
|
hostConfig := ce.ContainerHostConfig{
|
||||||
Binds: []string{
|
Binds: []string{
|
||||||
coverageBind(t),
|
coverageBind(t),
|
||||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
networkingConfig := network.NetworkingConfig{}
|
networkingConfig := ce.ContainerNetworkSettings{}
|
||||||
ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
|
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer cleanContainer(t, cli, ctr.ID)
|
defer cleanContainer(t, cli, ID)
|
||||||
startContainer(t, cli, ctr.ID)
|
startContainer(t, cli, ID)
|
||||||
waitForReady(t, cli, ctr.ID)
|
waitForReady(t, cli, ID)
|
||||||
|
|
||||||
// execute runmqsc to display qmgr SSLKEYR and CERTLABL attributes.
|
// execute runmqsc to display qmgr SSLKEYR and CERTLABL attributes.
|
||||||
// Search the console output for expected values
|
// Search the console output for expected values
|
||||||
_, sslkeyROutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
_, sslkeyROutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||||
if !strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") || !strings.Contains(sslkeyROutput, "CERTLABL(default)") {
|
if !strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") || !strings.Contains(sslkeyROutput, "CERTLABL(default)") {
|
||||||
// Although queue manager is ready, it may be that MQSC scripts have not been applied yet.
|
// Although queue manager is ready, it may be that MQSC scripts have not been applied yet.
|
||||||
// Hence wait for a second and retry a few times before giving up.
|
// Hence wait for a second and retry a few times before giving up.
|
||||||
@@ -284,33 +265,30 @@ func TestSSLKEYRWithSuppliedKeyAndCert(t *testing.T) {
|
|||||||
var i int
|
var i int
|
||||||
for i = 0; i < waitCount; i++ {
|
for i = 0; i < waitCount; i++ {
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
_, sslkeyROutput = execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
_, sslkeyROutput = execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||||
if strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") && strings.Contains(sslkeyROutput, "CERTLABL(default)") {
|
if strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") && strings.Contains(sslkeyROutput, "CERTLABL(default)") {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Failed to get expected output? dump the contents of mqsc files.
|
// Failed to get expected output? dump the contents of mqsc files.
|
||||||
if i == waitCount {
|
if i == waitCount {
|
||||||
_, tls15mqsc := execContainer(t, cli, ctr.ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
_, tls15mqsc := execContainer(t, cli, ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
||||||
_, autoMQSC := execContainer(t, cli, ctr.ID, "", []string{"cat", "/mnt/mqm/data/qmgrs/QM1/autocfg/cached.mqsc"})
|
_, autoMQSC := execContainer(t, cli, ID, "", []string{"cat", "/mnt/mqm/data/qmgrs/QM1/autocfg/cached.mqsc"})
|
||||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\" \n AutoConfig MQSC file contents %v\n 15-tls: %v", sslkeyROutput, autoMQSC, tls15mqsc)
|
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\" \n AutoConfig MQSC file contents %v\n 15-tls: %v", sslkeyROutput, autoMQSC, tls15mqsc)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop the container cleanly
|
// Stop the container cleanly
|
||||||
stopContainer(t, cli, ctr.ID)
|
stopContainer(t, cli, ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test with CA cert
|
// Test with CA cert
|
||||||
func TestSSLKEYRWithCACert(t *testing.T) {
|
func TestSSLKEYRWithCACert(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
containerConfig := container.Config{
|
containerConfig := ce.ContainerConfig{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=QM1",
|
"MQ_QMGR_NAME=QM1",
|
||||||
@@ -318,32 +296,35 @@ func TestSSLKEYRWithCACert(t *testing.T) {
|
|||||||
},
|
},
|
||||||
Image: imageName(),
|
Image: imageName(),
|
||||||
}
|
}
|
||||||
hostConfig := container.HostConfig{
|
hostConfig := ce.ContainerHostConfig{
|
||||||
Binds: []string{
|
Binds: []string{
|
||||||
coverageBind(t),
|
coverageBind(t),
|
||||||
tlsDirWithCA(t, false) + ":/etc/mqm/pki/keys/QM1CA",
|
tlsDirWithCA(t, false) + ":/etc/mqm/pki/keys/QM1CA",
|
||||||
},
|
},
|
||||||
// Assign a random port for the web server on the host
|
|
||||||
PortBindings: nat.PortMap{
|
|
||||||
"9443/tcp": []nat.PortBinding{
|
|
||||||
{
|
|
||||||
HostIP: "0.0.0.0",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
networkingConfig := network.NetworkingConfig{}
|
// Assign a random port for the web server on the host
|
||||||
ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
|
var binding ce.PortBinding
|
||||||
|
ports := []int{9443}
|
||||||
|
for _, p := range ports {
|
||||||
|
port := fmt.Sprintf("%v/tcp", p)
|
||||||
|
binding = ce.PortBinding{
|
||||||
|
ContainerPort: port,
|
||||||
|
HostIP: "0.0.0.0",
|
||||||
|
}
|
||||||
|
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||||
|
}
|
||||||
|
networkingConfig := ce.ContainerNetworkSettings{}
|
||||||
|
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer cleanContainer(t, cli, ctr.ID)
|
defer cleanContainer(t, cli, ID)
|
||||||
startContainer(t, cli, ctr.ID)
|
startContainer(t, cli, ID)
|
||||||
waitForReady(t, cli, ctr.ID)
|
waitForReady(t, cli, ID)
|
||||||
|
|
||||||
// execute runmqsc to display qmgr SSLKEYR and CERTLABL attributes.
|
// execute runmqsc to display qmgr SSLKEYR and CERTLABL attributes.
|
||||||
// Search the console output for expected values
|
// Search the console output for expected values
|
||||||
_, sslkeyROutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
_, sslkeyROutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||||
|
|
||||||
if !strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
if !strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||||
// Although queue manager is ready, it may be that MQSC scripts have not been applied yet.
|
// Although queue manager is ready, it may be that MQSC scripts have not been applied yet.
|
||||||
@@ -352,38 +333,35 @@ func TestSSLKEYRWithCACert(t *testing.T) {
|
|||||||
var i int
|
var i int
|
||||||
for i = 0; i < waitCount; i++ {
|
for i = 0; i < waitCount; i++ {
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
_, sslkeyROutput = execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
_, sslkeyROutput = execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL' | runmqsc"})
|
||||||
if strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
if strings.Contains(sslkeyROutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Failed to get expected output? dump the contents of mqsc files.
|
// Failed to get expected output? dump the contents of mqsc files.
|
||||||
if i == waitCount {
|
if i == waitCount {
|
||||||
_, tls15mqsc := execContainer(t, cli, ctr.ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
_, tls15mqsc := execContainer(t, cli, ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
||||||
_, autoMQSC := execContainer(t, cli, ctr.ID, "", []string{"cat", "/mnt/mqm/data/qmgrs/QM1/autocfg/cached.mqsc"})
|
_, autoMQSC := execContainer(t, cli, ID, "", []string{"cat", "/mnt/mqm/data/qmgrs/QM1/autocfg/cached.mqsc"})
|
||||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"\n AutoConfig MQSC file contents %v\n 15-tls: %v", sslkeyROutput, autoMQSC, tls15mqsc)
|
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"\n AutoConfig MQSC file contents %v\n 15-tls: %v", sslkeyROutput, autoMQSC, tls15mqsc)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !strings.Contains(sslkeyROutput, "CERTLABL(QM1CA)") {
|
if !strings.Contains(sslkeyROutput, "CERTLABL(QM1CA)") {
|
||||||
_, autoMQSC := execContainer(t, cli, ctr.ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
_, autoMQSC := execContainer(t, cli, ID, "", []string{"cat", "/etc/mqm/15-tls.mqsc"})
|
||||||
t.Errorf("Expected CERTLABL to be 'QM1CA' but it is not; got \"%v\" \n MQSC File contents %v", sslkeyROutput, autoMQSC)
|
t.Errorf("Expected CERTLABL to be 'QM1CA' but it is not; got \"%v\" \n MQSC File contents %v", sslkeyROutput, autoMQSC)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop the container cleanly
|
// Stop the container cleanly
|
||||||
stopContainer(t, cli, ctr.ID)
|
stopContainer(t, cli, ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verifies SSLFIPS is set to NO if MQ_ENABLE_FIPS=false
|
// Verifies SSLFIPS is set to NO if MQ_ENABLE_FIPS=false
|
||||||
func TestSSLFIPSNO(t *testing.T) {
|
func TestSSLFIPSNO(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
containerConfig := container.Config{
|
containerConfig := ce.ContainerConfig{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=QM1",
|
"MQ_QMGR_NAME=QM1",
|
||||||
@@ -392,24 +370,24 @@ func TestSSLFIPSNO(t *testing.T) {
|
|||||||
},
|
},
|
||||||
Image: imageName(),
|
Image: imageName(),
|
||||||
}
|
}
|
||||||
hostConfig := container.HostConfig{
|
hostConfig := ce.ContainerHostConfig{
|
||||||
Binds: []string{
|
Binds: []string{
|
||||||
coverageBind(t),
|
coverageBind(t),
|
||||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
networkingConfig := network.NetworkingConfig{}
|
networkingConfig := ce.ContainerNetworkSettings{}
|
||||||
ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
|
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer cleanContainer(t, cli, ctr.ID)
|
defer cleanContainer(t, cli, ID)
|
||||||
startContainer(t, cli, ctr.ID)
|
startContainer(t, cli, ID)
|
||||||
waitForReady(t, cli, ctr.ID)
|
waitForReady(t, cli, ID)
|
||||||
|
|
||||||
// execute runmqsc to display qmgr SSLKEYR, SSLFIPS and CERTLABL attributes.
|
// execute runmqsc to display qmgr SSLKEYR, SSLFIPS and CERTLABL attributes.
|
||||||
// Search the console output for expected values
|
// Search the console output for expected values
|
||||||
_, sslFIPSOutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
|
_, sslFIPSOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
|
||||||
if !strings.Contains(sslFIPSOutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
if !strings.Contains(sslFIPSOutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslFIPSOutput)
|
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslFIPSOutput)
|
||||||
}
|
}
|
||||||
@@ -422,7 +400,7 @@ func TestSSLFIPSNO(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Stop the container cleanly
|
// Stop the container cleanly
|
||||||
stopContainer(t, cli, ctr.ID)
|
stopContainer(t, cli, ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verifies SSLFIPS is set to YES if certificates for queue manager
|
// Verifies SSLFIPS is set to YES if certificates for queue manager
|
||||||
@@ -430,13 +408,10 @@ func TestSSLFIPSNO(t *testing.T) {
|
|||||||
func TestSSLFIPSYES(t *testing.T) {
|
func TestSSLFIPSYES(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
appPassword := "differentPassw0rd"
|
appPassword := "differentPassw0rd"
|
||||||
containerConfig := container.Config{
|
containerConfig := ce.ContainerConfig{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_APP_PASSWORD=" + appPassword,
|
"MQ_APP_PASSWORD=" + appPassword,
|
||||||
@@ -446,30 +421,40 @@ func TestSSLFIPSYES(t *testing.T) {
|
|||||||
},
|
},
|
||||||
Image: imageName(),
|
Image: imageName(),
|
||||||
}
|
}
|
||||||
hostConfig := container.HostConfig{
|
hostConfig := ce.ContainerHostConfig{
|
||||||
Binds: []string{
|
Binds: []string{
|
||||||
coverageBind(t),
|
coverageBind(t),
|
||||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
networkingConfig := network.NetworkingConfig{}
|
var binding ce.PortBinding
|
||||||
ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
|
ports := []int{1414}
|
||||||
|
for _, p := range ports {
|
||||||
|
port := fmt.Sprintf("%v/tcp", p)
|
||||||
|
binding = ce.PortBinding{
|
||||||
|
ContainerPort: port,
|
||||||
|
HostIP: "0.0.0.0",
|
||||||
|
}
|
||||||
|
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||||
|
}
|
||||||
|
networkingConfig := ce.ContainerNetworkSettings{}
|
||||||
|
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer cleanContainer(t, cli, ctr.ID)
|
defer cleanContainer(t, cli, ID)
|
||||||
startContainer(t, cli, ctr.ID)
|
startContainer(t, cli, ID)
|
||||||
waitForReady(t, cli, ctr.ID)
|
waitForReady(t, cli, ID)
|
||||||
|
|
||||||
// Check for expected message on container log
|
// Check for expected message on container log
|
||||||
logs := inspectLogs(t, cli, ctr.ID)
|
logs := inspectLogs(t, cli, ID)
|
||||||
if !strings.Contains(logs, "FIPS cryptography is enabled.") {
|
if !strings.Contains(logs, "FIPS cryptography is enabled.") {
|
||||||
t.Errorf("Expected 'FIPS cryptography is enabled.' but got %v\n", logs)
|
t.Errorf("Expected 'FIPS cryptography is enabled.' but got %v\n", logs)
|
||||||
}
|
}
|
||||||
|
|
||||||
// execute runmqsc to display qmgr SSLKEYR, SSLFIPS and CERTLABL attributes.
|
// execute runmqsc to display qmgr SSLKEYR, SSLFIPS and CERTLABL attributes.
|
||||||
// Search the console output for expected values
|
// Search the console output for expected values
|
||||||
_, sslFIPSOutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
|
_, sslFIPSOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
|
||||||
if !strings.Contains(sslFIPSOutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
if !strings.Contains(sslFIPSOutput, "SSLKEYR(/run/runmqserver/tls/key)") {
|
||||||
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslFIPSOutput)
|
t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslFIPSOutput)
|
||||||
}
|
}
|
||||||
@@ -483,26 +468,23 @@ func TestSSLFIPSYES(t *testing.T) {
|
|||||||
|
|
||||||
t.Run("JMS", func(t *testing.T) {
|
t.Run("JMS", func(t *testing.T) {
|
||||||
// Run the JMS tests, with no password specified
|
// Run the JMS tests, with no password specified
|
||||||
runJMSTests(t, cli, ctr.ID, true, "app", appPassword, "false", "TLS_RSA_WITH_AES_256_CBC_SHA256")
|
runJMSTests(t, cli, ID, true, "app", appPassword, "false", "TLS_RSA_WITH_AES_256_CBC_SHA256")
|
||||||
})
|
})
|
||||||
|
|
||||||
// Stop the container cleanly
|
// Stop the container cleanly
|
||||||
stopContainer(t, cli, ctr.ID)
|
stopContainer(t, cli, ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestDevSecureFIPSYESWeb verifies if the MQ Web Server is running in FIPS mode
|
// TestDevSecureFIPSYESWeb verifies if the MQ Web Server is running in FIPS mode
|
||||||
func TestDevSecureFIPSTrueWeb(t *testing.T) {
|
func TestDevSecureFIPSTrueWeb(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
cli := ce.NewContainerClient()
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
const tlsPassPhrase string = "passw0rd"
|
const tlsPassPhrase string = "passw0rd"
|
||||||
qm := "qm1"
|
qm := "qm1"
|
||||||
appPassword := "differentPassw0rd"
|
appPassword := "differentPassw0rd"
|
||||||
containerConfig := container.Config{
|
containerConfig := ce.ContainerConfig{
|
||||||
Env: []string{
|
Env: []string{
|
||||||
"LICENSE=accept",
|
"LICENSE=accept",
|
||||||
"MQ_QMGR_NAME=" + qm,
|
"MQ_QMGR_NAME=" + qm,
|
||||||
@@ -514,65 +496,65 @@ func TestDevSecureFIPSTrueWeb(t *testing.T) {
|
|||||||
},
|
},
|
||||||
Image: imageName(),
|
Image: imageName(),
|
||||||
}
|
}
|
||||||
hostConfig := container.HostConfig{
|
hostConfig := ce.ContainerHostConfig{
|
||||||
Binds: []string{
|
Binds: []string{
|
||||||
coverageBind(t),
|
coverageBind(t),
|
||||||
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
|
||||||
tlsDir(t, false) + ":/etc/mqm/pki/trust/default",
|
tlsDir(t, false) + ":/etc/mqm/pki/trust/default",
|
||||||
},
|
},
|
||||||
|
}
|
||||||
// Assign a random port for the web server on the host
|
// Assign a random port for the web server on the host
|
||||||
// TODO: Don't do this for all tests
|
// TODO: Don't do this for all tests
|
||||||
PortBindings: nat.PortMap{
|
var binding ce.PortBinding
|
||||||
"9443/tcp": []nat.PortBinding{
|
ports := []int{9443}
|
||||||
{
|
for _, p := range ports {
|
||||||
|
port := fmt.Sprintf("%v/tcp", p)
|
||||||
|
binding = ce.PortBinding{
|
||||||
|
ContainerPort: port,
|
||||||
HostIP: "0.0.0.0",
|
HostIP: "0.0.0.0",
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
networkingConfig := network.NetworkingConfig{}
|
hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
|
||||||
ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
|
}
|
||||||
|
networkingConfig := ce.ContainerNetworkSettings{}
|
||||||
|
ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer cleanContainer(t, cli, ctr.ID)
|
defer cleanContainer(t, cli, ID)
|
||||||
|
|
||||||
startContainer(t, cli, ctr.ID)
|
startContainer(t, cli, ID)
|
||||||
waitForReady(t, cli, ctr.ID)
|
waitForReady(t, cli, ID)
|
||||||
cert := filepath.Join(tlsDir(t, true), "server.crt")
|
cert := filepath.Join(tlsDir(t, true), "server.crt")
|
||||||
waitForWebReady(t, cli, ctr.ID, createTLSConfig(t, cert, tlsPassPhrase))
|
waitForWebReady(t, cli, ID, createTLSConfig(t, cert, tlsPassPhrase))
|
||||||
|
|
||||||
// Create a TLS Config with a cipher to use when connecting over HTTPS
|
// Create a TLS Config with a cipher to use when connecting over HTTPS
|
||||||
var secureTLSConfig *tls.Config = createTLSConfigWithCipher(t, cert, tlsPassPhrase, []uint16{tls.TLS_RSA_WITH_AES_256_GCM_SHA384})
|
var secureTLSConfig *tls.Config = createTLSConfigWithCipher(t, cert, tlsPassPhrase, []uint16{tls.TLS_RSA_WITH_AES_256_GCM_SHA384})
|
||||||
// Put a message to queue
|
// Put a message to queue
|
||||||
t.Run("REST messaging", func(t *testing.T) {
|
t.Run("REST messaging", func(t *testing.T) {
|
||||||
testRESTMessaging(t, cli, ctr.ID, secureTLSConfig, qm, "app", appPassword, "")
|
testRESTMessaging(t, cli, ID, secureTLSConfig, qm, "app", appPassword, "")
|
||||||
})
|
})
|
||||||
|
|
||||||
// Create a TLS Config with a non-FIPS cipher to use when connecting over HTTPS
|
// Create a TLS Config with a non-FIPS cipher to use when connecting over HTTPS
|
||||||
var secureNonFIPSCipherConfig *tls.Config = createTLSConfigWithCipher(t, cert, tlsPassPhrase, []uint16{tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA})
|
var secureNonFIPSCipherConfig *tls.Config = createTLSConfigWithCipher(t, cert, tlsPassPhrase, []uint16{tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA})
|
||||||
// Put a message to the queue - the attempt to put the message will fail with an EOF return message.
|
// Put a message to the queue - the attempt to put the message will fail with an EOF return message.
|
||||||
t.Run("REST messaging", func(t *testing.T) {
|
t.Run("REST messaging", func(t *testing.T) {
|
||||||
testRESTMessaging(t, cli, ctr.ID, secureNonFIPSCipherConfig, qm, "app", appPassword, "EOF")
|
testRESTMessaging(t, cli, ID, secureNonFIPSCipherConfig, qm, "app", appPassword, "EOF")
|
||||||
})
|
})
|
||||||
|
|
||||||
// Stop the container cleanly
|
// Stop the container cleanly
|
||||||
stopContainer(t, cli, ctr.ID)
|
stopContainer(t, cli, ID)
|
||||||
}
|
}
|
||||||
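The test above connects over HTTPS twice, once with a FIPS-approved cipher suite and once with a non-FIPS cipher that the FIPS-enabled web server is expected to reject. A minimal standard-library sketch of building a cipher-restricted *tls.Config is shown below; the certificate path and the TLS 1.2 cap are illustrative assumptions, not the repository's createTLSConfigWithCipher helper.

package main

import (
	"crypto/tls"
	"crypto/x509"
	"log"
	"os"
)

// newCipherRestrictedTLSConfig returns a *tls.Config that trusts one PEM
// certificate and offers exactly one cipher suite, so the handshake fails if
// the server cannot negotiate that suite.
func newCipherRestrictedTLSConfig(certFile string, cipher uint16) (*tls.Config, error) {
	pem, err := os.ReadFile(certFile) // e.g. "tls/server.crt" (assumed path)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(pem)
	return &tls.Config{
		RootCAs:      pool,
		CipherSuites: []uint16{cipher},
		MaxVersion:   tls.VersionTLS12, // cipher suite restriction only applies up to TLS 1.2
	}, nil
}

func main() {
	cfg, err := newCipherRestrictedTLSConfig("tls/server.crt", tls.TLS_RSA_WITH_AES_256_GCM_SHA384)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("offering %d cipher suite(s)", len(cfg.CipherSuites))
}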

 // TestDevSecureNOFIPSWeb verifies if the MQ Web Server is not running in FIPS mode
 func TestDevSecureFalseFIPSWeb(t *testing.T) {
 t.Parallel()

- cli, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- t.Fatal(err)
- }
+ cli := ce.NewContainerClient()

 const tlsPassPhrase string = "passw0rd"
 qm := "qm1"
 appPassword := "differentPassw0rd"
- containerConfig := container.Config{
+ containerConfig := ce.ContainerConfig{
 Env: []string{
 "LICENSE=accept",
 "MQ_QMGR_NAME=" + qm,
@@ -584,38 +566,41 @@ func TestDevSecureFalseFIPSWeb(t *testing.T) {
 },
 Image: imageName(),
 }
- hostConfig := container.HostConfig{
+ hostConfig := ce.ContainerHostConfig{
 Binds: []string{
 coverageBind(t),
 tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
 tlsDir(t, false) + ":/etc/mqm/pki/trust/default",
 },
- // Assign a random port for the web server on the host
- PortBindings: nat.PortMap{
- "9443/tcp": []nat.PortBinding{
- {
- HostIP: "0.0.0.0",
- },
- },
- },
 }
- networkingConfig := network.NetworkingConfig{}
- ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
+ // Assign a random port for the web server on the host
+ var binding ce.PortBinding
+ ports := []int{9443}
+ for _, p := range ports {
+ port := fmt.Sprintf("%v/tcp", p)
+ binding = ce.PortBinding{
+ ContainerPort: port,
+ HostIP: "0.0.0.0",
+ }
+ hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
+ }
+ networkingConfig := ce.ContainerNetworkSettings{}
+ ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
 if err != nil {
 t.Fatal(err)
 }
- defer cleanContainer(t, cli, ctr.ID)
+ defer cleanContainer(t, cli, ID)
- startContainer(t, cli, ctr.ID)
+ startContainer(t, cli, ID)
- waitForReady(t, cli, ctr.ID)
+ waitForReady(t, cli, ID)

 cert := filepath.Join(tlsDir(t, true), "server.crt")
- waitForWebReady(t, cli, ctr.ID, createTLSConfig(t, cert, tlsPassPhrase))
+ waitForWebReady(t, cli, ID, createTLSConfig(t, cert, tlsPassPhrase))

 // As FIPS is not enabled, the MQ WebServer (actually Java) will choose a JSSE provider from the list
 // specified in java.security file. We will need to enable java.net.debug and then parse the web server
 // logs to check what JJSE provider is being used. Hence just check the jvm.options file does not contain
 // -Dcom.ibm.jsse2.usefipsprovider line.
- _, jvmOptionsOutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "cat /var/mqm/web/installations/Installation1/servers/mqweb/configDropins/defaults/jvm.options"})
+ _, jvmOptionsOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "cat /var/mqm/web/installations/Installation1/servers/mqweb/configDropins/defaults/jvm.options"})
 if strings.Contains(jvmOptionsOutput, "-Dcom.ibm.jsse2.usefipsprovider") {
 t.Errorf("Did not expect -Dcom.ibm.jsse2.usefipsprovider but it is not; got \"%v\"", jvmOptionsOutput)
 }
@@ -623,24 +608,21 @@ func TestDevSecureFalseFIPSWeb(t *testing.T) {
 // Just do a HTTPS GET as well to query installation details.
 var secureTLSConfig *tls.Config = createTLSConfigWithCipher(t, cert, tlsPassPhrase, []uint16{tls.TLS_RSA_WITH_AES_256_GCM_SHA384})
 t.Run("REST admin", func(t *testing.T) {
- testRESTAdmin(t, cli, ctr.ID, secureTLSConfig, "")
+ testRESTAdmin(t, cli, ID, secureTLSConfig, "")
 })

 // Stop the container cleanly
- stopContainer(t, cli, ctr.ID)
+ stopContainer(t, cli, ID)
 }
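The FIPS check in this test reduces to reading the generated jvm.options file and asserting that the IBM JSSE FIPS flag is absent. A standalone sketch of that string check is below; the local file path stands in for the file that the test actually cats out of the container with execContainer.

package main

import (
	"fmt"
	"os"
	"strings"
)

// fipsProviderEnabled reports whether a Liberty jvm.options file enables the
// IBM JSSE2 FIPS provider, using the same substring the test looks for.
func fipsProviderEnabled(jvmOptions string) bool {
	return strings.Contains(jvmOptions, "-Dcom.ibm.jsse2.usefipsprovider")
}

func main() {
	// Hypothetical local copy of the jvm.options file read from the container.
	data, err := os.ReadFile("jvm.options")
	if err != nil {
		fmt.Println("could not read jvm.options:", err)
		return
	}
	fmt.Println("FIPS provider enabled:", fipsProviderEnabled(string(data)))
}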

 // Verify SSLFIPS is set to NO if no certificates were supplied
 func TestSSLFIPSTrueNoCerts(t *testing.T) {
 t.Parallel()

- cli, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- t.Fatal(err)
- }
+ cli := ce.NewContainerClient()

 appPassword := "differentPassw0rd"
- containerConfig := container.Config{
+ containerConfig := ce.ContainerConfig{
 Env: []string{
 "LICENSE=accept",
 "MQ_APP_PASSWORD=" + appPassword,
@@ -650,23 +632,23 @@ func TestSSLFIPSTrueNoCerts(t *testing.T) {
 },
 Image: imageName(),
 }
- hostConfig := container.HostConfig{
+ hostConfig := ce.ContainerHostConfig{
 Binds: []string{
 coverageBind(t),
 },
 }
- networkingConfig := network.NetworkingConfig{}
+ networkingConfig := ce.ContainerNetworkSettings{}
- ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
+ ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
 if err != nil {
 t.Fatal(err)
 }
- defer cleanContainer(t, cli, ctr.ID)
+ defer cleanContainer(t, cli, ID)
- startContainer(t, cli, ctr.ID)
+ startContainer(t, cli, ID)
- waitForReady(t, cli, ctr.ID)
+ waitForReady(t, cli, ID)

 // execute runmqsc to display qmgr SSLKEYR, SSLFIPS and CERTLABL attibutes.
 // Search the console output for exepcted values
- _, sslFIPSOutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
+ _, sslFIPSOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
 if !strings.Contains(sslFIPSOutput, "SSLKEYR( )") {
 t.Errorf("Expected SSLKEYR to be ' ' but it is not; got \"%v\"", sslFIPSOutput)
 }
@@ -679,19 +661,16 @@ func TestSSLFIPSTrueNoCerts(t *testing.T) {
 }

 // Stop the container cleanly
- stopContainer(t, cli, ctr.ID)
+ stopContainer(t, cli, ID)
 }
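The SSLFIPS tests grep the raw runmqsc console output for attribute text such as SSLKEYR( ). A small sketch of pulling one attribute value out of DISPLAY QMGR output with a regular expression follows; the sample output string is illustrative, not captured from a real queue manager.

package main

import (
	"fmt"
	"regexp"
)

// qmgrAttr extracts the value of a single attribute, e.g. "SSLFIPS", from
// runmqsc DISPLAY QMGR output of the form "SSLFIPS(NO)".
func qmgrAttr(output, name string) (string, bool) {
	re := regexp.MustCompile(name + `\(([^)]*)\)`)
	m := re.FindStringSubmatch(output)
	if m == nil {
		return "", false
	}
	return m[1], true
}

func main() {
	// Illustrative fragment of DISPLAY QMGR output.
	out := "   SSLKEYR( )                              SSLFIPS(NO)"
	if v, ok := qmgrAttr(out, "SSLFIPS"); ok {
		fmt.Println("SSLFIPS =", v)
	}
	if v, ok := qmgrAttr(out, "SSLKEYR"); ok {
		fmt.Printf("SSLKEYR = %q\n", v)
	}
}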

 // Verifies SSLFIPS is set to NO if MQ_ENABLE_FIPS=tru (invalid value)
 func TestSSLFIPSInvalidValue(t *testing.T) {
 t.Parallel()

- cli, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- t.Fatal(err)
- }
+ cli := ce.NewContainerClient()

- containerConfig := container.Config{
+ containerConfig := ce.ContainerConfig{
 Env: []string{
 "LICENSE=accept",
 "MQ_QMGR_NAME=QM1",
@@ -700,24 +679,24 @@ func TestSSLFIPSInvalidValue(t *testing.T) {
 },
 Image: imageName(),
 }
- hostConfig := container.HostConfig{
+ hostConfig := ce.ContainerHostConfig{
 Binds: []string{
 coverageBind(t),
 tlsDir(t, false) + ":/etc/mqm/pki/keys/default",
 },
 }
- networkingConfig := network.NetworkingConfig{}
+ networkingConfig := ce.ContainerNetworkSettings{}
- ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
+ ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
 if err != nil {
 t.Fatal(err)
 }
- defer cleanContainer(t, cli, ctr.ID)
+ defer cleanContainer(t, cli, ID)
- startContainer(t, cli, ctr.ID)
+ startContainer(t, cli, ID)
- waitForReady(t, cli, ctr.ID)
+ waitForReady(t, cli, ID)

 // execute runmqsc to display qmgr SSLKEYR, SSLFIPS and CERTLABL attibutes.
 // Search the console output for exepcted values
- _, sslFIPSOutput := execContainer(t, cli, ctr.ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
+ _, sslFIPSOutput := execContainer(t, cli, ID, "", []string{"bash", "-c", "echo 'DISPLAY QMGR SSLKEYR CERTLABL SSLFIPS' | runmqsc"})
 if !strings.Contains(sslFIPSOutput, "SSLKEYR(/run/runmqserver/tls/key)") {
 t.Errorf("Expected SSLKEYR to be '/run/runmqserver/tls/key' but it is not; got \"%v\"", sslFIPSOutput)
 }
@@ -731,19 +710,16 @@ func TestSSLFIPSInvalidValue(t *testing.T) {
 }

 // Stop the container cleanly
- stopContainer(t, cli, ctr.ID)
+ stopContainer(t, cli, ID)
 }

 // Container creation fails when invalid certs are passed and MQ_ENABLE_FIPS set true
 func TestSSLFIPSBadCerts(t *testing.T) {
 t.Parallel()

- cli, err := client.NewClientWithOpts(client.FromEnv)
- if err != nil {
- t.Fatal(err)
- }
+ cli := ce.NewContainerClient()

- containerConfig := container.Config{
+ containerConfig := ce.ContainerConfig{
 Env: []string{
 "LICENSE=accept",
 "MQ_QMGR_NAME=QM1",
@@ -752,25 +728,25 @@ func TestSSLFIPSBadCerts(t *testing.T) {
 },
 Image: imageName(),
 }
- hostConfig := container.HostConfig{
+ hostConfig := ce.ContainerHostConfig{
 Binds: []string{
 coverageBind(t),
 tlsDirInvalid(t, false) + ":/etc/mqm/pki/keys/default",
 },
 }
- networkingConfig := network.NetworkingConfig{}
+ networkingConfig := ce.ContainerNetworkSettings{}
- ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name())
+ ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name())
 if err != nil {
 t.Fatal(err)
 }
- defer cleanContainer(t, cli, ctr.ID)
+ defer cleanContainer(t, cli, ID)
- startContainer(t, cli, ctr.ID)
+ startContainer(t, cli, ID)

- rc := waitForContainer(t, cli, ctr.ID, 20*time.Second)
+ rc := waitForContainer(t, cli, ID, 20*time.Second)
 // Expect return code 1 if container failed to create.
 if rc == 1 {
 // Get container logs and search for specific message.
- logs := inspectLogs(t, cli, ctr.ID)
+ logs := inspectLogs(t, cli, ID)
 if strings.Contains(logs, "Failed to parse private key") {
 t.Logf("Container creating failed because of invalid certifates")
 }
@@ -780,5 +756,5 @@ func TestSSLFIPSBadCerts(t *testing.T) {
 }

 // Stop the container cleanly
- stopContainer(t, cli, ctr.ID)
+ stopContainer(t, cli, ID)
 }
@@ -2,7 +2,7 @@
 // +build mqdev

 /*
- © Copyright IBM Corporation 2018, 2022
+ © Copyright IBM Corporation 2018, 2023

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -34,9 +34,7 @@ import (
 "testing"
 "time"

- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/client"
+ ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
 )

 const defaultAdminPassword string = "passw0rd"
@@ -49,7 +47,7 @@ var insecureTLSConfig *tls.Config = &tls.Config{
 InsecureSkipVerify: true,
 }

- func waitForWebReady(t *testing.T, cli *client.Client, ID string, tlsConfig *tls.Config) {
+ func waitForWebReady(t *testing.T, cli ce.ContainerInterface, ID string, tlsConfig *tls.Config) {
 t.Logf("%s Waiting for web server to be ready", time.Now().Format(time.RFC3339))
 httpClient := http.Client{
 Timeout: time.Duration(10 * time.Second),
@@ -57,7 +55,11 @@ func waitForWebReady(t *testing.T, cli *client.Client, ID string, tlsConfig *tls
 TLSClientConfig: tlsConfig,
 },
 }
- url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/admin/installation", getPort(t, cli, ID, 9443))
+ port, err := cli.GetContainerPort(ID, 9443)
+ if err != nil {
+ t.Fatal(err)
+ }
+ url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/admin/installation", port)
 ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
 defer cancel()

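waitForWebReady now asks the container engine abstraction for the mapped host port and then polls the REST admin installation endpoint until it answers. A standard-library-only sketch of that polling loop is below; the port, password and self-signed-certificate handling are assumptions for a local developer image, not the repository's helper.

package main

import (
	"context"
	"crypto/tls"
	"fmt"
	"log"
	"net/http"
	"time"
)

// waitForWeb polls the MQ REST admin installation endpoint until it returns
// HTTP 200 or the context expires.
func waitForWeb(ctx context.Context, port, user, password string) error {
	client := http.Client{
		Timeout: 10 * time.Second,
		Transport: &http.Transport{
			// Assumes a self-signed developer certificate.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/admin/installation", port)
	for {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			return err
		}
		req.SetBasicAuth(user, password)
		resp, err := client.Do(req)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()
	// "9443" and "passw0rd" are illustrative values for a local developer image.
	if err := waitForWeb(ctx, "9443", "admin", "passw0rd"); err != nil {
		log.Fatal(err)
	}
	log.Println("web server is ready")
}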
@@ -91,11 +93,16 @@ func tlsDirInvalid(t *testing.T, unixPath bool) string {
 }

 // runJMSTests runs a container with a JMS client, which connects to the queue manager container with the specified ID
- func runJMSTests(t *testing.T, cli *client.Client, ID string, tls bool, user, password string, ibmjre string, cipherName string) {
+ func runJMSTests(t *testing.T, cli ce.ContainerInterface, ID string, tls bool, user, password string, ibmjre string, cipherName string) {
- containerConfig := container.Config{
+ port, err := cli.GetContainerPort(ID, 1414)
+ if err != nil {
+ t.Error(err)
+ }
+ containerConfig := ce.ContainerConfig{
 // -e MQ_PORT_1414_TCP_ADDR=9.145.14.173 -e MQ_USERNAME=app -e MQ_PASSWORD=passw0rd -e MQ_CHANNEL=DEV.APP.SVRCONN -e MQ_TLS_TRUSTSTORE=/tls/test.p12 -e MQ_TLS_PASSPHRASE=passw0rd -v /Users/arthurbarr/go/src/github.com/ibm-messaging/mq-container/test/tls:/tls msgtest
 Env: []string{
- "MQ_PORT_1414_TCP_ADDR=" + getIPAddress(t, cli, ID),
+ "MQ_PORT_1414_TCP_ADDR=127.0.0.1",
+ "MQ_PORT_1414_OVERRIDE=" + port,
 "MQ_USERNAME=" + user,
 "MQ_CHANNEL=DEV.APP.SVRCONN",
 "IBMJRE=" + ibmjre,
@@ -114,26 +121,28 @@ func runJMSTests(t *testing.T, cli *client.Client, ID string, tls bool, user, pa
 "MQ_TLS_CIPHER=" + cipherName,
 }...)
 }
- hostConfig := container.HostConfig{
+ hostConfig := ce.ContainerHostConfig{
 Binds: []string{
 coverageBind(t),
 tlsDir(t, false) + ":/var/tls",
 },
 }
- networkingConfig := network.NetworkingConfig{}
- ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, strings.Replace(t.Name()+"JMS", "/", "", -1))
+ networkingConfig := ce.ContainerNetworkSettings{
+ Networks: []string{"host"},
+ }
+ jmsID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, strings.Replace(t.Name()+"JMS", "/", "", -1))
 if err != nil {
 t.Fatal(err)
 }
- startContainer(t, cli, ctr.ID)
+ startContainer(t, cli, jmsID)
- rc := waitForContainer(t, cli, ctr.ID, 2*time.Minute)
+ rc := waitForContainer(t, cli, jmsID, 2*time.Minute)
 if rc != 0 {
 t.Errorf("JUnit container failed with rc=%v", rc)
 }

 // Get console output of the container and process the lines
 // to see if we have any failures
- scanner := bufio.NewScanner(strings.NewReader(inspectLogs(t, cli, ctr.ID)))
+ scanner := bufio.NewScanner(strings.NewReader(inspectLogs(t, cli, jmsID)))
 for scanner.Scan() {
 s := scanner.Text()
 if processJunitLogLine(s) {
@@ -141,7 +150,7 @@ func runJMSTests(t *testing.T, cli *client.Client, ID string, tls bool, user, pa
 }
 }

- defer cleanContainer(t, cli, ctr.ID)
+ defer cleanContainer(t, cli, jmsID)
 }

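runJMSTests passes all of its connection settings to the JMS client container as environment variables, switching to 127.0.0.1 plus an explicit port override now that the client is attached to the host network. A tiny sketch of assembling that environment slice is shown below; the variable names mirror the ones in the diff, while the values are placeholders.

package main

import "fmt"

// jmsClientEnv builds the environment passed to a JMS test-client container.
// Variable names follow the diff; the TLS values here are placeholders.
func jmsClientEnv(port, user, channel, ibmjre string, useTLS bool) []string {
	env := []string{
		"MQ_PORT_1414_TCP_ADDR=127.0.0.1",
		"MQ_PORT_1414_OVERRIDE=" + port,
		"MQ_USERNAME=" + user,
		"MQ_CHANNEL=" + channel,
		"IBMJRE=" + ibmjre,
	}
	if useTLS {
		env = append(env, []string{
			"MQ_TLS_TRUSTSTORE=/var/tls/test.p12", // assumed truststore path
			"MQ_TLS_PASSPHRASE=passw0rd",          // assumed passphrase
		}...)
	}
	return env
}

func main() {
	for _, e := range jmsClientEnv("32768", "app", "DEV.APP.SVRCONN", "false", true) {
		fmt.Println(e)
	}
}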
 // Parse JUnit log line and return true if line contains failed or aborted tests
@@ -205,14 +214,18 @@ func createTLSConfig(t *testing.T, certFile, password string) *tls.Config {
 }
 }

- func testRESTAdmin(t *testing.T, cli *client.Client, ID string, tlsConfig *tls.Config, errorExpected string) {
+ func testRESTAdmin(t *testing.T, cli ce.ContainerInterface, ID string, tlsConfig *tls.Config, errorExpected string) {
 httpClient := http.Client{
 Timeout: time.Duration(30 * time.Second),
 Transport: &http.Transport{
 TLSClientConfig: tlsConfig,
 },
 }
- url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/admin/installation", getPort(t, cli, ID, 9443))
+ port, err := cli.GetContainerPort(ID, 9443)
+ if err != nil {
+ t.Fatal(err)
+ }
+ url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/admin/installation", port)
 req, err := http.NewRequest("GET", url, nil)
 req.SetBasicAuth("admin", defaultAdminPassword)
 resp, err := httpClient.Do(req)
@@ -248,7 +261,7 @@ func logHTTPResponse(t *testing.T, resp *http.Response) {
 t.Logf("HTTP response: %v", string(d))
 }

- func testRESTMessaging(t *testing.T, cli *client.Client, ID string, tlsConfig *tls.Config, qmName string, user string, password string, errorExpected string) {
+ func testRESTMessaging(t *testing.T, cli ce.ContainerInterface, ID string, tlsConfig *tls.Config, qmName string, user string, password string, errorExpected string) {
 httpClient := http.Client{
 Timeout: time.Duration(30 * time.Second),
 Transport: &http.Transport{
@@ -256,7 +269,11 @@ func testRESTMessaging(t *testing.T, cli *client.Client, ID string, tlsConfig *t
 },
 }
 q := "DEV.QUEUE.1"
- url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/messaging/qmgr/%s/queue/%s/message", getPort(t, cli, ID, 9443), qmName, q)
+ port, err := cli.GetContainerPort(ID, 9443)
+ if err != nil {
+ t.Fatal(err)
+ }
+ url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/messaging/qmgr/%s/queue/%s/message", port, qmName, q)
 putMessage := []byte("Hello")
 req, err := http.NewRequest("POST", url, bytes.NewBuffer(putMessage))
 req.SetBasicAuth(user, password)
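testRESTMessaging drives the MQ REST messaging API over HTTPS with basic authentication. The sketch below posts a single message to DEV.QUEUE.1 using only the standard library; the host port, credentials and the relaxed certificate check are illustrative assumptions, and the ibm-mq-rest-csrf-token header is set because the messaging API expects that header on POST requests with basic authentication.

package main

import (
	"bytes"
	"crypto/tls"
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	// Illustrative values for a local developer queue manager.
	const (
		port     = "9443"
		qmgr     = "qm1"
		queue    = "DEV.QUEUE.1"
		user     = "app"
		password = "passw0rd"
	)
	client := http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			// Assumes a self-signed developer certificate.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	url := fmt.Sprintf("https://localhost:%s/ibmmq/rest/v1/messaging/qmgr/%s/queue/%s/message", port, qmgr, queue)
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewBufferString("Hello"))
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth(user, password)
	req.Header.Set("Content-Type", "text/plain")
	// The value of the CSRF token header is arbitrary; it just has to be present.
	req.Header.Set("ibm-mq-rest-csrf-token", "anything")
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("HTTP status:", resp.Status)
}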
File diff suppressed because it is too large
@@ -34,28 +34,9 @@ import (
 "testing"
 "time"

- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/api/types/volume"
- "github.com/docker/docker/client"
- "github.com/docker/docker/pkg/jsonmessage"
- "github.com/docker/docker/pkg/stdcopy"
- "github.com/docker/go-connections/nat"
+ ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
 )

- type containerDetails struct {
- ID string
- Name string
- Image string
- Path string
- Args []string
- CapAdd []string
- CapDrop []string
- User string
- Env []string
- }

 func imageName() string {
 image, ok := os.LookupEnv("TEST_IMAGE")
 if !ok {
@@ -73,7 +54,7 @@ func imageNameDevJMS() string {
 }

 // baseImage returns the ID of the underlying base image (e.g. "ubuntu" or "rhel")
- func baseImage(t *testing.T, cli *client.Client) string {
+ func baseImage(t *testing.T, cli ce.ContainerInterface) string {
 rc, out := runContainerOneShot(t, cli, "grep", "^ID=", "/etc/os-release")
 if rc != 0 {
 t.Fatal("Couldn't determine base image")
@@ -87,7 +68,7 @@ func baseImage(t *testing.T, cli *client.Client) string {

 // devImage returns true if the image under test is a developer image,
 // determined by use of the MQ_ADMIN_PASSWORD environment variable
- func devImage(t *testing.T, cli *client.Client) bool {
+ func devImage(t *testing.T, cli ce.ContainerInterface) bool {
 rc, _ := runContainerOneShot(t, cli, "printenv", "MQ_ADMIN_PASSWORD")
 if rc == 0 {
 return true
@@ -107,6 +88,11 @@ func isWSL(t *testing.T) bool {
 return false
 }

+ // isARM returns whether we are running an arm64 MacOS machine
+ func isARM(t *testing.T) bool {
+ return runtime.GOARCH == "arm64"
+ }

 // getCwd returns the working directory, in an os-specific or UNIX form
 func getCwd(t *testing.T, unixPath bool) string {
 dir, err := os.Getwd()
@@ -161,29 +147,17 @@ func getTempDir(t *testing.T, unixStylePath bool) string {
 }

 // terminationMessage return the termination message, or an empty string if not set
- func terminationMessage(t *testing.T, cli *client.Client, ID string) string {
+ func terminationMessage(t *testing.T, cli ce.ContainerInterface, ID string) string {
- r, _, err := cli.CopyFromContainer(context.Background(), ID, "/run/termination-log")
+ r, err := cli.CopyFromContainer(ID, "/run/termination-log")
 if err != nil {
 t.Log(err)
+ t.Log(string(r))
 return ""
 }
- b, err := ioutil.ReadAll(r)
- tr := tar.NewReader(bytes.NewReader(b))
- _, err = tr.Next()
- if err != nil {
- t.Log(err)
- return ""
- }
- // read the complete content of the file h.Name into the bs []byte
- content, err := ioutil.ReadAll(tr)
- if err != nil {
- t.Log(err)
- return ""
- }
- return string(content)
+ return string(r)
 }
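The deleted lines above existed because the Docker SDK's CopyFromContainer returns a tar stream, so the old helper had to unpack a single-entry archive before it could read the termination log; the new ce.CopyFromContainer wrapper is shown returning the file contents directly. A minimal sketch of reading the first file out of a tar stream with the standard library looks like this; the in-memory archive is built on the spot purely for illustration.

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
)

// firstFileFromTar returns the contents of the first entry in a tar stream.
func firstFileFromTar(r io.Reader) ([]byte, error) {
	tr := tar.NewReader(r)
	if _, err := tr.Next(); err != nil {
		return nil, err
	}
	return io.ReadAll(tr)
}

func main() {
	// Build a one-entry tar archive in memory, standing in for the stream
	// that the Docker SDK returns from CopyFromContainer.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	msg := []byte("Queue manager ended normally")
	tw.WriteHeader(&tar.Header{Name: "termination-log", Mode: 0600, Size: int64(len(msg))})
	tw.Write(msg)
	tw.Close()

	content, err := firstFileFromTar(&buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(content))
}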

- func expectTerminationMessage(t *testing.T, cli *client.Client, ID string) {
+ func expectTerminationMessage(t *testing.T, cli ce.ContainerInterface, ID string) {
 m := terminationMessage(t, cli, ID)
 if m == "" {
 t.Error("Expected termination message to be set")
@@ -191,10 +165,10 @@ func expectTerminationMessage(t *testing.T, cli *client.Client, ID string) {
 }

 // logContainerDetails logs selected details about the container
- func logContainerDetails(t *testing.T, cli *client.Client, ID string) {
+ func logContainerDetails(t *testing.T, cli ce.ContainerInterface, ID string) {
- i, err := cli.ContainerInspect(context.Background(), ID)
+ i, err := cli.ContainerInspect(ID)
 if err == nil {
- d := containerDetails{
+ d := ce.ContainerDetailsLogging{
 ID: ID,
 Name: i.Name,
 Image: i.Image,
@@ -210,29 +184,29 @@ func logContainerDetails(t *testing.T, cli *client.Client, ID string) {
 }
 }

- func cleanContainerQuiet(t *testing.T, cli *client.Client, ID string) {
+ func cleanContainerQuiet(t *testing.T, cli ce.ContainerInterface, ID string) {
 timeout := 10 * time.Second
- err := cli.ContainerStop(context.Background(), ID, &timeout)
+ err := cli.ContainerStop(ID, &timeout)
 if err != nil {
 // Just log the error and continue
 t.Log(err)
 }
- opts := types.ContainerRemoveOptions{
+ opts := ce.ContainerRemoveOptions{
 RemoveVolumes: true,
 Force: true,
 }
- err = cli.ContainerRemove(context.Background(), ID, opts)
+ err = cli.ContainerRemove(ID, opts)
 if err != nil {
 t.Error(err)
 }
 }

- func cleanContainer(t *testing.T, cli *client.Client, ID string) {
+ func cleanContainer(t *testing.T, cli ce.ContainerInterface, ID string) {
 logContainerDetails(t, cli, ID)
 t.Logf("Stopping container: %v", ID)
 timeout := 10 * time.Second
 // Stop the container. This allows the coverage output to be generated.
- err := cli.ContainerStop(context.Background(), ID, &timeout)
+ err := cli.ContainerStop(ID, &timeout)
 if err != nil {
 // Just log the error and continue
 t.Log(err)
@@ -250,11 +224,11 @@ func cleanContainer(t *testing.T, cli *client.Client, ID string) {
 }

 t.Logf("Removing container: %s", ID)
- opts := types.ContainerRemoveOptions{
+ opts := ce.ContainerRemoveOptions{
 RemoveVolumes: true,
 Force: true,
 }
- err = cli.ContainerRemove(context.Background(), ID, opts)
+ err = cli.ContainerRemove(ID, opts)
 if err != nil {
 t.Error(err)
 }
@@ -268,17 +242,17 @@ func generateRandomUID() string {
 }

 // getDefaultHostConfig creates a HostConfig and populates it with the defaults used in testing
- func getDefaultHostConfig(t *testing.T, cli *client.Client) *container.HostConfig {
+ func getDefaultHostConfig(t *testing.T, cli ce.ContainerInterface) *ce.ContainerHostConfig {
- hostConfig := container.HostConfig{
+ hostConfig := ce.ContainerHostConfig{
- Binds: []string{
- coverageBind(t),
- },
- PortBindings: nat.PortMap{},
+ PortBindings: []ce.PortBinding{},
 CapDrop: []string{
 "ALL",
 },
 Privileged: false,
 }
+ if coverage() {
+ hostConfig.Binds = append(hostConfig.Binds, coverageBind(t))
+ }
 if devImage(t, cli) {
 // Only needed for a RHEL-based image
 if baseImage(t, cli) != "ubuntu" {
@@ -292,7 +266,7 @@ func getDefaultHostConfig(t *testing.T, cli *client.Client) *container.HostConfi

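Several helpers in this change now add the coverage bind mount only when coverage collection is switched on, rather than unconditionally. A sketch of that pattern, driven by a hypothetical environment variable rather than the repository's coverage() function, is below.

package main

import (
	"fmt"
	"os"
)

// coverageEnabled is a stand-in for the repository's coverage() switch;
// the COVERAGE environment variable is used only for this sketch.
func coverageEnabled() bool {
	return os.Getenv("COVERAGE") == "true"
}

func main() {
	binds := []string{"/my/tls:/etc/mqm/pki/keys/default"}
	if coverageEnabled() {
		// Only mount the coverage directory when it is actually needed.
		binds = append(binds, "/my/coverage:/var/coverage")
	}
	fmt.Println(binds)
}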
 // runContainerWithHostConfig creates and starts a container, using the supplied HostConfig.
 // Note that a default HostConfig can be created using getDefaultHostConfig.
- func runContainerWithHostConfig(t *testing.T, cli *client.Client, containerConfig *container.Config, hostConfig *container.HostConfig) string {
+ func runContainerWithHostConfig(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig, hostConfig *ce.ContainerHostConfig) string {
 if containerConfig.Image == "" {
 containerConfig.Image = imageName()
 }
@@ -300,22 +274,23 @@ func runContainerWithHostConfig(t *testing.T, cli *client.Client, containerConfi
 if containerConfig.User == "" {
 containerConfig.User = generateRandomUID()
 }
- // if coverage
+ if coverage() {
 containerConfig.Env = append(containerConfig.Env, "COVERAGE_FILE="+t.Name()+".cov")
 containerConfig.Env = append(containerConfig.Env, "EXIT_CODE_FILE="+getExitCodeFilename(t))
- networkingConfig := network.NetworkingConfig{}
+ }
+ networkingConfig := ce.ContainerNetworkSettings{}
 t.Logf("Running container (%s)", containerConfig.Image)
- ctr, err := cli.ContainerCreate(context.Background(), containerConfig, hostConfig, &networkingConfig, t.Name())
+ ID, err := cli.ContainerCreate(containerConfig, hostConfig, &networkingConfig, t.Name())
 if err != nil {
 t.Fatal(err)
 }
- startContainer(t, cli, ctr.ID)
+ startContainer(t, cli, ID)
- return ctr.ID
+ return ID
 }

 // runContainerWithAllConfig creates and starts a container, using the supplied ContainerConfig, HostConfig,
 // NetworkingConfig, and container name (or the value of t.Name if containerName="").
- func runContainerWithAllConfig(t *testing.T, cli *client.Client, containerConfig *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) string {
+ func runContainerWithAllConfig(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig, hostConfig *ce.ContainerHostConfig, networkingConfig *ce.ContainerNetworkSettings, containerName string) string {
 if containerName == "" {
 containerName = t.Name()
 }
@@ -326,30 +301,32 @@ func runContainerWithAllConfig(t *testing.T, cli *client.Client, containerConfig
 if containerConfig.User == "" {
 containerConfig.User = generateRandomUID()
 }
- // if coverage
+ if coverage() {
 containerConfig.Env = append(containerConfig.Env, "COVERAGE_FILE="+t.Name()+".cov")
 containerConfig.Env = append(containerConfig.Env, "EXIT_CODE_FILE="+getExitCodeFilename(t))
+ }
 t.Logf("Running container (%s)", containerConfig.Image)
- ctr, err := cli.ContainerCreate(context.Background(), containerConfig, hostConfig, networkingConfig, containerName)
+ ID, err := cli.ContainerCreate(containerConfig, hostConfig, networkingConfig, containerName)
 if err != nil {
 t.Fatal(err)
 }
- startContainer(t, cli, ctr.ID)
+ startContainer(t, cli, ID)
- return ctr.ID
+ return ID
 }

 // runContainerWithPorts creates and starts a container, exposing the specified ports on the host.
 // If no image is specified in the container config, then the image name is retrieved from the TEST_IMAGE
 // environment variable.
- func runContainerWithPorts(t *testing.T, cli *client.Client, containerConfig *container.Config, ports []int) string {
+ func runContainerWithPorts(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig, ports []int) string {
 hostConfig := getDefaultHostConfig(t, cli)
+ var binding ce.PortBinding
 for _, p := range ports {
- port := nat.Port(fmt.Sprintf("%v/tcp", p))
- hostConfig.PortBindings[port] = []nat.PortBinding{
- {
+ port := fmt.Sprintf("%v/tcp", p)
+ binding = ce.PortBinding{
+ ContainerPort: port,
 HostIP: "0.0.0.0",
- },
 }
+ hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
 }
 return runContainerWithHostConfig(t, cli, containerConfig, hostConfig)
 }
@@ -357,161 +334,160 @@ func runContainerWithPorts(t *testing.T, cli *client.Client, containerConfig *co
 // runContainer creates and starts a container. If no image is specified in
 // the container config, then the image name is retrieved from the TEST_IMAGE
 // environment variable.
- func runContainer(t *testing.T, cli *client.Client, containerConfig *container.Config) string {
+ func runContainer(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig) string {
 return runContainerWithPorts(t, cli, containerConfig, nil)
 }

 // runContainerOneShot runs a container with a custom entrypoint, as the root
 // user and with default capabilities
- func runContainerOneShot(t *testing.T, cli *client.Client, command ...string) (int64, string) {
+ func runContainerOneShot(t *testing.T, cli ce.ContainerInterface, command ...string) (int64, string) {
- containerConfig := container.Config{
+ containerConfig := ce.ContainerConfig{
 Entrypoint: command,
 User: "root",
 Image: imageName(),
 }
- hostConfig := container.HostConfig{}
+ hostConfig := ce.ContainerHostConfig{}
- networkingConfig := network.NetworkingConfig{}
+ networkingConfig := ce.ContainerNetworkSettings{}
 t.Logf("Running one shot container (%s): %v", containerConfig.Image, command)
- ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name()+"OneShot")
+ ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name()+"OneShot")
 if err != nil {
 t.Fatal(err)
 }
- startOptions := types.ContainerStartOptions{}
+ startOptions := ce.ContainerStartOptions{}
- err = cli.ContainerStart(context.Background(), ctr.ID, startOptions)
+ err = cli.ContainerStart(ID, startOptions)
 if err != nil {
 t.Fatal(err)
 }
- defer cleanContainerQuiet(t, cli, ctr.ID)
+ defer cleanContainerQuiet(t, cli, ID)
- rc := waitForContainer(t, cli, ctr.ID, 20*time.Second)
+ rc := waitForContainer(t, cli, ID, 20*time.Second)
- out := inspectLogs(t, cli, ctr.ID)
+ out := inspectLogs(t, cli, ID)
 t.Logf("One shot container finished with rc=%v, output=%v", rc, out)
 return rc, out
 }

 // runContainerOneShot runs a container with a custom entrypoint, as the root
 // user, with default capabilities, and a volume mounted
- func runContainerOneShotWithVolume(t *testing.T, cli *client.Client, bind string, command ...string) (int64, string) {
+ func runContainerOneShotWithVolume(t *testing.T, cli ce.ContainerInterface, bind string, command ...string) (int64, string) {
- containerConfig := container.Config{
+ containerConfig := ce.ContainerConfig{
 Entrypoint: command,
 User: "root",
 Image: imageName(),
 }
- hostConfig := container.HostConfig{
+ hostConfig := ce.ContainerHostConfig{
 Binds: []string{
 bind,
 },
 }
- networkingConfig := network.NetworkingConfig{}
+ networkingConfig := ce.ContainerNetworkSettings{}
 t.Logf("Running one shot container with volume (%s): %v", containerConfig.Image, command)
- ctr, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name()+"OneShotVolume")
+ ID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name()+"OneShotVolume")
 if err != nil {
 t.Fatal(err)
 }
- startOptions := types.ContainerStartOptions{}
+ startOptions := ce.ContainerStartOptions{}
- err = cli.ContainerStart(context.Background(), ctr.ID, startOptions)
+ err = cli.ContainerStart(ID, startOptions)
 if err != nil {
 t.Fatal(err)
 }
- defer cleanContainerQuiet(t, cli, ctr.ID)
+ defer cleanContainerQuiet(t, cli, ID)
- rc := waitForContainer(t, cli, ctr.ID, 20*time.Second)
+ rc := waitForContainer(t, cli, ID, 20*time.Second)
- out := inspectLogs(t, cli, ctr.ID)
+ out := inspectLogs(t, cli, ID)
 t.Logf("One shot container finished with rc=%v, output=%v", rc, out)
 return rc, out
 }

- func startMultiVolumeQueueManager(t *testing.T, cli *client.Client, dataVol bool, qmsharedlogs string, qmshareddata string, env []string) (error, string, string) {
+ func startMultiVolumeQueueManager(t *testing.T, cli ce.ContainerInterface, dataVol bool, qmsharedlogs string, qmshareddata string, env []string) (error, string, string) {
 id := strconv.FormatInt(time.Now().UnixNano(), 10)
- qmdata := createVolume(t, cli, id)
+ volume := createVolume(t, cli, id)
- containerConfig := container.Config{
+ containerConfig := ce.ContainerConfig{
 Image: imageName(),
 Env: env,
 }
- var hostConfig container.HostConfig
+ var hostConfig ce.ContainerHostConfig

 if !dataVol {
- hostConfig = container.HostConfig{}
+ hostConfig = ce.ContainerHostConfig{}
 } else if qmsharedlogs == "" && qmshareddata == "" {
- hostConfig = getHostConfig(t, 1, "", "", qmdata.Name)
+ hostConfig = getHostConfig(t, 1, "", "", volume)
 } else if qmsharedlogs == "" {
- hostConfig = getHostConfig(t, 2, "", qmshareddata, qmdata.Name)
+ hostConfig = getHostConfig(t, 2, "", qmshareddata, volume)
 } else if qmshareddata == "" {
- hostConfig = getHostConfig(t, 3, qmsharedlogs, "", qmdata.Name)
+ hostConfig = getHostConfig(t, 3, qmsharedlogs, "", volume)
 } else {
- hostConfig = getHostConfig(t, 4, qmsharedlogs, qmshareddata, qmdata.Name)
+ hostConfig = getHostConfig(t, 4, qmsharedlogs, qmshareddata, volume)
 }
- networkingConfig := network.NetworkingConfig{}
+ networkingConfig := ce.ContainerNetworkSettings{}
- qm, err := cli.ContainerCreate(context.Background(), &containerConfig, &hostConfig, &networkingConfig, t.Name()+id)
+ qmID, err := cli.ContainerCreate(&containerConfig, &hostConfig, &networkingConfig, t.Name()+id)
 if err != nil {
 return err, "", ""
 }
- startContainer(t, cli, qm.ID)
+ startContainer(t, cli, qmID)

- return nil, qm.ID, qmdata.Name
+ return nil, qmID, volume
 }

- func getHostConfig(t *testing.T, mounts int, qmsharedlogs string, qmshareddata string, qmdata string) container.HostConfig {
+ func getHostConfig(t *testing.T, mounts int, qmsharedlogs string, qmshareddata string, qmdata string) ce.ContainerHostConfig {

- var hostConfig container.HostConfig
+ var hostConfig ce.ContainerHostConfig

 switch mounts {
 case 1:
- hostConfig = container.HostConfig{
+ hostConfig = ce.ContainerHostConfig{
 Binds: []string{
- coverageBind(t),
 qmdata + ":/mnt/mqm",
 },
 }
 case 2:
- hostConfig = container.HostConfig{
+ hostConfig = ce.ContainerHostConfig{
 Binds: []string{
- coverageBind(t),
 qmdata + ":/mnt/mqm",
 qmshareddata + ":/mnt/mqm-data",
 },
 }
 case 3:
- hostConfig = container.HostConfig{
+ hostConfig = ce.ContainerHostConfig{
 Binds: []string{
- coverageBind(t),
 qmdata + ":/mnt/mqm",
 qmsharedlogs + ":/mnt/mqm-log",
 },
 }
 case 4:
- hostConfig = container.HostConfig{
+ hostConfig = ce.ContainerHostConfig{
 Binds: []string{
- coverageBind(t),
 qmdata + ":/mnt/mqm",
 qmsharedlogs + ":/mnt/mqm-log",
 qmshareddata + ":/mnt/mqm-data",
 },
 }
 }
+ if coverage() {
+ hostConfig.Binds = append(hostConfig.Binds, coverageBind(t))
+ }
 return hostConfig
 }

- func startContainer(t *testing.T, cli *client.Client, ID string) {
+ func startContainer(t *testing.T, cli ce.ContainerInterface, ID string) {
 t.Logf("Starting container: %v", ID)
- startOptions := types.ContainerStartOptions{}
+ startOptions := ce.ContainerStartOptions{}
- err := cli.ContainerStart(context.Background(), ID, startOptions)
+ err := cli.ContainerStart(ID, startOptions)
 if err != nil {
 t.Fatal(err)
 }
 }

- func stopContainer(t *testing.T, cli *client.Client, ID string) {
+ func stopContainer(t *testing.T, cli ce.ContainerInterface, ID string) {
 t.Logf("Stopping container: %v", ID)
 timeout := 10 * time.Second
- err := cli.ContainerStop(context.Background(), ID, &timeout) //Duration(20)*time.Second)
+ err := cli.ContainerStop(ID, &timeout) //Duration(20)*time.Second)
 if err != nil {
- t.Fatal(err)
+ // Just log the error and continue
+ t.Log(err)
 }
 }

- func killContainer(t *testing.T, cli *client.Client, ID string, signal string) {
+ func killContainer(t *testing.T, cli ce.ContainerInterface, ID string, signal string) {
 t.Logf("Killing container: %v", ID)
- err := cli.ContainerKill(context.Background(), ID, signal)
+ err := cli.ContainerKill(ID, signal)
 if err != nil {
 t.Fatal(err)
 }
@@ -545,17 +521,17 @@ func getCoverageExitCode(t *testing.T, orig int64) int64 {
 }

 // waitForContainer waits until a container has exited
- func waitForContainer(t *testing.T, cli *client.Client, ID string, timeout time.Duration) int64 {
+ func waitForContainer(t *testing.T, cli ce.ContainerInterface, ID string, timeout time.Duration) int64 {
 c, cancel := context.WithTimeout(context.Background(), timeout)
 defer cancel()
 t.Logf("Waiting for container for %s", timeout)
- okC, errC := cli.ContainerWait(c, ID, container.WaitConditionNotRunning)
+ okC, errC := cli.ContainerWait(c, ID, ce.ContainerStateNotRunning)
 var rc int64
 select {
 case err := <-errC:
 t.Fatal(err)
 case ok := <-okC:
- rc = ok.StatusCode
+ rc = ok
 }
 if coverage() {
 // COVERAGE: When running coverage, the exit code is written to a file,
@@ -567,78 +543,15 @@ func waitForContainer(t *testing.T, cli *client.Client, ID string, timeout time.
 }

 // execContainer runs a command in a running container, and returns the exit code and output
- func execContainer(t *testing.T, cli *client.Client, ID string, user string, cmd []string) (int, string) {
+ func execContainer(t *testing.T, cli ce.ContainerInterface, ID string, user string, cmd []string) (int, string) {
 t.Logf("Running command: %v", cmd)
- config := types.ExecConfig{
- User: user,
- Privileged: false,
- Tty: false,
- AttachStdin: false,
- // Note that you still need to attach stdout/stderr, even though they're not wanted
- AttachStdout: true,
- AttachStderr: true,
- Detach: false,
- Cmd: cmd,
- }
- resp, err := cli.ContainerExecCreate(context.Background(), ID, config)
- if err != nil {
- t.Fatal(err)
- }
- hijack, err := cli.ContainerExecAttach(context.Background(), resp.ID, types.ExecStartCheck{})
- if err != nil {
- t.Fatal(err)
- }
- defer hijack.Close()
- time.Sleep(time.Millisecond * 10)
- err = cli.ContainerExecStart(context.Background(), resp.ID, types.ExecStartCheck{
- Detach: false,
- Tty: false,
- })
- if err != nil {
- t.Fatal(err)
- }
- // Wait for the command to finish
- var exitcode int
- var outputStr string
- for {
- inspect, err := cli.ContainerExecInspect(context.Background(), resp.ID)
- if err != nil {
- t.Fatal(err)
- }
- if inspect.Running {
- continue
- }

- exitcode = inspect.ExitCode
- buf := new(bytes.Buffer)
- // Each output line has a header, which needs to be removed
- _, err = stdcopy.StdCopy(buf, buf, hijack.Reader)
- if err != nil {
- t.Fatal(err)
- }

- outputStr = strings.TrimSpace(buf.String())

- /* Commented out on 14/06/2018 as it might not be needed after adding
- * pause between ContainerExecAttach and ContainerExecStart.
- * TODO If intermittent failures do not occur, remove and refactor.
- *
- * // Before we go let's just double check it did actually finish running
- * // because sometimes we get a "Exec command already running error"
- * alreadyRunningErr := regexp.MustCompile("Error: Exec command .* is already running")
- * if alreadyRunningErr.MatchString(outputStr) {
- * continue
- * }
- */
- break
- }
+ exitcode, outputStr := cli.ExecContainer(ID, user, cmd)

 return exitcode, outputStr
 }
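execContainer used to drive the low-level Docker exec API directly (create, attach, start, poll, then demultiplex the stream with stdcopy) and now delegates all of that to cli.ExecContainer. As a rough analogue of the result the wrapper has to produce, the sketch below runs a local command and returns the same shape of value, an exit code plus trimmed combined output; it is an illustration, not the containerengine implementation.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// runCommand returns the exit code and trimmed combined output of a command,
// the same shape of result that execContainer returns for an in-container exec.
func runCommand(name string, args ...string) (int, string) {
	out, err := exec.Command(name, args...).CombinedOutput()
	exitCode := 0
	if exitErr, ok := err.(*exec.ExitError); ok {
		exitCode = exitErr.ExitCode()
	} else if err != nil {
		exitCode = -1 // command could not be started at all
	}
	return exitCode, strings.TrimSpace(string(out))
}

func main() {
	rc, out := runCommand("echo", "DISPLAY QMGR")
	fmt.Printf("rc=%d output=%q\n", rc, out)
}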

- func waitForReady(t *testing.T, cli *client.Client, ID string) {
+ func waitForReady(t *testing.T, cli ce.ContainerInterface, ID string) {

- ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+ ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
 defer cancel()

 for {
@@ -662,57 +575,47 @@ func waitForReady(t *testing.T, cli *client.Client, ID string) {
 }
 }

- func getIPAddress(t *testing.T, cli *client.Client, ID string) string {
- ctr, err := cli.ContainerInspect(context.Background(), ID)
- if err != nil {
- t.Fatal(err)
- }
- return ctr.NetworkSettings.IPAddress
- }

- func createNetwork(t *testing.T, cli *client.Client) string {
+ func createNetwork(t *testing.T, cli ce.ContainerInterface) string {
 name := "test"
 t.Logf("Creating network: %v", name)
- opts := types.NetworkCreate{}
+ opts := ce.NetworkCreateOptions{}
- net, err := cli.NetworkCreate(context.Background(), name, opts)
+ netID, err := cli.NetworkCreate(name, opts)
 if err != nil {
 t.Fatal(err)
 }
- t.Logf("Created network %v with ID %v", name, net.ID)
+ t.Logf("Created network %v with ID %v", name, netID)
- return net.ID
+ return netID
 }

- func removeNetwork(t *testing.T, cli *client.Client, ID string) {
+ func removeNetwork(t *testing.T, cli ce.ContainerInterface, ID string) {
 t.Logf("Removing network ID: %v", ID)
- err := cli.NetworkRemove(context.Background(), ID)
+ err := cli.NetworkRemove(ID)
 if err != nil {
 t.Fatal(err)
 }
 }

- func createVolume(t *testing.T, cli *client.Client, name string) types.Volume {
+ func createVolume(t *testing.T, cli ce.ContainerInterface, name string) string {
- v, err := cli.VolumeCreate(context.Background(), volume.VolumeCreateBody{
+ v, err := cli.VolumeCreate(ce.VolumeCreateOptions{
 Driver: "local",
- DriverOpts: map[string]string{},
- Labels: map[string]string{},
 Name: name,
 })
 if err != nil {
 t.Fatal(err)
 }
- t.Logf("Created volume %v", v.Name)
+ t.Logf("Created volume %v", v)
 return v
 }
|
|
||||||
func removeVolume(t *testing.T, cli *client.Client, name string) {
|
func removeVolume(t *testing.T, cli ce.ContainerInterface, name string) {
|
||||||
t.Logf("Removing volume %v", name)
|
t.Logf("Removing volume %v", name)
|
||||||
err := cli.VolumeRemove(context.Background(), name, true)
|
err := cli.VolumeRemove(name, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
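After this change `createVolume` returns the volume name as a plain string instead of a `types.Volume`, so callers pair it directly with `removeVolume`. A minimal sketch of that pairing, assuming it lives in the same test package as the helpers above (the test name is hypothetical):

    // Sketch only: pairing the reworked volume helpers in a test.
    func TestVolumeHelperUsage(t *testing.T) {
        cli := ce.NewContainerClient()

        // createVolume now returns the volume name, so cleanup takes the same
        // string rather than a .Name field.
        dataVol := createVolume(t, cli, "qmdata")
        defer removeVolume(t, cli, dataVol)

        // ... start a container that mounts dataVol and make assertions ...
    }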
-func inspectTextLogs(t *testing.T, cli *client.Client, ID string) string {
+func inspectTextLogs(t *testing.T, cli ce.ContainerInterface, ID string) string {
     jsonLogs := inspectLogs(t, cli, ID)
     scanner := bufio.NewScanner(strings.NewReader(jsonLogs))
     b := make([]byte, 64*1024)
@@ -720,9 +623,11 @@ func inspectTextLogs(t *testing.T, cli *client.Client, ID string) string {
     for scanner.Scan() {
         text := scanner.Text()
         if strings.HasPrefix(text, "{") {
+            // If it's a JSON log message, it makes it hard to debug the test, as the JSON
+            // is embedded in the long test output. So just summarize the JSON instead.
             var e map[string]interface{}
             json.Unmarshal([]byte(text), &e)
-            fmt.Fprintf(buf, "{\"message\": \"%v\"}\n", e["message"])
+            fmt.Fprintf(buf, "{\"ibm_datetime\": \"%v\", \"message\": \"%v\", ...}\n", e["ibm_datetime"], e["message"])
         } else {
             fmt.Fprintln(buf, text)
         }
@@ -734,24 +639,14 @@ func inspectTextLogs(t *testing.T, cli *client.Client, ID string) string {
     return buf.String()
 }
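The new summarising step keeps only the `ibm_datetime` and `message` fields of each JSON log line so the test output stays readable. A standalone sketch of that transformation (sample input values are illustrative, not taken from a real queue manager log):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // summarize keeps only the timestamp and message fields of a JSON log line,
    // and passes non-JSON lines through unchanged.
    func summarize(line string) string {
        var e map[string]interface{}
        if err := json.Unmarshal([]byte(line), &e); err != nil {
            return line
        }
        return fmt.Sprintf("{\"ibm_datetime\": %q, \"message\": %q, ...}", e["ibm_datetime"], e["message"])
    }

    func main() {
        fmt.Println(summarize(`{"ibm_datetime":"2023-01-01T00:00:00Z","message":"example log message","loglevel":"INFO"}`))
    }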

-func inspectLogs(t *testing.T, cli *client.Client, ID string) string {
+func inspectLogs(t *testing.T, cli ce.ContainerInterface, ID string) string {
     ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
     defer cancel()
-    reader, err := cli.ContainerLogs(ctx, ID, types.ContainerLogsOptions{
-        ShowStdout: true,
-        ShowStderr: true,
-    })
+    logs, err := cli.GetContainerLogs(ctx, ID, ce.ContainerLogsOptions{})
     if err != nil {
         t.Fatal(err)
     }
-    buf := new(bytes.Buffer)
+    return logs
-
-    // Each output line has a header, which needs to be removed
-    _, err = stdcopy.StdCopy(buf, buf, reader)
-    if err != nil {
-        t.Fatal(err)
-    }
-    return buf.String()
 }

 // generateTAR creates a TAR-formatted []byte, with the specified files included.
@@ -781,76 +676,54 @@ func generateTAR(t *testing.T, files []struct{ Name, Body string }) []byte {
 }

 // createImage creates a new Docker image with the specified files included.
-func createImage(t *testing.T, cli *client.Client, files []struct{ Name, Body string }) string {
+func createImage(t *testing.T, cli ce.ContainerInterface, files []struct{ Name, Body string }) string {
     r := bytes.NewReader(generateTAR(t, files))
     tag := strings.ToLower(t.Name())
-    buildOptions := types.ImageBuildOptions{
-        Context: r,
+    tmpDir, err := os.MkdirTemp("", "tmp")
-        Tags: []string{tag},
-    }
-    resp, err := cli.ImageBuild(context.Background(), r, buildOptions)
     if err != nil {
         t.Fatal(err)
     }
-    // resp (ImageBuildResponse) contains a series of JSON messages
-    dec := json.NewDecoder(resp.Body)
+    defer os.RemoveAll(tmpDir)
-    for {
-        m := jsonmessage.JSONMessage{}
+    //Write files to temp directory
-        err := dec.Decode(&m)
+    for _, file := range files {
-        if m.Error != nil {
+        //Add tag to file name to allow parallel testing
-            t.Fatal(m.ErrorMessage)
+        f, err := os.Create(filepath.Join(tmpDir, file.Name))
-        }
-        t.Log(strings.TrimSpace(m.Stream))
-        if err == io.EOF {
-            break
-        }
         if err != nil {
             t.Fatal(err)
         }
+        defer f.Close()
+
+        body := []byte(file.Body)
+        _, err = f.Write(body)
+        if err != nil {
+            t.Fatal(err)
+        }
+    }
+    _, err = cli.ImageBuild(r, tag, filepath.Join(tmpDir, files[0].Name))
+    if err != nil {
+        t.Fatal(err)
     }
     return tag
 }

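A sketch of how the reworked `createImage` is typically driven from a test. It assumes, as the `filepath.Join(tmpDir, files[0].Name)` argument above suggests, that the first entry in the slice is the Dockerfile; the test name and file contents are illustrative only and the sketch lives in the same test package as the helpers.

    func TestCustomImage(t *testing.T) {
        cli := ce.NewContainerClient()

        // The first entry is assumed to be the Dockerfile for the build.
        files := []struct{ Name, Body string }{
            {"Dockerfile", "FROM scratch\nCOPY hello.txt /\n"},
            {"hello.txt", "hello\n"},
        }
        tag := createImage(t, cli, files)
        defer deleteImage(t, cli, tag)

        // ... run a container from tag and make assertions ...
    }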
 // deleteImage deletes a Docker image
-func deleteImage(t *testing.T, cli *client.Client, id string) {
+func deleteImage(t *testing.T, cli ce.ContainerInterface, id string) {
-    cli.ImageRemove(context.Background(), id, types.ImageRemoveOptions{
+    cli.ImageRemove(id, ce.ImageRemoveOptions{
         Force: true,
     })
 }

-func copyFromContainer(t *testing.T, cli *client.Client, id string, file string) []byte {
+func copyFromContainer(t *testing.T, cli ce.ContainerInterface, id string, file string) []byte {
-    reader, _, err := cli.CopyFromContainer(context.Background(), id, file)
+    b, err := cli.CopyFromContainer(id, file)
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer reader.Close()
-    b, err := ioutil.ReadAll(reader)
     if err != nil {
         t.Fatal(err)
     }
     return b
 }

-func getPort(t *testing.T, cli *client.Client, ID string, port int) string {
-    var inspectInfo types.ContainerJSON
-    var err error
-    for attemptsRemaining := 3; attemptsRemaining > 0; attemptsRemaining-- {
-        inspectInfo, err = cli.ContainerInspect(context.Background(), ID)
-        if err != nil {
-            t.Fatal(err)
-        }
-        portNat := nat.Port(fmt.Sprintf("%d/tcp", port))
-        if inspectInfo.NetworkSettings.Ports[portNat] == nil || len(inspectInfo.NetworkSettings.Ports[portNat]) == 0 {
-            t.Log("Container port not yet bound")
-            time.Sleep(1 * time.Second)
-            continue
-        }
-        return inspectInfo.NetworkSettings.Ports[portNat][0].HostPort
-    }
-    t.Fatal("Failed to get port")
-    return ""
-}
-
 func countLines(t *testing.T, r io.Reader) int {
     scanner := bufio.NewScanner(r)
     count := 0
@@ -882,15 +755,6 @@ func countTarLines(t *testing.T, b []byte) int {
     return total
 }

-func getMQVersion(t *testing.T, cli *client.Client) (string, error) {
-    inspect, _, err := cli.ImageInspectWithRaw(context.Background(), imageName())
-    if err != nil {
-        return "", err
-    }
-    version := inspect.ContainerConfig.Labels["version"]
-    return version, nil
-}
-
 // scanForExcludedEntries scans for default excluded messages
 func scanForExcludedEntries(msg string) bool {
     if strings.Contains(msg, "AMQ5041I") || strings.Contains(msg, "AMQ5052I") ||
@@ -917,7 +781,7 @@ func checkLogForValidJSON(jsonLogs string) bool {

 // runContainerWithAllConfig creates and starts a container, using the supplied ContainerConfig, HostConfig,
 // NetworkingConfig, and container name (or the value of t.Name if containerName="").
-func runContainerWithAllConfigError(t *testing.T, cli *client.Client, containerConfig *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (string, error) {
+func runContainerWithAllConfigError(t *testing.T, cli ce.ContainerInterface, containerConfig *ce.ContainerConfig, hostConfig *ce.ContainerHostConfig, networkingConfig *ce.ContainerNetworkSettings, containerName string) (string, error) {
     if containerName == "" {
         containerName = t.Name()
     }
@@ -928,25 +792,26 @@ func runContainerWithAllConfigError(t *testing.T, cli *client.Client, containerC
     if containerConfig.User == "" {
         containerConfig.User = generateRandomUID()
     }
-    // if coverage
+    if coverage() {
         containerConfig.Env = append(containerConfig.Env, "COVERAGE_FILE="+t.Name()+".cov")
         containerConfig.Env = append(containerConfig.Env, "EXIT_CODE_FILE="+getExitCodeFilename(t))
+    }
     t.Logf("Running container (%s)", containerConfig.Image)
-    ctr, err := cli.ContainerCreate(context.Background(), containerConfig, hostConfig, networkingConfig, containerName)
+    ID, err := cli.ContainerCreate(containerConfig, hostConfig, networkingConfig, containerName)
     if err != nil {
         return "", err
     }
-    err = startContainerError(t, cli, ctr.ID)
+    err = startContainerError(t, cli, ID)
     if err != nil {
         return "", err
     }
-    return ctr.ID, nil
+    return ID, nil
 }

-func startContainerError(t *testing.T, cli *client.Client, ID string) error {
+func startContainerError(t *testing.T, cli ce.ContainerInterface, ID string) error {
     t.Logf("Starting container: %v", ID)
-    startOptions := types.ContainerStartOptions{}
+    startOptions := ce.ContainerStartOptions{}
-    err := cli.ContainerStart(context.Background(), ID, startOptions)
+    err := cli.ContainerStart(ID, startOptions)
     if err != nil {
         return err
     }
@@ -955,7 +820,7 @@ func startContainerError(t *testing.T, cli *client.Client, ID string) error {
 }

 // testLogFilePages validates that the specified number of logFilePages is present in the qm.ini file.
-func testLogFilePages(t *testing.T, cli *client.Client, id string, qmName string, expectedLogFilePages string) {
+func testLogFilePages(t *testing.T, cli ce.ContainerInterface, id string, qmName string, expectedLogFilePages string) {
     catIniFileCommand := fmt.Sprintf("cat /var/mqm/qmgrs/" + qmName + "/qm.ini")
     _, iniContent := execContainer(t, cli, id, "", []string{"bash", "-c", catIniFileCommand})

@@ -965,7 +830,7 @@ func testLogFilePages(t *testing.T, cli *client.Client, id string, qmName string
 }

 // waitForMessageInLog will check for a particular message with wait
-func waitForMessageInLog(t *testing.T, cli *client.Client, id string, expecteMessageId string) (string, error) {
+func waitForMessageInLog(t *testing.T, cli ce.ContainerInterface, id string, expectedMessageId string) (string, error) {
     var jsonLogs string
     ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
     defer cancel()
@@ -973,11 +838,29 @@ func waitForMessageInLog(t *testing.T, cli *client.Client, id string, expecteMes
         select {
         case <-time.After(1 * time.Second):
             jsonLogs = inspectLogs(t, cli, id)
-            if strings.Contains(jsonLogs, expecteMessageId) {
+            if strings.Contains(jsonLogs, expectedMessageId) {
                 return jsonLogs, nil
             }
         case <-ctx.Done():
-            return "", fmt.Errorf("Expected message Id %s was not logged.", expecteMessageId)
+            return "", fmt.Errorf("expected message Id %s was not logged", expectedMessageId)
+        }
+    }
+}
+
+// waitForMessageCountInLog will check for a particular message with wait and must occur exact number of times in log as specified by count
+func waitForMessageCountInLog(t *testing.T, cli ce.ContainerInterface, id string, expectedMessageId string, count int) (string, error) {
+    var jsonLogs string
+    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+    defer cancel()
+    for {
+        select {
+        case <-time.After(1 * time.Second):
+            jsonLogs = inspectLogs(t, cli, id)
+            if strings.Contains(jsonLogs, expectedMessageId) && strings.Count(jsonLogs, expectedMessageId) == count {
+                return jsonLogs, nil
+            }
+        case <-ctx.Done():
+            return "", fmt.Errorf("expected message Id %s was not logged or it was not logged %v times", expectedMessageId, count)
         }
     }
 }
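The new `waitForMessageCountInLog` polls `inspectLogs` once a second and only succeeds when the message ID occurs exactly `count` times before the two-minute deadline. A small sketch of how a test might wrap it, assuming the same test package; the helper name and the chosen message ID are illustrative:

    // requireMessageCount fails the test unless messageID appears exactly
    // count times in the container's logs within the helper's timeout.
    func requireMessageCount(t *testing.T, cli ce.ContainerInterface, id string, messageID string, count int) {
        logs, err := waitForMessageCountInLog(t, cli, id, messageID, count)
        if err != nil {
            t.Fatal(err)
        }
        t.Logf("%s occurred %d times:\n%v", messageID, count, logs)
    }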
3  test/container/go.mod  Normal file
@@ -0,0 +1,3 @@
+module github.com/ibm-messaging/mq-container/test/container
+
+go 1.19

0  test/container/go.sum  Normal file
@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2019, 2022
+© Copyright IBM Corporation 2019, 2023

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import (
     "testing"
     "time"

-    "github.com/docker/docker/client"
+    ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
 )

 var miEnv = []string{
@@ -34,10 +34,7 @@ var miEnv = []string{
 // and starts/stop them checking we always have an active and standby
 func TestMultiInstanceStartStop(t *testing.T) {
     t.Skipf("Skipping %v until test defect fixed", t.Name())
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }
     err, qm1aId, qm1bId, volumes := configureMultiInstance(t, cli)
     if err != nil {
         t.Fatal(err)
@@ -76,10 +73,7 @@ func TestMultiInstanceStartStop(t *testing.T) {
 // TestMultiInstanceContainerStop starts 2 containers in a multi instance queue manager configuration,
 // stops the active queue manager, then checks to ensure the backup queue manager becomes active
 func TestMultiInstanceContainerStop(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }
     err, qm1aId, qm1bId, volumes := configureMultiInstance(t, cli)
     if err != nil {
         t.Fatal(err)
@@ -122,21 +116,16 @@ func TestMultiInstanceContainerStop(t *testing.T) {
 // configuration, then checks to ensure that both an active and standby queue manager have been started
 func TestMultiInstanceRace(t *testing.T) {
     t.Skipf("Skipping %v until file lock is implemented", t.Name())
+    cli := ce.NewContainerClient()
-    cli, err := client.NewClientWithOpts(client.FromEnv)
-    if err != nil {
-        t.Fatal(err)
-    }

     qmsharedlogs := createVolume(t, cli, "qmsharedlogs")
-    defer removeVolume(t, cli, qmsharedlogs.Name)
+    defer removeVolume(t, cli, qmsharedlogs)
     qmshareddata := createVolume(t, cli, "qmshareddata")
-    defer removeVolume(t, cli, qmshareddata.Name)
+    defer removeVolume(t, cli, qmshareddata)

     qmsChannel := make(chan QMChan)

-    go singleMultiInstanceQueueManager(t, cli, qmsharedlogs.Name, qmshareddata.Name, qmsChannel)
+    go singleMultiInstanceQueueManager(t, cli, qmsharedlogs, qmshareddata, qmsChannel)
-    go singleMultiInstanceQueueManager(t, cli, qmsharedlogs.Name, qmshareddata.Name, qmsChannel)
+    go singleMultiInstanceQueueManager(t, cli, qmsharedlogs, qmshareddata, qmsChannel)

     qm1a := <-qmsChannel
     if qm1a.Error != nil {
@@ -159,7 +148,7 @@ func TestMultiInstanceRace(t *testing.T) {
     waitForReady(t, cli, qm1aId)
     waitForReady(t, cli, qm1bId)

-    err, _, _ = getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
+    err, _, _ := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
     if err != nil {
         t.Fatal(err)
     }
@@ -169,10 +158,7 @@ func TestMultiInstanceRace(t *testing.T) {
 // mounts, then checks to ensure that the container terminates with the expected message
 func TestMultiInstanceNoSharedMounts(t *testing.T) {
     t.Parallel()
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

     err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, "", "", miEnv)
     if err != nil {
@@ -188,15 +174,12 @@ func TestMultiInstanceNoSharedMounts(t *testing.T) {
 // TestMultiInstanceNoSharedLogs starts 2 multi instance queue managers without providing a shared log
 // mount, then checks to ensure that the container terminates with the expected message
 func TestMultiInstanceNoSharedLogs(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

     qmshareddata := createVolume(t, cli, "qmshareddata")
-    defer removeVolume(t, cli, qmshareddata.Name)
+    defer removeVolume(t, cli, qmshareddata)

-    err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, "", qmshareddata.Name, miEnv)
+    err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, "", qmshareddata, miEnv)
     if err != nil {
         t.Fatal(err)
     }
@@ -210,15 +193,12 @@ func TestMultiInstanceNoSharedLogs(t *testing.T) {
 // TestMultiInstanceNoSharedData starts 2 multi instance queue managers without providing a shared data
 // mount, then checks to ensure that the container terminates with the expected message
 func TestMultiInstanceNoSharedData(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

     qmsharedlogs := createVolume(t, cli, "qmsharedlogs")
-    defer removeVolume(t, cli, qmsharedlogs.Name)
+    defer removeVolume(t, cli, qmsharedlogs)

-    err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs.Name, "", miEnv)
+    err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs, "", miEnv)
     if err != nil {
         t.Fatal(err)
     }
@@ -232,10 +212,7 @@ func TestMultiInstanceNoSharedData(t *testing.T) {
 // TestMultiInstanceNoMounts starts 2 multi instance queue managers without providing a shared data
 // mount, then checks to ensure that the container terminates with the expected message
 func TestMultiInstanceNoMounts(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

     err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, false, "", "", miEnv)
     if err != nil {
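The multi-instance tests above all follow the same shape: create shared volumes, start two queue manager containers against them, wait for readiness, then check which one is active. A condensed sketch of that flow, assuming the same test package and the helpers shown elsewhere in this change set (`configureMultiInstance`, `waitForReady`, `getActiveStandbyQueueManager`, `cleanContainer`, `removeVolume`); the function name is hypothetical:

    func exampleMultiInstanceCheck(t *testing.T) {
        cli := ce.NewContainerClient()

        // configureMultiInstance creates the shared log/data volumes and two
        // queue manager containers pointed at them (error is returned first).
        err, qm1aId, qm1bId, volumes := configureMultiInstance(t, cli)
        if err != nil {
            t.Fatal(err)
        }
        defer cleanContainer(t, cli, qm1aId)
        defer cleanContainer(t, cli, qm1bId)
        for _, v := range volumes {
            defer removeVolume(t, cli, v)
        }

        waitForReady(t, cli, qm1aId)
        waitForReady(t, cli, qm1bId)

        // Exactly one container should report an active queue manager,
        // the other the standby.
        err, active, standby := getActiveStandbyQueueManager(t, cli, qm1aId, qm1bId)
        if err != nil {
            t.Fatal(err)
        }
        t.Logf("active=%v standby=%v", active, standby)
    }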
@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2019, 2022
+© Copyright IBM Corporation 2019, 2023

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ import (
     "testing"
     "time"

-    "github.com/docker/docker/client"
+    ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
 )

 type QMChan struct {
@@ -34,27 +34,27 @@ type QMChan struct {

 // configureMultiInstance creates the volumes and containers required for basic testing
 // of multi instance queue managers. Returns error, qm1a ID, qm1b ID, slice of volume names
-func configureMultiInstance(t *testing.T, cli *client.Client) (error, string, string, []string) {
+func configureMultiInstance(t *testing.T, cli ce.ContainerInterface) (error, string, string, []string) {

     qmsharedlogs := createVolume(t, cli, "qmsharedlogs")
     qmshareddata := createVolume(t, cli, "qmshareddata")

-    err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs.Name, qmshareddata.Name, miEnv)
+    err, qm1aId, qm1aData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs, qmshareddata, miEnv)
     if err != nil {
         return err, "", "", []string{}
     }
     time.Sleep(10 * time.Second)
-    err, qm1bId, qm1bData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs.Name, qmshareddata.Name, miEnv)
+    err, qm1bId, qm1bData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs, qmshareddata, miEnv)
     if err != nil {
         return err, "", "", []string{}
     }

-    volumes := []string{qmsharedlogs.Name, qmshareddata.Name, qm1aData, qm1bData}
+    volumes := []string{qmsharedlogs, qmshareddata, qm1aData, qm1bData}

     return nil, qm1aId, qm1bId, volumes
 }

-func singleMultiInstanceQueueManager(t *testing.T, cli *client.Client, qmsharedlogs string, qmshareddata string, qmsChannel chan QMChan) {
+func singleMultiInstanceQueueManager(t *testing.T, cli ce.ContainerInterface, qmsharedlogs string, qmshareddata string, qmsChannel chan QMChan) {
     err, qmId, qmData := startMultiVolumeQueueManager(t, cli, true, qmsharedlogs, qmshareddata, miEnv)
     if err != nil {
         qmsChannel <- QMChan{Error: err}
@@ -62,7 +62,7 @@ func singleMultiInstanceQueueManager(t *testing.T, cli *client.Client, qmsharedl
     qmsChannel <- QMChan{QMId: qmId, QMData: qmData}
 }

-func getActiveStandbyQueueManager(t *testing.T, cli *client.Client, qm1aId string, qm1bId string) (error, string, string) {
+func getActiveStandbyQueueManager(t *testing.T, cli ce.ContainerInterface, qm1aId string, qm1bId string) (error, string, string) {
     qm1aStatus := getQueueManagerStatus(t, cli, qm1aId, "QM1")
     qm1bStatus := getQueueManagerStatus(t, cli, qm1bId, "QM1")

@@ -75,7 +75,7 @@ func getActiveStandbyQueueManager(t *testing.T, cli *client.Client, qm1aId strin
     return err, "", ""
 }

-func getQueueManagerStatus(t *testing.T, cli *client.Client, containerID string, queueManagerName string) string {
+func getQueueManagerStatus(t *testing.T, cli ce.ContainerInterface, containerID string, queueManagerName string) string {
     _, dspmqOut := execContainer(t, cli, containerID, "", []string{"bash", "-c", "dspmq", "-m", queueManagerName})
     t.Logf("dspmq for %v (%v) returned: %v", containerID, queueManagerName, dspmqOut)
     regex := regexp.MustCompile(`STATUS\(.*\)`)
@@ -84,7 +84,7 @@ func getQueueManagerStatus(t *testing.T, cli *client.Client, containerID string,
     return status
 }

-func waitForTerminationMessage(t *testing.T, cli *client.Client, qmId string, terminationString string, timeout time.Duration) {
+func waitForTerminationMessage(t *testing.T, cli ce.ContainerInterface, qmId string, terminationString string, timeout time.Duration) {
     ctx, cancel := context.WithTimeout(context.Background(), timeout)
     defer cancel()
     for {
@@ -93,7 +93,7 @@ func waitForTerminationMessage(t *testing.T, cli *client.Client, qmId string, te
         m := terminationMessage(t, cli, qmId)
         if m != "" {
             if !strings.Contains(m, terminationString) {
-                t.Fatalf("Expected container to fail on missing required mount. Got termination message: %v", m)
+                t.Fatalf("Expected container to fail with termination message %v. Got termination message: %v", terminationString, m)
             }
             return
         }
@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2021, 2022
+© Copyright IBM Corporation 2021, 2023

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,20 +16,19 @@ limitations under the License.
 package main

 import (
-    "github.com/docker/docker/client"
     "strings"
     "testing"
+    "time"

+    ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
 )

 // TestNativeHABasic creates 3 containers in a Native HA queue manager configuration
 // and ensures the queue manger and replicas start as expected
 func TestNativeHABasic(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

-    version, err := getMQVersion(t, cli)
+    version, err := cli.GetMQVersion(imageName())
     if err != nil {
         t.Fatal(err)
     }
@@ -40,21 +39,18 @@ func TestNativeHABasic(t *testing.T) {
     containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
     qmReplicaIDs := [3]string{}
     qmVolumes := []string{}
-    qmNetwork, err := createBridgeNetwork(cli, t)
+    //Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
-    if err != nil {
+    //If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
-        t.Fatal(err)
+    basePort := 14551
-    }
-    defer removeBridgeNetwork(cli, qmNetwork.ID)

     for i := 0; i <= 2; i++ {
+        nhaPort := basePort + i
         vol := createVolume(t, cli, containerNames[i])
-        defer removeVolume(t, cli, vol.Name)
+        defer removeVolume(t, cli, vol)
-        qmVolumes = append(qmVolumes, vol.Name)
+        qmVolumes = append(qmVolumes, vol)
+        containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, basePort)
-        containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
+        hostConfig := getHostConfig(t, 1, "", "", vol)
-        hostConfig := getHostConfig(t, 1, "", "", vol.Name)
+        hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
-        networkingConfig := getNativeHANetworkConfig(qmNetwork.ID)
+        networkingConfig := getNativeHANetworkConfig("host")

         ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
         defer cleanContainer(t, cli, ctr)
         qmReplicaIDs[i] = ctr
@@ -74,12 +70,9 @@ func TestNativeHABasic(t *testing.T) {
 // queue manager comes back as a replica
 func TestNativeHAFailover(t *testing.T) {

-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

-    version, err := getMQVersion(t, cli)
+    version, err := cli.GetMQVersion(imageName())
     if err != nil {
         t.Fatal(err)
     }
@@ -90,21 +83,18 @@ func TestNativeHAFailover(t *testing.T) {
     containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
     qmReplicaIDs := [3]string{}
     qmVolumes := []string{}
-    qmNetwork, err := createBridgeNetwork(cli, t)
+    //Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
-    if err != nil {
+    //If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
-        t.Fatal(err)
+    basePort := 14551
-    }
-    defer removeBridgeNetwork(cli, qmNetwork.ID)

     for i := 0; i <= 2; i++ {
+        nhaPort := basePort + i
         vol := createVolume(t, cli, containerNames[i])
-        defer removeVolume(t, cli, vol.Name)
+        defer removeVolume(t, cli, vol)
-        qmVolumes = append(qmVolumes, vol.Name)
+        qmVolumes = append(qmVolumes, vol)
+        containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, basePort)
-        containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
+        hostConfig := getHostConfig(t, 1, "", "", vol)
-        hostConfig := getHostConfig(t, 1, "", "", vol.Name)
+        hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
-        networkingConfig := getNativeHANetworkConfig(qmNetwork.ID)
+        networkingConfig := getNativeHANetworkConfig("host")

         ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
         defer cleanContainer(t, cli, ctr)
         qmReplicaIDs[i] = ctr
@@ -132,33 +122,31 @@ func TestNativeHAFailover(t *testing.T) {
 // TestNativeHASecure creates 3 containers in a Native HA queue manager configuration
 // with HA TLS enabled, and ensures the queue manger and replicas start as expected
 func TestNativeHASecure(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

-    version, err := getMQVersion(t, cli)
+    version, err := cli.GetMQVersion(imageName())
     if err != nil {
         t.Fatal(err)
     }
     if version < "9.2.2.0" {
         t.Skipf("Skipping %s as test requires at least MQ 9.2.2.0, but image is version %s", t.Name(), version)
     }
+    if isARM(t) {
+        t.Skip("Skipping as an issue has been identified for the arm64 MQ image")
+    }

     containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
     qmReplicaIDs := [3]string{}
-    qmNetwork, err := createBridgeNetwork(cli, t)
+    //Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
-    if err != nil {
+    //If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
-        t.Fatal(err)
+    basePort := 14551
-    }
-    defer removeBridgeNetwork(cli, qmNetwork.ID)

     for i := 0; i <= 2; i++ {
+        nhaPort := basePort + i
         containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
         containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true")
         hostConfig := getNativeHASecureHostConfig(t)
-        networkingConfig := getNativeHANetworkConfig(qmNetwork.ID)
+        hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
+        networkingConfig := getNativeHANetworkConfig("host")
         ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
         defer cleanContainer(t, cli, ctr)
         qmReplicaIDs[i] = ctr
@@ -177,12 +165,9 @@ func TestNativeHASecure(t *testing.T) {
 // with HA TLS enabled, overrides the default CipherSpec, and ensures the queue manger
 // and replicas start as expected
 func TestNativeHASecureCipherSpec(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

-    version, err := getMQVersion(t, cli)
+    version, err := cli.GetMQVersion(imageName())
     if err != nil {
         t.Fatal(err)
     }
@@ -192,18 +177,16 @@ func TestNativeHASecureCipherSpec(t *testing.T) {

     containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
     qmReplicaIDs := [3]string{}
-    qmNetwork, err := createBridgeNetwork(cli, t)
+    //Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
-    if err != nil {
+    //If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
-        t.Fatal(err)
+    basePort := 14551
-    }
-    defer removeBridgeNetwork(cli, qmNetwork.ID)

     for i := 0; i <= 2; i++ {
+        nhaPort := basePort + i
         containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
         containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true", "MQ_NATIVE_HA_CIPHERSPEC=TLS_AES_256_GCM_SHA384")
         hostConfig := getNativeHASecureHostConfig(t)
-        networkingConfig := getNativeHANetworkConfig(qmNetwork.ID)
+        hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
+        networkingConfig := getNativeHANetworkConfig("host")
         ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
         defer cleanContainer(t, cli, ctr)
         qmReplicaIDs[i] = ctr
@@ -222,12 +205,9 @@ func TestNativeHASecureCipherSpec(t *testing.T) {
 // with HA TLS FIPS enabled, overrides the default CipherSpec, and ensures the queue manger
 // and replicas start as expected. This test uses FIPS compliant cipher.
 func TestNativeHASecureCipherSpecFIPS(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

-    version, err := getMQVersion(t, cli)
+    version, err := cli.GetMQVersion(imageName())
     if err != nil {
         t.Fatal(err)
     }
@@ -237,19 +217,17 @@ func TestNativeHASecureCipherSpecFIPS(t *testing.T) {

     containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
     qmReplicaIDs := [3]string{}
-    qmNetwork, err := createBridgeNetwork(cli, t)
+    //Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
-    if err != nil {
+    //If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
-        t.Fatal(err)
+    basePort := 14551
-    }
-    defer removeBridgeNetwork(cli, qmNetwork.ID)

     for i := 0; i <= 2; i++ {
+        nhaPort := basePort + i
         containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
         // MQ_NATIVE_HA_CIPHERSPEC is set a FIPS compliant cipherspec.
         containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true", "MQ_NATIVE_HA_CIPHERSPEC=TLS_RSA_WITH_AES_128_GCM_SHA256", "MQ_ENABLE_FIPS=true")
         hostConfig := getNativeHASecureHostConfig(t)
-        networkingConfig := getNativeHANetworkConfig(qmNetwork.ID)
+        hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
+        networkingConfig := getNativeHANetworkConfig("host")
         ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
         defer cleanContainer(t, cli, ctr)
         qmReplicaIDs[i] = ctr
@@ -272,12 +250,9 @@ func TestNativeHASecureCipherSpecFIPS(t *testing.T) {
 // with HA TLS FIPS enabled with non-FIPS cipher, overrides the default CipherSpec, and
 // ensures the queue manger and replicas don't start as expected
 func TestNativeHASecureCipherSpecNonFIPSCipher(t *testing.T) {
-    cli, err := client.NewClientWithOpts(client.FromEnv)
+    cli := ce.NewContainerClient()
-    if err != nil {
-        t.Fatal(err)
-    }

-    version, err := getMQVersion(t, cli)
+    version, err := cli.GetMQVersion(imageName())
     if err != nil {
         t.Fatal(err)
     }
@@ -287,26 +262,24 @@ func TestNativeHASecureCipherSpecNonFIPSCipher(t *testing.T) {

     containerNames := [3]string{"QM1_1", "QM1_2", "QM1_3"}
     qmReplicaIDs := [3]string{}
-    qmNetwork, err := createBridgeNetwork(cli, t)
+    //Each native HA qmgr instance is exposed on subsequent ports on the host starting with basePort
-    if err != nil {
+    //If the qmgr exposes more than one port (tests do not do this currently) then they are offset by +50
-        t.Fatal(err)
+    basePort := 14551
-    }
-    defer removeBridgeNetwork(cli, qmNetwork.ID)

     for i := 0; i <= 2; i++ {
+        nhaPort := basePort + i
         containerConfig := getNativeHAContainerConfig(containerNames[i], containerNames, defaultHAPort)
         // MQ_NATIVE_HA_CIPHERSPEC is set a FIPS non-compliant cipherspec - SSL_ECDHE_ECDSA_WITH_RC4_128_SHA
-        containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true", "MQ_NATIVE_HA_CIPHERSPEC=TLS_RSA_WITH_AES_128_GCM_SHA256", "MQ_ENABLE_FIPS=true")
+        containerConfig.Env = append(containerConfig.Env, "MQ_NATIVE_HA_TLS=true", "MQ_NATIVE_HA_CIPHERSPEC=SSL_ECDHE_ECDSA_WITH_RC4_128_SHA", "MQ_ENABLE_FIPS=true")
         hostConfig := getNativeHASecureHostConfig(t)
-        networkingConfig := getNativeHANetworkConfig(qmNetwork.ID)
+        hostConfig = populateNativeHAPortBindings([]int{9414}, nhaPort, hostConfig)
+        networkingConfig := getNativeHANetworkConfig("host")
-        ctr, err := runContainerWithAllConfigError(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
+        ctr := runContainerWithAllConfig(t, cli, &containerConfig, &hostConfig, &networkingConfig, containerNames[i])
         defer cleanContainer(t, cli, ctr)
         // We expect container to fail in this case because the cipher is non-FIPS and we have asked for FIPS compliance
         // by setting MQ_ENABLE_FIPS=true
-        if err == nil {
-            t.Logf("Container start expected to fail but did not. %v", err)
-        }
         qmReplicaIDs[i] = ctr
     }
+    for i := 0; i <= 2; i++ {
+        waitForTerminationMessage(t, cli, qmReplicaIDs[i], "/opt/mqm/bin/strmqm: exit status 23", 60*time.Second)
+    }
 }
@@ -1,5 +1,5 @@
 /*
-© Copyright IBM Corporation 2021
+© Copyright IBM Corporation 2021, 2023

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -19,13 +19,11 @@ import (
     "context"
     "fmt"
     "path/filepath"
+    "strconv"
     "testing"
     "time"

-    "github.com/docker/docker/api/types"
+    ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
-    "github.com/docker/docker/api/types/container"
-    "github.com/docker/docker/api/types/network"
-    "github.com/docker/docker/client"
 )

 const defaultHAPort = 9414
@@ -36,16 +34,8 @@ type HAReplicaStatus struct {
     Replica [2]string
 }

-func createBridgeNetwork(cli *client.Client, t *testing.T) (types.NetworkCreateResponse, error) {
+func getNativeHAContainerConfig(containerName string, replicaNames [3]string, haPort int) ce.ContainerConfig {
-    return cli.NetworkCreate(context.Background(), t.Name(), types.NetworkCreate{})
+    return ce.ContainerConfig{
-}
-
-func removeBridgeNetwork(cli *client.Client, networkID string) error {
-    return cli.NetworkRemove(context.Background(), networkID)
-}
-
-func getNativeHAContainerConfig(containerName string, replicaNames [3]string, haPort int) container.Config {
-    return container.Config{
         Env: []string{
             "LICENSE=accept",
             "MQ_QMGR_NAME=QM1",
@@ -55,15 +45,18 @@ func getNativeHAContainerConfig(containerName string, replicaNames [3]string, ha
             fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_0_NAME=%s", replicaNames[0]),
             fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_1_NAME=%s", replicaNames[1]),
             fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_2_NAME=%s", replicaNames[2]),
-            fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_0_REPLICATION_ADDRESS=%s(%d)", replicaNames[0], haPort),
+            fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_0_REPLICATION_ADDRESS=%s(%d)", "127.0.0.1", haPort+0),
-            fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_1_REPLICATION_ADDRESS=%s(%d)", replicaNames[1], haPort),
+            fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_1_REPLICATION_ADDRESS=%s(%d)", "127.0.0.1", haPort+1),
-            fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_2_REPLICATION_ADDRESS=%s(%d)", replicaNames[2], haPort),
+            fmt.Sprintf("MQ_NATIVE_HA_INSTANCE_2_REPLICATION_ADDRESS=%s(%d)", "127.0.0.1", haPort+2),
         },
+        //When using the host for networking a consistent user was required. If a random user is used then the following example error was recorded.
+        //AMQ3209E: Native HA connection rejected due to configuration mismatch of 'QmgrUserId=5024'
+        User: "1111",
     }
 }
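With host networking and `basePort := 14551` (as in the tests earlier in this change set), the three replication addresses produced by `getNativeHAContainerConfig` resolve to loopback ports 14551, 14552 and 14553, one per replica. A small in-package sketch that checks this (the test name is hypothetical):

    func TestNativeHAReplicationAddresses(t *testing.T) {
        cfg := getNativeHAContainerConfig("QM1_1", [3]string{"QM1_1", "QM1_2", "QM1_3"}, 14551)

        // haPort+2 for the third replica gives 127.0.0.1(14553).
        want := "MQ_NATIVE_HA_INSTANCE_2_REPLICATION_ADDRESS=127.0.0.1(14553)"
        found := false
        for _, e := range cfg.Env {
            if e == want {
                found = true
            }
        }
        if !found {
            t.Fatalf("expected %q in %v", want, cfg.Env)
        }
    }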

-func getNativeHASecureHostConfig(t *testing.T) container.HostConfig {
+func getNativeHASecureHostConfig(t *testing.T) ce.ContainerHostConfig {
-    return container.HostConfig{
+    return ce.ContainerHostConfig{
         Binds: []string{
             coverageBind(t),
             filepath.Join(getCwd(t, true), "../tls") + ":/etc/mqm/ha/pki/keys/ha",
@@ -71,15 +64,30 @@ func getNativeHASecureHostConfig(t *testing.T) container.HostConfig {
     }
 }

-func getNativeHANetworkConfig(networkID string) network.NetworkingConfig {
+func getNativeHANetworkConfig(networkID string) ce.ContainerNetworkSettings {
-    return network.NetworkingConfig{
+    return ce.ContainerNetworkSettings{
-        EndpointsConfig: map[string]*network.EndpointSettings{
+        Networks: []string{networkID},
-            networkID: &network.EndpointSettings{},
-        },
     }
 }

-func getActiveReplicaInstances(t *testing.T, cli *client.Client, qmReplicaIDs [3]string) (HAReplicaStatus, error) {
+// populatePortBindings writes port bindings to the host config
+func populateNativeHAPortBindings(ports []int, nativeHaPort int, hostConfig ce.ContainerHostConfig) ce.ContainerHostConfig {
+    hostConfig.PortBindings = []ce.PortBinding{}
+    var binding ce.PortBinding
+    for i, p := range ports {
+        port := fmt.Sprintf("%v/tcp", p)
+        binding = ce.PortBinding{
+            ContainerPort: port,
+            HostIP:        "0.0.0.0",
+            //Offset the ports by 50 if there are multiple
+            HostPort: strconv.Itoa(nativeHaPort + 50*i),
+        }
+        hostConfig.PortBindings = append(hostConfig.PortBindings, binding)
+    }
+    return hostConfig
+}
+
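The new `populateNativeHAPortBindings` maps the first container port to the replica's own host port and offsets each additional port by +50, as the comments in the tests describe. A sketch of that behaviour, assuming the same package; the second container port (9443) is chosen purely for illustration:

    func TestPortBindingOffsets(t *testing.T) {
        hostConfig := ce.ContainerHostConfig{}

        // For the first replica (host port 14551) exposing two container ports,
        // the second binding is offset by +50: 9414/tcp -> 14551, 9443/tcp -> 14601.
        hostConfig = populateNativeHAPortBindings([]int{9414, 9443}, 14551, hostConfig)
        if hostConfig.PortBindings[1].HostPort != "14601" {
            t.Fatalf("unexpected host port: %v", hostConfig.PortBindings[1].HostPort)
        }
    }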
|
func getActiveReplicaInstances(t *testing.T, cli ce.ContainerInterface, qmReplicaIDs [3]string) (HAReplicaStatus, error) {
|
||||||
|
|
||||||
var actives []string
|
var actives []string
|
||||||
var replicas []string
|
var replicas []string
|
||||||
@@ -104,7 +112,7 @@ func getActiveReplicaInstances(t *testing.T, cli *client.Client, qmReplicaIDs [3
|
|||||||
return HAReplicaStatus{actives[0], [2]string{replicas[0], replicas[1]}}, nil
|
return HAReplicaStatus{actives[0], [2]string{replicas[0], replicas[1]}}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitForReadyHA(t *testing.T, cli *client.Client, qmReplicaIDs [3]string) {
|
func waitForReadyHA(t *testing.T, cli ce.ContainerInterface, qmReplicaIDs [3]string) {
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
|
ctx, cancel := context.WithTimeout(context.Background(), 4*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
@@ -129,7 +137,7 @@ func waitForReadyHA(t *testing.T, cli *client.Client, qmReplicaIDs [3]string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func waitForFailoverHA(t *testing.T, cli *client.Client, replicas [2]string) {
|
func waitForFailoverHA(t *testing.T, cli ce.ContainerInterface, replicas [2]string) {
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
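The new populateNativeHAPortBindings helper above publishes each exposed container port on the host, offsetting successive host ports by 50 starting from nativeHaPort. A minimal standalone sketch of that offset arithmetic, independent of the repository's test helpers and using illustrative port numbers only, looks like this:

```go
package main

import "fmt"

// hostPortMappings mirrors the offset scheme used by populateNativeHAPortBindings:
// the i-th exposed container port is published on the host at nativeHaPort + 50*i.
func hostPortMappings(ports []int, nativeHaPort int) map[string]int {
	bindings := map[string]int{}
	for i, p := range ports {
		containerPort := fmt.Sprintf("%v/tcp", p) // same key format as the test helper
		bindings[containerPort] = nativeHaPort + 50*i
	}
	return bindings
}

func main() {
	// Illustrative ports only: an MQ listener and a metrics port, published from 9414 upwards.
	for containerPort, hostPort := range hostPortMappings([]int{1414, 9157}, 9414) {
		fmt.Printf("%s -> 0.0.0.0:%d\n", containerPort, hostPort)
	}
}
```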
@@ -1,5 +1,5 @@
/*
-© Copyright IBM Corporation 2018, 2022
+© Copyright IBM Corporation 2018, 2023

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,20 +22,20 @@ import (
	"testing"
	"time"

-	"github.com/docker/docker/client"
+	ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
)

func TestGoldenPathMetric(t *testing.T) {
	t.Parallel()

-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	cli := ce.NewContainerClient()
-	if err != nil {
-		t.Fatal(err)
-	}
	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
	defer cleanContainer(t, cli, id)

-	port := getPort(t, cli, id, defaultMetricPort)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
+	if err != nil {
+		t.Fatal(err)
+	}
	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)

@@ -55,14 +55,14 @@ func TestGoldenPathMetric(t *testing.T) {
func TestMetricNames(t *testing.T) {
	t.Parallel()

-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	cli := ce.NewContainerClient()
-	if err != nil {
-		t.Fatal(err)
-	}
	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
	defer cleanContainer(t, cli, id)

-	port := getPort(t, cli, id, defaultMetricPort)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
+	if err != nil {
+		t.Fatal(err)
+	}
	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)

@@ -99,14 +99,14 @@ func TestMetricNames(t *testing.T) {
func TestMetricLabels(t *testing.T) {
	t.Parallel()

+	cli := ce.NewContainerClient()
	requiredLabels := []string{"qmgr"}
-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
+	defer cleanContainer(t, cli, id)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
	if err != nil {
		t.Fatal(err)
	}
-	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
-	defer cleanContainer(t, cli, id)
-	port := getPort(t, cli, id, defaultMetricPort)

	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)
@@ -148,13 +148,13 @@ func TestMetricLabels(t *testing.T) {
func TestRapidFirePrometheus(t *testing.T) {
	t.Parallel()

-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	cli := ce.NewContainerClient()
+	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
+	defer cleanContainer(t, cli, id)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
	if err != nil {
		t.Fatal(err)
	}
-	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
-	defer cleanContainer(t, cli, id)
-	port := getPort(t, cli, id, defaultMetricPort)

	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)
@@ -182,13 +182,13 @@ func TestRapidFirePrometheus(t *testing.T) {
func TestSlowPrometheus(t *testing.T) {
	t.Parallel()

-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	cli := ce.NewContainerClient()
+	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
+	defer cleanContainer(t, cli, id)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
	if err != nil {
		t.Fatal(err)
	}
-	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
-	defer cleanContainer(t, cli, id)
-	port := getPort(t, cli, id, defaultMetricPort)

	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)
@@ -213,15 +213,14 @@ func TestSlowPrometheus(t *testing.T) {
func TestContainerRestart(t *testing.T) {
	t.Parallel()

-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	cli := ce.NewContainerClient()
+	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
+	defer cleanContainer(t, cli, id)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
	if err != nil {
		t.Fatal(err)
	}

-	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
-	defer cleanContainer(t, cli, id)
-	port := getPort(t, cli, id, defaultMetricPort)

	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)

@@ -239,7 +238,10 @@ func TestContainerRestart(t *testing.T) {
	stopContainer(t, cli, id)
	// Start the container cleanly
	startContainer(t, cli, id)
-	port = getPort(t, cli, id, defaultMetricPort)
+	port, err = cli.GetContainerPort(id, defaultMetricPort)
+	if err != nil {
+		t.Fatal(err)
+	}

	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)
@@ -261,15 +263,14 @@ func TestContainerRestart(t *testing.T) {
func TestQMRestart(t *testing.T) {
	t.Parallel()

-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	cli := ce.NewContainerClient()
-	if err != nil {
-		t.Fatal(err)
-	}

	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
	defer cleanContainer(t, cli, id)

-	port := getPort(t, cli, id, defaultMetricPort)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
+	if err != nil {
+		t.Fatal(err)
+	}

	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)
@@ -319,14 +320,14 @@ func TestQMRestart(t *testing.T) {
func TestValidValues(t *testing.T) {
	t.Parallel()

-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	cli := ce.NewContainerClient()
-	if err != nil {
-		t.Fatal(err)
-	}
	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
	defer cleanContainer(t, cli, id)
	// hostname := getIPAddress(t, cli, id)
-	port := getPort(t, cli, id, defaultMetricPort)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
+	if err != nil {
+		t.Fatal(err)
+	}
	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)

@@ -343,7 +344,7 @@ func TestValidValues(t *testing.T) {
	// Check that the values for each metric are valid numbers
	// can be either int, float or exponential - all these can be parsed by ParseFloat function
	for _, e := range metrics {
-		if _, err = strconv.ParseFloat(e.Value, 64); err != nil {
+		if _, err := strconv.ParseFloat(e.Value, 64); err != nil {
			t.Errorf("Value (%s) for key (%s) is not a valid number", e.Value, e.Key)
		}
	}
@@ -355,14 +356,14 @@ func TestValidValues(t *testing.T) {
func TestChangingValues(t *testing.T) {
	t.Parallel()

-	cli, err := client.NewClientWithOpts(client.FromEnv)
+	cli := ce.NewContainerClient()
-	if err != nil {
-		t.Fatal(err)
-	}
	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{1414, defaultMetricPort})
	defer cleanContainer(t, cli, id)
	// hostname := getIPAddress(t, cli, id)
-	port := getPort(t, cli, id, defaultMetricPort)
+	port, err := cli.GetContainerPort(id, defaultMetricPort)
+	if err != nil {
+		t.Fatal(err)
+	}
	// Now the container is ready we prod the prometheus endpoint until it's up.
	waitForMetricReady(t, port)

@@ -386,7 +387,11 @@ func TestChangingValues(t *testing.T) {
	}

	// Send invalid data to the MQ listener to generate a FDC
-	listener := fmt.Sprintf("localhost:%s", getPort(t, cli, id, 1414))
+	noport, err := cli.GetContainerPort(id, 1414)
+	if err != nil {
+		t.Fatal(err)
+	}
+	listener := fmt.Sprintf("localhost:%s", noport)
	conn, err := net.Dial("tcp", listener)
	if err != nil {
		t.Fatalf("Could not connect to the listener - %v", err)
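Every metrics test in this file now follows the same setup shape. A condensed sketch of that shared pattern, assuming the repository's internal test helpers that appear in the diff above (ce.NewContainerClient, runContainerWithPorts, cleanContainer, waitForMetricReady, metricsContainerConfig and defaultMetricPort); the test name itself is hypothetical:

```go
func TestMetricsEndpointSketch(t *testing.T) {
	t.Parallel()

	// The containerengine client replaces the Docker client and does not
	// return an error on construction.
	cli := ce.NewContainerClient()

	id := runContainerWithPorts(t, cli, metricsContainerConfig(), []int{defaultMetricPort})
	defer cleanContainer(t, cli, id)

	// Port lookup now goes through the client and surfaces an error to the test.
	port, err := cli.GetContainerPort(id, defaultMetricPort)
	if err != nil {
		t.Fatal(err)
	}

	// Prod the Prometheus endpoint until it is serving.
	waitForMetricReady(t, port)
}
```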
@@ -1,5 +1,5 @@
/*
-© Copyright IBM Corporation 2018
+© Copyright IBM Corporation 2018, 2023

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import (
	"testing"
	"time"

-	"github.com/docker/docker/api/types/container"
+	ce "github.com/ibm-messaging/mq-container/test/container/containerengine"
)

type mqmetric struct {
@@ -146,8 +146,8 @@ func waitForMetricReady(t *testing.T, port string) {
	t.Fatalf("Metric endpoint failed to startup in timely manner")
}

-func metricsContainerConfig() *container.Config {
+func metricsContainerConfig() *ce.ContainerConfig {
-	return &container.Config{
+	return &ce.ContainerConfig{
		Env: []string{
			"LICENSE=accept",
			"MQ_QMGR_NAME=" + defaultMetricQMName,
@@ -1,40 +0,0 @@
-module github.com/ibm-messaging/mq-container/test/docker
-
-go 1.18
-
-require (
-	// Note: This is not actually Docker v17.12!
-	// Go modules require the use of semver, but Docker does not use semver and has not
-	// [opted-in to use Go modules](https://github.com/golang/go/wiki/Modules#can-a-module-consume-a-package-that-has-not-opted-in-to-modules)
-	// This means that when you `go get` Docker, you need to do so based on a commit,
-	// e.g. `go get -v github.com/docker/docker@420b1d36250f9cfdc561f086f25a213ecb669b6f`,
-	// which uses the commit for [Docker v19.03.15](https://github.com/moby/moby/releases/tag/v19.03.15)
-	// Go will then find the latest tag with a semver-compatible tag. In Docker's case,
-	// v17.12.0 is valid semver, but v18.09 and v19.03 are not.
-	// Also note: Docker v20.10 is valid semver, but the v20.10 client API requires use of Docker API
-	// version 1.41 on the server, which is currently too new for the version of Docker in Travis (Ubuntu Bionic)
-	github.com/docker/docker v17.12.0-ce-rc1.0.20210128214336-420b1d36250f+incompatible
-	github.com/docker/go-connections v0.4.0
-)
-
-require (
-	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
-	github.com/Microsoft/go-winio v0.5.1 // indirect
-	github.com/containerd/containerd v1.6.6 // indirect
-	github.com/docker/distribution v2.8.1+incompatible // indirect
-	github.com/docker/go-units v0.4.0 // indirect
-	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/gorilla/mux v1.8.0 // indirect
-	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
-	github.com/pkg/errors v0.9.1 // indirect
-	github.com/sirupsen/logrus v1.8.1 // indirect
-	golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect
-	golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect
-	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
-	google.golang.org/grpc v1.46.0 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
-	gotest.tools v2.2.0+incompatible // indirect
-)
@@ -1,199 +0,0 @@
The accompanying go.sum for test/docker is deleted in full: 199 checksum entries for the Docker client module and its (mostly indirect) dependencies, including Azure/go-ansiterm, Microsoft/go-winio, containerd, docker/distribution, docker/go-connections, docker/go-units, gogo/protobuf, golang/protobuf, gorilla/mux, morikuni/aec, the opencontainers modules, pkg/errors, sirupsen/logrus, the golang.org/x modules, google.golang.org/genproto, grpc, protobuf and gotest.tools.
@@ -1,5 +1,5 @@
/*
-© Copyright IBM Corporation 2018, 2022
+© Copyright IBM Corporation 2018, 2023

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -51,6 +51,7 @@ class JMSTests {
	private static final Logger LOGGER = Logger.getLogger(JMSTests.class.getName());
	protected static final String ADDR = System.getenv("MQ_PORT_1414_TCP_ADDR");
	protected static final String USER = System.getenv("MQ_USERNAME");
+	protected static final String PORT = System.getenv().getOrDefault("MQ_PORT_1414_OVERRIDE", "1414");
	protected static final String PASSWORD = System.getenv("MQ_PASSWORD");
	protected static final String CHANNEL = System.getenv("MQ_CHANNEL");
	protected static final String TRUSTSTORE = System.getenv("MQ_TLS_TRUSTSTORE");
@@ -67,11 +68,11 @@ class JMSTests {
		return ctx.getSocketFactory();
	}

-	static MQConnectionFactory createMQConnectionFactory(String channel, String addr) throws JMSException, IOException, GeneralSecurityException {
+	static MQConnectionFactory createMQConnectionFactory(String channel, String addr, String port) throws JMSException, IOException, GeneralSecurityException {
		MQConnectionFactory factory = new MQConnectionFactory();
		factory.setTransportType(WMQConstants.WMQ_CM_CLIENT);
		factory.setChannel(channel);
-		factory.setConnectionNameList(String.format("%s(1414)", addr));
+		factory.setConnectionNameList(String.format("%s(%s)", addr, port));
		if (TRUSTSTORE == null) {
			LOGGER.info("Not using TLS");
		}
@@ -93,9 +94,9 @@ class JMSTests {
	/**
	 * Create a JMSContext with the supplied user and password.
	 */
-	static JMSContext create(String channel, String addr, String user, String password) throws JMSException, IOException, GeneralSecurityException {
+	static JMSContext create(String channel, String addr, String port, String user, String password) throws JMSException, IOException, GeneralSecurityException {
-		LOGGER.info(String.format("Connecting to %s/TCP/%s(1414) as %s", channel, addr, user));
+		LOGGER.info(String.format("Connecting to %s/TCP/%s(%s) as %s", channel, addr, port, user));
-		MQConnectionFactory factory = createMQConnectionFactory(channel, addr);
+		MQConnectionFactory factory = createMQConnectionFactory(channel, addr, port);
		// If a password is set, make sure it gets sent to the queue manager for authentication
		if (password != null) {
			factory.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, true);
@@ -107,9 +108,9 @@ class JMSTests {
	/**
	 * Create a JMSContext with the default user identity (from the OS)
	 */
-	static JMSContext create(String channel, String addr) throws JMSException, IOException, GeneralSecurityException {
+	static JMSContext create(String channel, String addr, String port) throws JMSException, IOException, GeneralSecurityException {
-		LOGGER.info(String.format("Connecting to %s/TCP/%s(1414) as OS user '%s'", channel, addr, System.getProperty("user.name")));
+		LOGGER.info(String.format("Connecting to %s/TCP/%s(%s) as OS user '%s'", channel, addr, port, System.getProperty("user.name")));
-		MQConnectionFactory factory = createMQConnectionFactory(channel, addr);
+		MQConnectionFactory factory = createMQConnectionFactory(channel, addr, port);
		LOGGER.info(String.format("CSP authentication: %s", factory.getBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP)));
		return factory.createContext();
	}
@@ -118,7 +119,7 @@ class JMSTests {
	private static void waitForQueueManager() {
		for (int i = 0; i < 20; i++) {
			try {
-				Socket s = new Socket(ADDR, 1414);
+				Socket s = new Socket(ADDR, Integer.parseInt(PORT));
				s.close();
				return;
			} catch (IOException e) {
@@ -132,7 +133,7 @@ class JMSTests {

	@Test
	void putGetTest(TestInfo t) throws Exception {
-		context = create(CHANNEL, ADDR, USER, PASSWORD);
+		context = create(CHANNEL, ADDR, PORT, USER, PASSWORD);
		Queue queue = new MQQueue("DEV.QUEUE.1");
		context.createProducer().send(queue, t.getDisplayName());
		Message m = context.createConsumer(queue).receive();
@@ -144,7 +145,7 @@ class JMSTests {
		LOGGER.info(String.format("Password='%s'", PASSWORD));
		try {
			// Don't pass a user/password, which should cause the default identity to be used
-			context = create(CHANNEL, ADDR);
+			context = create(CHANNEL, ADDR, PORT);
		} catch (DetailedJMSSecurityRuntimeException ex) {
			Throwable cause = ex.getCause();
			assertNotNull(cause);
vendor/github.com/cespare/xxhash/v2/.travis.yml (generated, vendored, 8 lines)
@@ -1,8 +0,0 @@
-language: go
-go:
-  - "1.x"
-  - master
-env:
-  - TAGS=""
-  - TAGS="-tags purego"
-script: go test $TAGS -v ./...
vendor/github.com/cespare/xxhash/v2/README.md (generated, vendored, 35 lines changed)
@@ -1,10 +1,9 @@
# xxhash

-[](https://godoc.org/github.com/cespare/xxhash)
+[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
-[](https://travis-ci.org/cespare/xxhash)
+[](https://github.com/cespare/xxhash/actions/workflows/test.yml)

-xxhash is a Go implementation of the 64-bit
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

-This implementation provides a fast pure-Go implementation and an even faster
+The package is written with optimized pure Go and also contains even faster
-assembly implementation for amd64.
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.

+[xxHash]: http://cyan4973.github.io/xxHash/

## Compatibility

@@ -46,22 +48,25 @@ Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
-| --- | --- | --- |
+| ---------- | --------- | --------- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |

-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
-the following commands under Go 1.11.2:
+CPU using the following commands under Go 1.19.2:

```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
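The updated README documents the same public API surface: one-shot Sum64 helpers plus the streaming Digest type, which implements hash.Hash64. As a quick orientation, typical use of that API looks like the sketch below; the hash inputs are arbitrary examples.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming use via the Digest type (implements hash.Hash64).
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}
```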
vendor/github.com/cespare/xxhash/v2/testall.sh (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
vendor/github.com/cespare/xxhash/v2/xxhash.go (generated, vendored, 48 lines changed)
@@ -16,19 +16,11 @@ const (
	prime5 uint64 = 2870177450012600261
)

-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// Store the primes in an array as well.
-// possible in the Go code is worth a small (but measurable) performance boost
+//
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// The consts are used when possible in Go code to avoid MOVs but we need a
-// convenience in the Go code in a few places where we need to intentionally
+// contiguous array of the assembly code.
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
-// result overflows a uint64).
-var (
-	prime1v = prime1
-	prime2v = prime2
-	prime3v = prime3
-	prime4v = prime4
-	prime5v = prime5
-)

// Digest implements hash.Hash64.
type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
-	d.v1 = prime1v + prime2
+	d.v1 = primes[0] + prime2
	d.v2 = prime2
	d.v3 = 0
-	d.v4 = -prime1v
+	d.v4 = -primes[0]
	d.total = 0
	d.n = 0
}
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)

+	memleft := d.mem[d.n&(len(d.mem)-1):]
+
	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
-		copy(d.mem[d.n:], b)
+		copy(memleft, b)
		d.n += n
		return
	}

	if d.n > 0 {
		// Finish off the partial block.
-		copy(d.mem[d.n:], b)
+		c := copy(memleft, b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
-		b = b[32-d.n:]
+		b = b[c:]
		d.n = 0
	}

@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {

	h += d.total

-	i, end := 0, d.n
+	b := d.mem[:d.n&(len(d.mem)-1)]
-	for ; i+8 <= end; i += 8 {
+	for ; len(b) >= 8; b = b[8:] {
-		k1 := round(0, u64(d.mem[i:i+8]))
+		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
-	if i+4 <= end {
+	if len(b) >= 4 {
-		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
	}
-	for i < end {
+	for ; len(b) > 0; b = b[1:] {
-		h ^= uint64(d.mem[i]) * prime5
+		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
-		i++
	}

	h ^= h >> 33
@@ -193,7 +186,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
-	b = b[len(d.mem):]
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}
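The rewritten Write and Sum64 index the 32-byte block buffer with d.n&(len(d.mem)-1) rather than d.n directly; because the buffer length is a power of two, the mask is equivalent to taking the remainder and keeps the index provably inside the array. A small standalone check of that identity:

```go
package main

import "fmt"

func main() {
	// d.mem in the Digest is a 32-byte block buffer. Because 32 is a power of
	// two, n & (len(mem)-1) equals n % len(mem) for any non-negative n, which
	// is the property the rewritten Write/Sum64 rely on when slicing d.mem.
	var mem [32]byte
	for _, n := range []int{0, 5, 31, 32, 45, 64} {
		fmt.Printf("n=%2d  n&31=%2d  n%%32=%2d\n", n, n&(len(mem)-1), n%len(mem))
	}
}
```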
308  vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s  generated  vendored
@@ -1,215 +1,209 @@
[Side-by-side listing of the rewritten AMD64 assembly omitted. The update replaces the old register-allocation comments (AX=h, CX=pointer, DX=n, BX=loop end, R8-R11=v1-v4, R12=tmp, R13/R14/R15=prime1v/prime2v/prime4v) with #define'd register names (h, d, p, n, end, v1-v4, x, prime1, prime2, prime4), rewrites round/round0/mergeRound as parameterised macros, introduces a blockLoop() macro for the 32-byte block loop, loads the constants from the ·primes array instead of ·prime1v/·prime2v/·prime3v/·prime4v/·prime5v, and declares both Sum64 and writeBlocks as NOSPLIT|NOFRAME.]
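For readers who prefer not to trace the assembly, the following is a rough Go transliteration of what the round, round0 and mergeRound macros in the new xxhash_amd64.s compute. It assumes the prime values behind the ·primes array are the standard xxHash64 constants; it is a sketch, not the vendored code.

package main

import (
	"fmt"
	"math/bits"
)

// Standard xxHash64 primes (assumed values for ·primes+0, +8 and +24).
const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime4 uint64 = 9650029242287828579
)

// round mirrors the round(acc, x) macro:
//   IMULQ prime2, x ; ADDQ x, acc ; ROLQ $31, acc ; IMULQ prime1, acc
func round(acc, x uint64) uint64 {
	acc += x * prime2
	acc = bits.RotateLeft64(acc, 31)
	return acc * prime1
}

// round0 mirrors round0(x), i.e. round(0, x).
func round0(x uint64) uint64 {
	x *= prime2
	x = bits.RotateLeft64(x, 31)
	return x * prime1
}

// mergeRound mirrors mergeRound(acc, x):
//   round0(x) ; XORQ x, acc ; IMULQ prime1, acc ; ADDQ prime4, acc
func mergeRound(acc, x uint64) uint64 {
	x = round0(x)
	acc ^= x
	return acc*prime1 + prime4
}

func main() {
	acc := round(prime1+prime2, 0x0123456789abcdef)
	fmt.Printf("round:      %016x\n", acc)
	fmt.Printf("mergeRound: %016x\n", mergeRound(acc, 0x0123456789abcdef))
}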
183  vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s  generated  vendored  Normal file
@@ -0,0 +1,183 @@
[New ARM64 assembly implementation added in full; the listing is omitted. It #defines register aliases (digest, h, p, n, nblocks, prime1-prime5, v1-v4, x1-x4), builds the round/round0/mergeRound/blockLoop macros on MADD, ROR and LDP.P, and provides Sum64 and writeBlocks as NOSPLIT|NOFRAME TEXT symbols, handling the short-input tail with TBZ bit tests on the remaining length.]
@@ -1,3 +1,5 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
 // +build !appengine
 // +build gc
 // +build !purego
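The hunk above is the now-standard dual build-constraint style: Go 1.17 and later read the //go:build line, older toolchains still honour the // +build lines, and gofmt keeps the two in sync. A minimal sketch of a file guarded the same way (file, package and function names here are illustrative, not from the repository):

//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

// Package asmonly (a made-up name) compiles only on amd64/arm64 with the gc
// toolchain, and is skipped entirely under purego or App Engine builds.
package asmonly

// UsingAssembly reports that this build configuration selected the assembly path.
func UsingAssembly() bool { return true }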
22  vendor/github.com/cespare/xxhash/v2/xxhash_other.go  generated  vendored
@@ -1,4 +1,5 @@
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
 
 package xxhash
 
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
 	var h uint64
 
 	if n >= 32 {
-		v1 := prime1v + prime2
+		v1 := primes[0] + prime2
 		v2 := prime2
 		v3 := uint64(0)
-		v4 := -prime1v
+		v4 := -primes[0]
 		for len(b) >= 32 {
 			v1 = round(v1, u64(b[0:8:len(b)]))
 			v2 = round(v2, u64(b[8:16:len(b)]))
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
 
 	h += uint64(n)
 
-	i, end := 0, len(b)
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(b[i:i+8:len(b)]))
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
 		h ^= k1
 		h = rol27(h)*prime1 + prime4
 	}
-	if i+4 <= end {
-		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
 		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
 	}
-	for ; i < end; i++ {
-		h ^= uint64(b[i]) * prime5
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
 		h = rol11(h) * prime1
 	}
 
1  vendor/github.com/cespare/xxhash/v2/xxhash_safe.go  generated  vendored
@@ -1,3 +1,4 @@
+//go:build appengine
 // +build appengine
 
 // This file contains the safe implementations of otherwise unsafe-using code.
56  vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go  generated  vendored
@@ -1,3 +1,4 @@
+//go:build !appengine
 // +build !appengine
 
 // This file encapsulates usage of unsafe.
@@ -6,41 +7,52 @@
 package xxhash
 
 import (
-	"reflect"
 	"unsafe"
 )
 
-// Notes:
-//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
-//
 // In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
 //
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
+//   var b []byte
+//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+//   bh.Len = len(s)
+//   bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
 
 // Sum64String computes the 64-bit xxHash digest of s.
 // It may be faster than Sum64([]byte(s)) by avoiding a copy.
 func Sum64String(s string) uint64 {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
 	return Sum64(b)
 }
 
 // WriteString adds more data to d. It always returns len(s), nil.
 // It may be faster than Write([]byte(s)) by avoiding a copy.
 func (d *Digest) WriteString(s string) (n int, err error) {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
-	return d.Write(b)
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+	// d.Write always returns len(s), nil.
+	// Ignoring the return output and returning these fixed values buys a
+	// savings of 6 in the inliner's cost model.
+	return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+	s   string
+	cap int
 }
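The comment block in that file explains the whole trick: building the []byte through a struct whose first two words share a string's layout keeps the conversion cheap enough for the Go inliner. Here is a hedged, self-contained illustration of the same pattern; it is a sketch of the idea, not the vendored code, and the names are made up.

package main

import (
	"fmt"
	"unsafe"
)

// stringSlice mirrors the sliceHeader idea from the diff: its first two words
// (pointer + length) have the same layout as a string, and cap rides along.
type stringSlice struct {
	s   string
	cap int
}

// bytesOf reinterprets s as a []byte without copying. The caller must never
// mutate the result, since it aliases the string's immutable backing array.
func bytesOf(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(&stringSlice{s, len(s)}))
}

func main() {
	b := bytesOf("zero-copy view")
	fmt.Println(len(b), cap(b), string(b[:4]))
}

On current Go releases the same zero-copy view can be obtained with unsafe.Slice(unsafe.StringData(s), len(s)); the struct trick above is what this vendored revision uses so that the wrappers stay inlinable.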
3  vendor/github.com/golang/protobuf/AUTHORS  generated  vendored
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
3  vendor/github.com/golang/protobuf/CONTRIBUTORS  generated  vendored
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
28  vendor/github.com/golang/protobuf/LICENSE  generated  vendored
@@ -1,28 +0,0 @@
-Copyright 2010 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-    * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-    * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
324  vendor/github.com/golang/protobuf/proto/buffer.go  generated  vendored
@@ -1,324 +0,0 @@
[Deleted file; the 324 removed lines are elided here. buffer.go provided the legacy proto.Buffer type plus EncodeVarint/SizeVarint/DecodeVarint, the zig-zag and fixed-width encode/decode helpers, EncodeRawBytes/DecodeRawBytes, EncodeStringBytes/DecodeStringBytes, EncodeMessage/DecodeMessage, DecodeGroup and the consumeGroup scanner, all implemented as thin wrappers over google.golang.org/protobuf/encoding/protowire.]
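The deleted Buffer helpers were thin wrappers over protowire; the only non-obvious arithmetic in them is the zig-zag mapping behind EncodeZigzag64/DecodeZigzag64. The sketch below uses expressions equivalent to the ones the removed code used; the function names are mine, not the package's.

package main

import "fmt"

// zigzag64 maps signed integers to unsigned ones so that values of small
// magnitude (positive or negative) become small varints: 0,-1,1,-2,2 -> 0,1,2,3,4.
func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

// unzigzag64 inverts the mapping, matching the expression in DecodeZigzag64.
func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ (int64(u&1)<<63)>>63
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, 123456, -123456} {
		u := zigzag64(v)
		fmt.Printf("%8d -> %8d -> %8d\n", v, u, unzigzag64(u))
	}
}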
63  vendor/github.com/golang/protobuf/proto/defaults.go  generated  vendored
@@ -1,63 +0,0 @@
[Deleted file; the 63 removed lines are elided here. defaults.go implemented SetDefaults, which walked a message with protoreflect (singular, list and map fields) and populated unset scalar fields that declare a default value, recursing into nested messages.]
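Both defaults.go above and discard.go below rely on the same protoreflect traversal shape: Range over the populated fields and recurse into singular, repeated and map-valued messages. A hedged sketch of that walk follows; it is a generic visitor built from the same calls the deleted helpers used, not the deleted code itself.

package walk

import (
	"google.golang.org/protobuf/reflect/protoreflect"
)

// visitMessages calls fn on m and on every populated sub-message reachable
// from it, using the same Range/IsList/IsMap pattern as the deleted helpers.
func visitMessages(m protoreflect.Message, fn func(protoreflect.Message)) {
	fn(m)
	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		case fd.IsList():
			if fd.Message() != nil {
				ls := v.List()
				for i := 0; i < ls.Len(); i++ {
					visitMessages(ls.Get(i).Message(), fn)
				}
			}
		case fd.IsMap():
			if fd.MapValue().Message() != nil {
				v.Map().Range(func(_ protoreflect.MapKey, mv protoreflect.Value) bool {
					visitMessages(mv.Message(), fn)
					return true
				})
			}
		default:
			if fd.Message() != nil {
				visitMessages(v.Message(), fn)
			}
		}
		return true
	})
}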
113  vendor/github.com/golang/protobuf/proto/deprecated.go  generated  vendored
@@ -1,113 +0,0 @@
[Deleted file; the 113 removed lines are elided here. deprecated.go held the long-deprecated shims: ErrNil/ErrTooLarge/ErrInternalBadWireType, Stats/GetStats, the MessageSet marshal/unmarshal stubs, EnumName, UnmarshalJSONEnum, and the InternalMessageInfo methods that forwarded to google.golang.org/protobuf/proto.]
58  vendor/github.com/golang/protobuf/proto/discard.go  generated  vendored
@@ -1,58 +0,0 @@
[Deleted file; the 58 removed lines are elided here. discard.go implemented DiscardUnknown, which used the protoreflect walk shown above to clear the unknown-field bytes from a message and from every nested message.]
356
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
356
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
@@ -1,356 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/encoding/protowire"
|
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
|
||||||
"google.golang.org/protobuf/runtime/protoiface"
|
|
||||||
"google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// ExtensionDesc represents an extension descriptor and
|
|
||||||
// is used to interact with an extension field in a message.
|
|
||||||
//
|
|
||||||
// Variables of this type are generated in code by protoc-gen-go.
|
|
||||||
ExtensionDesc = protoimpl.ExtensionInfo
|
|
||||||
|
|
||||||
// ExtensionRange represents a range of message extensions.
|
|
||||||
// Used in code generated by protoc-gen-go.
|
|
||||||
ExtensionRange = protoiface.ExtensionRangeV1
|
|
||||||
|
|
||||||
// Deprecated: Do not use; this is an internal type.
|
|
||||||
Extension = protoimpl.ExtensionFieldV1
|
|
||||||
|
|
||||||
// Deprecated: Do not use; this is an internal type.
|
|
||||||
XXX_InternalExtensions = protoimpl.ExtensionFields
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrMissingExtension reports whether the extension was not present.
|
|
||||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
|
||||||
|
|
||||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
|
||||||
|
|
||||||
// HasExtension reports whether the extension field is present in m
|
|
||||||
// either as an explicitly populated field or as an unknown field.
|
|
||||||
func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
|
|
||||||
mr := MessageReflect(m)
|
|
||||||
if mr == nil || !mr.IsValid() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether any populated known field matches the field number.
|
|
||||||
xtd := xt.TypeDescriptor()
|
|
||||||
if isValidExtension(mr.Descriptor(), xtd) {
|
|
||||||
has = mr.Has(xtd)
|
|
||||||
} else {
|
|
||||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
|
||||||
has = int32(fd.Number()) == xt.Field
|
|
||||||
return !has
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether any unknown field matches the field number.
|
|
||||||
for b := mr.GetUnknown(); !has && len(b) > 0; {
|
|
||||||
num, _, n := protowire.ConsumeField(b)
|
|
||||||
has = int32(num) == xt.Field
|
|
||||||
b = b[n:]
|
|
||||||
}
|
|
||||||
return has
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearExtension removes the extension field from m
|
|
||||||
// either as an explicitly populated field or as an unknown field.
|
|
||||||
func ClearExtension(m Message, xt *ExtensionDesc) {
|
|
||||||
mr := MessageReflect(m)
|
|
||||||
if mr == nil || !mr.IsValid() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
xtd := xt.TypeDescriptor()
|
|
||||||
if isValidExtension(mr.Descriptor(), xtd) {
|
|
||||||
mr.Clear(xtd)
|
|
||||||
} else {
|
|
||||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
|
||||||
if int32(fd.Number()) == xt.Field {
|
|
||||||
mr.Clear(fd)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
clearUnknown(mr, fieldNum(xt.Field))
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearAllExtensions clears all extensions from m.
|
|
||||||
// This includes populated fields and unknown fields in the extension range.
|
|
||||||
func ClearAllExtensions(m Message) {
|
|
||||||
mr := MessageReflect(m)
|
|
||||||
if mr == nil || !mr.IsValid() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
|
|
||||||
if fd.IsExtension() {
|
|
||||||
mr.Clear(fd)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
clearUnknown(mr, mr.Descriptor().ExtensionRanges())
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetExtension retrieves a proto2 extended field from m.
|
|
||||||
//
|
|
||||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
|
||||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
|
||||||
// If the field is not present, then the default value is returned (if one is specified),
|
|
||||||
// otherwise ErrMissingExtension is reported.
|
|
||||||
//
|
|
||||||
// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
|
|
||||||
// then GetExtension returns the raw encoded bytes for the extension field.
|
|
||||||
func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
|
|
||||||
mr := MessageReflect(m)
|
|
||||||
if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
|
|
||||||
return nil, errNotExtendable
|
|
||||||
}
|
|
||||||
|
|
||||||
// Retrieve the unknown fields for this extension field.
|
|
||||||
var bo protoreflect.RawFields
|
|
||||||
for bi := mr.GetUnknown(); len(bi) > 0; {
|
|
||||||
num, _, n := protowire.ConsumeField(bi)
|
|
||||||
if int32(num) == xt.Field {
|
|
||||||
bo = append(bo, bi[:n]...)
|
|
||||||
}
|
|
||||||
bi = bi[n:]
|
|
||||||
}
|
|
||||||
|
|
||||||
// For type incomplete descriptors, only retrieve the unknown fields.
|
|
||||||
if xt.ExtensionType == nil {
|
|
||||||
return []byte(bo), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the extension field only exists as unknown fields, unmarshal it.
|
|
||||||
// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
|
|
||||||
xtd := xt.TypeDescriptor()
|
|
||||||
if !isValidExtension(mr.Descriptor(), xtd) {
|
|
||||||
return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
|
|
||||||
}
|
|
||||||
if !mr.Has(xtd) && len(bo) > 0 {
|
|
||||||
m2 := mr.New()
|
|
||||||
if err := (proto.UnmarshalOptions{
|
|
||||||
Resolver: extensionResolver{xt},
|
|
||||||
}.Unmarshal(bo, m2.Interface())); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if m2.Has(xtd) {
|
|
||||||
mr.Set(xtd, m2.Get(xtd))
|
|
||||||
clearUnknown(mr, fieldNum(xt.Field))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check whether the message has the extension field set or a default.
|
|
||||||
var pv protoreflect.Value
|
|
||||||
switch {
|
|
||||||
case mr.Has(xtd):
|
|
||||||
pv = mr.Get(xtd)
|
|
||||||
case xtd.HasDefault():
|
|
||||||
pv = xtd.Default()
|
|
||||||
default:
|
|
||||||
return nil, ErrMissingExtension
|
|
||||||
}
|
|
||||||
|
|
||||||
v := xt.InterfaceOf(pv)
|
|
||||||
rv := reflect.ValueOf(v)
|
|
||||||
if isScalarKind(rv.Kind()) {
|
|
||||||
rv2 := reflect.New(rv.Type())
|
|
||||||
rv2.Elem().Set(rv)
|
|
||||||
v = rv2.Interface()
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// extensionResolver is a custom extension resolver that stores a single
|
|
||||||
// extension type that takes precedence over the global registry.
|
|
||||||
type extensionResolver struct{ xt protoreflect.ExtensionType }
|
|
||||||
|
|
||||||
func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
|
|
||||||
if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
|
|
||||||
return r.xt, nil
|
|
||||||
}
|
|
||||||
return protoregistry.GlobalTypes.FindExtensionByName(field)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
|
|
||||||
if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
|
|
||||||
return r.xt, nil
|
|
||||||
}
|
|
||||||
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetExtensions returns a list of the extensions values present in m,
|
|
||||||
// corresponding with the provided list of extension descriptors, xts.
|
|
||||||
// If an extension is missing in m, the corresponding value is nil.
|
|
||||||
func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
|
|
||||||
mr := MessageReflect(m)
|
|
||||||
if mr == nil || !mr.IsValid() {
|
|
||||||
return nil, errNotExtendable
|
|
||||||
}
|
|
||||||
|
|
||||||
vs := make([]interface{}, len(xts))
|
|
||||||
for i, xt := range xts {
|
|
||||||
v, err := GetExtension(m, xt)
|
|
||||||
if err != nil {
|
|
||||||
if err == ErrMissingExtension {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return vs, err
|
|
||||||
}
|
|
||||||
vs[i] = v
|
|
||||||
}
|
|
||||||
return vs, nil
|
|
||||||
}
|
|
||||||
|

// SetExtension sets an extension field in m to the provided value.
func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return errNotExtendable
	}

	rv := reflect.ValueOf(v)
	if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
	}
	if rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
		}
		if isScalarKind(rv.Elem().Kind()) {
			v = rv.Elem().Interface()
		}
	}

	xtd := xt.TypeDescriptor()
	if !isValidExtension(mr.Descriptor(), xtd) {
		return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
	}
	mr.Set(xtd, xt.ValueOf(v))
	clearUnknown(mr, fieldNum(xt.Field))
	return nil
}

// SetRawExtension inserts b into the unknown fields of m.
//
// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
func SetRawExtension(m Message, fnum int32, b []byte) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return
	}

	// Verify that the raw field is valid.
	for b0 := b; len(b0) > 0; {
		num, _, n := protowire.ConsumeField(b0)
		if int32(num) != fnum {
			panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
		}
		b0 = b0[n:]
	}

	ClearExtension(m, &ExtensionDesc{Field: fnum})
	mr.SetUnknown(append(mr.GetUnknown(), b...))
}

// ExtensionDescs returns a list of extension descriptors found in m,
// containing descriptors for both populated extension fields in m and
// also unknown fields of m that are in the extension range.
// For the latter case, an incomplete descriptor is provided where only
// the ExtensionDesc.Field field is populated.
// The order of the extension descriptors is undefined.
func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
		return nil, errNotExtendable
	}

	// Collect a set of known extension descriptors.
	extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
	mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		if fd.IsExtension() {
			xt := fd.(protoreflect.ExtensionTypeDescriptor)
			if xd, ok := xt.Type().(*ExtensionDesc); ok {
				extDescs[fd.Number()] = xd
			}
		}
		return true
	})

	// Collect a set of unknown extension descriptors.
	extRanges := mr.Descriptor().ExtensionRanges()
	for b := mr.GetUnknown(); len(b) > 0; {
		num, _, n := protowire.ConsumeField(b)
		if extRanges.Has(num) && extDescs[num] == nil {
			extDescs[num] = nil
		}
		b = b[n:]
	}

	// Transpose the set of descriptors into a list.
	var xts []*ExtensionDesc
	for num, xt := range extDescs {
		if xt == nil {
			xt = &ExtensionDesc{Field: int32(num)}
		}
		xts = append(xts, xt)
	}
	return xts, nil
}

// isValidExtension reports whether xtd is a valid extension descriptor for md.
func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
	return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
}

// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
// This function exists for historical reasons since the representation of
// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
func isScalarKind(k reflect.Kind) bool {
	switch k {
	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
		return true
	default:
		return false
	}
}

// clearUnknown removes unknown fields from m where remover.Has reports true.
func clearUnknown(m protoreflect.Message, remover interface {
	Has(protoreflect.FieldNumber) bool
}) {
	var bo protoreflect.RawFields
	for bi := m.GetUnknown(); len(bi) > 0; {
		num, _, n := protowire.ConsumeField(bi)
		if !remover.Has(num) {
			bo = append(bo, bi[:n]...)
		}
		bi = bi[n:]
	}
	if bi := m.GetUnknown(); len(bi) != len(bo) {
		m.SetUnknown(bo)
	}
}

type fieldNum protoreflect.FieldNumber

func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
	return protoreflect.FieldNumber(n1) == n2
}
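For orientation, a minimal usage sketch of the extension accessors above (SetExtension, GetExtension, ClearExtension), assuming a hypothetical generated package pb that declares an extendable message pb.Outer and an extension descriptor pb.E_Priority whose ExtensionType is *int32:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	m := &pb.Outer{}
	// SetExtension rejects values whose Go type does not match the
	// descriptor's ExtensionType, so the value is passed as *int32 here.
	if err := proto.SetExtension(m, pb.E_Priority, proto.Int32(7)); err != nil {
		log.Fatal(err)
	}
	v, err := proto.GetExtension(m, pb.E_Priority) // interface{} holding *int32
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*int32)) // 7
	proto.ClearExtension(m, pb.E_Priority)
}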
306
vendor/github.com/golang/protobuf/proto/properties.go
generated
vendored
@@ -1,306 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"sync"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/runtime/protoimpl"
)

// StructProperties represents protocol buffer type information for a
// generated protobuf message in the open-struct API.
//
// Deprecated: Do not use.
type StructProperties struct {
	// Prop are the properties for each field.
	//
	// Fields belonging to a oneof are stored in OneofTypes instead, with a
	// single Properties representing the parent oneof held here.
	//
	// The order of Prop matches the order of fields in the Go struct.
	// Struct fields that are not related to protobufs have a "XXX_" prefix
	// in the Properties.Name and must be ignored by the user.
	Prop []*Properties

	// OneofTypes contains information about the oneof fields in this message.
	// It is keyed by the protobuf field name.
	OneofTypes map[string]*OneofProperties
}

// Properties represents the type information for a protobuf message field.
//
// Deprecated: Do not use.
type Properties struct {
	// Name is a placeholder name with little meaningful semantic value.
	// If the name has an "XXX_" prefix, the entire Properties must be ignored.
	Name string
	// OrigName is the protobuf field name or oneof name.
	OrigName string
	// JSONName is the JSON name for the protobuf field.
	JSONName string
	// Enum is a placeholder name for enums.
	// For historical reasons, this is neither the Go name for the enum,
	// nor the protobuf name for the enum.
	Enum string // Deprecated: Do not use.
	// Weak contains the full name of the weakly referenced message.
	Weak string
	// Wire is a string representation of the wire type.
	Wire string
	// WireType is the protobuf wire type for the field.
	WireType int
	// Tag is the protobuf field number.
	Tag int
	// Required reports whether this is a required field.
	Required bool
	// Optional reports whether this is an optional field.
	Optional bool
	// Repeated reports whether this is a repeated field.
	Repeated bool
	// Packed reports whether this is a packed repeated field of scalars.
	Packed bool
	// Proto3 reports whether this field operates under the proto3 syntax.
	Proto3 bool
	// Oneof reports whether this field belongs within a oneof.
	Oneof bool

	// Default is the default value in string form.
	Default string
	// HasDefault reports whether the field has a default value.
	HasDefault bool

	// MapKeyProp is the properties for the key field for a map field.
	MapKeyProp *Properties
	// MapValProp is the properties for the value field for a map field.
	MapValProp *Properties
}

// OneofProperties represents the type information for a protobuf oneof.
//
// Deprecated: Do not use.
type OneofProperties struct {
	// Type is a pointer to the generated wrapper type for the field value.
	// This is nil for messages that are not in the open-struct API.
	Type reflect.Type
	// Field is the index into StructProperties.Prop for the containing oneof.
	Field int
	// Prop is the properties for the field.
	Prop *Properties
}

// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
	s := p.Wire
	s += "," + strconv.Itoa(p.Tag)
	if p.Required {
		s += ",req"
	}
	if p.Optional {
		s += ",opt"
	}
	if p.Repeated {
		s += ",rep"
	}
	if p.Packed {
		s += ",packed"
	}
	s += ",name=" + p.OrigName
	if p.JSONName != "" {
		s += ",json=" + p.JSONName
	}
	if len(p.Enum) > 0 {
		s += ",enum=" + p.Enum
	}
	if len(p.Weak) > 0 {
		s += ",weak=" + p.Weak
	}
	if p.Proto3 {
		s += ",proto3"
	}
	if p.Oneof {
		s += ",oneof"
	}
	if p.HasDefault {
		s += ",def=" + p.Default
	}
	return s
}

// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(tag string) {
	// For example: "bytes,49,opt,name=foo,def=hello!"
	for len(tag) > 0 {
		i := strings.IndexByte(tag, ',')
		if i < 0 {
			i = len(tag)
		}
		switch s := tag[:i]; {
		case strings.HasPrefix(s, "name="):
			p.OrigName = s[len("name="):]
		case strings.HasPrefix(s, "json="):
			p.JSONName = s[len("json="):]
		case strings.HasPrefix(s, "enum="):
			p.Enum = s[len("enum="):]
		case strings.HasPrefix(s, "weak="):
			p.Weak = s[len("weak="):]
		case strings.Trim(s, "0123456789") == "":
			n, _ := strconv.ParseUint(s, 10, 32)
			p.Tag = int(n)
		case s == "opt":
			p.Optional = true
		case s == "req":
			p.Required = true
		case s == "rep":
			p.Repeated = true
		case s == "varint" || s == "zigzag32" || s == "zigzag64":
			p.Wire = s
			p.WireType = WireVarint
		case s == "fixed32":
			p.Wire = s
			p.WireType = WireFixed32
		case s == "fixed64":
			p.Wire = s
			p.WireType = WireFixed64
		case s == "bytes":
			p.Wire = s
			p.WireType = WireBytes
		case s == "group":
			p.Wire = s
			p.WireType = WireStartGroup
		case s == "packed":
			p.Packed = true
		case s == "proto3":
			p.Proto3 = true
		case s == "oneof":
			p.Oneof = true
		case strings.HasPrefix(s, "def="):
			// The default tag is special in that everything afterwards is the
			// default regardless of the presence of commas.
			p.HasDefault = true
			p.Default, i = tag[len("def="):], len(tag)
		}
		tag = strings.TrimPrefix(tag[i:], ",")
	}
}

// Init populates the properties from a protocol buffer struct tag.
//
// Deprecated: Do not use.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
	p.Name = name
	p.OrigName = name
	if tag == "" {
		return
	}
	p.Parse(tag)

	if typ != nil && typ.Kind() == reflect.Map {
		p.MapKeyProp = new(Properties)
		p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
		p.MapValProp = new(Properties)
		p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
	}
}

var propertiesCache sync.Map // map[reflect.Type]*StructProperties

// GetProperties returns the list of properties for the type represented by t,
// which must be a generated protocol buffer message in the open-struct API,
// where protobuf message fields are represented by exported Go struct fields.
//
// Deprecated: Use protobuf reflection instead.
func GetProperties(t reflect.Type) *StructProperties {
	if p, ok := propertiesCache.Load(t); ok {
		return p.(*StructProperties)
	}
	p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
	return p.(*StructProperties)
}

func newProperties(t reflect.Type) *StructProperties {
	if t.Kind() != reflect.Struct {
		panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
	}

	var hasOneof bool
	prop := new(StructProperties)

	// Construct a list of properties for each field in the struct.
	for i := 0; i < t.NumField(); i++ {
		p := new(Properties)
		f := t.Field(i)
		tagField := f.Tag.Get("protobuf")
		p.Init(f.Type, f.Name, tagField, &f)

		tagOneof := f.Tag.Get("protobuf_oneof")
		if tagOneof != "" {
			hasOneof = true
			p.OrigName = tagOneof
		}

		// Rename unrelated struct fields with the "XXX_" prefix since so much
		// user code simply checks for this to exclude special fields.
		if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
			p.Name = "XXX_" + p.Name
			p.OrigName = "XXX_" + p.OrigName
		} else if p.Weak != "" {
			p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
		}

		prop.Prop = append(prop.Prop, p)
	}

	// Construct a mapping of oneof field names to properties.
	if hasOneof {
		var oneofWrappers []interface{}
		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
		}
		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
		}
		if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
			if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
				oneofWrappers = m.ProtoMessageInfo().OneofWrappers
			}
		}

		prop.OneofTypes = make(map[string]*OneofProperties)
		for _, wrapper := range oneofWrappers {
			p := &OneofProperties{
				Type: reflect.ValueOf(wrapper).Type(), // *T
				Prop: new(Properties),
			}
			f := p.Type.Elem().Field(0)
			p.Prop.Name = f.Name
			p.Prop.Parse(f.Tag.Get("protobuf"))

			// Determine the struct field that contains this oneof.
			// Each wrapper is assignable to exactly one parent field.
			var foundOneof bool
			for i := 0; i < t.NumField() && !foundOneof; i++ {
				if p.Type.AssignableTo(t.Field(i).Type) {
					p.Field = i
					foundOneof = true
				}
			}
			if !foundOneof {
				panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
			}
			prop.OneofTypes[p.Prop.OrigName] = p
		}
	}

	return prop
}

func (sp *StructProperties) Len() int { return len(sp.Prop) }
func (sp *StructProperties) Less(i, j int) bool { return false }
func (sp *StructProperties) Swap(i, j int) { return }
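GetProperties above is driven entirely by the protobuf struct tags that Parse understands. A minimal sketch of inspecting those properties at runtime, assuming a hypothetical generated open-struct message pb.Outer:

package main

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	props := proto.GetProperties(reflect.TypeOf(&pb.Outer{}).Elem())
	for _, p := range props.Prop {
		if strings.HasPrefix(p.Name, "XXX_") {
			continue // non-protobuf struct fields are renamed with an "XXX_" prefix
		}
		fmt.Printf("field %s: wire=%s tag=%d oneof=%v\n", p.OrigName, p.Wire, p.Tag, p.Oneof)
	}
}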
167
vendor/github.com/golang/protobuf/proto/proto.go
generated
vendored
@@ -1,167 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package proto provides functionality for handling protocol buffer messages.
// In particular, it provides marshaling and unmarshaling between a protobuf
// message and the binary wire format.
//
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
// more information.
//
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
package proto

import (
	protoV2 "google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/runtime/protoiface"
	"google.golang.org/protobuf/runtime/protoimpl"
)

const (
	ProtoPackageIsVersion1 = true
	ProtoPackageIsVersion2 = true
	ProtoPackageIsVersion3 = true
	ProtoPackageIsVersion4 = true
)

// GeneratedEnum is any enum type generated by protoc-gen-go
// which is a named int32 kind.
// This type exists for documentation purposes.
type GeneratedEnum interface{}

// GeneratedMessage is any message type generated by protoc-gen-go
// which is a pointer to a named struct kind.
// This type exists for documentation purposes.
type GeneratedMessage interface{}

// Message is a protocol buffer message.
//
// This is the v1 version of the message interface and is marginally better
// than an empty interface as it lacks any method to programmatically interact
// with the contents of the message.
//
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
// exposes protobuf reflection as a first-class feature of the interface.
//
// To convert a v1 message to a v2 message, use the MessageV2 function.
// To convert a v2 message to a v1 message, use the MessageV1 function.
type Message = protoiface.MessageV1

// MessageV1 converts either a v1 or v2 message to a v1 message.
// It returns nil if m is nil.
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
	return protoimpl.X.ProtoMessageV1Of(m)
}

// MessageV2 converts either a v1 or v2 message to a v2 message.
// It returns nil if m is nil.
func MessageV2(m GeneratedMessage) protoV2.Message {
	return protoimpl.X.ProtoMessageV2Of(m)
}

// MessageReflect returns a reflective view for a message.
// It returns nil if m is nil.
func MessageReflect(m Message) protoreflect.Message {
	return protoimpl.X.MessageOf(m)
}

// Marshaler is implemented by messages that can marshal themselves.
// This interface is used by the following functions: Size, Marshal,
// Buffer.Marshal, and Buffer.EncodeMessage.
//
// Deprecated: Do not implement.
type Marshaler interface {
	// Marshal formats the encoded bytes of the message.
	// It should be deterministic and emit valid protobuf wire data.
	// The caller takes ownership of the returned buffer.
	Marshal() ([]byte, error)
}

// Unmarshaler is implemented by messages that can unmarshal themselves.
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
//
// Deprecated: Do not implement.
type Unmarshaler interface {
	// Unmarshal parses the encoded bytes of the protobuf wire input.
	// The provided buffer is only valid for the duration of the method call.
	// It should not reset the receiver message.
	Unmarshal([]byte) error
}

// Merger is implemented by messages that can merge themselves.
// This interface is used by the following functions: Clone and Merge.
//
// Deprecated: Do not implement.
type Merger interface {
	// Merge merges the contents of src into the receiver message.
	// It clones all data structures in src such that it aliases no mutable
	// memory referenced by src.
	Merge(src Message)
}

// RequiredNotSetError is an error type returned when
// marshaling or unmarshaling a message with missing required fields.
type RequiredNotSetError struct {
	err error
}

func (e *RequiredNotSetError) Error() string {
	if e.err != nil {
		return e.err.Error()
	}
	return "proto: required field not set"
}
func (e *RequiredNotSetError) RequiredNotSet() bool {
	return true
}

func checkRequiredNotSet(m protoV2.Message) error {
	if err := protoV2.CheckInitialized(m); err != nil {
		return &RequiredNotSetError{err: err}
	}
	return nil
}

// Clone returns a deep copy of src.
func Clone(src Message) Message {
	return MessageV1(protoV2.Clone(MessageV2(src)))
}

// Merge merges src into dst, which must be messages of the same type.
//
// Populated scalar fields in src are copied to dst, while populated
// singular messages in src are merged into dst by recursively calling Merge.
// The elements of every list field in src are appended to the corresponding
// list fields in dst. The entries of every map field in src are copied into
// the corresponding map field in dst, possibly replacing existing entries.
// The unknown fields of src are appended to the unknown fields of dst.
func Merge(dst, src Message) {
	protoV2.Merge(MessageV2(dst), MessageV2(src))
}

// Equal reports whether two messages are equal.
// If two messages marshal to the same bytes under deterministic serialization,
// then Equal is guaranteed to report true.
//
// Two messages are equal if they are the same protobuf message type,
// have the same set of populated known and extension field values,
// and the same set of unknown fields values.
//
// Scalar values are compared with the equivalent of the == operator in Go,
// except bytes values which are compared using bytes.Equal and
// floating point values which specially treat NaNs as equal.
// Message values are compared by recursively calling Equal.
// Lists are equal if each element value is also equal.
// Maps are equal if they have the same set of keys, where the pair of values
// for each key is also equal.
func Equal(x, y Message) bool {
	return protoV2.Equal(MessageV2(x), MessageV2(y))
}

func isMessageSet(md protoreflect.MessageDescriptor) bool {
	ms, ok := md.(interface{ IsMessageSet() bool })
	return ok && ms.IsMessageSet()
}
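The helpers above are thin wrappers over the v2 google.golang.org/protobuf/proto package. A minimal sketch of the most common calls, assuming a hypothetical generated message pb.Outer with a Name field:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	src := &pb.Outer{Name: proto.String("example")} // Name is a hypothetical field
	dst := &pb.Outer{}

	proto.Merge(dst, src)                // copies populated fields from src into dst
	clone := proto.Clone(src)            // deep copy, returned as a v1 Message
	fmt.Println(proto.Equal(clone, src)) // true

	m2 := proto.MessageV2(src) // v2 view of the same message, with full reflection
	fmt.Println(m2.ProtoReflect().Descriptor().FullName())
}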
323
vendor/github.com/golang/protobuf/proto/registry.go
generated
vendored
@@ -1,323 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"reflect"
	"strings"
	"sync"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/runtime/protoimpl"
)

// filePath is the path to the proto source file.
type filePath = string // e.g., "google/protobuf/descriptor.proto"

// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
type fileDescGZIP = []byte

var fileCache sync.Map // map[filePath]fileDescGZIP

// RegisterFile is called from generated code to register the compressed
// FileDescriptorProto with the file path for a proto source file.
//
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
func RegisterFile(s filePath, d fileDescGZIP) {
	// Decompress the descriptor.
	zr, err := gzip.NewReader(bytes.NewReader(d))
	if err != nil {
		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
	}
	b, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
	}

	// Construct a protoreflect.FileDescriptor from the raw descriptor.
	// Note that DescBuilder.Build automatically registers the constructed
	// file descriptor with the v2 registry.
	protoimpl.DescBuilder{RawDescriptor: b}.Build()

	// Locally cache the raw descriptor form for the file.
	fileCache.Store(s, d)
}

// FileDescriptor returns the compressed FileDescriptorProto given the file path
// for a proto source file. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
func FileDescriptor(s filePath) fileDescGZIP {
	if v, ok := fileCache.Load(s); ok {
		return v.(fileDescGZIP)
	}

	// Find the descriptor in the v2 registry.
	var b []byte
	if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
		if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok {
			b = fd.ProtoLegacyRawDesc()
		} else {
			// TODO: Use protodesc.ToFileDescriptorProto to construct
			// a descriptorpb.FileDescriptorProto and marshal it.
			// However, doing so causes the proto package to have a dependency
			// on descriptorpb, leading to cyclic dependency issues.
		}
	}

	// Locally cache the raw descriptor form for the file.
	if len(b) > 0 {
		v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
		return v.(fileDescGZIP)
	}
	return nil
}

// enumName is the name of an enum. For historical reasons, the enum name is
// neither the full Go name nor the full protobuf name of the enum.
// The name is the dot-separated combination of just the proto package that the
// enum is declared within followed by the Go type name of the generated enum.
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"

// enumsByName maps enum values by name to their numeric counterpart.
type enumsByName = map[string]int32

// enumsByNumber maps enum values by number to their name counterpart.
type enumsByNumber = map[int32]string

var enumCache sync.Map // map[enumName]enumsByName
var numFilesCache sync.Map // map[protoreflect.FullName]int

// RegisterEnum is called from the generated code to register the mapping of
// enum value names to enum numbers for the enum identified by s.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
	if _, ok := enumCache.Load(s); ok {
		panic("proto: duplicate enum registered: " + s)
	}
	enumCache.Store(s, m)

	// This does not forward registration to the v2 registry since this API
	// lacks sufficient information to construct a complete v2 enum descriptor.
}

// EnumValueMap returns the mapping from enum value names to enum numbers for
// the enum of the given name. It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
func EnumValueMap(s enumName) enumsByName {
	if v, ok := enumCache.Load(s); ok {
		return v.(enumsByName)
	}

	// Check whether the cache is stale. If the number of files in the current
	// package differs, then it means that some enums may have been recently
	// registered upstream that we do not know about.
	var protoPkg protoreflect.FullName
	if i := strings.LastIndexByte(s, '.'); i >= 0 {
		protoPkg = protoreflect.FullName(s[:i])
	}
	v, _ := numFilesCache.Load(protoPkg)
	numFiles, _ := v.(int)
	if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
		return nil // cache is up-to-date; was not found earlier
	}

	// Update the enum cache for all enums declared in the given proto package.
	numFiles = 0
	protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
		walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
			name := protoimpl.X.LegacyEnumName(ed)
			if _, ok := enumCache.Load(name); !ok {
				m := make(enumsByName)
				evs := ed.Values()
				for i := evs.Len() - 1; i >= 0; i-- {
					ev := evs.Get(i)
					m[string(ev.Name())] = int32(ev.Number())
				}
				enumCache.LoadOrStore(name, m)
			}
		})
		numFiles++
		return true
	})
	numFilesCache.Store(protoPkg, numFiles)

	// Check cache again for enum map.
	if v, ok := enumCache.Load(s); ok {
		return v.(enumsByName)
	}
	return nil
}

// walkEnums recursively walks all enums declared in d.
func walkEnums(d interface {
	Enums() protoreflect.EnumDescriptors
	Messages() protoreflect.MessageDescriptors
}, f func(protoreflect.EnumDescriptor)) {
	eds := d.Enums()
	for i := eds.Len() - 1; i >= 0; i-- {
		f(eds.Get(i))
	}
	mds := d.Messages()
	for i := mds.Len() - 1; i >= 0; i-- {
		walkEnums(mds.Get(i), f)
	}
}

// messageName is the full name of a protobuf message.
type messageName = string

var messageTypeCache sync.Map // map[messageName]reflect.Type

// RegisterType is called from generated code to register the message Go type
// for a message of the given name.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
func RegisterType(m Message, s messageName) {
	mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
	if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
		panic(err)
	}
	messageTypeCache.Store(s, reflect.TypeOf(m))
}

// RegisterMapType is called from generated code to register the Go map type
// for a protobuf message representing a map entry.
//
// Deprecated: Do not use.
func RegisterMapType(m interface{}, s messageName) {
	t := reflect.TypeOf(m)
	if t.Kind() != reflect.Map {
		panic(fmt.Sprintf("invalid map kind: %v", t))
	}
	if _, ok := messageTypeCache.Load(s); ok {
		panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
	}
	messageTypeCache.Store(s, t)
}

// MessageType returns the message type for a named message.
// It returns nil if not found.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
func MessageType(s messageName) reflect.Type {
	if v, ok := messageTypeCache.Load(s); ok {
		return v.(reflect.Type)
	}

	// Derive the message type from the v2 registry.
	var t reflect.Type
	if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
		t = messageGoType(mt)
	}

	// If we could not get a concrete type, it is possible that it is a
	// pseudo-message for a map entry.
	if t == nil {
		d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
		if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
			kt := goTypeForField(md.Fields().ByNumber(1))
			vt := goTypeForField(md.Fields().ByNumber(2))
			t = reflect.MapOf(kt, vt)
		}
	}

	// Locally cache the message type for the given name.
	if t != nil {
		v, _ := messageTypeCache.LoadOrStore(s, t)
		return v.(reflect.Type)
	}
	return nil
}

func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
	switch k := fd.Kind(); k {
	case protoreflect.EnumKind:
		if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
			return enumGoType(et)
		}
		return reflect.TypeOf(protoreflect.EnumNumber(0))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
			return messageGoType(mt)
		}
		return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
	default:
		return reflect.TypeOf(fd.Default().Interface())
	}
}

func enumGoType(et protoreflect.EnumType) reflect.Type {
	return reflect.TypeOf(et.New(0))
}

func messageGoType(mt protoreflect.MessageType) reflect.Type {
	return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
}

// MessageName returns the full protobuf name for the given message type.
//
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
func MessageName(m Message) messageName {
	if m == nil {
		return ""
	}
	if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
		return m.XXX_MessageName()
	}
	return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
}

// RegisterExtension is called from the generated code to register
// the extension descriptor.
//
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
func RegisterExtension(d *ExtensionDesc) {
	if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
		panic(err)
	}
}

type extensionsByNumber = map[int32]*ExtensionDesc

var extensionCache sync.Map // map[messageName]extensionsByNumber

// RegisteredExtensions returns a map of the registered extensions for the
// provided protobuf message, indexed by the extension field number.
//
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
func RegisteredExtensions(m Message) extensionsByNumber {
	// Check whether the cache is stale. If the number of extensions for
	// the given message differs, then it means that some extensions were
	// recently registered upstream that we do not know about.
	s := MessageName(m)
	v, _ := extensionCache.Load(s)
	xs, _ := v.(extensionsByNumber)
	if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
		return xs // cache is up-to-date
	}

	// Cache is stale, re-compute the extensions map.
	xs = make(extensionsByNumber)
	protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
		if xd, ok := xt.(*ExtensionDesc); ok {
			xs[int32(xt.TypeDescriptor().Number())] = xd
		} else {
			// TODO: This implies that the protoreflect.ExtensionType is a
			// custom type not generated by protoc-gen-go. We could try and
			// convert the type to an ExtensionDesc.
		}
		return true
	})
	extensionCache.Store(s, xs)
	return xs
}
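The registration shims above resolve lookups against the global v2 registries. A minimal sketch of the lookup side, assuming a hypothetical generated package pb whose messages register under the full name my.pkg.Outer:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	// Go type registered for the message, or nil if the name is unknown.
	fmt.Println(proto.MessageType("my.pkg.Outer"))

	// Compressed FileDescriptorProto for a source file, or nil if unknown.
	fmt.Println(len(proto.FileDescriptor("my/pkg/outer.proto")))

	// Extensions currently registered against a message, keyed by field number.
	for num := range proto.RegisteredExtensions(&pb.Outer{}) {
		fmt.Println("extension field", num)
	}
}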
801
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
@@ -1,801 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/encoding/prototext"
|
|
||||||
protoV2 "google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
|
||||||
)
|
|
||||||
|
|
||||||
const wrapTextUnmarshalV2 = false
|
|
||||||
|
|
||||||
// ParseError is returned by UnmarshalText.
|
|
||||||
type ParseError struct {
|
|
||||||
Message string
|
|
||||||
|
|
||||||
// Deprecated: Do not use.
|
|
||||||
Line, Offset int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *ParseError) Error() string {
|
|
||||||
if wrapTextUnmarshalV2 {
|
|
||||||
return e.Message
|
|
||||||
}
|
|
||||||
if e.Line == 1 {
|
|
||||||
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalText parses a proto text formatted string into m.
|
|
||||||
func UnmarshalText(s string, m Message) error {
|
|
||||||
if u, ok := m.(encoding.TextUnmarshaler); ok {
|
|
||||||
return u.UnmarshalText([]byte(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
m.Reset()
|
|
||||||
mi := MessageV2(m)
|
|
||||||
|
|
||||||
if wrapTextUnmarshalV2 {
|
|
||||||
err := prototext.UnmarshalOptions{
|
|
||||||
AllowPartial: true,
|
|
||||||
}.Unmarshal([]byte(s), mi)
|
|
||||||
if err != nil {
|
|
||||||
return &ParseError{Message: err.Error()}
|
|
||||||
}
|
|
||||||
return checkRequiredNotSet(mi)
|
|
||||||
} else {
|
|
||||||
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return checkRequiredNotSet(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type textParser struct {
|
|
||||||
s string // remaining input
|
|
||||||
done bool // whether the parsing is finished (success or error)
|
|
||||||
backed bool // whether back() was called
|
|
||||||
offset, line int
|
|
||||||
cur token
|
|
||||||
}
|
|
||||||
|
|
||||||
type token struct {
|
|
||||||
value string
|
|
||||||
err *ParseError
|
|
||||||
line int // line number
|
|
||||||
offset int // byte number from start of input, not start of line
|
|
||||||
unquoted string // the unquoted version of value, if it was a quoted string
|
|
||||||
}
|
|
||||||
|
|
||||||
func newTextParser(s string) *textParser {
|
|
||||||
p := new(textParser)
|
|
||||||
p.s = s
|
|
||||||
p.line = 1
|
|
||||||
p.cur.line = 1
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
|
||||||
md := m.Descriptor()
|
|
||||||
fds := md.Fields()
|
|
||||||
|
|
||||||
// A struct is a sequence of "name: value", terminated by one of
|
|
||||||
// '>' or '}', or the end of the input. A name may also be
|
|
||||||
// "[extension]" or "[type/url]".
|
|
||||||
//
|
|
||||||
// The whole struct can also be an expanded Any message, like:
|
|
||||||
// [type/url] < ... struct contents ... >
|
|
||||||
seen := make(map[protoreflect.FieldNumber]bool)
|
|
||||||
for {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value == terminator {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tok.value == "[" {
|
|
||||||
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a normal, non-extension field.
|
|
||||||
name := protoreflect.Name(tok.value)
|
|
||||||
fd := fds.ByName(name)
|
|
||||||
switch {
|
|
||||||
case fd == nil:
|
|
||||||
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
|
||||||
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
|
||||||
fd = gd
|
|
||||||
}
|
|
||||||
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
|
||||||
fd = nil
|
|
||||||
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
|
||||||
fd = nil
|
|
||||||
}
|
|
||||||
if fd == nil {
|
|
||||||
typeName := string(md.FullName())
|
|
||||||
if m, ok := m.Interface().(Message); ok {
|
|
||||||
t := reflect.TypeOf(m)
|
|
||||||
if t.Kind() == reflect.Ptr {
|
|
||||||
typeName = t.Elem().String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return p.errorf("unknown field name %q in %v", name, typeName)
|
|
||||||
}
|
|
||||||
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
|
||||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
|
||||||
}
|
|
||||||
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
|
||||||
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
|
||||||
}
|
|
||||||
seen[fd.Number()] = true
|
|
||||||
|
|
||||||
// Consume any colon.
|
|
||||||
if err := p.checkForColon(fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse into the field.
|
|
||||||
v := m.Get(fd)
|
|
||||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
|
||||||
v = m.Mutable(fd)
|
|
||||||
}
|
|
||||||
if v, err = p.unmarshalValue(v, fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m.Set(fd, v)
|
|
||||||
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
|
||||||
name, err := p.consumeExtensionOrAnyName()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it contains a slash, it's an Any type URL.
|
|
||||||
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
// consume an optional colon
|
|
||||||
if tok.value == ":" {
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
|
||||||
if err != nil {
|
|
||||||
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
|
||||||
}
|
|
||||||
m2 := mt.New()
|
|
||||||
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
b, err := protoV2.Marshal(m2.Interface())
|
|
||||||
if err != nil {
|
|
||||||
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
|
||||||
}
|
|
||||||
|
|
||||||
urlFD := m.Descriptor().Fields().ByName("type_url")
|
|
||||||
valFD := m.Descriptor().Fields().ByName("value")
|
|
||||||
if seen[urlFD.Number()] {
|
|
||||||
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
|
||||||
}
|
|
||||||
if seen[valFD.Number()] {
|
|
||||||
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
|
||||||
}
|
|
||||||
m.Set(urlFD, protoreflect.ValueOfString(name))
|
|
||||||
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
|
||||||
seen[urlFD.Number()] = true
|
|
||||||
seen[valFD.Number()] = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
xname := protoreflect.FullName(name)
|
|
||||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
|
||||||
if xt == nil && isMessageSet(m.Descriptor()) {
|
|
||||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
|
||||||
}
|
|
||||||
if xt == nil {
|
|
||||||
return p.errorf("unrecognized extension %q", name)
|
|
||||||
}
|
|
||||||
fd := xt.TypeDescriptor()
|
|
||||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
|
||||||
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := p.checkForColon(fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
v := m.Get(fd)
|
|
||||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
|
||||||
v = m.Mutable(fd)
|
|
||||||
}
|
|
||||||
v, err = p.unmarshalValue(v, fd)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
m.Set(fd, v)
|
|
||||||
return p.consumeOptionalSeparator()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return v, tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "" {
|
|
||||||
return v, p.errorf("unexpected EOF")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case fd.IsList():
|
|
||||||
lv := v.List()
|
|
||||||
var err error
|
|
||||||
if tok.value == "[" {
|
|
||||||
// Repeated field with list notation, like [1,2,3].
|
|
||||||
for {
|
|
||||||
vv := lv.NewElement()
|
|
||||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
|
||||||
if err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
lv.Append(vv)
|
|
||||||
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return v, tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "]" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tok.value != "," {
|
|
||||||
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// One value of the repeated field.
|
|
||||||
p.back()
|
|
||||||
vv := lv.NewElement()
|
|
||||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
|
||||||
if err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
lv.Append(vv)
|
|
||||||
return v, nil
|
|
||||||
case fd.IsMap():
|
|
||||||
// The map entry should be this sequence of tokens:
|
|
||||||
// < key : KEY value : VALUE >
|
|
||||||
// However, implementations may omit key or value, and technically
|
|
||||||
// we should support them in any order.
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
default:
|
|
||||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
keyFD := fd.MapKey()
|
|
||||||
valFD := fd.MapValue()
|
|
||||||
|
|
||||||
mv := v.Map()
|
|
||||||
kv := keyFD.Default()
|
|
||||||
vv := mv.NewValue()
|
|
||||||
for {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return v, tok.err
|
|
||||||
}
|
|
||||||
if tok.value == terminator {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
switch tok.value {
|
|
||||||
case "key":
|
|
||||||
if err := p.consumeToken(":"); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
case "value":
|
|
||||||
if err := p.checkForColon(valFD); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
|
||||||
return v, err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
p.back()
|
|
||||||
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mv.Set(kv.MapKey(), vv)
|
|
||||||
return v, nil
|
|
||||||
default:
|
|
||||||
p.back()
|
|
||||||
return p.unmarshalSingularValue(v, fd)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return v, tok.err
|
|
||||||
}
|
|
||||||
if tok.value == "" {
|
|
||||||
return v, p.errorf("unexpected EOF")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch fd.Kind() {
|
|
||||||
case protoreflect.BoolKind:
|
|
||||||
switch tok.value {
|
|
||||||
case "true", "1", "t", "True":
|
|
||||||
return protoreflect.ValueOfBool(true), nil
|
|
||||||
case "false", "0", "f", "False":
|
|
||||||
return protoreflect.ValueOfBool(false), nil
|
|
||||||
}
|
|
||||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfInt32(int32(x)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The C++ parser accepts large positive hex numbers that use
|
|
||||||
// two's complement arithmetic to represent negative numbers.
|
|
||||||
// This feature is here for backwards compatibility with C++.
|
|
||||||
if strings.HasPrefix(tok.value, "0x") {
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
|
||||||
return protoreflect.ValueOfInt64(int64(x)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The C++ parser accepts large positive hex numbers that use
|
|
||||||
// two's complement arithmetic to represent negative numbers.
|
|
||||||
// This feature is here for backwards compatibility with C++.
|
|
||||||
if strings.HasPrefix(tok.value, "0x") {
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
|
||||||
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfUint32(uint32(x)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
|
||||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
|
||||||
return protoreflect.ValueOfUint64(uint64(x)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.FloatKind:
|
|
||||||
// Ignore 'f' for compatibility with output generated by C++,
|
|
||||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
|
||||||
v := tok.value
|
|
||||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
|
||||||
v = v[:len(v)-len("f")]
|
|
||||||
}
|
|
||||||
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfFloat32(float32(x)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.DoubleKind:
|
|
||||||
// Ignore 'f' for compatibility with output generated by C++,
|
|
||||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
|
||||||
v := tok.value
|
|
||||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
|
||||||
v = v[:len(v)-len("f")]
|
|
||||||
}
|
|
||||||
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
|
||||||
return protoreflect.ValueOfFloat64(float64(x)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.StringKind:
|
|
||||||
if isQuote(tok.value[0]) {
|
|
||||||
return protoreflect.ValueOfString(tok.unquoted), nil
|
|
||||||
}
|
|
||||||
case protoreflect.BytesKind:
|
|
||||||
if isQuote(tok.value[0]) {
|
|
||||||
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
|
||||||
}
|
|
||||||
case protoreflect.EnumKind:
|
|
||||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
|
||||||
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
|
||||||
}
|
|
||||||
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
|
||||||
if vd != nil {
|
|
||||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
|
||||||
}
|
|
||||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
|
||||||
var terminator string
|
|
||||||
switch tok.value {
|
|
||||||
case "{":
|
|
||||||
terminator = "}"
|
|
||||||
case "<":
|
|
||||||
terminator = ">"
|
|
||||||
default:
|
|
||||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
|
||||||
}
|
|
||||||
err := p.unmarshalMessage(v.Message(), terminator)
|
|
||||||
return v, err
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
|
||||||
}
|
|
||||||
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume a ':' from the input stream (if the next token is a colon),
|
|
||||||
// returning an error if a colon is needed but not present.
|
|
||||||
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != ":" {
|
|
||||||
if fd.Message() == nil {
|
|
||||||
return p.errorf("expected ':', found %q", tok.value)
|
|
||||||
}
|
|
||||||
p.back()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
|
||||||
// the following ']'. It returns the name or URL consumed.
|
|
||||||
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", tok.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If extension name or type url is quoted, it's a single token.
|
|
||||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
|
||||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return name, p.consumeToken("]")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume everything up to "]"
|
|
||||||
var parts []string
|
|
||||||
for tok.value != "]" {
|
|
||||||
parts = append(parts, tok.value)
|
|
||||||
tok = p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
|
||||||
}
|
|
||||||
if p.done && tok.value != "]" {
|
|
||||||
return "", p.errorf("unclosed type_url or extension name")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(parts, ""), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
|
||||||
// It is used in unmarshalMessage to provide backward compatibility.
|
|
||||||
func (p *textParser) consumeOptionalSeparator() error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != ";" && tok.value != "," {
|
|
||||||
p.back()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
|
||||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
|
||||||
p.cur.err = pe
|
|
||||||
p.done = true
|
|
||||||
return pe
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) skipWhitespace() {
|
|
||||||
i := 0
|
|
||||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
|
||||||
if p.s[i] == '#' {
|
|
||||||
// comment; skip to end of line or input
|
|
||||||
for i < len(p.s) && p.s[i] != '\n' {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i == len(p.s) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if p.s[i] == '\n' {
|
|
||||||
p.line++
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
p.offset += i
|
|
||||||
p.s = p.s[i:len(p.s)]
|
|
||||||
if len(p.s) == 0 {
|
|
||||||
p.done = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) advance() {
|
|
||||||
// Skip whitespace
|
|
||||||
p.skipWhitespace()
|
|
||||||
if p.done {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start of non-whitespace
|
|
||||||
p.cur.err = nil
|
|
||||||
p.cur.offset, p.cur.line = p.offset, p.line
|
|
||||||
p.cur.unquoted = ""
|
|
||||||
switch p.s[0] {
|
|
||||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
|
||||||
// Single symbol
|
|
||||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
|
||||||
case '"', '\'':
|
|
||||||
// Quoted string
|
|
||||||
i := 1
|
|
||||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
|
||||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
|
||||||
// skip escaped char
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
|
||||||
p.errorf("unmatched quote")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
|
||||||
if err != nil {
|
|
||||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
|
||||||
p.cur.unquoted = unq
|
|
||||||
default:
|
|
||||||
i := 0
|
|
||||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i == 0 {
|
|
||||||
p.errorf("unexpected byte %#x", p.s[0])
|
|
||||||
return
|
|
||||||
}
|
|
||||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
|
||||||
}
|
|
||||||
p.offset += len(p.cur.value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Back off the parser by one token. Can only be done between calls to next().
|
|
||||||
// It makes the next advance() a no-op.
|
|
||||||
func (p *textParser) back() { p.backed = true }
|
|
||||||
|
|
||||||
// Advances the parser and returns the new current token.
|
|
||||||
func (p *textParser) next() *token {
|
|
||||||
if p.backed || p.done {
|
|
||||||
p.backed = false
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
if p.done {
|
|
||||||
p.cur.value = ""
|
|
||||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
|
||||||
// Look for multiple quoted strings separated by whitespace,
|
|
||||||
// and concatenate them.
|
|
||||||
cat := p.cur
|
|
||||||
for {
|
|
||||||
p.skipWhitespace()
|
|
||||||
if p.done || !isQuote(p.s[0]) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
if p.cur.err != nil {
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
cat.value += " " + p.cur.value
|
|
||||||
cat.unquoted += p.cur.unquoted
|
|
||||||
}
|
|
||||||
p.done = false // parser may have seen EOF, but we want to return cat
|
|
||||||
p.cur = cat
|
|
||||||
}
|
|
||||||
return &p.cur
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *textParser) consumeToken(s string) error {
|
|
||||||
tok := p.next()
|
|
||||||
if tok.err != nil {
|
|
||||||
return tok.err
|
|
||||||
}
|
|
||||||
if tok.value != s {
|
|
||||||
p.back()
|
|
||||||
return p.errorf("expected %q, found %q", s, tok.value)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
|
||||||
|
|
||||||
func unquoteC(s string, quote rune) (string, error) {
|
|
||||||
// This is based on C++'s tokenizer.cc.
|
|
||||||
// Despite its name, this is *not* parsing C syntax.
|
|
||||||
// For instance, "\0" is an invalid quoted string.
|
|
||||||
|
|
||||||
// Avoid allocation in trivial cases.
|
|
||||||
simple := true
|
|
||||||
for _, r := range s {
|
|
||||||
if r == '\\' || r == quote {
|
|
||||||
simple = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if simple {
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, 0, 3*len(s)/2)
|
|
||||||
for len(s) > 0 {
|
|
||||||
r, n := utf8.DecodeRuneInString(s)
|
|
||||||
if r == utf8.RuneError && n == 1 {
|
|
||||||
return "", errBadUTF8
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
if r != '\\' {
|
|
||||||
if r < utf8.RuneSelf {
|
|
||||||
buf = append(buf, byte(r))
|
|
||||||
} else {
|
|
||||||
buf = append(buf, string(r)...)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
ch, tail, err := unescape(s)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
buf = append(buf, ch...)
|
|
||||||
s = tail
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unescape(s string) (ch string, tail string, err error) {
|
|
||||||
r, n := utf8.DecodeRuneInString(s)
|
|
||||||
if r == utf8.RuneError && n == 1 {
|
|
||||||
return "", "", errBadUTF8
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
switch r {
|
|
||||||
case 'a':
|
|
||||||
return "\a", s, nil
|
|
||||||
case 'b':
|
|
||||||
return "\b", s, nil
|
|
||||||
case 'f':
|
|
||||||
return "\f", s, nil
|
|
||||||
case 'n':
|
|
||||||
return "\n", s, nil
|
|
||||||
case 'r':
|
|
||||||
return "\r", s, nil
|
|
||||||
case 't':
|
|
||||||
return "\t", s, nil
|
|
||||||
case 'v':
|
|
||||||
return "\v", s, nil
|
|
||||||
case '?':
|
|
||||||
return "?", s, nil // trigraph workaround
|
|
||||||
case '\'', '"', '\\':
|
|
||||||
return string(r), s, nil
|
|
||||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
|
||||||
if len(s) < 2 {
|
|
||||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
|
||||||
}
|
|
||||||
ss := string(r) + s[:2]
|
|
||||||
s = s[2:]
|
|
||||||
i, err := strconv.ParseUint(ss, 8, 8)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
|
||||||
}
|
|
||||||
return string([]byte{byte(i)}), s, nil
|
|
||||||
case 'x', 'X', 'u', 'U':
|
|
||||||
var n int
|
|
||||||
switch r {
|
|
||||||
case 'x', 'X':
|
|
||||||
n = 2
|
|
||||||
case 'u':
|
|
||||||
n = 4
|
|
||||||
case 'U':
|
|
||||||
n = 8
|
|
||||||
}
|
|
||||||
if len(s) < n {
|
|
||||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
|
||||||
}
|
|
||||||
ss := s[:n]
|
|
||||||
s = s[n:]
|
|
||||||
i, err := strconv.ParseUint(ss, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
|
||||||
}
|
|
||||||
if r == 'x' || r == 'X' {
|
|
||||||
return string([]byte{byte(i)}), s, nil
|
|
||||||
}
|
|
||||||
if i > utf8.MaxRune {
|
|
||||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
|
||||||
}
|
|
||||||
return string(rune(i)), s, nil
|
|
||||||
}
|
|
||||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isIdentOrNumberChar(c byte) bool {
|
|
||||||
switch {
|
|
||||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
|
||||||
return true
|
|
||||||
case '0' <= c && c <= '9':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
switch c {
|
|
||||||
case '-', '+', '.', '_':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isWhitespace(c byte) bool {
|
|
||||||
switch c {
|
|
||||||
case ' ', '\t', '\n', '\r':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isQuote(c byte) bool {
|
|
||||||
switch c {
|
|
||||||
case '"', '\'':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
@@ -1,560 +0,0 @@
|
|||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"google.golang.org/protobuf/encoding/prototext"
|
|
||||||
"google.golang.org/protobuf/encoding/protowire"
|
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
|
||||||
)
|
|
||||||
|
|
||||||
const wrapTextMarshalV2 = false
|
|
||||||
|
|
||||||
// TextMarshaler is a configurable text format marshaler.
|
|
||||||
type TextMarshaler struct {
|
|
||||||
Compact bool // use compact text format (one line)
|
|
||||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal writes the proto text format of m to w.
|
|
||||||
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
|
||||||
b, err := tm.marshal(m)
|
|
||||||
if len(b) > 0 {
|
|
||||||
if _, err := w.Write(b); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Text returns a proto text formatted string of m.
|
|
||||||
func (tm *TextMarshaler) Text(m Message) string {
|
|
||||||
b, _ := tm.marshal(m)
|
|
||||||
return string(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
|
||||||
mr := MessageReflect(m)
|
|
||||||
if mr == nil || !mr.IsValid() {
|
|
||||||
return []byte("<nil>"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if wrapTextMarshalV2 {
|
|
||||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
|
||||||
return m.MarshalText()
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := prototext.MarshalOptions{
|
|
||||||
AllowPartial: true,
|
|
||||||
EmitUnknown: true,
|
|
||||||
}
|
|
||||||
if !tm.Compact {
|
|
||||||
opts.Indent = " "
|
|
||||||
}
|
|
||||||
if !tm.ExpandAny {
|
|
||||||
opts.Resolver = (*protoregistry.Types)(nil)
|
|
||||||
}
|
|
||||||
return opts.Marshal(mr.Interface())
|
|
||||||
} else {
|
|
||||||
w := &textWriter{
|
|
||||||
compact: tm.Compact,
|
|
||||||
expandAny: tm.ExpandAny,
|
|
||||||
complete: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
|
||||||
b, err := m.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
w.Write(b)
|
|
||||||
return w.buf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err := w.writeMessage(mr)
|
|
||||||
return w.buf, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
defaultTextMarshaler = TextMarshaler{}
|
|
||||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
|
||||||
)
|
|
||||||
|
|
||||||
// MarshalText writes the proto text format of m to w.
|
|
||||||
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
|
|
||||||
|
|
||||||
// MarshalTextString returns a proto text formatted string of m.
|
|
||||||
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
|
|
||||||
|
|
||||||
// CompactText writes the compact proto text format of m to w.
|
|
||||||
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
|
|
||||||
|
|
||||||
// CompactTextString returns a compact proto text formatted string of m.
|
|
||||||
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|
|
||||||
|
|
||||||
var (
|
|
||||||
newline = []byte("\n")
|
|
||||||
endBraceNewline = []byte("}\n")
|
|
||||||
posInf = []byte("inf")
|
|
||||||
negInf = []byte("-inf")
|
|
||||||
nan = []byte("nan")
|
|
||||||
)
|
|
||||||
|
|
||||||
// textWriter is an io.Writer that tracks its indentation level.
|
|
||||||
type textWriter struct {
|
|
||||||
compact bool // same as TextMarshaler.Compact
|
|
||||||
expandAny bool // same as TextMarshaler.ExpandAny
|
|
||||||
complete bool // whether the current position is a complete line
|
|
||||||
indent int // indentation level; never negative
|
|
||||||
buf []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) Write(p []byte) (n int, _ error) {
|
|
||||||
newlines := bytes.Count(p, newline)
|
|
||||||
if newlines == 0 {
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
w.buf = append(w.buf, p...)
|
|
||||||
w.complete = false
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
frags := bytes.SplitN(p, newline, newlines+1)
|
|
||||||
if w.compact {
|
|
||||||
for i, frag := range frags {
|
|
||||||
if i > 0 {
|
|
||||||
w.buf = append(w.buf, ' ')
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
w.buf = append(w.buf, frag...)
|
|
||||||
n += len(frag)
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, frag := range frags {
|
|
||||||
if w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
w.buf = append(w.buf, frag...)
|
|
||||||
n += len(frag)
|
|
||||||
if i+1 < len(frags) {
|
|
||||||
w.buf = append(w.buf, '\n')
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.complete = len(frags[len(frags)-1]) == 0
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) WriteByte(c byte) error {
|
|
||||||
if w.compact && c == '\n' {
|
|
||||||
c = ' '
|
|
||||||
}
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
w.buf = append(w.buf, c)
|
|
||||||
w.complete = c == '\n'
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
|
|
||||||
if !w.compact && w.complete {
|
|
||||||
w.writeIndent()
|
|
||||||
}
|
|
||||||
w.complete = false
|
|
||||||
|
|
||||||
if fd.Kind() != protoreflect.GroupKind {
|
|
||||||
w.buf = append(w.buf, fd.Name()...)
|
|
||||||
w.WriteByte(':')
|
|
||||||
} else {
|
|
||||||
// Use message type name for group field name.
|
|
||||||
w.buf = append(w.buf, fd.Message().Name()...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !w.compact {
|
|
||||||
w.WriteByte(' ')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func requiresQuotes(u string) bool {
|
|
||||||
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
|
|
||||||
for _, ch := range u {
|
|
||||||
switch {
|
|
||||||
case ch == '.' || ch == '/' || ch == '_':
|
|
||||||
continue
|
|
||||||
case '0' <= ch && ch <= '9':
|
|
||||||
continue
|
|
||||||
case 'A' <= ch && ch <= 'Z':
|
|
||||||
continue
|
|
||||||
case 'a' <= ch && ch <= 'z':
|
|
||||||
continue
|
|
||||||
default:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
|
||||||
//
|
|
||||||
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
|
|
||||||
// required messages are not linked in).
|
|
||||||
//
|
|
||||||
// It returns (true, error) when sv was written in expanded format or an error
|
|
||||||
// was encountered.
|
|
||||||
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
|
|
||||||
md := m.Descriptor()
|
|
||||||
fdURL := md.Fields().ByName("type_url")
|
|
||||||
fdVal := md.Fields().ByName("value")
|
|
||||||
|
|
||||||
url := m.Get(fdURL).String()
|
|
||||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
|
||||||
if err != nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
b := m.Get(fdVal).Bytes()
|
|
||||||
m2 := mt.New()
|
|
||||||
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
w.Write([]byte("["))
|
|
||||||
if requiresQuotes(url) {
|
|
||||||
w.writeQuotedString(url)
|
|
||||||
} else {
|
|
||||||
w.Write([]byte(url))
|
|
||||||
}
|
|
||||||
if w.compact {
|
|
||||||
w.Write([]byte("]:<"))
|
|
||||||
} else {
|
|
||||||
w.Write([]byte("]: <\n"))
|
|
||||||
w.indent++
|
|
||||||
}
|
|
||||||
if err := w.writeMessage(m2); err != nil {
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
if w.compact {
|
|
||||||
w.Write([]byte("> "))
|
|
||||||
} else {
|
|
||||||
w.indent--
|
|
||||||
w.Write([]byte(">\n"))
|
|
||||||
}
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeMessage(m protoreflect.Message) error {
|
|
||||||
md := m.Descriptor()
|
|
||||||
if w.expandAny && md.FullName() == "google.protobuf.Any" {
|
|
||||||
if canExpand, err := w.writeProto3Any(m); canExpand {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fds := md.Fields()
|
|
||||||
for i := 0; i < fds.Len(); {
|
|
||||||
fd := fds.Get(i)
|
|
||||||
if od := fd.ContainingOneof(); od != nil {
|
|
||||||
fd = m.WhichOneof(od)
|
|
||||||
i += od.Fields().Len()
|
|
||||||
} else {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if fd == nil || !m.Has(fd) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case fd.IsList():
|
|
||||||
lv := m.Get(fd).List()
|
|
||||||
for j := 0; j < lv.Len(); j++ {
|
|
||||||
w.writeName(fd)
|
|
||||||
v := lv.Get(j)
|
|
||||||
if err := w.writeSingularValue(v, fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.WriteByte('\n')
|
|
||||||
}
|
|
||||||
case fd.IsMap():
|
|
||||||
kfd := fd.MapKey()
|
|
||||||
vfd := fd.MapValue()
|
|
||||||
mv := m.Get(fd).Map()
|
|
||||||
|
|
||||||
type entry struct{ key, val protoreflect.Value }
|
|
||||||
var entries []entry
|
|
||||||
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
|
||||||
entries = append(entries, entry{k.Value(), v})
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
sort.Slice(entries, func(i, j int) bool {
|
|
||||||
switch kfd.Kind() {
|
|
||||||
case protoreflect.BoolKind:
|
|
||||||
return !entries[i].key.Bool() && entries[j].key.Bool()
|
|
||||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
|
||||||
return entries[i].key.Int() < entries[j].key.Int()
|
|
||||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
|
||||||
return entries[i].key.Uint() < entries[j].key.Uint()
|
|
||||||
case protoreflect.StringKind:
|
|
||||||
return entries[i].key.String() < entries[j].key.String()
|
|
||||||
default:
|
|
||||||
panic("invalid kind")
|
|
||||||
}
|
|
||||||
})
|
|
||||||
for _, entry := range entries {
|
|
||||||
w.writeName(fd)
|
|
||||||
w.WriteByte('<')
|
|
||||||
if !w.compact {
|
|
||||||
w.WriteByte('\n')
|
|
||||||
}
|
|
||||||
w.indent++
|
|
||||||
w.writeName(kfd)
|
|
||||||
if err := w.writeSingularValue(entry.key, kfd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.WriteByte('\n')
|
|
||||||
w.writeName(vfd)
|
|
||||||
if err := w.writeSingularValue(entry.val, vfd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.WriteByte('\n')
|
|
||||||
w.indent--
|
|
||||||
w.WriteByte('>')
|
|
||||||
w.WriteByte('\n')
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
w.writeName(fd)
|
|
||||||
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.WriteByte('\n')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if b := m.GetUnknown(); len(b) > 0 {
|
|
||||||
w.writeUnknownFields(b)
|
|
||||||
}
|
|
||||||
return w.writeExtensions(m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
|
||||||
switch fd.Kind() {
|
|
||||||
case protoreflect.FloatKind, protoreflect.DoubleKind:
|
|
||||||
switch vf := v.Float(); {
|
|
||||||
case math.IsInf(vf, +1):
|
|
||||||
w.Write(posInf)
|
|
||||||
case math.IsInf(vf, -1):
|
|
||||||
w.Write(negInf)
|
|
||||||
case math.IsNaN(vf):
|
|
||||||
w.Write(nan)
|
|
||||||
default:
|
|
||||||
fmt.Fprint(w, v.Interface())
|
|
||||||
}
|
|
||||||
case protoreflect.StringKind:
|
|
||||||
// NOTE: This does not validate UTF-8 for historical reasons.
|
|
||||||
w.writeQuotedString(string(v.String()))
|
|
||||||
case protoreflect.BytesKind:
|
|
||||||
w.writeQuotedString(string(v.Bytes()))
|
|
||||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
|
||||||
var bra, ket byte = '<', '>'
|
|
||||||
if fd.Kind() == protoreflect.GroupKind {
|
|
||||||
bra, ket = '{', '}'
|
|
||||||
}
|
|
||||||
w.WriteByte(bra)
|
|
||||||
if !w.compact {
|
|
||||||
w.WriteByte('\n')
|
|
||||||
}
|
|
||||||
w.indent++
|
|
||||||
m := v.Message()
|
|
||||||
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
|
|
||||||
b, err := m2.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.Write(b)
|
|
||||||
} else {
|
|
||||||
w.writeMessage(m)
|
|
||||||
}
|
|
||||||
w.indent--
|
|
||||||
w.WriteByte(ket)
|
|
||||||
case protoreflect.EnumKind:
|
|
||||||
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
|
|
||||||
fmt.Fprint(w, ev.Name())
|
|
||||||
} else {
|
|
||||||
fmt.Fprint(w, v.Enum())
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
fmt.Fprint(w, v.Interface())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeQuotedString writes a quoted string in the protocol buffer text format.
|
|
||||||
func (w *textWriter) writeQuotedString(s string) {
|
|
||||||
w.WriteByte('"')
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
switch c := s[i]; c {
|
|
||||||
case '\n':
|
|
||||||
w.buf = append(w.buf, `\n`...)
|
|
||||||
case '\r':
|
|
||||||
w.buf = append(w.buf, `\r`...)
|
|
||||||
case '\t':
|
|
||||||
w.buf = append(w.buf, `\t`...)
|
|
||||||
case '"':
|
|
||||||
w.buf = append(w.buf, `\"`...)
|
|
||||||
case '\\':
|
|
||||||
w.buf = append(w.buf, `\\`...)
|
|
||||||
default:
|
|
||||||
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
|
|
||||||
w.buf = append(w.buf, c)
|
|
||||||
} else {
|
|
||||||
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.WriteByte('"')
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeUnknownFields(b []byte) {
|
|
||||||
if !w.compact {
|
|
||||||
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
|
|
||||||
}
|
|
||||||
|
|
||||||
for len(b) > 0 {
|
|
||||||
num, wtyp, n := protowire.ConsumeTag(b)
|
|
||||||
if n < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
|
|
||||||
if wtyp == protowire.EndGroupType {
|
|
||||||
w.indent--
|
|
||||||
w.Write(endBraceNewline)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fmt.Fprint(w, num)
|
|
||||||
if wtyp != protowire.StartGroupType {
|
|
||||||
w.WriteByte(':')
|
|
||||||
}
|
|
||||||
if !w.compact || wtyp == protowire.StartGroupType {
|
|
||||||
w.WriteByte(' ')
|
|
||||||
}
|
|
||||||
switch wtyp {
|
|
||||||
case protowire.VarintType:
|
|
||||||
v, n := protowire.ConsumeVarint(b)
|
|
||||||
if n < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
fmt.Fprint(w, v)
|
|
||||||
case protowire.Fixed32Type:
|
|
||||||
v, n := protowire.ConsumeFixed32(b)
|
|
||||||
if n < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
fmt.Fprint(w, v)
|
|
||||||
case protowire.Fixed64Type:
|
|
||||||
v, n := protowire.ConsumeFixed64(b)
|
|
||||||
if n < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
fmt.Fprint(w, v)
|
|
||||||
case protowire.BytesType:
|
|
||||||
v, n := protowire.ConsumeBytes(b)
|
|
||||||
if n < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
b = b[n:]
|
|
||||||
fmt.Fprintf(w, "%q", v)
|
|
||||||
case protowire.StartGroupType:
|
|
||||||
w.WriteByte('{')
|
|
||||||
w.indent++
|
|
||||||
default:
|
|
||||||
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
|
|
||||||
}
|
|
||||||
w.WriteByte('\n')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeExtensions writes all the extensions in m.
|
|
||||||
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
|
|
||||||
md := m.Descriptor()
|
|
||||||
if md.ExtensionRanges().Len() == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type ext struct {
|
|
||||||
desc protoreflect.FieldDescriptor
|
|
||||||
val protoreflect.Value
|
|
||||||
}
|
|
||||||
var exts []ext
|
|
||||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
|
||||||
if fd.IsExtension() {
|
|
||||||
exts = append(exts, ext{fd, v})
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
})
|
|
||||||
sort.Slice(exts, func(i, j int) bool {
|
|
||||||
return exts[i].desc.Number() < exts[j].desc.Number()
|
|
||||||
})
|
|
||||||
|
|
||||||
for _, ext := range exts {
|
|
||||||
// For message set, use the name of the message as the extension name.
|
|
||||||
name := string(ext.desc.FullName())
|
|
||||||
if isMessageSet(ext.desc.ContainingMessage()) {
|
|
||||||
name = strings.TrimSuffix(name, ".message_set_extension")
|
|
||||||
}
|
|
||||||
|
|
||||||
if !ext.desc.IsList() {
|
|
||||||
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
lv := ext.val.List()
|
|
||||||
for i := 0; i < lv.Len(); i++ {
|
|
||||||
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
|
||||||
fmt.Fprintf(w, "[%s]:", name)
|
|
||||||
if !w.compact {
|
|
||||||
w.WriteByte(' ')
|
|
||||||
}
|
|
||||||
if err := w.writeSingularValue(v, fd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.WriteByte('\n')
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *textWriter) writeIndent() {
|
|
||||||
if !w.complete {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for i := 0; i < w.indent*2; i++ {
|
|
||||||
w.buf = append(w.buf, ' ')
|
|
||||||
}
|
|
||||||
w.complete = false
|
|
||||||
}
|
|
||||||
78
vendor/github.com/golang/protobuf/proto/wire.go
generated
vendored
78
vendor/github.com/golang/protobuf/proto/wire.go
generated
vendored
@@ -1,78 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoV2 "google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/runtime/protoiface"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Size returns the size in bytes of the wire-format encoding of m.
|
|
||||||
func Size(m Message) int {
|
|
||||||
if m == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
mi := MessageV2(m)
|
|
||||||
return protoV2.Size(mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Marshal returns the wire-format encoding of m.
|
|
||||||
func Marshal(m Message) ([]byte, error) {
|
|
||||||
b, err := marshalAppend(nil, m, false)
|
|
||||||
if b == nil {
|
|
||||||
b = zeroBytes
|
|
||||||
}
|
|
||||||
return b, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var zeroBytes = make([]byte, 0, 0)
|
|
||||||
|
|
||||||
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
|
|
||||||
if m == nil {
|
|
||||||
return nil, ErrNil
|
|
||||||
}
|
|
||||||
mi := MessageV2(m)
|
|
||||||
nbuf, err := protoV2.MarshalOptions{
|
|
||||||
Deterministic: deterministic,
|
|
||||||
AllowPartial: true,
|
|
||||||
}.MarshalAppend(buf, mi)
|
|
||||||
if err != nil {
|
|
||||||
return buf, err
|
|
||||||
}
|
|
||||||
if len(buf) == len(nbuf) {
|
|
||||||
if !mi.ProtoReflect().IsValid() {
|
|
||||||
return buf, ErrNil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nbuf, checkRequiredNotSet(mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal parses a wire-format message in b and places the decoded results in m.
|
|
||||||
//
|
|
||||||
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
|
|
||||||
// removed. Use UnmarshalMerge to preserve and append to existing data.
|
|
||||||
func Unmarshal(b []byte, m Message) error {
|
|
||||||
m.Reset()
|
|
||||||
return UnmarshalMerge(b, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
|
|
||||||
func UnmarshalMerge(b []byte, m Message) error {
|
|
||||||
mi := MessageV2(m)
|
|
||||||
out, err := protoV2.UnmarshalOptions{
|
|
||||||
AllowPartial: true,
|
|
||||||
Merge: true,
|
|
||||||
}.UnmarshalState(protoiface.UnmarshalInput{
|
|
||||||
Buf: b,
|
|
||||||
Message: mi.ProtoReflect(),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if out.Flags&protoiface.UnmarshalInitialized > 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return checkRequiredNotSet(mi)
|
|
||||||
}
|
|
||||||
34
vendor/github.com/golang/protobuf/proto/wrappers.go
generated
vendored
34
vendor/github.com/golang/protobuf/proto/wrappers.go
generated
vendored
@@ -1,34 +0,0 @@
|
|||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package proto
|
|
||||||
|
|
||||||
// Bool stores v in a new bool value and returns a pointer to it.
|
|
||||||
func Bool(v bool) *bool { return &v }
|
|
||||||
|
|
||||||
// Int stores v in a new int32 value and returns a pointer to it.
|
|
||||||
//
|
|
||||||
// Deprecated: Use Int32 instead.
|
|
||||||
func Int(v int) *int32 { return Int32(int32(v)) }
|
|
||||||
|
|
||||||
// Int32 stores v in a new int32 value and returns a pointer to it.
|
|
||||||
func Int32(v int32) *int32 { return &v }
|
|
||||||
|
|
||||||
// Int64 stores v in a new int64 value and returns a pointer to it.
|
|
||||||
func Int64(v int64) *int64 { return &v }
|
|
||||||
|
|
||||||
// Uint32 stores v in a new uint32 value and returns a pointer to it.
|
|
||||||
func Uint32(v uint32) *uint32 { return &v }
|
|
||||||
|
|
||||||
// Uint64 stores v in a new uint64 value and returns a pointer to it.
|
|
||||||
func Uint64(v uint64) *uint64 { return &v }
|
|
||||||
|
|
||||||
// Float32 stores v in a new float32 value and returns a pointer to it.
|
|
||||||
func Float32(v float32) *float32 { return &v }
|
|
||||||
|
|
||||||
// Float64 stores v in a new float64 value and returns a pointer to it.
|
|
||||||
func Float64(v float64) *float64 { return &v }
|
|
||||||
|
|
||||||
// String stores v in a new string value and returns a pointer to it.
|
|
||||||
func String(v string) *string { return &v }
|
|
||||||
165
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
165
vendor/github.com/golang/protobuf/ptypes/any.go
generated
vendored
@@ -1,165 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ptypes
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
|
||||||
|
|
||||||
anypb "github.com/golang/protobuf/ptypes/any"
|
|
||||||
)
|
|
||||||
|
|
||||||
const urlPrefix = "type.googleapis.com/"
|
|
||||||
|
|
||||||
// AnyMessageName returns the message name contained in an anypb.Any message.
|
|
||||||
// Most type assertions should use the Is function instead.
|
|
||||||
func AnyMessageName(any *anypb.Any) (string, error) {
|
|
||||||
name, err := anyMessageName(any)
|
|
||||||
return string(name), err
|
|
||||||
}
|
|
||||||
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
|
|
||||||
if any == nil {
|
|
||||||
return "", fmt.Errorf("message is nil")
|
|
||||||
}
|
|
||||||
name := protoreflect.FullName(any.TypeUrl)
|
|
||||||
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
|
|
||||||
name = name[i+len("/"):]
|
|
||||||
}
|
|
||||||
if !name.IsValid() {
|
|
||||||
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
|
||||||
}
|
|
||||||
return name, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalAny marshals the given message m into an anypb.Any message.
|
|
||||||
func MarshalAny(m proto.Message) (*anypb.Any, error) {
|
|
||||||
switch dm := m.(type) {
|
|
||||||
case DynamicAny:
|
|
||||||
m = dm.Message
|
|
||||||
case *DynamicAny:
|
|
||||||
if dm == nil {
|
|
||||||
return nil, proto.ErrNil
|
|
||||||
}
|
|
||||||
m = dm.Message
|
|
||||||
}
|
|
||||||
b, err := proto.Marshal(m)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Empty returns a new message of the type specified in an anypb.Any message.
|
|
||||||
// It returns protoregistry.NotFound if the corresponding message type could not
|
|
||||||
// be resolved in the global registry.
|
|
||||||
func Empty(any *anypb.Any) (proto.Message, error) {
|
|
||||||
name, err := anyMessageName(any)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto.MessageV1(mt.New().Interface()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
|
|
||||||
// into the provided message m. It returns an error if the target message
|
|
||||||
// does not match the type in the Any message or if an unmarshal error occurs.
|
|
||||||
//
|
|
||||||
// The target message m may be a *DynamicAny message. If the underlying message
|
|
||||||
// type could not be resolved, then this returns protoregistry.NotFound.
|
|
||||||
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
|
|
||||||
if dm, ok := m.(*DynamicAny); ok {
|
|
||||||
if dm.Message == nil {
|
|
||||||
var err error
|
|
||||||
dm.Message, err = Empty(any)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
m = dm.Message
|
|
||||||
}
|
|
||||||
|
|
||||||
anyName, err := AnyMessageName(any)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
msgName := proto.MessageName(m)
|
|
||||||
if anyName != msgName {
|
|
||||||
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
|
|
||||||
}
|
|
||||||
return proto.Unmarshal(any.Value, m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Is reports whether the Any message contains a message of the specified type.
|
|
||||||
func Is(any *anypb.Any, m proto.Message) bool {
|
|
||||||
if any == nil || m == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
name := proto.MessageName(m)
|
|
||||||
if !strings.HasSuffix(any.TypeUrl, name) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
|
|
||||||
}
|
|
||||||
|
|
||||||
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
|
||||||
// allocate a proto.Message for the type specified in an anypb.Any message.
|
|
||||||
// The allocated message is stored in the embedded proto.Message.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
// var x ptypes.DynamicAny
|
|
||||||
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
|
||||||
// fmt.Printf("unmarshaled message: %v", x.Message)
|
|
||||||
type DynamicAny struct{ proto.Message }
|
|
||||||
|
|
||||||
func (m DynamicAny) String() string {
|
|
||||||
if m.Message == nil {
|
|
||||||
return "<nil>"
|
|
||||||
}
|
|
||||||
return m.Message.String()
|
|
||||||
}
|
|
||||||
func (m DynamicAny) Reset() {
|
|
||||||
if m.Message == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
m.Message.Reset()
|
|
||||||
}
|
|
||||||
func (m DynamicAny) ProtoMessage() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
func (m DynamicAny) ProtoReflect() protoreflect.Message {
|
|
||||||
if m.Message == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return dynamicAny{proto.MessageReflect(m.Message)}
|
|
||||||
}
|
|
||||||
|
|
||||||
type dynamicAny struct{ protoreflect.Message }
|
|
||||||
|
|
||||||
func (m dynamicAny) Type() protoreflect.MessageType {
|
|
||||||
return dynamicAnyType{m.Message.Type()}
|
|
||||||
}
|
|
||||||
func (m dynamicAny) New() protoreflect.Message {
|
|
||||||
return dynamicAnyType{m.Message.Type()}.New()
|
|
||||||
}
|
|
||||||
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
|
|
||||||
return DynamicAny{proto.MessageV1(m.Message.Interface())}
|
|
||||||
}
|
|
||||||
|
|
||||||
type dynamicAnyType struct{ protoreflect.MessageType }
|
|
||||||
|
|
||||||
func (t dynamicAnyType) New() protoreflect.Message {
|
|
||||||
return dynamicAny{t.MessageType.New()}
|
|
||||||
}
|
|
||||||
func (t dynamicAnyType) Zero() protoreflect.Message {
|
|
||||||
return dynamicAny{t.MessageType.Zero()}
|
|
||||||
}
|
|
||||||
62
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
62
vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
generated
vendored
@@ -1,62 +0,0 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
|
||||||
|
|
||||||
package any
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
|
||||||
reflect "reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Symbols defined in public import of google/protobuf/any.proto.
|
|
||||||
|
|
||||||
type Any = anypb.Any
|
|
||||||
|
|
||||||
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
|
|
||||||
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
|
||||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
|
||||||
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
|
||||||
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
|
|
||||||
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
|
|
||||||
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
|
|
||||||
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
|
|
||||||
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
|
||||||
0x74, 0x6f, 0x33,
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
|
|
||||||
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
|
|
||||||
0, // [0:0] is the sub-list for method output_type
|
|
||||||
0, // [0:0] is the sub-list for method input_type
|
|
||||||
0, // [0:0] is the sub-list for extension type_name
|
|
||||||
0, // [0:0] is the sub-list for extension extendee
|
|
||||||
0, // [0:0] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
|
|
||||||
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
|
|
||||||
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
|
|
||||||
NumEnums: 0,
|
|
||||||
NumMessages: 0,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 0,
|
|
||||||
},
|
|
||||||
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
|
|
||||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
|
|
||||||
}.Build()
|
|
||||||
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
|
|
||||||
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
|
|
||||||
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
|
|
||||||
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
|
|
||||||
}
|
|
||||||
6
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
6
vendor/github.com/golang/protobuf/ptypes/doc.go
generated
vendored
@@ -1,6 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package ptypes provides functionality for interacting with well-known types.
|
|
||||||
package ptypes
|
|
||||||
72
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
72
vendor/github.com/golang/protobuf/ptypes/duration.go
generated
vendored
@@ -1,72 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ptypes
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
durationpb "github.com/golang/protobuf/ptypes/duration"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Range of google.protobuf.Duration as specified in duration.proto.
|
|
||||||
// This is about 10,000 years in seconds.
|
|
||||||
const (
|
|
||||||
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
|
||||||
minSeconds = -maxSeconds
|
|
||||||
)
|
|
||||||
|
|
||||||
// Duration converts a durationpb.Duration to a time.Duration.
|
|
||||||
// Duration returns an error if dur is invalid or overflows a time.Duration.
|
|
||||||
func Duration(dur *durationpb.Duration) (time.Duration, error) {
|
|
||||||
if err := validateDuration(dur); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
d := time.Duration(dur.Seconds) * time.Second
|
|
||||||
if int64(d/time.Second) != dur.Seconds {
|
|
||||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
|
||||||
}
|
|
||||||
if dur.Nanos != 0 {
|
|
||||||
d += time.Duration(dur.Nanos) * time.Nanosecond
|
|
||||||
if (d < 0) != (dur.Nanos < 0) {
|
|
||||||
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return d, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DurationProto converts a time.Duration to a durationpb.Duration.
|
|
||||||
func DurationProto(d time.Duration) *durationpb.Duration {
|
|
||||||
nanos := d.Nanoseconds()
|
|
||||||
secs := nanos / 1e9
|
|
||||||
nanos -= secs * 1e9
|
|
||||||
return &durationpb.Duration{
|
|
||||||
Seconds: int64(secs),
|
|
||||||
Nanos: int32(nanos),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateDuration determines whether the durationpb.Duration is valid
|
|
||||||
// according to the definition in google/protobuf/duration.proto.
|
|
||||||
// A valid durpb.Duration may still be too large to fit into a time.Duration
|
|
||||||
// Note that the range of durationpb.Duration is about 10,000 years,
|
|
||||||
// while the range of time.Duration is about 290 years.
|
|
||||||
func validateDuration(dur *durationpb.Duration) error {
|
|
||||||
if dur == nil {
|
|
||||||
return errors.New("duration: nil Duration")
|
|
||||||
}
|
|
||||||
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
|
|
||||||
return fmt.Errorf("duration: %v: seconds out of range", dur)
|
|
||||||
}
|
|
||||||
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
|
|
||||||
return fmt.Errorf("duration: %v: nanos out of range", dur)
|
|
||||||
}
|
|
||||||
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
|
||||||
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
|
|
||||||
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
63
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
63
vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
generated
vendored
@@ -1,63 +0,0 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
|
|
||||||
|
|
||||||
package duration
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
|
||||||
reflect "reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Symbols defined in public import of google/protobuf/duration.proto.
|
|
||||||
|
|
||||||
type Duration = durationpb.Duration
|
|
||||||
|
|
||||||
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
|
|
||||||
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
|
||||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
|
||||||
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
|
|
||||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
|
||||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
|
|
||||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
|
||||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
|
||||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
|
||||||
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
|
|
||||||
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
|
|
||||||
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
|
|
||||||
0, // [0:0] is the sub-list for method output_type
|
|
||||||
0, // [0:0] is the sub-list for method input_type
|
|
||||||
0, // [0:0] is the sub-list for extension type_name
|
|
||||||
0, // [0:0] is the sub-list for extension extendee
|
|
||||||
0, // [0:0] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
|
|
||||||
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
|
|
||||||
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
|
|
||||||
NumEnums: 0,
|
|
||||||
NumMessages: 0,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 0,
|
|
||||||
},
|
|
||||||
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
|
|
||||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
|
|
||||||
}.Build()
|
|
||||||
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
|
|
||||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
|
|
||||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
|
|
||||||
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
|
|
||||||
}
|
|
||||||
103
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
103
vendor/github.com/golang/protobuf/ptypes/timestamp.go
generated
vendored
@@ -1,103 +0,0 @@
|
|||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package ptypes
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Range of google.protobuf.Duration as specified in timestamp.proto.
|
|
||||||
const (
|
|
||||||
// Seconds field of the earliest valid Timestamp.
|
|
||||||
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
|
||||||
minValidSeconds = -62135596800
|
|
||||||
// Seconds field just after the latest valid Timestamp.
|
|
||||||
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
|
||||||
maxValidSeconds = 253402300800
|
|
||||||
)
|
|
||||||
|
|
||||||
// Timestamp converts a timestamppb.Timestamp to a time.Time.
|
|
||||||
// It returns an error if the argument is invalid.
|
|
||||||
//
|
|
||||||
// Unlike most Go functions, if Timestamp returns an error, the first return
|
|
||||||
// value is not the zero time.Time. Instead, it is the value obtained from the
|
|
||||||
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
|
||||||
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
|
||||||
// do map to valid time.Times.
|
|
||||||
//
|
|
||||||
// A nil Timestamp returns an error. The first return value in that case is
|
|
||||||
// undefined.
|
|
||||||
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
|
|
||||||
// Don't return the zero value on error, because corresponds to a valid
|
|
||||||
// timestamp. Instead return whatever time.Unix gives us.
|
|
||||||
var t time.Time
|
|
||||||
if ts == nil {
|
|
||||||
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
|
||||||
} else {
|
|
||||||
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
|
||||||
}
|
|
||||||
return t, validateTimestamp(ts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimestampNow returns a google.protobuf.Timestamp for the current time.
|
|
||||||
func TimestampNow() *timestamppb.Timestamp {
|
|
||||||
ts, err := TimestampProto(time.Now())
|
|
||||||
if err != nil {
|
|
||||||
panic("ptypes: time.Now() out of Timestamp range")
|
|
||||||
}
|
|
||||||
return ts
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
|
||||||
// It returns an error if the resulting Timestamp is invalid.
|
|
||||||
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
|
|
||||||
ts := ×tamppb.Timestamp{
|
|
||||||
Seconds: t.Unix(),
|
|
||||||
Nanos: int32(t.Nanosecond()),
|
|
||||||
}
|
|
||||||
if err := validateTimestamp(ts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return ts, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimestampString returns the RFC 3339 string for valid Timestamps.
|
|
||||||
// For invalid Timestamps, it returns an error message in parentheses.
|
|
||||||
func TimestampString(ts *timestamppb.Timestamp) string {
|
|
||||||
t, err := Timestamp(ts)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Sprintf("(%v)", err)
|
|
||||||
}
|
|
||||||
return t.Format(time.RFC3339Nano)
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateTimestamp determines whether a Timestamp is valid.
|
|
||||||
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
|
|
||||||
// and has a Nanos field in the range [0, 1e9).
|
|
||||||
//
|
|
||||||
// If the Timestamp is valid, validateTimestamp returns nil.
|
|
||||||
// Otherwise, it returns an error that describes the problem.
|
|
||||||
//
|
|
||||||
// Every valid Timestamp can be represented by a time.Time,
|
|
||||||
// but the converse is not true.
|
|
||||||
func validateTimestamp(ts *timestamppb.Timestamp) error {
|
|
||||||
if ts == nil {
|
|
||||||
return errors.New("timestamp: nil Timestamp")
|
|
||||||
}
|
|
||||||
if ts.Seconds < minValidSeconds {
|
|
||||||
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
|
|
||||||
}
|
|
||||||
if ts.Seconds >= maxValidSeconds {
|
|
||||||
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
|
|
||||||
}
|
|
||||||
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
|
||||||
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
64
vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
generated
vendored
@@ -1,64 +0,0 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
|
|
||||||
|
|
||||||
package timestamp
|
|
||||||
|
|
||||||
import (
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
|
||||||
reflect "reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Symbols defined in public import of google/protobuf/timestamp.proto.
|
|
||||||
|
|
||||||
type Timestamp = timestamppb.Timestamp
|
|
||||||
|
|
||||||
var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
|
|
||||||
0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
|
||||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
|
||||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
|
|
||||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
|
||||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
|
|
||||||
0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
|
||||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
|
||||||
0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
|
|
||||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
|
||||||
0x33,
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
|
|
||||||
var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
|
|
||||||
0, // [0:0] is the sub-list for method output_type
|
|
||||||
0, // [0:0] is the sub-list for method input_type
|
|
||||||
0, // [0:0] is the sub-list for extension type_name
|
|
||||||
0, // [0:0] is the sub-list for extension extendee
|
|
||||||
0, // [0:0] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
|
|
||||||
func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
|
|
||||||
if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
|
|
||||||
NumEnums: 0,
|
|
||||||
NumMessages: 0,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 0,
|
|
||||||
},
|
|
||||||
GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
|
|
||||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
|
|
||||||
}.Build()
|
|
||||||
File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
|
|
||||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
|
|
||||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
|
|
||||||
file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
|
|
||||||
}
|
|
||||||
vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE (generated, vendored): 201 deleted lines
@@ -1,201 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE (generated, vendored): 1 deleted line
@@ -1 +0,0 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore (generated, vendored): 1 deleted line
@@ -1 +0,0 @@
cover.dat
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile (generated, vendored): 7 deleted lines
@@ -1,7 +0,0 @@
all:

cover:
	go test -cover -v -coverprofile=cover.dat ./...
	go tool cover -func cover.dat

.PHONY: cover
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go (generated, vendored): 75 deleted lines
@@ -1,75 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pbutil

import (
	"encoding/binary"
	"errors"
	"io"

	"github.com/golang/protobuf/proto"
)

var errInvalidVarint = errors.New("invalid varint32 encountered")

// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
	// Per AbstractParser#parsePartialDelimitedFrom with
	// CodedInputStream#readRawVarint32.
	var headerBuf [binary.MaxVarintLen32]byte
	var bytesRead, varIntBytes int
	var messageLength uint64
	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
		if bytesRead >= len(headerBuf) {
			return bytesRead, errInvalidVarint
		}
		// We have to read byte by byte here to avoid reading more bytes
		// than required. Each read byte is appended to what we have
		// read before.
		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
		if newBytesRead == 0 {
			if err != nil {
				return bytesRead, err
			}
			// A Reader should not return (0, nil), but if it does,
			// it should be treated as no-op (according to the
			// Reader contract). So let's go on...
			continue
		}
		bytesRead += newBytesRead
		// Now present everything read so far to the varint decoder and
		// see if a varint can be decoded already.
		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
	}

	messageBuf := make([]byte, messageLength)
	newBytesRead, err := io.ReadFull(r, messageBuf)
	bytesRead += newBytesRead
	if err != nil {
		return bytesRead, err
	}

	return bytesRead, proto.Unmarshal(messageBuf, m)
}
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go (generated, vendored): 46 deleted lines
@@ -1,46 +0,0 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pbutil

import (
	"encoding/binary"
	"io"

	"github.com/golang/protobuf/proto"
)

// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
	buffer, err := proto.Marshal(m)
	if err != nil {
		return 0, err
	}

	var buf [binary.MaxVarintLen32]byte
	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))

	sync, err := w.Write(buf[:encodedLength])
	if err != nil {
		return sync, err
	}

	n, err = w.Write(buffer)
	return n + sync, err
}
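The two deleted functions form a matched pair: WriteDelimited emits one varint-length-prefixed record and ReadDelimited consumes exactly one. A minimal round-trip sketch of how the removed package was typically used; the dto.MetricFamily message and all names below are illustrative and not part of this diff:

// Hypothetical usage sketch of the removed pbutil package (not part of the vendored code).
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	in := &dto.MetricFamily{Name: proto.String("example_requests_total")} // illustrative message

	// WriteDelimited prefixes the marshalled message with its length as a varint.
	var buf bytes.Buffer
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		log.Fatal(err)
	}

	// ReadDelimited consumes exactly one length-prefixed message from the stream.
	out := &dto.MetricFamily{}
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName()) // prints: example_requests_total
}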
vendor/github.com/prometheus/client_golang/prometheus/README.md (generated, vendored): 2 changed lines
@@ -1 +1 @@
-See [](https://godoc.org/github.com/prometheus/client_golang/prometheus).
+See [](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go (generated, vendored, new file): 38 added lines
@@ -0,0 +1,38 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import "runtime/debug"

// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
	path, version, sum := "unknown", "unknown", "unknown"
	if bi, ok := debug.ReadBuildInfo(); ok {
		path = bi.Main.Path
		version = bi.Main.Version
		sum = bi.Main.Sum
	}
	c := &selfCollector{MustNewConstMetric(
		NewDesc(
			"go_build_info",
			"Build information about the main Go module.",
			nil, Labels{"path": path, "version": version, "checksum": sum},
		),
		GaugeValue, 1)}
	c.init(c.self)
	return c
}
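NewBuildInfoCollector is kept here only as a deprecated shim; new code is expected to go through the collectors sub-package. A minimal sketch of the replacement call, assuming a custom registry (the registry, port, and endpoint are illustrative, not part of this diff):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A non-global registry, as recommended by the updated package docs.
	reg := prometheus.NewRegistry()

	// collectors.NewBuildInfoCollector is the supported replacement for the
	// deprecated prometheus.NewBuildInfoCollector shown above.
	reg.MustRegister(collectors.NewBuildInfoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}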
vendor/github.com/prometheus/client_golang/prometheus/collector.go (generated, vendored): 8 changed lines
@@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
	ch <- c.self
}
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+	Metric
+	Collector
+}
vendor/github.com/prometheus/client_golang/prometheus/counter.go (generated, vendored): 61 changed lines
@@ -20,6 +20,7 @@ import (
	"time"

	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
)

// Counter is a Metric that represents a single numerical value that only ever
@@ -51,7 +52,7 @@ type Counter interface {
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
-// than 64 runes in total.
+// than 128 runes in total.
type ExemplarAdder interface {
	AddWithExemplar(value float64, exemplar Labels)
}
@@ -59,6 +60,18 @@ type ExemplarAdder interface {
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts

+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+	CounterOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
@@ -78,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter {
		nil,
		opts.ConstLabels,
	)
-	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
	result.init(result) // Init self-collection.
+	result.createdTs = timestamppb.New(opts.now())
	return result
}

@@ -94,10 +111,12 @@ type counter struct {
	selfCollector
	desc *Desc

+	createdTs  *timestamppb.Timestamp
	labelPairs []*dto.LabelPair
	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.

-	now func() time.Time // To mock out time.Now() for testing.
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
}

func (c *counter) Desc() *Desc {
@@ -133,17 +152,21 @@ func (c *counter) Inc() {
	atomic.AddUint64(&c.valInt, 1)
}

-func (c *counter) Write(out *dto.Metric) error {
+func (c *counter) get() float64 {
	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
	ival := atomic.LoadUint64(&c.valInt)
-	val := fval + float64(ival)
+	return fval + float64(ival)
+}

+func (c *counter) Write(out *dto.Metric) error {
+	// Read the Exemplar first and the value second. This is to avoid a race condition
+	// where users see an exemplar for a not-yet-existing observation.
	var exemplar *dto.Exemplar
	if e := c.exemplar.Load(); e != nil {
		exemplar = e.(*dto.Exemplar)
	}
+	val := c.get()
-	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
}

func (c *counter) updateExemplar(v float64, l Labels) {
@@ -169,19 +192,31 @@ type CounterVec struct {
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
-	desc := NewDesc(
+	return V2.NewCounterVec(CounterVecOpts{
+		CounterOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+	desc := V2.NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
-		labelNames,
+		opts.VariableLabels,
		opts.ConstLabels,
	)
+	if opts.now == nil {
+		opts.now = time.Now
+	}
	return &CounterVec{
		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
			}
-			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
+			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
			result.init(result) // Init self-collection.
+			result.createdTs = timestamppb.New(opts.now())
			return result
		}),
	}
@@ -241,6 +276,7 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
+//
//	myVec.WithLabelValues("404", "GET").Add(42)
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
	c, err := v.GetMetricWithLabelValues(lvs...)
@@ -252,6 +288,7 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter {

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
+//
//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
	c, err := v.GetMetricWith(labels)
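The counter changes above introduce CounterVecOpts with constrainable variable labels, record a created timestamp per counter, and raise the exemplar label budget to 128 runes. A short sketch of how the new V2 constructor and AddWithExemplar might fit together; the metric name, label, lowercasing constraint, and trace ID are illustrative assumptions, not taken from this diff:

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// V2.NewCounterVec takes CounterVecOpts; the Constraint normalises each
	// label value before it is used ("GET" becomes "get" here).
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "myapp_http_requests_total", // illustrative metric name
			Help: "HTTP requests, partitioned by method.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToLower},
		},
	})
	prometheus.MustRegister(requests)

	// The counters handed out by the vec also implement ExemplarAdder, so an
	// exemplar such as a trace ID can be attached; its labels may now total
	// up to 128 runes.
	requests.WithLabelValues("GET").(prometheus.ExemplarAdder).
		AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
}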
vendor/github.com/prometheus/client_golang/prometheus/desc.go (generated, vendored): 59 changed lines
@@ -14,17 +14,16 @@
package prometheus

import (
-	"errors"
	"fmt"
	"sort"
	"strings"

	"github.com/cespare/xxhash/v2"
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/model"

	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
)

// Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -51,9 +50,9 @@ type Desc struct {
	// constLabelPairs contains precalculated DTO label pairs based on
	// the constant labels.
	constLabelPairs []*dto.LabelPair
-	// variableLabels contains names of labels for which the metric
-	// maintains variable values.
-	variableLabels []string
+	// variableLabels contains names of labels and normalization function for
+	// which the metric maintains variable values.
+	variableLabels *compiledLabels
	// id is a hash of the values of the ConstLabels and fqName. This
	// must be unique among all registered descriptors and can therefore be
	// used as an identifier of the descriptor.
@@ -77,10 +76,24 @@ type Desc struct {
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
	d := &Desc{
		fqName:         fqName,
		help:           help,
-		variableLabels: variableLabels,
+		variableLabels: variableLabels.compile(),
	}
	if !model.IsValidMetricName(model.LabelValue(fqName)) {
		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@@ -90,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
	// their sorted label names) plus the fqName (at position 0).
	labelValues := make([]string, 1, len(constLabels)+1)
	labelValues[0] = fqName
-	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
	labelNameSet := map[string]struct{}{}
	// First add only the const label names and sort them...
	for labelName := range constLabels {
@@ -115,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
	// Now add the variable label names, but prefix them with something that
	// cannot be in a regular label name. That prevents matching the label
	// dimension with a different mix between preset and variable labels.
-	for _, labelName := range variableLabels {
-		if !checkLabelName(labelName) {
-			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+	for _, label := range d.variableLabels.names {
+		if !checkLabelName(label) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
			return d
		}
-		labelNames = append(labelNames, "$"+labelName)
-		labelNameSet[labelName] = struct{}{}
+		labelNames = append(labelNames, "$"+label)
+		labelNameSet[label] = struct{}{}
	}
	if len(labelNames) != len(labelNameSet) {
-		d.err = errors.New("duplicate label names")
+		d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
		return d
	}

@@ -154,7 +167,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
			Value: proto.String(v),
		})
	}
-	sort.Sort(labelPairSorter(d.constLabelPairs))
+	sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
	return d
}

@@ -176,11 +189,19 @@ func (d *Desc) String() string {
			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
		)
	}
+	vlStrings := make([]string, 0, len(d.variableLabels.names))
+	for _, vl := range d.variableLabels.names {
+		if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+			vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+		} else {
+			vlStrings = append(vlStrings, vl)
+		}
+	}
	return fmt.Sprintf(
-		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
		d.fqName,
		d.help,
		strings.Join(lpStrings, ","),
-		d.variableLabels,
+		strings.Join(vlStrings, ","),
	)
}
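V2.NewDesc now takes ConstrainableLabels rather than a plain []string, and Desc.String renders constrained labels as c(<name>). A brief sketch under those assumptions; the metric name, labels, and constraint below are made up for illustration and are not part of this diff:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The "shard" label and its lowercasing constraint are illustrative.
	desc := prometheus.V2.NewDesc(
		"myapp_jobs_processed_total", // illustrative fully-qualified name
		"Jobs processed, partitioned by shard.",
		prometheus.ConstrainedLabels{
			{Name: "shard", Constraint: strings.ToLower},
		},
		prometheus.Labels{"app": "myapp"}, // constant labels
	)

	// String() now renders constrained variable labels as c(shard).
	fmt.Println(desc)

	// A Desc built this way still backs constant metrics as before.
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42, "shard-a")
	_ = m
}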
vendor/github.com/prometheus/client_golang/prometheus/doc.go (generated, vendored): 61 changed lines
@@ -21,7 +21,7 @@
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
-// A Basic Example
+// # A Basic Example
//
// As a starting point, a very basic usage example:
//
@@ -35,41 +35,52 @@
//		"github.com/prometheus/client_golang/prometheus/promhttp"
//	)
//
-//	var (
-//		cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+//	type metrics struct {
+//		cpuTemp    prometheus.Gauge
+//		hdFailures *prometheus.CounterVec
+//	}
+//
+//	func NewMetrics(reg prometheus.Registerer) *metrics {
+//		m := &metrics{
+//			cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
//				Name: "cpu_temperature_celsius",
//				Help: "Current temperature of the CPU.",
-//			})
-//		hdFailures = prometheus.NewCounterVec(
+//			}),
+//			hdFailures: prometheus.NewCounterVec(
//				prometheus.CounterOpts{
//					Name: "hd_errors_total",
//					Help: "Number of hard-disk errors.",
//				},
//				[]string{"device"},
-//		)
-//	)
-//
-//	func init() {
-//		// Metrics have to be registered to be exposed:
-//		prometheus.MustRegister(cpuTemp)
-//		prometheus.MustRegister(hdFailures)
+//			),
+//		}
+//		reg.MustRegister(m.cpuTemp)
+//		reg.MustRegister(m.hdFailures)
+//		return m
//	}
//
//	func main() {
-//		cpuTemp.Set(65.3)
-//		hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//		// Create a non-global registry.
+//		reg := prometheus.NewRegistry()
//
-//		// The Handler function provides a default handler to expose metrics
-//		// via an HTTP server. "/metrics" is the usual endpoint for that.
-//		http.Handle("/metrics", promhttp.Handler())
+//		// Create new metrics and register them using the custom registry.
+//		m := NewMetrics(reg)
+//		// Set values for the new created metrics.
+//		m.cpuTemp.Set(65.3)
+//		m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+//		// Expose metrics and custom registry via an HTTP server
+//		// using the HandleFor function. "/metrics" is the usual endpoint for that.
+//		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
//		log.Fatal(http.ListenAndServe(":8080", nil))
//	}
//
-//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
+// It register the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
//
-// Metrics
+// # Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
-// Custom Collectors and constant Metrics
+// # Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
@@ -141,7 +152,7 @@
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
-// Advanced Uses of the Registry
+// # Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
@@ -176,23 +187,23 @@
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
-// HTTP Exposition
+// # HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
-// Pushing to the Pushgateway
+// # Pushing to the Pushgateway
//
// Function for pushing to the Pushgateway can be found in the push sub-package.
//
-// Graphite Bridge
+// # Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
-// Other Means of Exposition
+// # Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go (generated, vendored): 2 changed lines
@@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) {
			continue
		}
		var v interface{}
-		labels := make([]string, len(desc.variableLabels))
+		labels := make([]string, len(desc.variableLabels.names))
		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
			ch <- NewInvalidMetric(desc, err)
			continue
vendor/github.com/prometheus/client_golang/prometheus/gauge.go (generated, vendored): 32 changed lines
@@ -55,6 +55,18 @@ type Gauge interface {
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts

+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+	GaugeOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
@@ -123,7 +135,7 @@ func (g *gauge) Sub(val float64) {

func (g *gauge) Write(out *dto.Metric) error {
	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
-	return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
+	return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
}

// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -138,16 +150,24 @@ type GaugeVec struct {
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
-	desc := NewDesc(
+	return V2.NewGaugeVec(GaugeVecOpts{
+		GaugeOpts:      opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+	desc := V2.NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
-		labelNames,
+		opts.VariableLabels,
		opts.ConstLabels,
	)
	return &GaugeVec{
		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
			}
			result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
			result.init(result) // Init self-collection.
@@ -210,6 +230,7 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
+//
//	myVec.WithLabelValues("404", "GET").Add(42)
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
	g, err := v.GetMetricWithLabelValues(lvs...)
@@ -221,6 +242,7 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
+//
//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *GaugeVec) With(labels Labels) Gauge {
	g, err := v.GetMetricWith(labels)
vendor/github.com/prometheus/client_golang/prometheus/get_pid.go (generated, vendored, new file): 26 added lines
@@ -0,0 +1,26 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !js || wasm
// +build !js wasm

package prometheus

import "os"

func getPIDFn() func() (int, error) {
	pid := os.Getpid()
	return func() (int, error) {
		return pid, nil
	}
}
vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go (generated, vendored, new file): 23 added lines
@@ -0,0 +1,23 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build js && !wasm
// +build js,!wasm

package prometheus

func getPIDFn() func() (int, error) {
	return func() (int, error) {
		return 1, nil
	}
}
182
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
182
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
@@ -16,53 +16,15 @@ package prometheus
 import (
     "runtime"
     "runtime/debug"
-    "sync"
     "time"
 )

-type goCollector struct {
-    goroutinesDesc *Desc
-    threadsDesc    *Desc
-    gcDesc         *Desc
-    goInfoDesc     *Desc
-
-    // ms... are memstats related.
-    msLast          *runtime.MemStats // Previously collected memstats.
-    msLastTimestamp time.Time
-    msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
-    msMetrics       memStatsMetrics
-    msRead          func(*runtime.MemStats) // For mocking in tests.
-    msMaxWait       time.Duration            // Wait time for fresh memstats.
-    msMaxAge        time.Duration            // Maximum allowed age of old memstats.
-}
-
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
-    return &goCollector{
-        goroutinesDesc: NewDesc(
-            "go_goroutines",
-            "Number of goroutines that currently exist.",
-            nil, nil),
-        threadsDesc: NewDesc(
-            "go_threads",
-            "Number of OS threads created.",
-            nil, nil),
-        gcDesc: NewDesc(
-            "go_gc_duration_seconds",
-            "A summary of the pause duration of garbage collection cycles.",
-            nil, nil),
-        goInfoDesc: NewDesc(
-            "go_info",
-            "Information about the Go environment.",
-            nil, Labels{"version": runtime.Version()}),
-        msLast:    &runtime.MemStats{},
-        msRead:    runtime.ReadMemStats,
-        msMaxWait: time.Second,
-        msMaxAge:  5 * time.Minute,
-        msMetrics: memStatsMetrics{
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
+// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
+// populated using runtime/metrics.
+func goRuntimeMemStats() memStatsMetrics {
+    return memStatsMetrics{
         {
             desc: NewDesc(
                 memstatNamespace("alloc_bytes"),
@@ -239,61 +201,58 @@ func NewGoCollector() Collector {
             ),
             eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
             valType: GaugeValue,
-        }, {
-            desc: NewDesc(
-                memstatNamespace("last_gc_time_seconds"),
-                "Number of seconds since 1970 of last garbage collection.",
-                nil, nil,
-            ),
-            eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
-            valType: GaugeValue,
-        }, {
-            desc: NewDesc(
-                memstatNamespace("gc_cpu_fraction"),
-                "The fraction of this program's available CPU time used by the GC since the program started.",
-                nil, nil,
-            ),
-            eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
-            valType: GaugeValue,
-        },
         },
     }
 }

-func memstatNamespace(s string) string {
-    return "go_memstats_" + s
+type baseGoCollector struct {
+    goroutinesDesc *Desc
+    threadsDesc    *Desc
+    gcDesc         *Desc
+    gcLastTimeDesc *Desc
+    goInfoDesc     *Desc
+}
+
+func newBaseGoCollector() baseGoCollector {
+    return baseGoCollector{
+        goroutinesDesc: NewDesc(
+            "go_goroutines",
+            "Number of goroutines that currently exist.",
+            nil, nil),
+        threadsDesc: NewDesc(
+            "go_threads",
+            "Number of OS threads created.",
+            nil, nil),
+        gcDesc: NewDesc(
+            "go_gc_duration_seconds",
+            "A summary of the pause duration of garbage collection cycles.",
+            nil, nil),
+        gcLastTimeDesc: NewDesc(
+            "go_memstats_last_gc_time_seconds",
+            "Number of seconds since 1970 of last garbage collection.",
+            nil, nil),
+        goInfoDesc: NewDesc(
+            "go_info",
+            "Information about the Go environment.",
+            nil, Labels{"version": runtime.Version()}),
+    }
 }

 // Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
     ch <- c.goroutinesDesc
     ch <- c.threadsDesc
     ch <- c.gcDesc
+    ch <- c.gcLastTimeDesc
     ch <- c.goInfoDesc
-    for _, i := range c.msMetrics {
-        ch <- i.desc
-    }
 }

 // Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
-    var (
-        ms   = &runtime.MemStats{}
-        done = make(chan struct{})
-    )
-    // Start reading memstats first as it might take a while.
-    go func() {
-        c.msRead(ms)
-        c.msMtx.Lock()
-        c.msLast = ms
-        c.msLastTimestamp = time.Now()
-        c.msMtx.Unlock()
-        close(done)
-    }()
-
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
     ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
-    n, _ := runtime.ThreadCreateProfile(nil)
-    ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+    n := getRuntimeNumThreads()
+    ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)

     var stats debug.GCStats
     stats.PauseQuantiles = make([]time.Duration, 5)
@@ -305,63 +264,18 @@ func (c *goCollector) Collect(ch chan<- Metric) {
     }
     quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
     ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+    ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
     ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-
-    timer := time.NewTimer(c.msMaxWait)
-    select {
-    case <-done: // Our own ReadMemStats succeeded in time. Use it.
-        timer.Stop() // Important for high collection frequencies to not pile up timers.
-        c.msCollect(ch, ms)
-        return
-    case <-timer.C: // Time out, use last memstats if possible. Continue below.
-    }
-    c.msMtx.Lock()
-    if time.Since(c.msLastTimestamp) < c.msMaxAge {
-        // Last memstats are recent enough. Collect from them under the lock.
-        c.msCollect(ch, c.msLast)
-        c.msMtx.Unlock()
-        return
-    }
-    // If we are here, the last memstats are too old or don't exist. We have
-    // to wait until our own ReadMemStats finally completes. For that to
-    // happen, we have to release the lock.
-    c.msMtx.Unlock()
-    <-done
-    c.msCollect(ch, ms)
 }

-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
-    for _, i := range c.msMetrics {
-        ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
-    }
+func memstatNamespace(s string) string {
+    return "go_memstats_" + s
 }

-// memStatsMetrics provide description, value, and value type for memstat metrics.
+// memStatsMetrics provide description, evaluator, runtime/metrics name, and
+// value type for memstat metrics.
 type memStatsMetrics []struct {
     desc    *Desc
     eval    func(*runtime.MemStats) float64
     valType ValueType
 }
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
-    path, version, sum := "unknown", "unknown", "unknown"
-    if bi, ok := debug.ReadBuildInfo(); ok {
-        path = bi.Main.Path
-        version = bi.Main.Version
-        sum = bi.Main.Sum
-    }
-    c := &selfCollector{MustNewConstMetric(
-        NewDesc(
-            "go_build_info",
-            "Build information about the main Go module.",
-            nil, Labels{"path": path, "version": version, "checksum": sum},
-        ),
-        GaugeValue, 1)}
-    c.init(c.self)
-    return c
-}
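The net effect of the hunks above is that go_collector.go keeps only the version-independent pieces (baseGoCollector plus the memStatsMetrics type), while the deprecated constructors move into the build-tagged files that follow. New code is expected to register these collectors through the collectors package instead; a minimal registration sketch under that assumption (the registry is illustrative):

package main

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
    reg := prometheus.NewRegistry()
    // Replaces the deprecated prometheus.NewGoCollector and
    // prometheus.NewBuildInfoCollector seen in the removed lines above.
    reg.MustRegister(
        collectors.NewGoCollector(),
        collectors.NewBuildInfoCollector(),
    )
    _ = reg // expose via promhttp in a real program
}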
122 vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go generated vendored Normal file
@@ -0,0 +1,122 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !go1.17
// +build !go1.17

package prometheus

import (
    "runtime"
    "sync"
    "time"
)

type goCollector struct {
    base baseGoCollector

    // ms... are memstats related.
    msLast          *runtime.MemStats // Previously collected memstats.
    msLastTimestamp time.Time
    msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
    msMetrics       memStatsMetrics
    msRead          func(*runtime.MemStats) // For mocking in tests.
    msMaxWait       time.Duration            // Wait time for fresh memstats.
    msMaxAge        time.Duration            // Maximum allowed age of old memstats.
}

// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
    msMetrics := goRuntimeMemStats()
    msMetrics = append(msMetrics, struct {
        desc    *Desc
        eval    func(*runtime.MemStats) float64
        valType ValueType
    }{
        // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
        desc: NewDesc(
            memstatNamespace("gc_cpu_fraction"),
            "The fraction of this program's available CPU time used by the GC since the program started.",
            nil, nil,
        ),
        eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
        valType: GaugeValue,
    })
    return &goCollector{
        base:      newBaseGoCollector(),
        msLast:    &runtime.MemStats{},
        msRead:    runtime.ReadMemStats,
        msMaxWait: time.Second,
        msMaxAge:  5 * time.Minute,
        msMetrics: msMetrics,
    }
}

// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
    c.base.Describe(ch)
    for _, i := range c.msMetrics {
        ch <- i.desc
    }
}

// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
    var (
        ms   = &runtime.MemStats{}
        done = make(chan struct{})
    )
    // Start reading memstats first as it might take a while.
    go func() {
        c.msRead(ms)
        c.msMtx.Lock()
        c.msLast = ms
        c.msLastTimestamp = time.Now()
        c.msMtx.Unlock()
        close(done)
    }()

    // Collect base non-memory metrics.
    c.base.Collect(ch)

    timer := time.NewTimer(c.msMaxWait)
    select {
    case <-done: // Our own ReadMemStats succeeded in time. Use it.
        timer.Stop() // Important for high collection frequencies to not pile up timers.
        c.msCollect(ch, ms)
        return
    case <-timer.C: // Time out, use last memstats if possible. Continue below.
    }
    c.msMtx.Lock()
    if time.Since(c.msLastTimestamp) < c.msMaxAge {
        // Last memstats are recent enough. Collect from them under the lock.
        c.msCollect(ch, c.msLast)
        c.msMtx.Unlock()
        return
    }
    // If we are here, the last memstats are too old or don't exist. We have
    // to wait until our own ReadMemStats finally completes. For that to
    // happen, we have to release the lock.
    c.msMtx.Unlock()
    <-done
    c.msCollect(ch, ms)
}

func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
    for _, i := range c.msMetrics {
        ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
    }
}
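The Collect path above illustrates a general pattern: kick off a potentially slow read (runtime.ReadMemStats can stop the world), wait for it only up to msMaxWait, and otherwise fall back to a cached result no older than msMaxAge. A stripped-down, hedged sketch of that pattern outside the collector (the type name, durations, and main function are illustrative, not part of this diff):

package main

import (
    "fmt"
    "runtime"
    "sync"
    "time"
)

type cachedMemStats struct {
    mu   sync.Mutex
    last runtime.MemStats
    when time.Time
}

// read returns fresh memstats if they arrive within maxWait, otherwise a
// cached copy no older than maxAge, otherwise it blocks for the fresh read.
func (c *cachedMemStats) read(maxWait, maxAge time.Duration) runtime.MemStats {
    var ms runtime.MemStats
    done := make(chan struct{})
    go func() {
        runtime.ReadMemStats(&ms)
        c.mu.Lock()
        c.last, c.when = ms, time.Now()
        c.mu.Unlock()
        close(done)
    }()

    timer := time.NewTimer(maxWait)
    select {
    case <-done: // Fresh read finished in time.
        timer.Stop()
        return ms
    case <-timer.C: // Timed out; consider the cache.
    }
    c.mu.Lock()
    if time.Since(c.when) < maxAge {
        defer c.mu.Unlock()
        return c.last
    }
    c.mu.Unlock()
    <-done // Cache too old: wait for the in-flight read after all.
    return ms
}

func main() {
    var c cachedMemStats
    fmt.Println(c.read(time.Second, 5*time.Minute).HeapAlloc)
}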
567 vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go generated vendored Normal file
@@ -0,0 +1,567 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.17
// +build go1.17

package prometheus

import (
    "math"
    "runtime"
    "runtime/metrics"
    "strings"
    "sync"

    "github.com/prometheus/client_golang/prometheus/internal"

    dto "github.com/prometheus/client_model/go"
    "google.golang.org/protobuf/proto"
)

const (
    // constants for strings referenced more than once.
    goGCHeapTinyAllocsObjects               = "/gc/heap/tiny/allocs:objects"
    goGCHeapAllocsObjects                   = "/gc/heap/allocs:objects"
    goGCHeapFreesObjects                    = "/gc/heap/frees:objects"
    goGCHeapFreesBytes                      = "/gc/heap/frees:bytes"
    goGCHeapAllocsBytes                     = "/gc/heap/allocs:bytes"
    goGCHeapObjects                         = "/gc/heap/objects:objects"
    goGCHeapGoalBytes                       = "/gc/heap/goal:bytes"
    goMemoryClassesTotalBytes               = "/memory/classes/total:bytes"
    goMemoryClassesHeapObjectsBytes         = "/memory/classes/heap/objects:bytes"
    goMemoryClassesHeapUnusedBytes          = "/memory/classes/heap/unused:bytes"
    goMemoryClassesHeapReleasedBytes        = "/memory/classes/heap/released:bytes"
    goMemoryClassesHeapFreeBytes            = "/memory/classes/heap/free:bytes"
    goMemoryClassesHeapStacksBytes          = "/memory/classes/heap/stacks:bytes"
    goMemoryClassesOSStacksBytes            = "/memory/classes/os-stacks:bytes"
    goMemoryClassesMetadataMSpanInuseBytes  = "/memory/classes/metadata/mspan/inuse:bytes"
    goMemoryClassesMetadataMSPanFreeBytes   = "/memory/classes/metadata/mspan/free:bytes"
    goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
    goMemoryClassesMetadataMCacheFreeBytes  = "/memory/classes/metadata/mcache/free:bytes"
    goMemoryClassesProfilingBucketsBytes    = "/memory/classes/profiling/buckets:bytes"
    goMemoryClassesMetadataOtherBytes       = "/memory/classes/metadata/other:bytes"
    goMemoryClassesOtherBytes               = "/memory/classes/other:bytes"
)

// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic.
var rmNamesForMemStatsMetrics = []string{
    goGCHeapTinyAllocsObjects,
    goGCHeapAllocsObjects,
    goGCHeapFreesObjects,
    goGCHeapAllocsBytes,
    goGCHeapObjects,
    goGCHeapGoalBytes,
    goMemoryClassesTotalBytes,
    goMemoryClassesHeapObjectsBytes,
    goMemoryClassesHeapUnusedBytes,
    goMemoryClassesHeapReleasedBytes,
    goMemoryClassesHeapFreeBytes,
    goMemoryClassesHeapStacksBytes,
    goMemoryClassesOSStacksBytes,
    goMemoryClassesMetadataMSpanInuseBytes,
    goMemoryClassesMetadataMSPanFreeBytes,
    goMemoryClassesMetadataMCacheInuseBytes,
    goMemoryClassesMetadataMCacheFreeBytes,
    goMemoryClassesProfilingBucketsBytes,
    goMemoryClassesMetadataOtherBytes,
    goMemoryClassesOtherBytes,
}

func bestEffortLookupRM(lookup []string) []metrics.Description {
    ret := make([]metrics.Description, 0, len(lookup))
    for _, rm := range metrics.All() {
        for _, m := range lookup {
            if m == rm.Name {
                ret = append(ret, rm)
            }
        }
    }
    return ret
}

type goCollector struct {
    base baseGoCollector

    // mu protects updates to all fields ensuring a consistent
    // snapshot is always produced by Collect.
    mu sync.Mutex

    // Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed).
    sampleBuf []metrics.Sample
    // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
    sampleMap map[string]*metrics.Sample

    // rmExposedMetrics represents all runtime/metrics package metrics
    // that were configured to be exposed.
    rmExposedMetrics     []collectorMetric
    rmExactSumMapForHist map[string]string

    // With Go 1.17, the runtime/metrics package was introduced.
    // From that point on, metric names produced by the runtime/metrics
    // package could be generated from runtime/metrics names. However,
    // these differ from the old names for the same values.
    //
    // This field exists to export the same values under the old names
    // as well.
    msMetrics        memStatsMetrics
    msMetricsEnabled bool
}

type rmMetricDesc struct {
    metrics.Description
}

func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
    var descs []rmMetricDesc
    for _, d := range metrics.All() {
        var (
            deny = true
            desc rmMetricDesc
        )

        for _, r := range rules {
            if !r.Matcher.MatchString(d.Name) {
                continue
            }
            deny = r.Deny
        }
        if deny {
            continue
        }

        desc.Description = d
        descs = append(descs, desc)
    }
    return descs
}

func defaultGoCollectorOptions() internal.GoCollectorOptions {
    return internal.GoCollectorOptions{
        RuntimeMetricSumForHist: map[string]string{
            "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
            "/gc/heap/frees-by-size:bytes":  goGCHeapFreesBytes,
        },
        RuntimeMetricRules: []internal.GoCollectorRule{
            //{Matcher: regexp.MustCompile("")},
        },
    }
}

// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
    opt := defaultGoCollectorOptions()
    for _, o := range opts {
        o(&opt)
    }

    exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)

    // Collect all histogram samples so that we can get their buckets.
    // The API guarantees that the buckets are always fixed for the lifetime
    // of the process.
    var histograms []metrics.Sample
    for _, d := range exposedDescriptions {
        if d.Kind == metrics.KindFloat64Histogram {
            histograms = append(histograms, metrics.Sample{Name: d.Name})
        }
    }

    if len(histograms) > 0 {
        metrics.Read(histograms)
    }

    bucketsMap := make(map[string][]float64)
    for i := range histograms {
        bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
    }

    // Generate a collector for each exposed runtime/metrics metric.
    metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
    // SampleBuf is used for reading from runtime/metrics.
    // We are assuming the largest case to have stable pointers for sampleMap purposes.
    sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
    sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
    for _, d := range exposedDescriptions {
        namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
        if !ok {
            // Just ignore this metric; we can't do anything with it here.
            // If a user decides to use the latest version of Go, we don't want
            // to fail here. This condition is tested in TestExpectedRuntimeMetrics.
            continue
        }

        sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
        sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]

        var m collectorMetric
        if d.Kind == metrics.KindFloat64Histogram {
            _, hasSum := opt.RuntimeMetricSumForHist[d.Name]
            unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
            m = newBatchHistogram(
                NewDesc(
                    BuildFQName(namespace, subsystem, name),
                    d.Description.Description,
                    nil,
                    nil,
                ),
                internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
                hasSum,
            )
        } else if d.Cumulative {
            m = NewCounter(CounterOpts{
                Namespace: namespace,
                Subsystem: subsystem,
                Name:      name,
                Help:      d.Description.Description,
            },
            )
        } else {
            m = NewGauge(GaugeOpts{
                Namespace: namespace,
                Subsystem: subsystem,
                Name:      name,
                Help:      d.Description.Description,
            })
        }
        metricSet = append(metricSet, m)
    }

    // Add exact sum metrics to sampleBuf if not added before.
    for _, h := range histograms {
        sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
        if !ok {
            continue
        }

        if _, ok := sampleMap[sumMetric]; ok {
            continue
        }
        sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
        sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
    }

    var (
        msMetrics      memStatsMetrics
        msDescriptions []metrics.Description
    )

    if !opt.DisableMemStatsLikeMetrics {
        msMetrics = goRuntimeMemStats()
        msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)

        // Check if metric was not exposed before and if not, add to sampleBuf.
        for _, mdDesc := range msDescriptions {
            if _, ok := sampleMap[mdDesc.Name]; ok {
                continue
            }
            sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
            sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
        }
    }

    return &goCollector{
        base:                 newBaseGoCollector(),
        sampleBuf:            sampleBuf,
        sampleMap:            sampleMap,
        rmExposedMetrics:     metricSet,
        rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
        msMetrics:            msMetrics,
        msMetricsEnabled:     !opt.DisableMemStatsLikeMetrics,
    }
}

// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
    c.base.Describe(ch)
    for _, i := range c.msMetrics {
        ch <- i.desc
    }
    for _, m := range c.rmExposedMetrics {
        ch <- m.Desc()
    }
}

// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
    // Collect base non-memory metrics.
    c.base.Collect(ch)

    if len(c.sampleBuf) == 0 {
        return
    }

    // Collect must be thread-safe, so prevent concurrent use of
    // sampleBuf elements. Just read into sampleBuf but write all the data
    // we get into our Metrics or MemStats.
    //
    // This lock also ensures that the Metrics we send out are all from
    // the same updates, ensuring their mutual consistency insofar as
    // is guaranteed by the runtime/metrics package.
    //
    // N.B. This locking is heavy-handed, but Collect is expected to be called
    // relatively infrequently. Also the core operation here, metrics.Read,
    // is fast (O(tens of microseconds)) so contention should certainly be
    // low, though channel operations and any allocations may add to that.
    c.mu.Lock()
    defer c.mu.Unlock()

    // Populate runtime/metrics sample buffer.
    metrics.Read(c.sampleBuf)

    // Collect all our runtime/metrics user chose to expose from sampleBuf (if any).
    for i, metric := range c.rmExposedMetrics {
        // We created samples for exposed metrics first in order, so indexes match.
        sample := c.sampleBuf[i]

        // N.B. switch on concrete type because it's significantly more efficient
        // than checking for the Counter and Gauge interface implementations. In
        // this case, we control all the types here.
        switch m := metric.(type) {
        case *counter:
            // Guard against decreases. This should never happen, but a failure
            // to do so will result in a panic, which is a harsh consequence for
            // a metrics collection bug.
            v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
            if v1 > v0 {
                m.Add(unwrapScalarRMValue(sample.Value) - m.get())
            }
            m.Collect(ch)
        case *gauge:
            m.Set(unwrapScalarRMValue(sample.Value))
            m.Collect(ch)
        case *batchHistogram:
            m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
            m.Collect(ch)
        default:
            panic("unexpected metric type")
        }
    }

    if c.msMetricsEnabled {
        // ms is a dummy MemStats that we populate ourselves so that we can
        // populate the old metrics from it if goMemStatsCollection is enabled.
        var ms runtime.MemStats
        memStatsFromRM(&ms, c.sampleMap)
        for _, i := range c.msMetrics {
            ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
        }
    }
}

// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
// to be scalar and returns the equivalent float64 value. Panics if the
// value is not scalar.
func unwrapScalarRMValue(v metrics.Value) float64 {
    switch v.Kind() {
    case metrics.KindUint64:
        return float64(v.Uint64())
    case metrics.KindFloat64:
        return v.Float64()
    case metrics.KindBad:
        // Unsupported metric.
        //
        // This should never happen because we always populate our metric
        // set from the runtime/metrics package.
        panic("unexpected unsupported metric")
    default:
        // Unsupported metric kind.
        //
        // This should never happen because we check for this during initialization
        // and flag and filter metrics whose kinds we don't understand.
        panic("unexpected unsupported metric kind")
    }
}

// exactSumFor takes a runtime/metrics metric name (that is assumed to
// be of kind KindFloat64Histogram) and returns its exact sum and whether
// its exact sum exists.
//
// The runtime/metrics API for histograms doesn't currently expose exact
// sums, but some of the other metrics are in fact exact sums of histograms.
func (c *goCollector) exactSumFor(rmName string) float64 {
    sumName, ok := c.rmExactSumMapForHist[rmName]
    if !ok {
        return 0
    }
    s, ok := c.sampleMap[sumName]
    if !ok {
        return 0
    }
    return unwrapScalarRMValue(s.Value)
}

func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
    lookupOrZero := func(name string) uint64 {
        if s, ok := rm[name]; ok {
            return s.Value.Uint64()
        }
        return 0
    }

    // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
    // The reason for this is because MemStats couldn't be extended at the time
    // but there was a desire to have Mallocs at least be a little more representative,
    // while having Mallocs - Frees still represent a live object count.
    // Unfortunately, MemStats doesn't actually export a large allocation count,
    // so it's impossible to pull this number out directly.
    tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
    ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
    ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs

    ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
    ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
    ms.Lookups = 0 // Already always zero.
    ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
    ms.Alloc = ms.HeapAlloc
    ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
    ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
    ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
    ms.HeapSys = ms.HeapInuse + ms.HeapIdle
    ms.HeapObjects = lookupOrZero(goGCHeapObjects)
    ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
    ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
    ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
    ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
    ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
    ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
    ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
    ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
    ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
    ms.NextGC = lookupOrZero(goGCHeapGoalBytes)

    // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
    // and often misleading due to the fact that it's an average over the lifetime
    // of the process.
    // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
    // for more details.
    ms.GCCPUFraction = 0
}

// batchHistogram is a mutable histogram that is updated
// in batches.
type batchHistogram struct {
    selfCollector

    // Static fields updated only once.
    desc   *Desc
    hasSum bool

    // Because this histogram operates in batches, it just uses a
    // single mutex for everything. updates are always serialized
    // but Write calls may operate concurrently with updates.
    // Contention between these two sources should be rare.
    mu      sync.Mutex
    buckets []float64 // Inclusive lower bounds, like runtime/metrics.
    counts  []uint64
    sum     float64 // Used if hasSum is true.
}

// newBatchHistogram creates a new batch histogram value with the given
// Desc, buckets, and whether or not it has an exact sum available.
//
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
    // We need to remove -Inf values. runtime/metrics keeps them around.
    // But -Inf bucket should not be allowed for prometheus histograms.
    if buckets[0] == math.Inf(-1) {
        buckets = buckets[1:]
    }
    h := &batchHistogram{
        desc:    desc,
        buckets: buckets,
        // Because buckets follows runtime/metrics conventions, there's
        // 1 more value in the buckets list than there are buckets represented,
        // because in runtime/metrics, the bucket values represent *boundaries*,
        // and non-Inf boundaries are inclusive lower bounds for that bucket.
        counts: make([]uint64, len(buckets)-1),
        hasSum: hasSum,
    }
    h.init(h)
    return h
}

// update updates the batchHistogram from a runtime/metrics histogram.
//
// sum must be provided if the batchHistogram was created to have an exact sum.
// h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
    counts, buckets := his.Counts, his.Buckets

    h.mu.Lock()
    defer h.mu.Unlock()

    // Clear buckets.
    for i := range h.counts {
        h.counts[i] = 0
    }
    // Copy and reduce buckets.
    var j int
    for i, count := range counts {
        h.counts[j] += count
        if buckets[i+1] == h.buckets[j+1] {
            j++
        }
    }
    if h.hasSum {
        h.sum = sum
    }
}

func (h *batchHistogram) Desc() *Desc {
    return h.desc
}

func (h *batchHistogram) Write(out *dto.Metric) error {
    h.mu.Lock()
    defer h.mu.Unlock()

    sum := float64(0)
    if h.hasSum {
        sum = h.sum
    }
    dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
    totalCount := uint64(0)
    for i, count := range h.counts {
        totalCount += count
        if !h.hasSum {
            if count != 0 {
                // N.B. This computed sum is an underestimate.
                sum += h.buckets[i] * float64(count)
            }
        }

        // Skip the +Inf bucket, but only for the bucket list.
        // It must still count for sum and totalCount.
        if math.IsInf(h.buckets[i+1], 1) {
            break
        }
        // Float64Histogram's upper bound is exclusive, so make it inclusive
        // by obtaining the next float64 value down, in order.
        upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
        dtoBuckets = append(dtoBuckets, &dto.Bucket{
            CumulativeCount: proto.Uint64(totalCount),
            UpperBound:      proto.Float64(upperBound),
        })
    }
    out.Histogram = &dto.Histogram{
        Bucket:      dtoBuckets,
        SampleCount: proto.Uint64(totalCount),
        SampleSum:   proto.Float64(sum),
    }
    return nil
}
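For consumers of this module, the rule matching in matchRuntimeMetricsRules above is driven through options on collectors.NewGoCollector rather than by calling this file directly. A hedged sketch, assuming the WithGoCollectorRuntimeMetrics option and GoRuntimeMetricsRule type shipped alongside this vendored client_golang version (the regexp and registry are illustrative):

package main

import (
    "regexp"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
    reg := prometheus.NewRegistry()
    // Expose the scheduler latency histogram from runtime/metrics in addition
    // to the default MemStats-style series; the deny-by-default rule set in
    // defaultGoCollectorOptions() means nothing extra is exported otherwise.
    reg.MustRegister(collectors.NewGoCollector(
        collectors.WithGoCollectorRuntimeMetrics(collectors.GoRuntimeMetricsRule{
            Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`),
        }),
    ))
    _ = reg
}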
1097 vendor/github.com/prometheus/client_golang/prometheus/histogram.go generated vendored
File diff suppressed because it is too large.
60 vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go generated vendored Normal file
@@ -0,0 +1,60 @@
// Copyright (c) 2015 Björn Rabenstein
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
// The code in this package is copy/paste to avoid a dependency. Hence this file
// carries the copyright of the original repo.
// https://github.com/beorn7/floats
package internal

import (
    "math"
)

// minNormalFloat64 is the smallest positive normal value of type float64.
var minNormalFloat64 = math.Float64frombits(0x0010000000000000)

// AlmostEqualFloat64 returns true if a and b are equal within a relative error
// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
// details of the applied method.
func AlmostEqualFloat64(a, b, epsilon float64) bool {
    if a == b {
        return true
    }
    absA := math.Abs(a)
    absB := math.Abs(b)
    diff := math.Abs(a - b)
    if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
        return diff < epsilon*minNormalFloat64
    }
    return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
}

// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if !AlmostEqualFloat64(a[i], b[i], epsilon) {
            return false
        }
    }
    return true
}
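almost_equal.go lives under internal/ and cannot be imported from outside this module, but the comparison it implements is easy to exercise in isolation. A small standalone sketch that copies the same relative-error check for illustration (the helper name and epsilon values are illustrative, not part of this diff):

package main

import (
    "fmt"
    "math"
)

// almostEqual mirrors internal.AlmostEqualFloat64: exact equality first, an
// absolute check near zero, and a relative check everywhere else.
func almostEqual(a, b, epsilon float64) bool {
    minNormal := math.Float64frombits(0x0010000000000000)
    if a == b {
        return true
    }
    absA, absB, diff := math.Abs(a), math.Abs(b), math.Abs(a-b)
    if a == 0 || b == 0 || absA+absB < minNormal {
        return diff < epsilon*minNormal
    }
    return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
}

func main() {
    fmt.Println(almostEqual(0.1+0.2, 0.3, 1e-9)) // true: differs only by float rounding
    fmt.Println(almostEqual(1.0, 1.1, 1e-9))     // false: relative error is about 5%
}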
654 vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go generated vendored Normal file
@@ -0,0 +1,654 @@
|
|||||||
|
// Copyright 2022 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
//
|
||||||
|
// It provides tools to compare sequences of strings and generate textual diffs.
|
||||||
|
//
|
||||||
|
// Maintaining `GetUnifiedDiffString` here because original repository
|
||||||
|
// (https://github.com/pmezard/go-difflib) is no longer maintained.
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func min(a, b int) int {
|
||||||
|
if a < b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func max(a, b int) int {
|
||||||
|
if a > b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func calculateRatio(matches, length int) float64 {
|
||||||
|
if length > 0 {
|
||||||
|
return 2.0 * float64(matches) / float64(length)
|
||||||
|
}
|
||||||
|
return 1.0
|
||||||
|
}
|
||||||
|
|
||||||
|
type Match struct {
|
||||||
|
A int
|
||||||
|
B int
|
||||||
|
Size int
|
||||||
|
}
|
||||||
|
|
||||||
|
type OpCode struct {
|
||||||
|
Tag byte
|
||||||
|
I1 int
|
||||||
|
I2 int
|
||||||
|
J1 int
|
||||||
|
J2 int
|
||||||
|
}
|
||||||
|
|
||||||
|
// SequenceMatcher compares sequence of strings. The basic
|
||||||
|
// algorithm predates, and is a little fancier than, an algorithm
|
||||||
|
// published in the late 1980's by Ratcliff and Obershelp under the
|
||||||
|
// hyperbolic name "gestalt pattern matching". The basic idea is to find
|
||||||
|
// the longest contiguous matching subsequence that contains no "junk"
|
||||||
|
// elements (R-O doesn't address junk). The same idea is then applied
|
||||||
|
// recursively to the pieces of the sequences to the left and to the right
|
||||||
|
// of the matching subsequence. This does not yield minimal edit
|
||||||
|
// sequences, but does tend to yield matches that "look right" to people.
|
||||||
|
//
|
||||||
|
// SequenceMatcher tries to compute a "human-friendly diff" between two
|
||||||
|
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
|
||||||
|
// longest *contiguous* & junk-free matching subsequence. That's what
|
||||||
|
// catches peoples' eyes. The Windows(tm) windiff has another interesting
|
||||||
|
// notion, pairing up elements that appear uniquely in each sequence.
|
||||||
|
// That, and the method here, appear to yield more intuitive difference
|
||||||
|
// reports than does diff. This method appears to be the least vulnerable
|
||||||
|
// to synching up on blocks of "junk lines", though (like blank lines in
|
||||||
|
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
|
||||||
|
// because this is the only method of the 3 that has a *concept* of
|
||||||
|
// "junk" <wink>.
|
||||||
|
//
|
||||||
|
// Timing: Basic R-O is cubic time worst case and quadratic time expected
|
||||||
|
// case. SequenceMatcher is quadratic time for the worst case and has
|
||||||
|
// expected-case behavior dependent in a complicated way on how many
|
||||||
|
// elements the sequences have in common; best case time is linear.
|
||||||
|
type SequenceMatcher struct {
|
||||||
|
a []string
|
||||||
|
b []string
|
||||||
|
b2j map[string][]int
|
||||||
|
IsJunk func(string) bool
|
||||||
|
autoJunk bool
|
||||||
|
bJunk map[string]struct{}
|
||||||
|
matchingBlocks []Match
|
||||||
|
fullBCount map[string]int
|
||||||
|
bPopular map[string]struct{}
|
||||||
|
opCodes []OpCode
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMatcher(a, b []string) *SequenceMatcher {
|
||||||
|
m := SequenceMatcher{autoJunk: true}
|
||||||
|
m.SetSeqs(a, b)
|
||||||
|
return &m
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMatcherWithJunk(a, b []string, autoJunk bool,
|
||||||
|
isJunk func(string) bool,
|
||||||
|
) *SequenceMatcher {
|
||||||
|
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
|
||||||
|
m.SetSeqs(a, b)
|
||||||
|
return &m
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set two sequences to be compared.
|
||||||
|
func (m *SequenceMatcher) SetSeqs(a, b []string) {
|
||||||
|
m.SetSeq1(a)
|
||||||
|
m.SetSeq2(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the first sequence to be compared. The second sequence to be compared is
|
||||||
|
// not changed.
|
||||||
|
//
|
||||||
|
// SequenceMatcher computes and caches detailed information about the second
|
||||||
|
// sequence, so if you want to compare one sequence S against many sequences,
|
||||||
|
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
|
||||||
|
// sequences.
|
||||||
|
//
|
||||||
|
// See also SetSeqs() and SetSeq2().
|
||||||
|
func (m *SequenceMatcher) SetSeq1(a []string) {
|
||||||
|
if &a == &m.a {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.a = a
|
||||||
|
m.matchingBlocks = nil
|
||||||
|
m.opCodes = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the second sequence to be compared. The first sequence to be compared is
|
||||||
|
// not changed.
|
||||||
|
func (m *SequenceMatcher) SetSeq2(b []string) {
|
||||||
|
if &b == &m.b {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.b = b
|
||||||
|
m.matchingBlocks = nil
|
||||||
|
m.opCodes = nil
|
||||||
|
m.fullBCount = nil
|
||||||
|
m.chainB()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SequenceMatcher) chainB() {
|
||||||
|
// Populate line -> index mapping
|
||||||
|
b2j := map[string][]int{}
|
||||||
|
for i, s := range m.b {
|
||||||
|
indices := b2j[s]
|
||||||
|
indices = append(indices, i)
|
||||||
|
b2j[s] = indices
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge junk elements
|
||||||
|
m.bJunk = map[string]struct{}{}
|
||||||
|
if m.IsJunk != nil {
|
||||||
|
junk := m.bJunk
|
||||||
|
for s := range b2j {
|
||||||
|
if m.IsJunk(s) {
|
||||||
|
junk[s] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for s := range junk {
|
||||||
|
delete(b2j, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge remaining popular elements
|
||||||
|
popular := map[string]struct{}{}
|
||||||
|
n := len(m.b)
|
||||||
|
if m.autoJunk && n >= 200 {
|
||||||
|
ntest := n/100 + 1
|
||||||
|
for s, indices := range b2j {
|
||||||
|
if len(indices) > ntest {
|
||||||
|
popular[s] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for s := range popular {
|
||||||
|
delete(b2j, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.bPopular = popular
|
||||||
|
m.b2j = b2j
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SequenceMatcher) isBJunk(s string) bool {
|
||||||
|
_, ok := m.bJunk[s]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
|
||||||
|
//
|
||||||
|
// If IsJunk is not defined:
|
||||||
|
//
|
||||||
|
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||||
|
//
|
||||||
|
// alo <= i <= i+k <= ahi
|
||||||
|
// blo <= j <= j+k <= bhi
|
||||||
|
//
|
||||||
|
// and for all (i',j',k') meeting those conditions,
|
||||||
|
//
|
||||||
|
// k >= k'
|
||||||
|
// i <= i'
|
||||||
|
// and if i == i', j <= j'
|
||||||
|
//
|
||||||
|
// In other words, of all maximal matching blocks, return one that
|
||||||
|
// starts earliest in a, and of all those maximal matching blocks that
|
||||||
|
// start earliest in a, return the one that starts earliest in b.
|
||||||
|
//
|
||||||
|
// If IsJunk is defined, first the longest matching block is
|
||||||
|
// determined as above, but with the additional restriction that no
|
||||||
|
// junk element appears in the block. Then that block is extended as
|
||||||
|
// far as possible by matching (only) junk elements on both sides. So
|
||||||
|
// the resulting block never matches on junk except as identical junk
|
||||||
|
// happens to be adjacent to an "interesting" match.
|
||||||
|
//
|
||||||
|
// If no blocks match, return (alo, blo, 0).
|
||||||
|
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
|
||||||
|
// CAUTION: stripping common prefix or suffix would be incorrect.
|
||||||
|
// E.g.,
|
||||||
|
// ab
|
||||||
|
// acab
|
||||||
|
// Longest matching block is "ab", but if common prefix is
|
||||||
|
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
|
||||||
|
// strip, so ends up claiming that ab is changed to acab by
|
||||||
|
// inserting "ca" in the middle. That's minimal but unintuitive:
|
||||||
|
// "it's obvious" that someone inserted "ac" at the front.
|
||||||
|
// Windiff ends up at the same place as diff, but by pairing up
|
||||||
|
// the unique 'b's and then matching the first two 'a's.
|
||||||
|
besti, bestj, bestsize := alo, blo, 0
|
||||||
|
|
||||||
|
// find longest junk-free match
|
||||||
|
// during an iteration of the loop, j2len[j] = length of longest
|
||||||
|
// junk-free match ending with a[i-1] and b[j]
|
||||||
|
j2len := map[int]int{}
|
||||||
|
for i := alo; i != ahi; i++ {
|
||||||
|
// look at all instances of a[i] in b; note that because
|
||||||
|
// b2j has no junk keys, the loop is skipped if a[i] is junk
|
||||||
|
newj2len := map[int]int{}
|
||||||
|
for _, j := range m.b2j[m.a[i]] {
|
||||||
|
// a[i] matches b[j]
|
||||||
|
if j < blo {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if j >= bhi {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
k := j2len[j-1] + 1
|
||||||
|
newj2len[j] = k
|
||||||
|
if k > bestsize {
|
||||||
|
besti, bestj, bestsize = i-k+1, j-k+1, k
|
||||||
|
}
|
||||||
|
}
|
||||||
|
j2len = newj2len
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend the best by non-junk elements on each end. In particular,
|
||||||
|
// "popular" non-junk elements aren't in b2j, which greatly speeds
|
||||||
|
// the inner loop above, but also means "the best" match so far
|
||||||
|
// doesn't contain any junk *or* popular non-junk elements.
|
||||||
|
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
|
||||||
|
m.a[besti-1] == m.b[bestj-1] {
|
||||||
|
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||||
|
}
|
||||||
|
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||||
|
!m.isBJunk(m.b[bestj+bestsize]) &&
|
||||||
|
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||||
|
bestsize++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now that we have a wholly interesting match (albeit possibly
|
||||||
|
// empty!), we may as well suck up the matching junk on each
|
||||||
|
// side of it too. Can't think of a good reason not to, and it
|
||||||
|
// saves post-processing the (possibly considerable) expense of
|
||||||
|
// figuring out what to do with it. In the case of an empty
|
||||||
|
// interesting match, this is clearly the right thing to do,
|
||||||
|
// because no other kind of match is possible in the regions.
|
||||||
|
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
|
||||||
|
m.a[besti-1] == m.b[bestj-1] {
|
||||||
|
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||||
|
}
|
||||||
|
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||||
|
m.isBJunk(m.b[bestj+bestsize]) &&
|
||||||
|
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||||
|
bestsize++
|
||||||
|
}
|
||||||
|
|
||||||
|
return Match{A: besti, B: bestj, Size: bestsize}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return list of triples describing matching subsequences.
|
||||||
|
//
|
||||||
|
// Each triple is of the form (i, j, n), and means that
|
||||||
|
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
|
||||||
|
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
|
||||||
|
// adjacent triples in the list, and the second is not the last triple in the
|
||||||
|
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
|
||||||
|
// adjacent equal blocks.
|
||||||
|
//
|
||||||
|
// The last triple is a dummy, (len(a), len(b), 0), and is the only
|
||||||
|
// triple with n==0.
|
||||||
|
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
|
||||||
|
if m.matchingBlocks != nil {
|
||||||
|
return m.matchingBlocks
|
||||||
|
}
|
||||||
|
|
||||||
|
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
|
||||||
|
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
|
||||||
|
match := m.findLongestMatch(alo, ahi, blo, bhi)
|
||||||
|
i, j, k := match.A, match.B, match.Size
|
||||||
|
if match.Size > 0 {
|
||||||
|
if alo < i && blo < j {
|
||||||
|
matched = matchBlocks(alo, i, blo, j, matched)
|
||||||
|
}
|
||||||
|
matched = append(matched, match)
|
||||||
|
if i+k < ahi && j+k < bhi {
|
||||||
|
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return matched
|
||||||
|
}
|
||||||
|
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
|
||||||
|
|
||||||
|
// It's possible that we have adjacent equal blocks in the
|
||||||
|
// matching_blocks list now.
|
||||||
|
nonAdjacent := []Match{}
|
||||||
|
i1, j1, k1 := 0, 0, 0
|
||||||
|
for _, b := range matched {
|
||||||
|
// Is this block adjacent to i1, j1, k1?
|
||||||
|
i2, j2, k2 := b.A, b.B, b.Size
|
||||||
|
if i1+k1 == i2 && j1+k1 == j2 {
|
||||||
|
// Yes, so collapse them -- this just increases the length of
|
||||||
|
// the first block by the length of the second, and the first
|
||||||
|
// block so lengthened remains the block to compare against.
|
||||||
|
k1 += k2
|
||||||
|
} else {
|
||||||
|
// Not adjacent. Remember the first block (k1==0 means it's
|
||||||
|
// the dummy we started with), and make the second block the
|
||||||
|
// new block to compare against.
|
||||||
|
if k1 > 0 {
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||||
|
}
|
||||||
|
i1, j1, k1 = i2, j2, k2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if k1 > 0 {
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||||
|
}
|
||||||
|
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
|
||||||
|
m.matchingBlocks = nonAdjacent
|
||||||
|
return m.matchingBlocks
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return list of 5-tuples describing how to turn a into b.
//
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
// tuple preceding it, and likewise for j1 == the previous j2.
//
// The tags are characters, with these meanings:
//
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
//
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
//
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
//
// 'e' (equal): a[i1:i2] == b[j1:j2]
func (m *SequenceMatcher) GetOpCodes() []OpCode {
    if m.opCodes != nil {
        return m.opCodes
    }
    i, j := 0, 0
    matching := m.GetMatchingBlocks()
    opCodes := make([]OpCode, 0, len(matching))
    for _, m := range matching {
        // invariant: we've pumped out correct diffs to change
        // a[:i] into b[:j], and the next matching block is
        // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
        // out a diff to change a[i:ai] into b[j:bj], pump out
        // the matching block, and move (i,j) beyond the match
        ai, bj, size := m.A, m.B, m.Size
        tag := byte(0)
        if i < ai && j < bj {
            tag = 'r'
        } else if i < ai {
            tag = 'd'
        } else if j < bj {
            tag = 'i'
        }
        if tag > 0 {
            opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
        }
        i, j = ai+size, bj+size
        // the list of matching blocks is terminated by a
        // sentinel with size 0
        if size > 0 {
            opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
        }
    }
    m.opCodes = opCodes
    return m.opCodes
}

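// Illustrative sketch (not part of the vendored file): how the opcodes returned
// above describe the edits between two line slices. exampleOpCodes is a
// hypothetical name; it only uses identifiers defined in this package.
func exampleOpCodes() {
    a := []string{"one\n", "two\n", "three\n"}
    b := []string{"one\n", "2\n", "three\n", "four\n"}
    m := NewMatcher(a, b)
    for _, op := range m.GetOpCodes() {
        // Prints: e a[0:1] b[0:1], r a[1:2] b[1:2], e a[2:3] b[2:3], i a[3:3] b[3:4]
        fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
    }
}
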
// Isolate change clusters by eliminating ranges with no changes.
//
// Return a generator of groups with up to n lines of context.
// Each group is in the same format as returned by GetOpCodes().
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
    if n < 0 {
        n = 3
    }
    codes := m.GetOpCodes()
    if len(codes) == 0 {
        codes = []OpCode{{'e', 0, 1, 0, 1}}
    }
    // Fixup leading and trailing groups if they show no changes.
    if codes[0].Tag == 'e' {
        c := codes[0]
        i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
        codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
    }
    if codes[len(codes)-1].Tag == 'e' {
        c := codes[len(codes)-1]
        i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
        codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
    }
    nn := n + n
    groups := [][]OpCode{}
    group := []OpCode{}
    for _, c := range codes {
        i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
        // End the current group and start a new one whenever
        // there is a large range with no changes.
        if c.Tag == 'e' && i2-i1 > nn {
            group = append(group, OpCode{
                c.Tag, i1, min(i2, i1+n),
                j1, min(j2, j1+n),
            })
            groups = append(groups, group)
            group = []OpCode{}
            i1, j1 = max(i1, i2-n), max(j1, j2-n)
        }
        group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
    }
    if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
        groups = append(groups, group)
    }
    return groups
}

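// Illustrative sketch (not part of the vendored file): grouping with the default
// three lines of context. A single changed line in a 100-line file yields one
// group spanning seven lines on each side, i.e. an "@@ -7,7 +7,7 @@" hunk.
// exampleGroups is a hypothetical name.
func exampleGroups() {
    a := make([]string, 100)
    for i := range a {
        a[i] = fmt.Sprintf("line %d\n", i+1)
    }
    b := append([]string(nil), a...)
    b[9] = "changed\n" // only line 10 differs
    groups := NewMatcher(a, b).GetGroupedOpCodes(3)
    fmt.Println(len(groups), groups[0][0].I1, groups[0][len(groups[0])-1].I2) // 1 6 13
}
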
// Return a measure of the sequences' similarity (float in [0,1]).
//
// Where T is the total number of elements in both sequences, and
// M is the number of matches, this is 2.0*M / T.
// Note that this is 1 if the sequences are identical, and 0 if
// they have nothing in common.
//
// .Ratio() is expensive to compute if you haven't already computed
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
// want to try .QuickRatio() or .RealQuickRatio() first to get an
// upper bound.
func (m *SequenceMatcher) Ratio() float64 {
    matches := 0
    for _, m := range m.GetMatchingBlocks() {
        matches += m.Size
    }
    return calculateRatio(matches, len(m.a)+len(m.b))
}

// Return an upper bound on ratio() relatively quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute.
func (m *SequenceMatcher) QuickRatio() float64 {
    // viewing a and b as multisets, set matches to the cardinality
    // of their intersection; this counts the number of matches
    // without regard to order, so is clearly an upper bound
    if m.fullBCount == nil {
        m.fullBCount = map[string]int{}
        for _, s := range m.b {
            m.fullBCount[s]++
        }
    }

    // avail[x] is the number of times x appears in 'b' less the
    // number of times we've seen it in 'a' so far ... kinda
    avail := map[string]int{}
    matches := 0
    for _, s := range m.a {
        n, ok := avail[s]
        if !ok {
            n = m.fullBCount[s]
        }
        avail[s] = n - 1
        if n > 0 {
            matches++
        }
    }
    return calculateRatio(matches, len(m.a)+len(m.b))
}

// Return an upper bound on ratio() very quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute than either .Ratio() or .QuickRatio().
func (m *SequenceMatcher) RealQuickRatio() float64 {
    la, lb := len(m.a), len(m.b)
    return calculateRatio(min(la, lb), la+lb)
}

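// Illustrative sketch (not part of the vendored file): the three ratios form a
// chain of upper bounds, Ratio() <= QuickRatio() <= RealQuickRatio(), so the two
// cheaper ones can reject poor matches before paying for the full computation.
// exampleRatios is a hypothetical name.
func exampleRatios() {
    m := NewMatcher(SplitLines("a\nb\nc\n"), SplitLines("a\nx\nc\n"))
    if m.RealQuickRatio() >= 0.6 && m.QuickRatio() >= 0.6 {
        fmt.Println(m.Ratio()) // 0.75 for these inputs
    }
}
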
// Convert range to the "ed" format
func formatRangeUnified(start, stop int) string {
    // Per the diff spec at http://www.unix.org/single_unix_specification/
    beginning := start + 1 // lines start numbering with one
    length := stop - start
    if length == 1 {
        return fmt.Sprintf("%d", beginning)
    }
    if length == 0 {
        beginning-- // empty ranges begin at line just before the range
    }
    return fmt.Sprintf("%d,%d", beginning, length)
}

// Unified diff parameters
type UnifiedDiff struct {
    A        []string // First sequence lines
    FromFile string   // First file name
    FromDate string   // First file time
    B        []string // Second sequence lines
    ToFile   string   // Second file name
    ToDate   string   // Second file time
    Eol      string   // Headers end of line, defaults to LF
    Context  int      // Number of context lines
}

// Compare two sequences of lines; generate the delta as a unified diff.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by 'n' which
// defaults to three.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline. This is helpful so that inputs
// created from file.readlines() result in diffs that are suitable for
// file.writelines() since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set the lineterm
// argument to "" so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times. Any or all of these may be specified using strings for
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
    buf := bufio.NewWriter(writer)
    defer buf.Flush()
    wf := func(format string, args ...interface{}) error {
        _, err := buf.WriteString(fmt.Sprintf(format, args...))
        return err
    }
    ws := func(s string) error {
        _, err := buf.WriteString(s)
        return err
    }

    if len(diff.Eol) == 0 {
        diff.Eol = "\n"
    }

    started := false
    m := NewMatcher(diff.A, diff.B)
    for _, g := range m.GetGroupedOpCodes(diff.Context) {
        if !started {
            started = true
            fromDate := ""
            if len(diff.FromDate) > 0 {
                fromDate = "\t" + diff.FromDate
            }
            toDate := ""
            if len(diff.ToDate) > 0 {
                toDate = "\t" + diff.ToDate
            }
            if diff.FromFile != "" || diff.ToFile != "" {
                err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
                if err != nil {
                    return err
                }
                err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
                if err != nil {
                    return err
                }
            }
        }
        first, last := g[0], g[len(g)-1]
        range1 := formatRangeUnified(first.I1, last.I2)
        range2 := formatRangeUnified(first.J1, last.J2)
        if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
            return err
        }
        for _, c := range g {
            i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
            if c.Tag == 'e' {
                for _, line := range diff.A[i1:i2] {
                    if err := ws(" " + line); err != nil {
                        return err
                    }
                }
                continue
            }
            if c.Tag == 'r' || c.Tag == 'd' {
                for _, line := range diff.A[i1:i2] {
                    if err := ws("-" + line); err != nil {
                        return err
                    }
                }
            }
            if c.Tag == 'r' || c.Tag == 'i' {
                for _, line := range diff.B[j1:j2] {
                    if err := ws("+" + line); err != nil {
                        return err
                    }
                }
            }
        }
    }
    return nil
}

// Like WriteUnifiedDiff but returns the diff as a string.
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
    w := &bytes.Buffer{}
    err := WriteUnifiedDiff(w, diff)
    return w.String(), err
}

// Split a string on "\n" while preserving the newlines. The output can be used
// as input for UnifiedDiff and ContextDiff structures.
func SplitLines(s string) []string {
    lines := strings.SplitAfter(s, "\n")
    lines[len(lines)-1] += "\n"
    return lines
}

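// Illustrative sketch (not part of the vendored file): the typical way this API is
// consumed -- build a UnifiedDiff from SplitLines output and render it as text.
// exampleUnifiedDiff is a hypothetical name.
func exampleUnifiedDiff() {
    diff := UnifiedDiff{
        A:        SplitLines("one\ntwo\nthree\n"),
        B:        SplitLines("one\n2\nthree\nfour\n"),
        FromFile: "a.txt",
        ToFile:   "b.txt",
        Context:  3,
    }
    text, err := GetUnifiedDiffString(diff)
    if err != nil {
        panic(err)
    }
    // Prints "--- a.txt", "+++ b.txt", then one "@@ -1,4 +1,5 @@" hunk with
    // space/-/+ body lines (SplitLines appends a trailing "\n" element, hence 4 and 5).
    fmt.Print(text)
}
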
32 vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go generated vendored Normal file
@@ -0,0 +1,32 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import "regexp"

type GoCollectorRule struct {
    Matcher *regexp.Regexp
    Deny    bool
}

// GoCollectorOptions should not be used directly by anything except the `collectors` package.
// Use it via the collectors package instead. See issue
// https://github.com/prometheus/client_golang/issues/1030.
//
// This is internal, so external users can only use it via the `collectors.WithGoCollector*` methods.
type GoCollectorOptions struct {
    DisableMemStatsLikeMetrics bool
    RuntimeMetricSumForHist    map[string]string
    RuntimeMetricRules         []GoCollectorRule
}

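// Illustrative sketch (not part of the vendored file): per the comment above, these
// options are only populated through the public `collectors` package. The exact
// names used below (NewGoCollector, WithGoCollectorRuntimeMetrics,
// GoRuntimeMetricsRule) are assumed from the client_golang version vendored here
// and should be checked against its collectors package documentation.
//
//    import (
//        "regexp"
//
//        "github.com/prometheus/client_golang/prometheus"
//        "github.com/prometheus/client_golang/prometheus/collectors"
//    )
//
//    reg := prometheus.NewRegistry()
//    reg.MustRegister(collectors.NewGoCollector(
//        collectors.WithGoCollectorRuntimeMetrics(collectors.GoRuntimeMetricsRule{
//            Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`),
//        }),
//    ))
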
142 vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go generated vendored Normal file
@@ -0,0 +1,142 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.17
// +build go1.17

package internal

import (
    "math"
    "path"
    "runtime/metrics"
    "strings"

    "github.com/prometheus/common/model"
)

// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
// metric description and validates whether the metric is suitable for integration
// with Prometheus.
//
// Returns false if a name could not be produced, or if Prometheus does not understand
// the runtime/metrics Kind.
//
// Note that the main reason a name couldn't be produced is if the runtime/metrics
// package exports a name with characters outside the valid Prometheus metric name
// character set. This is theoretically possible, but should never happen in practice.
// Still, don't rely on it.
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
    namespace := "go"

    comp := strings.SplitN(d.Name, ":", 2)
    key := comp[0]
    unit := comp[1]

    // The last path element in the key is the name,
    // the rest is the subsystem.
    subsystem := path.Dir(key[1:] /* remove leading / */)
    name := path.Base(key)

    // subsystem is translated by replacing all / and - with _.
    subsystem = strings.ReplaceAll(subsystem, "/", "_")
    subsystem = strings.ReplaceAll(subsystem, "-", "_")

    // unit is translated assuming that the unit contains no
    // non-ASCII characters.
    unit = strings.ReplaceAll(unit, "-", "_")
    unit = strings.ReplaceAll(unit, "*", "_")
    unit = strings.ReplaceAll(unit, "/", "_per_")

    // name has - replaced with _ and is concatenated with the unit and
    // other data.
    name = strings.ReplaceAll(name, "-", "_")
    name += "_" + unit
    if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
        name += "_total"
    }

    valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
    switch d.Kind {
    case metrics.KindUint64:
    case metrics.KindFloat64:
    case metrics.KindFloat64Histogram:
    default:
        valid = false
    }
    return namespace, subsystem, name, valid
}

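// Illustrative sketch (not part of the vendored file): what the translation above
// yields for one well-known runtime/metrics entry (go1.17+).
// exampleRuntimeMetricsToProm is a hypothetical name.
func exampleRuntimeMetricsToProm() {
    for _, d := range metrics.All() {
        if d.Name != "/gc/heap/allocs:bytes" {
            continue
        }
        namespace, subsystem, name, valid := RuntimeMetricsToProm(&d)
        // namespace == "go", subsystem == "gc_heap", name == "allocs_bytes_total",
        // valid == true -- i.e. the familiar go_gc_heap_allocs_bytes_total metric.
        _, _, _, _ = namespace, subsystem, name, valid
    }
}
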
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
// as the bottom-most upper-bound inclusive bucket in Prometheus.
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
    switch unit {
    case "bytes":
        // Re-bucket as powers of 2.
        return reBucketExp(buckets, 2)
    case "seconds":
        // Re-bucket as powers of 10 and then merge all buckets greater
        // than 1 second into the +Inf bucket.
        b := reBucketExp(buckets, 10)
        for i := range b {
            if b[i] <= 1 {
                continue
            }
            b[i] = math.Inf(1)
            b = b[:i+1]
            break
        }
        return b
    }
    return buckets
}

// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func reBucketExp(buckets []float64, base float64) []float64 {
    bucket := buckets[0]
    var newBuckets []float64
    // We may see a -Inf here, in which case, add it and skip it
    // since we risk producing NaNs otherwise.
    //
    // We need to preserve -Inf values to maintain runtime/metrics
    // conventions. We'll strip it out later.
    if bucket == math.Inf(-1) {
        newBuckets = append(newBuckets, bucket)
        buckets = buckets[1:]
        bucket = buckets[0]
    }
    // From now on, bucket should always have a non-Inf value because
    // Infs are only ever at the ends of the bucket lists, so
    // arithmetic operations on it are non-NaN.
    for i := 1; i < len(buckets); i++ {
        if bucket >= 0 && buckets[i] < bucket*base {
            // The next bucket we want to include is at least bucket*base.
            continue
        } else if bucket < 0 && buckets[i] < bucket/base {
            // In this case the bucket we're targeting is negative, and since
            // we're ascending through buckets here, we need to divide to get
            // closer to zero exponentially.
            continue
        }
        // The +Inf bucket will always be the last one, and we'll always
        // end up including it here because bucket
        newBuckets = append(newBuckets, bucket)
        bucket = buckets[i]
    }
    return append(newBuckets, bucket)
}

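// Illustrative sketch (not part of the vendored file): what the downsampling above
// does to a dense set of byte-bucket boundaries with base 2. exampleReBucketExp is
// a hypothetical name; it only uses identifiers defined in this file.
func exampleReBucketExp() {
    in := []float64{1, 2, 3, 4, 5, 6, 7, 8, 16, math.Inf(1)}
    out := reBucketExp(in, 2)
    // out == []float64{1, 2, 4, 8, 16, +Inf}: only boundaries at least a factor
    // of base apart are kept, and the terminal +Inf bucket always survives.
    _ = out
}
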
Some files were not shown because too many files have changed in this diff.