Log extra environment information (#95)
* Make metrics tests use a host port
* Fix port forwarding in devconfig tests
* Log extra environment information at startup
committed by Stephen Marshall
parent 6996f2b9b3
commit 9f3032f014
60 NOTICES.txt
@@ -20,6 +20,14 @@ Affected Components:
* golang.org/x/sys
Copyright (c) 2009 The Go Authors. All rights reserved.

B.2 MIT license
* github.com/genuinetools/amicontained
Copyright (c) 2018 The Genuinetools Authors

B.3 BSD 2-Clause license
* github.com/syndtr/gocapability/
Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>

===============================================================================
END OF A. SUMMARY
===============================================================================
@@ -58,6 +66,58 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
END OF B.1 BSD 3-Clause license
==========================================================================

==========================================================================
B.2 MIT license
==========================================================================
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==========================================================================
END OF B.2 MIT license
==========================================================================

==========================================================================
B.3 BSD 2-Clause license
==========================================================================
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================================================
END OF B.3 BSD 2-Clause license
==========================================================================

==========================================================================
END OF B. LICENSE FILES AND OTHER INFORMATION
==========================================================================
@@ -22,9 +22,18 @@ import (
    "runtime"
    "strings"

    "github.com/ibm-messaging/mq-container/internal/capabilities"
    "github.com/genuinetools/amicontained/container"
)

func logContainerRuntime() error {
    r, err := container.DetectRuntime()
    if err != nil {
        return err
    }
    log.Printf("Container runtime: %v", r)
    return nil
}

func logBaseImage() error {
    buf, err := ioutil.ReadFile("/etc/os-release")
    if err != nil {
@@ -35,7 +44,7 @@ func logBaseImage() error {
        if strings.HasPrefix(l, "PRETTY_NAME=") {
            words := strings.Split(l, "\"")
            if len(words) >= 2 {
                log.Printf("Base image detected: %v", words[1])
                log.Printf("Base image: %v", words[1])
                return nil
            }
        }
@@ -46,20 +55,50 @@ func logBaseImage() error {
func logUser() {
    u, err := user.Current()
    if err == nil {
        log.Printf("Running as user ID %v (%v) with primary group %v", u.Uid, u.Name, u.Gid)
        g, err := u.GroupIds()
        if err != nil {
            log.Printf("Running as user ID %v (%v) with primary group %v", u.Uid, u.Name, u.Gid)
        } else {
            // Look for the primary group in the list of group IDs
            for i, v := range g {
                if v == u.Gid {
                    // Remove the element from the slice
                    g = append(g[:i], g[i+1:]...)
                }
            }
            log.Printf("Running as user ID %v (%v) with primary group %v, and supplemental groups %v", u.Uid, u.Name, u.Gid, strings.Join(g, ","))
        }
    }
}

func logCapabilities() {
    status, err := readProc("/proc/1/status")
// logCapabilities logs the Linux capabilities (e.g. setuid, setgid). See https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
func logCapabilities() error {
    caps, err := container.Capabilities()
    if err != nil {
        // Ignore
        return
        return err
    }
    caps, err := capabilities.DetectCapabilities(status)
    if err == nil {
        log.Printf("Detected capabilities: %v", strings.Join(caps, ","))
    for k, v := range caps {
        if len(v) > 0 {
            log.Printf("Capabilities (%s set): %v", strings.ToLower(k), strings.Join(v, ","))
        }
    }
    return nil
}

// logSeccomp logs the seccomp enforcing mode, which affects which kernel calls can be made
func logSeccomp() error {
    s, err := container.SeccompEnforcingMode()
    if err != nil {
        return err
    }
    log.Printf("seccomp enforcing mode: %v", s)
    return nil
}

func logAppArmor() error {
    s := container.AppArmorProfile()
    log.Printf("AppArmor profile: %v", s)
    return nil
}

func readProc(filename string) (value string, err error) {
@@ -106,6 +145,7 @@ func logConfig() error {
    } else {
        log.Printf("Linux kernel version: %v", osr)
    }
    logContainerRuntime()
    logBaseImage()
    fileMax, err := readProc("/proc/sys/fs/file-max")
    if err != nil {
@@ -115,6 +155,8 @@ func logConfig() error {
    }
    logUser()
    logCapabilities()
    logSeccomp()
    logAppArmor()
    err = readMounts()
    if err != nil {
        return err
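The hunks above show only fragments of the logging file (the diff viewer dropped its file header). As an orientation aid, here is a minimal, self-contained sketch of how the new startup logging fits together, assuming the vendored github.com/genuinetools/amicontained/container package added by this commit; logEnvironment and main are illustrative names, while the container.* calls and log formats are taken from the diff. As in the diff's logConfig(), errors from the individual helpers are deliberately not fatal.

```go
package main

import (
	"log"
	"strings"

	"github.com/genuinetools/amicontained/container"
)

// logEnvironment mirrors the calls added to logConfig() in this commit:
// each helper logs one aspect of the runtime environment, and failures
// (for example a missing /proc entry) never block startup.
func logEnvironment() {
	if r, err := container.DetectRuntime(); err == nil {
		log.Printf("Container runtime: %v", r)
	}
	if caps, err := container.Capabilities(); err == nil {
		// caps maps a capability set name (e.g. "BOUNDING") to capability names.
		for set, names := range caps {
			if len(names) > 0 {
				log.Printf("Capabilities (%s set): %v", strings.ToLower(set), strings.Join(names, ","))
			}
		}
	}
	if s, err := container.SeccompEnforcingMode(); err == nil {
		log.Printf("seccomp enforcing mode: %v", s)
	}
	log.Printf("AppArmor profile: %v", container.AppArmorProfile())
}

func main() {
	logEnvironment()
}
```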
12 glide.lock (generated)
@@ -1,10 +1,14 @@
hash: f59997762f179f5e20cadd65e11fcaae47ca0c3f8dee580e77fa4a9ec9d2c7d1
updated: 2018-05-25T11:36:22.237379254+01:00
hash: cec2ce517533a66eb741a92073827f058d3f82c09ea63735fd658df18b24653a
updated: 2018-05-30T15:39:50.39084+01:00
imports:
- name: github.com/beorn7/perks
  version: 3a771d992973f24aa725d07868b467d1ddfceafb
  subpackages:
  - quantile
- name: github.com/genuinetools/amicontained
  version: fcae88544f0212fbb1e20699c41566655b68679b
  subpackages:
  - container
- name: github.com/golang/protobuf
  version: 4bd1920723d7b7c925de087aa32e2187708897f7
  subpackages:
@@ -38,6 +42,10 @@ imports:
  - internal/util
  - nfs
  - xfs
- name: github.com/syndtr/gocapability
  version: 33e07d32887e1e06b7c025f27ce52f62c7990bc0
  subpackages:
  - capability
- name: golang.org/x/sys
  version: 7a4fde3fda8ef580a89dbae8138c26041be14299
  subpackages:

@@ -21,5 +21,8 @@ excludeDirs:
import:
- package: golang.org/x/sys/unix
- package: github.com/prometheus/client_golang
  version: 0.8.0
- package: github.com/ibm-messaging/mq-golang
  version: dev
- package: github.com/genuinetools/amicontained
  version: 0.4.0
@@ -1,158 +0,0 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2017, 2018
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package capabilities allows querying of information on Linux capabilities
|
||||
package capabilities
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DetectCapabilities determines Linux capabilities, based on the contents of a Linux "status" file.
|
||||
// For example, the contents of file `/proc/1/status`
|
||||
func DetectCapabilities(status string) ([]string, error) {
|
||||
lines := strings.Split(status, "\n")
|
||||
for _, line := range lines {
|
||||
if strings.HasPrefix(strings.TrimSpace(line), "CapPrm:") {
|
||||
words := strings.Fields(line)
|
||||
cap, err := strconv.ParseUint(words[1], 16, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return getCapabilities(cap), nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("Unable to detect capabilities")
|
||||
}
|
||||
|
||||
// getCapabilities converts an encoded uint64 into a slice of string names of Linux capabilities
|
||||
func getCapabilities(cap uint64) []string {
|
||||
caps := make([]string, 0, 37)
|
||||
if cap&0x0000000040000000 == 0x0000000040000000 {
|
||||
caps = append(caps, "AUDIT_CONTROL")
|
||||
}
|
||||
if cap&0x0000000020000000 == 0x0000000020000000 {
|
||||
caps = append(caps, "AUDIT_WRITE")
|
||||
}
|
||||
if cap&0x0000001000000000 == 0x0000001000000000 {
|
||||
caps = append(caps, "BLOCK_SUSPEND")
|
||||
}
|
||||
if cap&0x0000000000000001 == 0x0000000000000001 {
|
||||
caps = append(caps, "CHOWN")
|
||||
}
|
||||
if cap&0x0000000000000002 == 0x0000000000000002 {
|
||||
caps = append(caps, "DAC_OVERRIDE")
|
||||
}
|
||||
if cap&0x0000000000000004 == 0x0000000000000004 {
|
||||
caps = append(caps, "DAC_READ_SEARCH")
|
||||
}
|
||||
if cap&0x0000000000000008 == 0x0000000000000008 {
|
||||
caps = append(caps, "FOWNER")
|
||||
}
|
||||
if cap&0x0000000000000010 == 0x0000000000000010 {
|
||||
caps = append(caps, "FSETID")
|
||||
}
|
||||
if cap&0x0000000000004000 == 0x0000000000004000 {
|
||||
caps = append(caps, "IPC_LOCK")
|
||||
}
|
||||
if cap&0x0000000000008000 == 0x0000000000008000 {
|
||||
caps = append(caps, "IPC_OWNER")
|
||||
}
|
||||
if cap&0x0000000000000020 == 0x0000000000000020 {
|
||||
caps = append(caps, "KILL")
|
||||
}
|
||||
if cap&0x0000000010000000 == 0x0000000010000000 {
|
||||
caps = append(caps, "LEASE")
|
||||
}
|
||||
if cap&0x0000000000000200 == 0x0000000000000200 {
|
||||
caps = append(caps, "LINUX_IMMUTABLE")
|
||||
}
|
||||
if cap&0x0000000200000000 == 0x0000000200000000 {
|
||||
caps = append(caps, "MAC_ADMIN")
|
||||
}
|
||||
if cap&0x0000000100000000 == 0x0000000100000000 {
|
||||
caps = append(caps, "MAC_OVERRIDE")
|
||||
}
|
||||
if cap&0x0000000008000000 == 0x0000000008000000 {
|
||||
caps = append(caps, "MKNOD")
|
||||
}
|
||||
if cap&0x0000000000001000 == 0x0000000000001000 {
|
||||
caps = append(caps, "NET_ADMIN")
|
||||
}
|
||||
if cap&0x0000000000000400 == 0x0000000000000400 {
|
||||
caps = append(caps, "NET_BIND_SERVICE")
|
||||
}
|
||||
if cap&0x0000000000000800 == 0x0000000000000800 {
|
||||
caps = append(caps, "NET_BROADCAST")
|
||||
}
|
||||
if cap&0x0000000000002000 == 0x0000000000002000 {
|
||||
caps = append(caps, "NET_RAW")
|
||||
}
|
||||
if cap&0x0000000080000000 == 0x0000000080000000 {
|
||||
caps = append(caps, "SETFCAP")
|
||||
}
|
||||
if cap&0x0000000000000040 == 0x0000000000000040 {
|
||||
caps = append(caps, "SETGID")
|
||||
}
|
||||
if cap&0x0000000000000100 == 0x0000000000000100 {
|
||||
caps = append(caps, "SETPCAP")
|
||||
}
|
||||
if cap&0x0000000000000080 == 0x0000000000000080 {
|
||||
caps = append(caps, "SETUID")
|
||||
}
|
||||
if cap&0x0000000400000000 == 0x0000000400000000 {
|
||||
caps = append(caps, "SYSLOG")
|
||||
}
|
||||
if cap&0x0000000000200000 == 0x0000000000200000 {
|
||||
caps = append(caps, "SYS_ADMIN")
|
||||
}
|
||||
if cap&0x0000000000400000 == 0x0000000000400000 {
|
||||
caps = append(caps, "SYS_BOOT")
|
||||
}
|
||||
if cap&0x0000000000040000 == 0x0000000000040000 {
|
||||
caps = append(caps, "SYS_CHROOT")
|
||||
}
|
||||
if cap&0x0000000000010000 == 0x0000000000010000 {
|
||||
caps = append(caps, "SYS_MODULE")
|
||||
}
|
||||
if cap&0x0000000000800000 == 0x0000000000800000 {
|
||||
caps = append(caps, "SYS_NICE")
|
||||
}
|
||||
if cap&0x0000000000100000 == 0x0000000000100000 {
|
||||
caps = append(caps, "SYS_PACCT")
|
||||
}
|
||||
if cap&0x0000000000080000 == 0x0000000000080000 {
|
||||
caps = append(caps, "SYS_PTRACE")
|
||||
}
|
||||
if cap&0x0000000000020000 == 0x0000000000020000 {
|
||||
caps = append(caps, "SYS_RAWIO")
|
||||
}
|
||||
if cap&0x0000000001000000 == 0x0000000001000000 {
|
||||
caps = append(caps, "SYS_RESOURCE")
|
||||
}
|
||||
if cap&0x0000000002000000 == 0x0000000002000000 {
|
||||
caps = append(caps, "SYS_TIME")
|
||||
}
|
||||
if cap&0x0000000004000000 == 0x0000000004000000 {
|
||||
caps = append(caps, "SYS_TTY_CONFIG")
|
||||
}
|
||||
if cap&0x0000000800000000 == 0x0000000800000000 {
|
||||
caps = append(caps, "WAKE_ALARM")
|
||||
}
|
||||
return caps
|
||||
}
|
||||
@@ -1,39 +0,0 @@
|
||||
/*
|
||||
© Copyright IBM Corporation 2017
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
package capabilities
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var capTests = []struct {
|
||||
in uint64
|
||||
out []string
|
||||
}{
|
||||
{0x0000000040000000, []string{"AUDIT_CONTROL"}},
|
||||
// Default values when you run a Docker container without changing capabilities:
|
||||
{0x00000000a80425fb, []string{"AUDIT_WRITE", "CHOWN", "DAC_OVERRIDE", "FOWNER", "FSETID", "KILL", "MKNOD", "NET_BIND_SERVICE", "NET_RAW", "SETFCAP", "SETGID", "SETPCAP", "SETUID", "SYS_CHROOT"}},
|
||||
}
|
||||
|
||||
func TestGetCapabilities(t *testing.T) {
|
||||
for _, table := range capTests {
|
||||
caps := getCapabilities(table.in)
|
||||
if !reflect.DeepEqual(caps, table.out) {
|
||||
t.Errorf("getCapabilities(%v) - expected %v, got %v", table.in, table.out, caps)
|
||||
}
|
||||
}
|
||||
}
|
||||
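The two deleted files above are the hand-written internal/capabilities package and its test: they decoded the CapPrm bitmask from /proc/1/status against one literal mask per capability, and the commit replaces them with container.Capabilities() from amicontained. For reference, a minimal sketch of the same decoding done with bit positions instead of literal masks; decodeCapPrm and the abbreviated capNames table are illustrative, not part of either package.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Bit positions follow the Linux capability numbering (CAP_CHOWN = 0,
// CAP_DAC_OVERRIDE = 1, ...). Abbreviated list, for illustration only.
var capNames = []string{"CHOWN", "DAC_OVERRIDE", "DAC_READ_SEARCH", "FOWNER", "FSETID", "KILL", "SETGID", "SETUID"}

// decodeCapPrm parses the hexadecimal CapPrm line from a /proc/<pid>/status
// dump and returns the names of the set bits it recognises.
func decodeCapPrm(status string) ([]string, error) {
	for _, line := range strings.Split(status, "\n") {
		if !strings.HasPrefix(strings.TrimSpace(line), "CapPrm:") {
			continue
		}
		mask, err := strconv.ParseUint(strings.Fields(line)[1], 16, 64)
		if err != nil {
			return nil, err
		}
		var caps []string
		for bit, name := range capNames {
			if mask&(1<<uint(bit)) != 0 {
				caps = append(caps, name)
			}
		}
		return caps, nil
	}
	return nil, fmt.Errorf("no CapPrm line found")
}

func main() {
	caps, _ := decodeCapPrm("CapPrm:\t00000000000000c3\n")
	fmt.Println(caps) // [CHOWN DAC_OVERRIDE SETGID SETUID]
}
```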
50 vendor/github.com/genuinetools/amicontained/.gitignore (generated, vendored, Normal file)
@@ -0,0 +1,50 @@
|
||||
###Go###
|
||||
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
|
||||
|
||||
###OSX###
|
||||
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
# Icon must ends with two \r.
|
||||
Icon
|
||||
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
# Files that might appear on external disk
|
||||
.Spotlight-V100
|
||||
.Trashes
|
||||
|
||||
amicontained
|
||||
cross/
|
||||
|
||||
# Go coverage results
|
||||
coverage.txt
|
||||
profile.out
|
||||
49 vendor/github.com/genuinetools/amicontained/.travis.yml (generated, vendored, Normal file)
@@ -0,0 +1,49 @@
|
||||
---
|
||||
language: go
|
||||
sudo: false
|
||||
notifications:
|
||||
email: true
|
||||
go:
|
||||
- 1.x
|
||||
- tip
|
||||
env:
|
||||
global:
|
||||
- GO15VENDOREXPERIMENT=1
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
install:
|
||||
- go get github.com/golang/lint/golint
|
||||
- go get honnef.co/go/tools/cmd/staticcheck
|
||||
script:
|
||||
- go build -v
|
||||
- go vet $(go list ./... | grep -v vendor)
|
||||
- staticcheck $(go list ./... | grep -v vendor)
|
||||
- test -z "$(golint ./... | grep -v vendor | tee /dev/stderr)"
|
||||
- test -z "$(gofmt -s -l . | grep -v vendor | tee /dev/stderr)"
|
||||
- go test $(go list ./... | grep -v vendor)
|
||||
- make cover
|
||||
- make release
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
deploy:
|
||||
provider: releases
|
||||
api_key:
|
||||
secure: "PMnPJQ00zNL2MeTosnC2uL47srkL38PDgHllf7VlEQCjTtAd0LiPuYWXIhRncEXyXQX66uAmz2nDSuR0H+2mB1tIt9gpTMe0bItUHIQhcgJllJWtcMIwE7decMwDRaXVhh/vlzGYTLnT2wZEGFQS57nHZdZd9kXD9w2sepQvgaLmELnAlwXP+TXf7U8WWZ1fw3a4LdotnZRRcy4NkzpsaRep2tTRJTPu01YPMALusucaPGAmlw/pCKvflNR7fqEMKR/dP+hmtk0DFhCNaSSIWHplwFMc4PGOTRjWIdGEvIivqx7HVfF/jFqAZDGr38qcErpW4jUVNBTTrBW22RKLt1vZ/BgdcBYn5siWeytu4YWnZpsxh2u9bcoHhO323SHRdQtDU5N96UV5NqFymYKCl941xRS8mZ1Wu5vetswYYtRG45OEDq3KZFw88QDBjGeNDkXk6hQdu3hQt3CNDNCI/2vzh20/rWErc7jiDy9uNsJdIiruO+3AWt20aZgzw+X7GfbNYGa42RcW6a//4RoN9S7iEaw/xbfHuh08gNCT4Zfg6EY98XU3yX+IJupoqST68kMWIupr0cA9WKwtUHcHquZP1VbkPXkg7xeb6zLCPNDyJ3ly1UFeZ7L+TGUdyNP2NWNAcbgkMgN+TkdbidywSn0GoK1D4rw9vXuqzgRQGK8="
|
||||
file:
|
||||
- cross/amicontained-linux-amd64.md5
|
||||
- cross/amicontained-linux-arm
|
||||
- cross/amicontained-linux-arm64.sha256
|
||||
- cross/amicontained-linux-arm.sha256
|
||||
- cross/amicontained-linux-386.sha256
|
||||
- cross/amicontained-linux-arm64.md5
|
||||
- cross/amicontained-linux-arm64
|
||||
- cross/amicontained-linux-amd64.sha256
|
||||
- cross/amicontained-linux-386.md5
|
||||
- cross/amicontained-linux-arm.md5
|
||||
- cross/amicontained-linux-386
|
||||
- cross/amicontained-linux-amd64
|
||||
skip_cleanup: true
|
||||
on:
|
||||
tags: true
|
||||
32 vendor/github.com/genuinetools/amicontained/Dockerfile (generated, vendored, Normal file)
@@ -0,0 +1,32 @@
|
||||
FROM golang:alpine as builder
|
||||
MAINTAINER Jessica Frazelle <jess@linux.com>
|
||||
|
||||
ENV PATH /go/bin:/usr/local/go/bin:$PATH
|
||||
ENV GOPATH /go
|
||||
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates
|
||||
|
||||
COPY . /go/src/github.com/genuinetools/amicontained
|
||||
|
||||
RUN set -x \
|
||||
&& apk add --no-cache --virtual .build-deps \
|
||||
git \
|
||||
gcc \
|
||||
libc-dev \
|
||||
libgcc \
|
||||
make \
|
||||
&& cd /go/src/github.com/genuinetools/amicontained \
|
||||
&& make static \
|
||||
&& mv amicontained /usr/bin/amicontained \
|
||||
&& apk del .build-deps \
|
||||
&& rm -rf /go \
|
||||
&& echo "Build complete."
|
||||
|
||||
FROM scratch
|
||||
|
||||
COPY --from=builder /usr/bin/amicontained /usr/bin/amicontained
|
||||
COPY --from=builder /etc/ssl/certs/ /etc/ssl/certs
|
||||
|
||||
ENTRYPOINT [ "amicontained" ]
|
||||
CMD [ "--help" ]
|
||||
36 vendor/github.com/genuinetools/amicontained/Gopkg.lock (generated, vendored, Normal file)
@@ -0,0 +1,36 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
revision = "d682213848ed68c0a260ca37d6dd5ace8423f5ba"
|
||||
version = "v1.0.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/syndtr/gocapability"
|
||||
packages = ["capability"]
|
||||
revision = "33e07d32887e1e06b7c025f27ce52f62c7990bc0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
revision = "c7dcf104e3a7a1417abc0230cb0d5240d764159d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows"
|
||||
]
|
||||
revision = "7dca6fe1f43775aa6d1334576870ff63f978f539"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "5953687325d75acd19cee162d99f0d313ea8e61aa4336cfff6b0be32fa814d9e"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
25 vendor/github.com/genuinetools/amicontained/Gopkg.toml (generated, vendored, Normal file)
@@ -0,0 +1,25 @@
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[override]]
|
||||
name = "github.com/Sirupsen/logrus"
|
||||
source = "github.com/sirupsen/logrus"
|
||||
21 vendor/github.com/genuinetools/amicontained/LICENSE (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2018 The Genuinetools Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
141 vendor/github.com/genuinetools/amicontained/Makefile (generated, vendored, Normal file)
@@ -0,0 +1,141 @@
|
||||
# Set an output prefix, which is the local directory if not specified
|
||||
PREFIX?=$(shell pwd)
|
||||
|
||||
# Setup name variables for the package/tool
|
||||
NAME := amicontained
|
||||
PKG := github.com/genuinetools/$(NAME)
|
||||
|
||||
# Set any default go build tags
|
||||
BUILDTAGS :=
|
||||
|
||||
# Set the build dir, where built cross-compiled binaries will be output
|
||||
BUILDDIR := ${PREFIX}/cross
|
||||
|
||||
# Populate version variables
|
||||
# Add to compile time flags
|
||||
VERSION := $(shell cat VERSION.txt)
|
||||
GITCOMMIT := $(shell git rev-parse --short HEAD)
|
||||
GITUNTRACKEDCHANGES := $(shell git status --porcelain --untracked-files=no)
|
||||
ifneq ($(GITUNTRACKEDCHANGES),)
|
||||
GITCOMMIT := $(GITCOMMIT)-dirty
|
||||
endif
|
||||
CTIMEVAR=-X $(PKG)/version.GITCOMMIT=$(GITCOMMIT) -X $(PKG)/version.VERSION=$(VERSION)
|
||||
GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)"
|
||||
GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static"
|
||||
|
||||
# List the GOOS and GOARCH to build
|
||||
GOOSARCHES = linux/arm linux/arm64 linux/amd64 linux/386
|
||||
|
||||
all: clean build fmt lint test staticcheck vet install ## Runs a clean, build, fmt, lint, test, staticcheck, vet and install
|
||||
|
||||
.PHONY: build
|
||||
build: $(NAME) ## Builds a dynamic executable or package
|
||||
|
||||
$(NAME): *.go VERSION.txt
|
||||
@echo "+ $@"
|
||||
go build -tags "$(BUILDTAGS)" ${GO_LDFLAGS} -o $(NAME) .
|
||||
|
||||
.PHONY: static
|
||||
static: ## Builds a static executable
|
||||
@echo "+ $@"
|
||||
CGO_ENABLED=0 go build \
|
||||
-tags "$(BUILDTAGS) static_build" \
|
||||
${GO_LDFLAGS_STATIC} -o $(NAME) .
|
||||
|
||||
.PHONY: fmt
|
||||
fmt: ## Verifies all files have men `gofmt`ed
|
||||
@echo "+ $@"
|
||||
@gofmt -s -l . | grep -v '.pb.go:' | grep -v vendor | tee /dev/stderr
|
||||
|
||||
.PHONY: lint
|
||||
lint: ## Verifies `golint` passes
|
||||
@echo "+ $@"
|
||||
@golint ./... | grep -v '.pb.go:' | grep -v vendor | tee /dev/stderr
|
||||
|
||||
.PHONY: test
|
||||
test: ## Runs the go tests
|
||||
@echo "+ $@"
|
||||
@go test -v -tags "$(BUILDTAGS) cgo" $(shell go list ./... | grep -v vendor)
|
||||
|
||||
.PHONY: vet
|
||||
vet: ## Verifies `go vet` passes
|
||||
@echo "+ $@"
|
||||
@go vet $(shell go list ./... | grep -v vendor) | grep -v '.pb.go:' | tee /dev/stderr
|
||||
|
||||
.PHONY: staticcheck
|
||||
staticcheck: ## Verifies `staticcheck` passes
|
||||
@echo "+ $@"
|
||||
@staticcheck $(shell go list ./... | grep -v vendor) | grep -v '.pb.go:' | tee /dev/stderr
|
||||
|
||||
.PHONY: cover
|
||||
cover: ## Runs go test with coverage
|
||||
@echo "" > coverage.txt
|
||||
@for d in $(shell go list ./... | grep -v vendor); do \
|
||||
go test -race -coverprofile=profile.out -covermode=atomic "$$d"; \
|
||||
if [ -f profile.out ]; then \
|
||||
cat profile.out >> coverage.txt; \
|
||||
rm profile.out; \
|
||||
fi; \
|
||||
done;
|
||||
|
||||
.PHONY: install
|
||||
install: ## Installs the executable or package
|
||||
@echo "+ $@"
|
||||
go install -a -tags "$(BUILDTAGS)" ${GO_LDFLAGS} .
|
||||
|
||||
define buildpretty
|
||||
mkdir -p $(BUILDDIR)/$(1)/$(2);
|
||||
GOOS=$(1) GOARCH=$(2) CGO_ENABLED=0 go build \
|
||||
-o $(BUILDDIR)/$(1)/$(2)/$(NAME) \
|
||||
-a -tags "$(BUILDTAGS) static_build netgo" \
|
||||
-installsuffix netgo ${GO_LDFLAGS_STATIC} .;
|
||||
md5sum $(BUILDDIR)/$(1)/$(2)/$(NAME) > $(BUILDDIR)/$(1)/$(2)/$(NAME).md5;
|
||||
sha256sum $(BUILDDIR)/$(1)/$(2)/$(NAME) > $(BUILDDIR)/$(1)/$(2)/$(NAME).sha256;
|
||||
endef
|
||||
|
||||
.PHONY: cross
|
||||
cross: *.go VERSION.txt ## Builds the cross-compiled binaries, creating a clean directory structure (eg. GOOS/GOARCH/binary)
|
||||
@echo "+ $@"
|
||||
$(foreach GOOSARCH,$(GOOSARCHES), $(call buildpretty,$(subst /,,$(dir $(GOOSARCH))),$(notdir $(GOOSARCH))))
|
||||
|
||||
define buildrelease
|
||||
GOOS=$(1) GOARCH=$(2) CGO_ENABLED=0 go build \
|
||||
-o $(BUILDDIR)/$(NAME)-$(1)-$(2) \
|
||||
-a -tags "$(BUILDTAGS) static_build netgo" \
|
||||
-installsuffix netgo ${GO_LDFLAGS_STATIC} .;
|
||||
md5sum $(BUILDDIR)/$(NAME)-$(1)-$(2) > $(BUILDDIR)/$(NAME)-$(1)-$(2).md5;
|
||||
sha256sum $(BUILDDIR)/$(NAME)-$(1)-$(2) > $(BUILDDIR)/$(NAME)-$(1)-$(2).sha256;
|
||||
endef
|
||||
|
||||
.PHONY: release
|
||||
release: *.go VERSION.txt ## Builds the cross-compiled binaries, naming them in such a way for release (eg. binary-GOOS-GOARCH)
|
||||
@echo "+ $@"
|
||||
$(foreach GOOSARCH,$(GOOSARCHES), $(call buildrelease,$(subst /,,$(dir $(GOOSARCH))),$(notdir $(GOOSARCH))))
|
||||
|
||||
.PHONY: bump-version
|
||||
BUMP := patch
|
||||
bump-version: ## Bump the version in the version file. Set BUMP to [ patch | major | minor ]
|
||||
@go get -u github.com/jessfraz/junk/sembump # update sembump tool
|
||||
$(eval NEW_VERSION = $(shell sembump --kind $(BUMP) $(VERSION)))
|
||||
@echo "Bumping VERSION.txt from $(VERSION) to $(NEW_VERSION)"
|
||||
echo $(NEW_VERSION) > VERSION.txt
|
||||
@echo "Updating links to download binaries in README.md"
|
||||
sed -i s/$(VERSION)/$(NEW_VERSION)/g README.md
|
||||
git add VERSION.txt README.md
|
||||
git commit -vsam "Bump version to $(NEW_VERSION)"
|
||||
@echo "Run make tag to create and push the tag for new version $(NEW_VERSION)"
|
||||
|
||||
.PHONY: tag
|
||||
tag: ## Create a new git tag to prepare to build a release
|
||||
git tag -sa $(VERSION) -m "$(VERSION)"
|
||||
@echo "Run git push origin $(VERSION) to push your new tag to GitHub and trigger a travis build."
|
||||
|
||||
.PHONY: clean
|
||||
clean: ## Cleanup any build binaries or packages
|
||||
@echo "+ $@"
|
||||
$(RM) $(NAME)
|
||||
$(RM) -r $(BUILDDIR)
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
|
||||
177 vendor/github.com/genuinetools/amicontained/README.md (generated, vendored, Normal file)
@@ -0,0 +1,177 @@
|
||||
# amicontained
|
||||
|
||||
[](https://travis-ci.org/genuinetools/amicontained)
|
||||
|
||||
Container introspection tool. Find out what container runtime is being used as
|
||||
well as features available.
|
||||
|
||||
- [Installation](#installation)
|
||||
+ [Binaries](#binaries)
|
||||
+ [Via Go](#via-go)
|
||||
- [Usage](#usage)
|
||||
- [Examples](#examples)
|
||||
+ [docker](#docker)
|
||||
+ [lxc](#lxc)
|
||||
+ [systemd-nspawn](#systemd-nspawn)
|
||||
+ [rkt](#rkt)
|
||||
+ [unshare](#unshare)
|
||||
|
||||
## Installation
|
||||
|
||||
#### Binaries
|
||||
|
||||
- **linux** [386](https://github.com/genuinetools/amicontained/releases/download/v0.4.0/amicontained-linux-386) / [amd64](https://github.com/genuinetools/amicontained/releases/download/v0.4.0/amicontained-linux-amd64) / [arm](https://github.com/genuinetools/amicontained/releases/download/v0.4.0/amicontained-linux-arm) / [arm64](https://github.com/genuinetools/amicontained/releases/download/v0.4.0/amicontained-linux-arm64)
|
||||
|
||||
#### Via Go
|
||||
|
||||
```bash
|
||||
$ go get github.com/genuinetools/amicontained
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```console
|
||||
$ amicontained -h
|
||||
_ _ _ _
|
||||
__ _ _ __ ___ (_) ___ ___ _ __ | |_ __ _(_)_ __ ___ __| |
|
||||
/ _` | '_ ` _ \| |/ __/ _ \| '_ \| __/ _` | | '_ \ / _ \/ _` |
|
||||
| (_| | | | | | | | (_| (_) | | | | || (_| | | | | | __/ (_| |
|
||||
\__,_|_| |_| |_|_|\___\___/|_| |_|\__\__,_|_|_| |_|\___|\__,_|
|
||||
Container introspection tool.
|
||||
Version: v0.4.0
|
||||
|
||||
-d run in debug mode
|
||||
-v print version and exit (shorthand)
|
||||
-version
|
||||
print version and exit
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
#### docker
|
||||
|
||||
```console
|
||||
$ docker run --rm -it r.j3ss.co/amicontained
|
||||
Container Runtime: docker
|
||||
Has Namespaces:
|
||||
pid: true
|
||||
user: true
|
||||
User Namespace Mappings:
|
||||
Container -> 0
|
||||
Host -> 886432
|
||||
Range -> 65536
|
||||
AppArmor Profile: docker-default (enforce)
|
||||
Capabilities:
|
||||
BOUNDING -> chown dac_override fowner fsetid kill setgid setuid setpcap net_bind_service net_raw sys_chroot mknod audit_write setfcap
|
||||
Chroot (not pivot_root): false
|
||||
|
||||
$ docker run --rm -it --pid host r.j3ss.co/amicontained
|
||||
Container Runtime: docker
|
||||
Has Namespaces:
|
||||
pid: false
|
||||
user: false
|
||||
AppArmor Profile: docker-default (enforce)
|
||||
Capabilities:
|
||||
BOUNDING -> chown dac_override fowner fsetid kill setgid setuid setpcap net_bind_service net_raw sys_chroot mknod audit_write setfcap
|
||||
Chroot (not pivot_root): false
|
||||
|
||||
$ docker run --rm -it --security-opt "apparmor=unconfined" r.j3ss.co/amicontained
|
||||
Container Runtime: docker
|
||||
Has Namespaces:
|
||||
pid: true
|
||||
user: false
|
||||
AppArmor Profile: unconfined
|
||||
Capabilities:
|
||||
BOUNDING -> chown dac_override fowner fsetid kill setgid setuid setpcap net_bind_service net_raw sys_chroot mknod audit_write setfcap
|
||||
Chroot (not pivot_root): false
|
||||
```
|
||||
|
||||
#### lxc
|
||||
|
||||
```console
|
||||
$ lxc-attach -n xenial
|
||||
root@xenial:/# amicontained
|
||||
Container Runtime: lxc
|
||||
Has Namespaces:
|
||||
pid: true
|
||||
user: true
|
||||
User Namespace Mappings:
|
||||
Container -> 0 Host -> 100000 Range -> 65536
|
||||
AppArmor Profile: none
|
||||
Capabilities:
|
||||
BOUNDING -> chown dac_override dac_read_search fowner fsetid kill setgid setuid setpcap linux_immutable net_bind_service net_broadcast net_admin net_raw ipc_lock ipc_owner sys_chroot sys_ptrace sys_pacct sys_admin sys_boot sys_nice sys_resource sys_tty_config mknod lease audit_write audit_control setfcap syslog wake_alarm block_suspend audit_read
|
||||
Chroot (not pivot_root): false
|
||||
|
||||
$ lxc-execute -n xenial -- /bin/amicontained
|
||||
Container Runtime: lxc
|
||||
Has Namespaces:
|
||||
pid: true
|
||||
user: true
|
||||
User Namespace Mappings:
|
||||
Container -> 0 Host -> 100000 Range -> 65536
|
||||
AppArmor Profile: none
|
||||
Capabilities:
|
||||
BOUNDING -> chown dac_override dac_read_search fowner fsetid kill setgid setuid setpcap linux_immutable net_bind_service net_broadcast net_admin net_raw ipc_lock ipc_owner sys_chroot sys_ptrace sys_pacct sys_admin sys_boot sys_nice sys_resource sys_tty_config mknod lease audit_write audit_control setfcap syslog wake_alarm block_suspend audit_read
|
||||
Chroot (not pivot_root): false
|
||||
```
|
||||
|
||||
#### systemd-nspawn
|
||||
|
||||
```console
|
||||
$ sudo systemd-nspawn --machine amicontained --directory nspawn-amicontained /usr/bin/amicontained
|
||||
Spawning container amicontained on /home/genuinetools/nspawn-amicontained.
|
||||
Press ^] three times within 1s to kill container.
|
||||
Timezone UTC does not exist in container, not updating container timezone.
|
||||
Container Runtime: systemd-nspawn
|
||||
Has Namespaces:
|
||||
pid: true
|
||||
user: false
|
||||
AppArmor Profile: none
|
||||
Capabilities:
|
||||
BOUNDING -> chown dac_override dac_read_search fowner fsetid kill setgid setuid setpcap linux_immutable net_bind_service net_broadcast net_raw ipc_owner sys_chroot sys_ptrace sys_admin sys_boot sys_nice sys_resource sys_tty_config mknod lease audit_write audit_control setfcap
|
||||
Chroot (not pivot_root): false
|
||||
Container amicontained exited successfully.
|
||||
```
|
||||
|
||||
#### rkt
|
||||
|
||||
```console
|
||||
$ sudo rkt --insecure-options=image run docker://r.j3ss.co/amicontained
|
||||
[ 631.522121] amicontained[5]: Container Runtime: rkt
|
||||
[ 631.522471] amicontained[5]: Host PID Namespace: false
|
||||
[ 631.522617] amicontained[5]: AppArmor Profile: none
|
||||
[ 631.522768] amicontained[5]: User Namespace: false
|
||||
[ 631.522922] amicontained[5]: Capabilities:
|
||||
[ 631.523075] amicontained[5]: BOUNDING -> chown dac_override fowner fsetid kill setgid setuid setpcap net_bind_service net_raw sys_chroot mknod audit_write setfcap
|
||||
[ 631.523213] amicontained[5]: Chroot (not pivot_root): false
|
||||
|
||||
$ sudo rkt --insecure-options=image run --private-users=true --no-overlay docker://r.j3ss.co/amicontained
|
||||
[ 785.547050] amicontained[5]: Container Runtime: rkt
|
||||
[ 785.547360] amicontained[5]: Host PID Namespace: false
|
||||
[ 785.547567] amicontained[5]: AppArmor Profile: none
|
||||
[ 785.547717] amicontained[5]: User Namespace: true
|
||||
[ 785.547856] amicontained[5]: User Namespace Mappings:
|
||||
[ 785.548064] amicontained[5]: Container -> 0 Host -> 229834752 Range -> 65536
|
||||
[ 785.548335] amicontained[5]: Capabilities:
|
||||
[ 785.548537] amicontained[5]: BOUNDING -> chown dac_override fowner fsetid kill setgid setuid setpcap net_bind_service net_raw sys_chroot mknod audit_write setfcap
|
||||
[ 785.548679] amicontained[5]: Chroot (not pivot_root): false
|
||||
```
|
||||
|
||||
#### unshare
|
||||
|
||||
```console
|
||||
$ sudo unshare --user -r
|
||||
root@coreos:/home/jessie/.go/src/github.com/genuinetools/amicontained# ./amicontained
|
||||
Container Runtime: not-found
|
||||
Has Namespaces:
|
||||
pid: false
|
||||
user: true
|
||||
User Namespace Mappings:
|
||||
Container -> 0
|
||||
Host -> 0
|
||||
Range -> 1
|
||||
AppArmor Profile: unconfined
|
||||
Capabilities:
|
||||
BOUNDING -> chown dac_override dac_read_search fowner fsetid kill setgid setuid setpcap linux_immutable net_bind_service net_broadcast net_admin net_raw ipc_lock ipc_owner sys_module sys_rawio sys_chroot sys_ptrace sys_pacct sys_admin sys_boot sys_nice sys_resource sys_time sys_tty_config mknod lease audit_write audit_control setfcap mac_override mac_admin syslog wake_alarm block_suspend audit_read
|
||||
Chroot (not pivot_root): false
|
||||
```
|
||||
1 vendor/github.com/genuinetools/amicontained/VERSION.txt (generated, vendored, Normal file)
@@ -0,0 +1 @@
|
||||
v0.4.0
|
||||
287 vendor/github.com/genuinetools/amicontained/container/container.go (generated, vendored, Normal file)
@@ -0,0 +1,287 @@
|
||||
// Package container provides tools for introspecting containers.
|
||||
package container
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
// RuntimeDocker is the string for the docker runtime.
|
||||
RuntimeDocker = "docker"
|
||||
// RuntimeRkt is the string for the rkt runtime.
|
||||
RuntimeRkt = "rkt"
|
||||
// RuntimeNspawn is the string for the systemd-nspawn runtime.
|
||||
RuntimeNspawn = "systemd-nspawn"
|
||||
// RuntimeLXC is the string for the lxc runtime.
|
||||
RuntimeLXC = "lxc"
|
||||
// RuntimeLXCLibvirt is the string for the lxc-libvirt runtime.
|
||||
RuntimeLXCLibvirt = "lxc-libvirt"
|
||||
// RuntimeOpenVZ is the string for the openvz runtime.
|
||||
RuntimeOpenVZ = "openvz"
|
||||
// RuntimeKubernetes is the string for the kubernetes runtime.
|
||||
RuntimeKubernetes = "kube"
|
||||
// RuntimeGarden is the string for the garden runtime.
|
||||
RuntimeGarden = "garden"
|
||||
|
||||
uint32Max = 4294967295
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrContainerRuntimeNotFound describes when a container runtime could not be found.
|
||||
ErrContainerRuntimeNotFound = errors.New("container runtime could not be found")
|
||||
|
||||
runtimes = []string{RuntimeDocker, RuntimeRkt, RuntimeNspawn, RuntimeLXC, RuntimeLXCLibvirt, RuntimeOpenVZ, RuntimeKubernetes, RuntimeGarden}
|
||||
)
|
||||
|
||||
// DetectRuntime returns the container runtime the process is running in.
|
||||
func DetectRuntime() (string, error) {
|
||||
// read the cgroups file
|
||||
cgroups := readFile("/proc/self/cgroup")
|
||||
if len(cgroups) > 0 {
|
||||
for _, runtime := range runtimes {
|
||||
if strings.Contains(cgroups, runtime) {
|
||||
return runtime, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// /proc/vz exists in container and outside of the container, /proc/bc only outside of the container.
|
||||
if fileExists("/proc/vz") && !fileExists("/proc/bc") {
|
||||
return RuntimeOpenVZ, nil
|
||||
}
|
||||
|
||||
ctrenv := os.Getenv("container")
|
||||
if ctrenv != "" {
|
||||
for _, runtime := range runtimes {
|
||||
if ctrenv == runtime {
|
||||
return runtime, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PID 1 might have dropped this information into a file in /run.
|
||||
// Read from /run/systemd/container since it is better than accessing /proc/1/environ,
|
||||
// which needs CAP_SYS_PTRACE
|
||||
f := readFile("/run/systemd/container")
|
||||
if len(f) > 0 {
|
||||
for _, runtime := range runtimes {
|
||||
if f == runtime {
|
||||
return runtime, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return "not-found", ErrContainerRuntimeNotFound
|
||||
}
|
||||
|
||||
// HasNamespace determines if the container is using a particular namespace or the
|
||||
// host namespace.
|
||||
// The device number of an unnamespaced /proc/1/ns/{ns} is 4 and anything else is
|
||||
// higher.
|
||||
func HasNamespace(ns string) (bool, error) {
|
||||
file := fmt.Sprintf("/proc/1/ns/%s", ns)
|
||||
|
||||
// Use Lstat to not follow the symlink.
|
||||
var info syscall.Stat_t
|
||||
if err := syscall.Lstat(file, &info); err != nil {
|
||||
return false, &os.PathError{Op: "lstat", Path: file, Err: err}
|
||||
}
|
||||
|
||||
// Get the device number. If it is higher than 4 it is in a namespace.
|
||||
if info.Dev > 4 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// AppArmorProfile determines the apparmor profile for a container.
|
||||
func AppArmorProfile() string {
|
||||
f := readFile("/proc/self/attr/current")
|
||||
if f == "" {
|
||||
return "none"
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// UserMapping holds the values for a {uid,gid}_map.
|
||||
type UserMapping struct {
|
||||
ContainerID int64
|
||||
HostID int64
|
||||
Range int64
|
||||
}
|
||||
|
||||
// UserNamespace determines if the container is running in a UserNamespace and returns the mappings if so.
|
||||
func UserNamespace() (bool, []UserMapping) {
|
||||
f := readFile("/proc/self/uid_map")
|
||||
if len(f) < 0 {
|
||||
// user namespace is uninitialized
|
||||
return true, nil
|
||||
}
|
||||
|
||||
userNs, mappings, err := readUserMappings(f)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return userNs, mappings
|
||||
}
|
||||
|
||||
func readUserMappings(f string) (iuserNS bool, mappings []UserMapping, err error) {
|
||||
parts := strings.Split(f, " ")
|
||||
parts = deleteEmpty(parts)
|
||||
if len(parts) < 3 {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(parts); i += 3 {
|
||||
nsu, hu, r := parts[i], parts[i+1], parts[i+2]
|
||||
mapping := UserMapping{}
|
||||
|
||||
mapping.ContainerID, err = strconv.ParseInt(nsu, 10, 0)
|
||||
if err != nil {
|
||||
return false, nil, nil
|
||||
}
|
||||
mapping.HostID, err = strconv.ParseInt(hu, 10, 0)
|
||||
if err != nil {
|
||||
return false, nil, nil
|
||||
}
|
||||
mapping.Range, err = strconv.ParseInt(r, 10, 0)
|
||||
if err != nil {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
if mapping.ContainerID == 0 && mapping.HostID == 0 && mapping.Range == uint32Max {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
mappings = append(mappings, mapping)
|
||||
}
|
||||
|
||||
return true, mappings, nil
|
||||
}
|
||||
|
||||
// Capabilities returns the allowed capabilities in the container.
|
||||
func Capabilities() (map[string][]string, error) {
|
||||
allCaps := capability.List()
|
||||
|
||||
caps, err := capability.NewPid(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
allowedCaps := map[string][]string{}
|
||||
allowedCaps["EFFECTIVE | PERMITTED | INHERITABLE"] = []string{}
|
||||
allowedCaps["BOUNDING"] = []string{}
|
||||
allowedCaps["AMBIENT"] = []string{}
|
||||
|
||||
for _, cap := range allCaps {
|
||||
if caps.Get(capability.CAPS, cap) {
|
||||
allowedCaps["EFFECTIVE | PERMITTED | INHERITABLE"] = append(allowedCaps["EFFECTIVE | PERMITTED | INHERITABLE"], cap.String())
|
||||
}
|
||||
if caps.Get(capability.BOUNDING, cap) {
|
||||
allowedCaps["BOUNDING"] = append(allowedCaps["BOUNDING"], cap.String())
|
||||
}
|
||||
if caps.Get(capability.AMBIENT, cap) {
|
||||
allowedCaps["AMBIENT"] = append(allowedCaps["AMBIENT"], cap.String())
|
||||
}
|
||||
}
|
||||
|
||||
return allowedCaps, nil
|
||||
}
|
||||
|
||||
// Chroot detects if we are running in a chroot or a pivot_root.
|
||||
// Currently, we can not distinguish between the two.
|
||||
func Chroot() (bool, error) {
|
||||
var a, b syscall.Stat_t
|
||||
|
||||
if err := syscall.Lstat("/proc/1/root", &a); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if err := syscall.Lstat("/", &b); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return a.Ino == b.Ino && a.Dev == b.Dev, nil
|
||||
}
|
||||
|
||||
// SeccompEnforcingMode returns the seccomp enforcing level (disabled, filtering, strict)
|
||||
func SeccompEnforcingMode() (string, error) {
|
||||
// Read from /proc/self/status Linux 3.8+
|
||||
s := readFile("/proc/self/status")
|
||||
|
||||
// Pre linux 3.8
|
||||
if !strings.Contains(s, "Seccomp") {
|
||||
// Check if Seccomp is supported, via CONFIG_SECCOMP.
|
||||
if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
|
||||
// Make sure the kernel has CONFIG_SECCOMP_FILTER.
|
||||
if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
|
||||
return "strict", nil
|
||||
}
|
||||
}
|
||||
return "disabled", nil
|
||||
}
|
||||
|
||||
// Split status file string by line
|
||||
statusMappings := strings.Split(s, "\n")
|
||||
statusMappings = deleteEmpty(statusMappings)
|
||||
|
||||
mode := "-1"
|
||||
for _, line := range statusMappings {
|
||||
if strings.Contains(line, "Seccomp:") {
|
||||
mode = string(line[len(line)-1])
|
||||
}
|
||||
}
|
||||
|
||||
seccompModes := map[string]string{
|
||||
"0": "disabled",
|
||||
"1": "strict",
|
||||
"2": "filtering",
|
||||
}
|
||||
|
||||
seccompMode, ok := seccompModes[mode]
|
||||
if !ok {
|
||||
return "", errors.New("could not retrieve seccomp filtering status")
|
||||
}
|
||||
|
||||
return seccompMode, nil
|
||||
}
|
||||
|
||||
func fileExists(file string) bool {
|
||||
if _, err := os.Stat(file); !os.IsNotExist(err) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func readFile(file string) string {
|
||||
if !fileExists(file) {
|
||||
return ""
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(string(b))
|
||||
}
|
||||
|
||||
func deleteEmpty(s []string) []string {
|
||||
var r []string
|
||||
for _, str := range s {
|
||||
if strings.TrimSpace(str) != "" {
|
||||
r = append(r, strings.TrimSpace(str))
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
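Besides the runtime, AppArmor, seccomp and capability helpers consumed by the new logging code, the vendored container package above also exposes namespace introspection (HasNamespace, UserNamespace), which amicontained's own main.go uses. A small usage sketch with illustrative output formatting; only the container.* calls and types come from the vendored code.

```go
package main

import (
	"fmt"

	"github.com/genuinetools/amicontained/container"
)

func main() {
	// Report whether PID 1 is in a separate PID namespace.
	pidNS, err := container.HasNamespace("pid")
	if err != nil {
		fmt.Printf("pid namespace check failed: %v\n", err)
	} else {
		fmt.Printf("pid namespace: %t\n", pidNS)
	}

	// UserNamespace also returns the uid_map entries when one is in use.
	userNS, mappings := container.UserNamespace()
	fmt.Printf("user namespace: %t\n", userNS)
	for _, m := range mappings {
		fmt.Printf("container %d -> host %d (range %d)\n", m.ContainerID, m.HostID, m.Range)
	}
}
```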
41 vendor/github.com/genuinetools/amicontained/container/container_test.go (generated, vendored, Normal file)
@@ -0,0 +1,41 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestReadUserMappings(t *testing.T) {
|
||||
f := ` 0 100000 1000
|
||||
1000 1000 1
|
||||
1001 101001 64535`
|
||||
expected := []UserMapping{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 100000,
|
||||
Range: 1000,
|
||||
},
|
||||
{
|
||||
ContainerID: 1000,
|
||||
HostID: 1000,
|
||||
Range: 1,
|
||||
},
|
||||
{
|
||||
ContainerID: 1001,
|
||||
HostID: 101001,
|
||||
Range: 64535,
|
||||
},
|
||||
}
|
||||
|
||||
userNs, mappings, err := readUserMappings(f)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !userNs {
|
||||
t.Fatal("expected user namespaces to be true")
|
||||
}
|
||||
|
||||
if len(expected) != len(mappings) {
|
||||
t.Fatalf("expected length %d got %d", len(expected), len(mappings))
|
||||
}
|
||||
}
|
||||
145 vendor/github.com/genuinetools/amicontained/main.go (generated, vendored, Normal file)
@@ -0,0 +1,145 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/genuinetools/amicontained/container"
|
||||
"github.com/genuinetools/amicontained/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// BANNER is what is printed for help/info output
|
||||
BANNER = ` _ _ _ _
|
||||
__ _ _ __ ___ (_) ___ ___ _ __ | |_ __ _(_)_ __ ___ __| |
|
||||
/ _` + "`" + ` | '_ ` + "`" + ` _ \| |/ __/ _ \| '_ \| __/ _` + "`" + ` | | '_ \ / _ \/ _` + "`" + ` |
|
||||
| (_| | | | | | | | (_| (_) | | | | || (_| | | | | | __/ (_| |
|
||||
\__,_|_| |_| |_|_|\___\___/|_| |_|\__\__,_|_|_| |_|\___|\__,_|
|
||||
Container introspection tool.
|
||||
Version: %s
|
||||
|
||||
`
|
||||
)
|
||||
|
||||
var (
|
||||
debug bool
|
||||
vrsn bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
// parse flags
|
||||
flag.BoolVar(&vrsn, "version", false, "print version and exit")
|
||||
flag.BoolVar(&vrsn, "v", false, "print version and exit (shorthand)")
|
||||
flag.BoolVar(&debug, "d", false, "run in debug mode")
|
||||
|
||||
flag.Usage = func() {
|
||||
fmt.Fprint(os.Stderr, fmt.Sprintf(BANNER, version.VERSION))
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
flag.Parse()
|
||||
|
||||
// set log level
|
||||
if debug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
|
||||
if vrsn {
|
||||
fmt.Printf("amicontained version %s, build %s\n", version.VERSION, version.GITCOMMIT)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if flag.NArg() < 1 {
|
||||
return
|
||||
}
|
||||
|
||||
// parse the arg
|
||||
arg := flag.Args()[0]
|
||||
|
||||
if arg == "help" {
|
||||
usageAndExit("", 0)
|
||||
}
|
||||
|
||||
if arg == "version" {
|
||||
fmt.Printf("amicontained version %s, build %s\n", version.VERSION, version.GITCOMMIT)
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Container Runtime
|
||||
runtime, err := container.DetectRuntime()
|
||||
if err != nil && err != container.ErrContainerRuntimeNotFound {
|
||||
log.Fatal(err)
|
||||
return
|
||||
}
|
||||
fmt.Printf("Container Runtime: %s\n", runtime)
|
||||
|
||||
// Namespaces
|
||||
namespaces := []string{"pid"}
|
||||
fmt.Println("Has Namespaces:")
|
||||
for _, namespace := range namespaces {
|
||||
ns, err := container.HasNamespace(namespace)
|
||||
if err != nil {
|
||||
fmt.Printf("\t%s: error -> %v\n", namespace, err)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("\t%s: %t\n", namespace, ns)
|
||||
}
|
||||
|
||||
// User Namespaces
|
||||
userNS, userMappings := container.UserNamespace()
|
||||
fmt.Printf("\tuser: %t\n", userNS)
|
||||
if len(userMappings) > 0 {
|
||||
fmt.Println("User Namespace Mappings:")
|
||||
for _, userMapping := range userMappings {
|
||||
fmt.Printf("\tContainer -> %d\tHost -> %d\tRange -> %d\n", userMapping.ContainerID, userMapping.HostID, userMapping.Range)
|
||||
}
|
||||
}
|
||||
|
||||
// AppArmor Profile
|
||||
aaprof := container.AppArmorProfile()
|
||||
fmt.Printf("AppArmor Profile: %s\n", aaprof)
|
||||
|
||||
// Capabilities
|
||||
caps, err := container.Capabilities()
|
||||
if err != nil {
|
||||
logrus.Warnf("getting capabilities failed: %v", err)
|
||||
}
|
||||
if len(caps) > 0 {
|
||||
fmt.Println("Capabilities:")
|
||||
for k, v := range caps {
|
||||
if len(v) > 0 {
|
||||
fmt.Printf("\t%s -> %s\n", k, strings.Join(v, " "))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Chroot
|
||||
chroot, err := container.Chroot()
|
||||
if err != nil {
|
||||
logrus.Debugf("chroot check error: %v", err)
|
||||
}
|
||||
fmt.Printf("Chroot (not pivot_root): %t\n", chroot)
|
||||
|
||||
// Seccomp
|
||||
seccompMode, err := container.SeccompEnforcingMode()
|
||||
if err != nil {
|
||||
logrus.Debugf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("Seccomp: %s\n", seccompMode)
|
||||
}
|
||||
|
||||
func usageAndExit(message string, exitCode int) {
|
||||
if message != "" {
|
||||
fmt.Fprintf(os.Stderr, message)
|
||||
fmt.Fprintf(os.Stderr, "\n\n")
|
||||
}
|
||||
flag.Usage()
|
||||
fmt.Fprintf(os.Stderr, "\n")
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
1 vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/.gitignore (generated, vendored, Normal file)
@@ -0,0 +1 @@
|
||||
logrus
|
||||
15 vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/.travis.yml (generated, vendored, Normal file)
@@ -0,0 +1,15 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.6.x
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- tip
|
||||
env:
|
||||
- GOMAXPROCS=4 GORACE=halt_on_error=1
|
||||
install:
|
||||
- go get github.com/stretchr/testify/assert
|
||||
- go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
|
||||
- go get golang.org/x/sys/unix
|
||||
- go get golang.org/x/sys/windows
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
118 vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/CHANGELOG.md (generated, vendored, Normal file)
@@ -0,0 +1,118 @@
# 1.0.4

* Fix race when adding hooks (#612)
* Fix terminal check in AppEngine (#635)

# 1.0.3

* Replace example files with testable examples

# 1.0.2

* bug: quote non-string values in text formatter (#583)
* Make (*Logger) SetLevel a public method

# 1.0.1

* bug: fix escaping in text formatter (#575)

# 1.0.0

* Officially changed name to lower-case
* bug: colors on Windows 10 (#541)
* bug: fix race in accessing level (#512)

# 0.11.5

* feature: add writer and writerlevel to entry (#372)

# 0.11.4

* bug: fix undefined variable on solaris (#493)

# 0.11.3

* formatter: configure quoting of empty values (#484)
* formatter: configure quoting character (default is `"`) (#484)
* bug: fix not importing io correctly in non-linux environments (#481)

# 0.11.2

* bug: fix windows terminal detection (#476)

# 0.11.1

* bug: fix tty detection with custom out (#471)

# 0.11.0

* performance: Use bufferpool to allocate (#370)
* terminal: terminal detection for app-engine (#343)
* feature: exit handler (#375)

# 0.10.0

* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)

# 0.9.0

* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support

# 0.8.7

* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements

# 0.8.6

* hooks/raven: allow passing an initialized client

# 0.8.5

* logrus/core: revert #208

# 0.8.4

* formatter/text: fix data race (#218)

# 0.8.3

* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely

# 0.8.2

* logrus: fix more Fatal family functions

# 0.8.1

* logrus: fix not exiting on `Fatalf` and `Fatalln`

# 0.8.0

* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors

# 0.7.3

* formatter/\*: allow configuration of timestamp layout

# 0.7.2

* formatter/text: Add configuration option for time format (#158)
21  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/LICENSE  (generated, vendored, Normal file)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Simon Eskildsen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
509  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/README.md  (generated, vendored, Normal file)
@@ -0,0 +1,509 @@
|
||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)
|
||||
|
||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
||||
the standard library logger.
|
||||
|
||||
**Seeing weird case-sensitive problems?** It's in the past been possible to
|
||||
import Logrus as both upper- and lower-case. Due to the Go package environment,
|
||||
this caused issues in the community and we needed a standard. Some environments
|
||||
experienced problems with the upper-case variant, so the lower-case was decided.
|
||||
Everything using `logrus` will need to use the lower-case:
|
||||
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
|
||||
|
||||
To fix Glide, see [these
|
||||
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
|
||||
For an in-depth explanation of the casing issue, see [this
|
||||
comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
|
||||
|
||||
**Are you interested in assisting in maintaining Logrus?** Currently I have a
|
||||
lot of obligations, and I am unable to provide Logrus with the maintainership it
|
||||
needs. If you'd like to help, please reach out to me at `simon at author's
|
||||
username dot com`.
|
||||
|
||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
||||
plain text):
|
||||
|
||||

|
||||
|
||||
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
|
||||
or Splunk:
|
||||
|
||||
```json
|
||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
||||
|
||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
||||
|
||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
||||
|
||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
||||
```
|
||||
|
||||
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
|
||||
attached, the output is compatible with the
|
||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
||||
|
||||
```text
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
||||
exit status 1
|
||||
```
|
||||
|
||||
#### Case-sensitivity
|
||||
|
||||
The organization's name was changed to lower-case--and this will not be changed
|
||||
back. If you are getting import conflicts due to case sensitivity, please use
|
||||
the lower-case import: `github.com/sirupsen/logrus`.
|
||||
|
||||
#### Example
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
```
|
||||
|
||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
||||
replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
|
||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
||||
want:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Log as JSON instead of the default ASCII formatter.
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
|
||||
// Output to stdout instead of the default stderr
|
||||
// Can be any io.Writer, see below for File example
|
||||
log.SetOutput(os.Stdout)
|
||||
|
||||
// Only log the warning severity or above.
|
||||
log.SetLevel(log.WarnLevel)
|
||||
}
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Fatal("The ice breaks!")
|
||||
|
||||
// A common pattern is to re-use fields between logging statements by re-using
|
||||
// the logrus.Entry returned from WithFields()
|
||||
contextLogger := log.WithFields(log.Fields{
|
||||
"common": "this is a common field",
|
||||
"other": "I also should be logged always",
|
||||
})
|
||||
|
||||
contextLogger.Info("I'll be logged with common and other field")
|
||||
contextLogger.Info("Me too")
|
||||
}
|
||||
```
|
||||
|
||||
For more advanced usage such as logging to multiple locations from the same
|
||||
application, you can also create an instance of the `logrus` Logger:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Create a new instance of the logger. You can have any number of instances.
|
||||
var log = logrus.New()
|
||||
|
||||
func main() {
|
||||
// The API for setting attributes is a little different than the package level
|
||||
// exported logger. See Godoc.
|
||||
log.Out = os.Stdout
|
||||
|
||||
// You could set this to any `io.Writer` such as a file
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
}
|
||||
```
|
||||
|
||||
#### Fields
|
||||
|
||||
Logrus encourages careful, structured logging through logging fields instead of
|
||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
||||
to send event %s to topic %s with key %d")`, you should log the much more
|
||||
discoverable:
|
||||
|
||||
```go
|
||||
log.WithFields(log.Fields{
|
||||
"event": event,
|
||||
"topic": topic,
|
||||
"key": key,
|
||||
}).Fatal("Failed to send event")
|
||||
```
|
||||
|
||||
We've found this API forces you to think about logging in a way that produces
|
||||
much more useful logging messages. We've been in countless situations where just
|
||||
a single added field to a log statement that was already there would've saved us
|
||||
hours. The `WithFields` call is optional.
|
||||
|
||||
In general, with Logrus using any of the `printf`-family functions should be
|
||||
seen as a hint you should add a field, however, you can still use the
|
||||
`printf`-family functions with Logrus.
|
||||
|
||||
#### Default Fields
|
||||
|
||||
Often it's helpful to have fields _always_ attached to log statements in an
|
||||
application or parts of one. For example, you may want to always log the
|
||||
`request_id` and `user_ip` in the context of a request. Instead of writing
|
||||
`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
|
||||
every line, you can create a `logrus.Entry` to pass around instead:
|
||||
|
||||
```go
|
||||
requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
|
||||
requestLogger.Info("something happened on that request") // will log request_id and user_ip
|
||||
requestLogger.Warn("something not great happened")
|
||||
```
|
||||
|
||||
#### Hooks
|
||||
|
||||
You can add hooks for logging levels. For example to send errors to an exception
|
||||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
||||
multiple places simultaneously, e.g. syslog.
|
||||
|
||||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
||||
`init`:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
||||
logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
"log/syslog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
||||
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
||||
|
||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
if err != nil {
|
||||
log.Error("Unable to connect to local syslog daemon")
|
||||
} else {
|
||||
log.AddHook(hook)
|
||||
}
|
||||
}
|
||||
```
|
||||
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
|
||||
|
||||
| Hook | Description |
|
||||
| ----- | ----------- |
|
||||
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
||||
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
||||
| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
|
||||
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
|
||||
| [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage|
|
||||
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
||||
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
||||
| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
|
||||
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
|
||||
| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
|
||||
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
||||
| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
|
||||
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
||||
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
||||
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
|
||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
||||
| [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka |
|
||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
||||
| [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) |
|
||||
| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||
| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
|
||||
| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
|
||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
||||
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
|
||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
||||
| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) |
|
||||
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
||||
| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
|
||||
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
||||
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
||||
| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
|
||||
| [Promrus](https://github.com/weaveworks/promrus) | Expose number of log messages as [Prometheus](https://prometheus.io/) metrics |
|
||||
| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
|
||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
||||
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
|
||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
||||
| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
|
||||
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
||||
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
|
||||
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
|
||||
| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
||||
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
|
||||
| [Telegram](https://github.com/rossmcdonald/telegram_hook) | Hook for logging errors to [Telegram](https://telegram.org/) |
|
||||
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
|
||||
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
|
||||
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
|
||||
| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
|
||||
|
||||
#### Level logging
|
||||
|
||||
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
||||
|
||||
```go
|
||||
log.Debug("Useful debugging information.")
|
||||
log.Info("Something noteworthy happened!")
|
||||
log.Warn("You should probably take a look at this.")
|
||||
log.Error("Something failed but I'm not quitting.")
|
||||
// Calls os.Exit(1) after logging
|
||||
log.Fatal("Bye.")
|
||||
// Calls panic() after logging
|
||||
log.Panic("I'm bailing.")
|
||||
```
|
||||
|
||||
You can set the logging level on a `Logger`, then it will only log entries with
|
||||
that severity or anything above it:
|
||||
|
||||
```go
|
||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
||||
log.SetLevel(log.InfoLevel)
|
||||
```
|
||||
|
||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
||||
environment if your application has that.
|
||||
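For example, a minimal sketch of such a switch; the `DEBUG` environment variable here is only illustrative, not something Logrus defines:

```go
package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Turn on verbose logging only when the (hypothetical) DEBUG switch is set.
	if os.Getenv("DEBUG") != "" {
		log.SetLevel(log.DebugLevel)
	}
	log.Debug("only emitted when the debug switch is on")
}
```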
|
||||
#### Entries
|
||||
|
||||
Besides the fields added with `WithField` or `WithFields` some fields are
|
||||
automatically added to all logging events:
|
||||
|
||||
1. `time`. The timestamp when the entry was created.
|
||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
||||
the `AddFields` call. E.g. `Failed to send event.`
|
||||
3. `level`. The logging level. E.g. `info`.
|
||||
|
||||
#### Environments
|
||||
|
||||
Logrus has no notion of environment.
|
||||
|
||||
If you wish for hooks and formatters to only be used in specific environments,
|
||||
you should handle that yourself. For example, if your application has a global
|
||||
variable `Environment`, which is a string representation of the environment you
|
||||
could do:
|
||||
|
||||
```go
|
||||
import (
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
init() {
|
||||
// do something here to set environment depending on an environment variable
|
||||
// or command-line flag
|
||||
if Environment == "production" {
|
||||
log.SetFormatter(&log.JSONFormatter{})
|
||||
} else {
|
||||
// The TextFormatter is default, you don't actually have to do this.
|
||||
log.SetFormatter(&log.TextFormatter{})
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
This configuration is how `logrus` was intended to be used, but JSON in
|
||||
production is mostly only useful if you do log aggregation with tools like
|
||||
Splunk or Logstash.
|
||||
|
||||
#### Formatters
|
||||
|
||||
The built-in logging formatters are:
|
||||
|
||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
||||
without colors.
|
||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
||||
field to `true`. To force no colored output even if there is a TTY set the
|
||||
`DisableColors` field to `true`. For Windows, see
|
||||
[github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
|
||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
||||
* All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
|
||||
|
||||
Third party logging formatters:
|
||||
|
||||
* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
|
||||
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
|
||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
||||
|
||||
You can define your formatter by implementing the `Formatter` interface,
|
||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
||||
default ones (see Entries section above):
|
||||
|
||||
```go
|
||||
type MyJSONFormatter struct {
|
||||
}
|
||||
|
||||
log.SetFormatter(new(MyJSONFormatter))
|
||||
|
||||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
// Note this doesn't include Time, Level and Message which are available on
|
||||
// the Entry. Consult `godoc` on information about those fields or read the
|
||||
// source of the official loggers.
|
||||
serialized, err := json.Marshal(entry.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
```
|
||||
|
||||
#### Logger as an `io.Writer`
|
||||
|
||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
||||
|
||||
```go
|
||||
w := logger.Writer()
|
||||
defer w.Close()
|
||||
|
||||
srv := http.Server{
|
||||
// create a stdlib log.Logger that writes to
|
||||
// logrus.Logger.
|
||||
ErrorLog: log.New(w, "", 0),
|
||||
}
|
||||
```
|
||||
|
||||
Each line written to that writer will be printed the usual way, using formatters
|
||||
and hooks. The level for those entries is `info`.
|
||||
|
||||
This means that we can override the standard library logger easily:
|
||||
|
||||
```go
|
||||
logger := logrus.New()
|
||||
logger.Formatter = &logrus.JSONFormatter{}
|
||||
|
||||
// Use logrus for standard log output
|
||||
// Note that `log` here references stdlib's log
|
||||
// Not logrus imported under the name `log`.
|
||||
log.SetOutput(logger.Writer())
|
||||
```
|
||||
|
||||
#### Rotation
|
||||
|
||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
||||
external program (like `logrotate(8)`) that can compress and delete old log
|
||||
entries. It should not be a feature of the application-level logger.
|
||||
|
||||
#### Tools
|
||||
|
||||
| Tool | Description |
|
||||
| ---- | ----------- |
|
||||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can set a logger's level, hooks, and formatter from a config file, so the logger is generated with a different config in each environment.|
|
||||
|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
|
||||
|
||||
#### Testing
|
||||
|
||||
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
|
||||
|
||||
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
|
||||
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
|
||||
|
||||
```go
|
||||
import(
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSomething(t*testing.T){
|
||||
logger, hook := test.NewNullLogger()
|
||||
logger.Error("Helloerror")
|
||||
|
||||
assert.Equal(t, 1, len(hook.Entries))
|
||||
assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal(t, "Helloerror", hook.LastEntry().Message)
|
||||
|
||||
hook.Reset()
|
||||
assert.Nil(t, hook.LastEntry())
|
||||
}
|
||||
```
|
||||
|
||||
#### Fatal handlers
|
||||
|
||||
Logrus can register one or more functions that will be called when any `fatal`
|
||||
level message is logged. The registered handlers will be executed before
|
||||
logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
|
||||
to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
|
||||
|
||||
```
|
||||
...
|
||||
handler := func() {
|
||||
// gracefully shutdown something...
|
||||
}
|
||||
logrus.RegisterExitHandler(handler)
|
||||
...
|
||||
```
|
||||
|
||||
#### Thread safety
|
||||
|
||||
By default, the Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
|
||||
If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
|
||||
|
||||
Situations when locking is not needed include:
|
||||
|
||||
* You have no hooks registered, or hooks calling is already thread-safe.
|
||||
|
||||
* Writing to logger.Out is already thread-safe, for example:
|
||||
|
||||
1) logger.Out is protected by locks.
|
||||
|
||||
2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
|
||||
|
||||
(Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
|
||||
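As a rough illustration of the conditions above (assuming no hooks are registered and the output writer is already safe for concurrent use), a minimal sketch:

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	logger.Out = os.Stderr // assumed here to be safe for concurrent writes
	logger.SetNoLock()     // only valid because no hooks are registered on this logger
	logger.Info("logging without the internal mutex")
}
```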
64  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/alt_exit.go  (generated, vendored, Normal file)
@@ -0,0 +1,64 @@
|
||||
package logrus
|
||||
|
||||
// The following code was sourced and modified from the
|
||||
// https://github.com/tebeka/atexit package governed by the following license:
|
||||
//
|
||||
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
// subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
var handlers = []func(){}
|
||||
|
||||
func runHandler(handler func()) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
|
||||
}
|
||||
}()
|
||||
|
||||
handler()
|
||||
}
|
||||
|
||||
func runHandlers() {
|
||||
for _, handler := range handlers {
|
||||
runHandler(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
|
||||
func Exit(code int) {
|
||||
runHandlers()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
|
||||
// all handlers. The handlers will also be invoked when any Fatal log entry is
|
||||
// made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to gracefully shutdown. An example usecase could be
|
||||
// closing database connections, or sending a alert that the application is
|
||||
// closing.
|
||||
func RegisterExitHandler(handler func()) {
|
||||
handlers = append(handlers, handler)
|
||||
}
|
||||
83  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/alt_exit_test.go  (generated, vendored, Normal file)
@@ -0,0 +1,83 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
current := len(handlers)
|
||||
RegisterExitHandler(func() {})
|
||||
if len(handlers) != current+1 {
|
||||
t.Fatalf("expected %d handlers, got %d", current+1, len(handlers))
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandler(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir("", "test_handler")
|
||||
if err != nil {
|
||||
log.Fatalf("can't create temp dir. %q", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
gofile := filepath.Join(tempDir, "gofile.go")
|
||||
if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil {
|
||||
t.Fatalf("can't create go file. %q", err)
|
||||
}
|
||||
|
||||
outfile := filepath.Join(tempDir, "outfile.out")
|
||||
arg := time.Now().UTC().String()
|
||||
err = exec.Command("go", "run", gofile, outfile, arg).Run()
|
||||
if err == nil {
|
||||
t.Fatalf("completed normally, should have failed")
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadFile(outfile)
|
||||
if err != nil {
|
||||
t.Fatalf("can't read output file %s. %q", outfile, err)
|
||||
}
|
||||
|
||||
if string(data) != arg {
|
||||
t.Fatalf("bad data. Expected %q, got %q", data, arg)
|
||||
}
|
||||
}
|
||||
|
||||
var testprog = []byte(`
|
||||
// Test program for atexit, gets output file and data as arguments and writes
|
||||
// data to output file in atexit handler.
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
var outfile = ""
|
||||
var data = ""
|
||||
|
||||
func handler() {
|
||||
ioutil.WriteFile(outfile, []byte(data), 0666)
|
||||
}
|
||||
|
||||
func badHandler() {
|
||||
n := 0
|
||||
fmt.Println(1/n)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
outfile = flag.Arg(0)
|
||||
data = flag.Arg(1)
|
||||
|
||||
logrus.RegisterExitHandler(handler)
|
||||
logrus.RegisterExitHandler(badHandler)
|
||||
logrus.Fatal("Bye bye")
|
||||
}
|
||||
`)
|
||||
14  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/appveyor.yml  (generated, vendored, Normal file)
@@ -0,0 +1,14 @@
version: "{build}"
platform: x64
clone_folder: c:\gopath\src\github.com\sirupsen\logrus
environment:
  GOPATH: c:\gopath
branches:
  only:
    - master
install:
  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
  - go version
build_script:
  - go get -t
  - go test
26  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/doc.go  (generated, vendored, Normal file)
@@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.


The simplest way to use Logrus is simply the package-level exported logger:

  package main

  import (
    log "github.com/sirupsen/logrus"
  )

  func main() {
    log.WithFields(log.Fields{
      "animal": "walrus",
      "number": 1,
      "size":   10,
    }).Info("A walrus appears")
  }

Output:
  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10

For a full guide visit https://github.com/sirupsen/logrus
*/
package logrus
279  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/entry.go  (generated, vendored, Normal file)
@@ -0,0 +1,279 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var bufferPool *sync.Pool
|
||||
|
||||
func init() {
|
||||
bufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Defines the key when adding errors using WithError.
|
||||
var ErrorKey = "error"
|
||||
|
||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||
// passed around as much as you wish to avoid field duplication.
|
||||
type Entry struct {
|
||||
Logger *Logger
|
||||
|
||||
// Contains all the fields set by the user.
|
||||
Data Fields
|
||||
|
||||
// Time at which the log entry was created
|
||||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
|
||||
Level Level
|
||||
|
||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
Message string
|
||||
|
||||
// When formatter is called in entry.log(), an Buffer may be set to entry
|
||||
Buffer *bytes.Buffer
|
||||
}
|
||||
|
||||
func NewEntry(logger *Logger) *Entry {
|
||||
return &Entry{
|
||||
Logger: logger,
|
||||
// Default is three fields, give a little extra room
|
||||
Data: make(Fields, 5),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the string representation from the reader and ultimately the
|
||||
// formatter.
|
||||
func (entry *Entry) String() (string, error) {
|
||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
str := string(serialized)
|
||||
return str, nil
|
||||
}
|
||||
|
||||
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
||||
func (entry *Entry) WithError(err error) *Entry {
|
||||
return entry.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
}
|
||||
|
||||
// Add a map of fields to the Entry.
|
||||
func (entry *Entry) WithFields(fields Fields) *Entry {
|
||||
data := make(Fields, len(entry.Data)+len(fields))
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
for k, v := range fields {
|
||||
data[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data}
|
||||
}
|
||||
|
||||
// This function is not declared with a pointer value because otherwise
|
||||
// race conditions will occur when using multiple goroutines
|
||||
func (entry Entry) log(level Level, msg string) {
|
||||
var buffer *bytes.Buffer
|
||||
entry.Time = time.Now()
|
||||
entry.Level = level
|
||||
entry.Message = msg
|
||||
|
||||
entry.Logger.mu.Lock()
|
||||
err := entry.Logger.Hooks.Fire(level, &entry)
|
||||
entry.Logger.mu.Unlock()
|
||||
if err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
buffer = bufferPool.Get().(*bytes.Buffer)
|
||||
buffer.Reset()
|
||||
defer bufferPool.Put(buffer)
|
||||
entry.Buffer = buffer
|
||||
serialized, err := entry.Logger.Formatter.Format(&entry)
|
||||
entry.Buffer = nil
|
||||
if err != nil {
|
||||
entry.Logger.mu.Lock()
|
||||
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
||||
entry.Logger.mu.Unlock()
|
||||
} else {
|
||||
entry.Logger.mu.Lock()
|
||||
_, err = entry.Logger.Out.Write(serialized)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
||||
}
|
||||
entry.Logger.mu.Unlock()
|
||||
}
|
||||
|
||||
// To avoid Entry#log() returning a value that only would make sense for
|
||||
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||
// directly here.
|
||||
if level <= PanicLevel {
|
||||
panic(&entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
entry.Info(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
entry.Warn(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
entry.Infof(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
entry.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.level() >= DebugLevel {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.level() >= InfoLevel {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
entry.Infoln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.level() >= WarnLevel {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
entry.Warnln(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.level() >= ErrorLevel {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.level() >= FatalLevel {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.level() >= PanicLevel {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
// fmt.Sprintln where spaces are always added between operands, regardless of
|
||||
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||
// string allocation, we do the simplest thing.
|
||||
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
||||
msg := fmt.Sprintln(args...)
|
||||
return msg[:len(msg)-1]
|
||||
}
|
||||
77  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/entry_test.go  (generated, vendored, Normal file)
@@ -0,0 +1,77 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEntryWithError(t *testing.T) {
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
defer func() {
|
||||
ErrorKey = "error"
|
||||
}()
|
||||
|
||||
err := fmt.Errorf("kaboom at layer %d", 4711)
|
||||
|
||||
assert.Equal(err, WithError(err).Data["error"])
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
|
||||
assert.Equal(err, entry.WithError(err).Data["error"])
|
||||
|
||||
ErrorKey = "err"
|
||||
|
||||
assert.Equal(err, entry.WithError(err).Data["err"])
|
||||
|
||||
}
|
||||
|
||||
func TestEntryPanicln(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom time")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicln("kaboom")
|
||||
}
|
||||
|
||||
func TestEntryPanicf(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom again")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom true", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
|
||||
}
|
||||
69  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/example_basic_test.go  (generated, vendored, Normal file)
@@ -0,0 +1,69 @@
|
||||
package logrus_test
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example_basic() {
|
||||
var log = logrus.New()
|
||||
log.Formatter = new(logrus.JSONFormatter)
|
||||
log.Formatter = new(logrus.TextFormatter) //default
|
||||
log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
|
||||
log.Level = logrus.DebugLevel
|
||||
log.Out = os.Stdout
|
||||
|
||||
// file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
|
||||
// if err == nil {
|
||||
// log.Out = file
|
||||
// } else {
|
||||
// log.Info("Failed to log to file, using default stderr")
|
||||
// }
|
||||
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
entry := err.(*logrus.Entry)
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"err_animal": entry.Data["animal"],
|
||||
"err_size": entry.Data["size"],
|
||||
"err_level": entry.Level,
|
||||
"err_message": entry.Message,
|
||||
"number": 100,
|
||||
}).Error("The ice breaks!") // or use Fatal() to force the process to exit with a nonzero code
|
||||
}
|
||||
}()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 8,
|
||||
}).Debug("Started observing beach")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"temperature": -4,
|
||||
}).Debug("Temperature changes")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "orca",
|
||||
"size": 9009,
|
||||
}).Panic("It's over 9000!")
|
||||
|
||||
// Output:
|
||||
// level=debug msg="Started observing beach" animal=walrus number=8
|
||||
// level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
// level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
// level=debug msg="Temperature changes" temperature=-4
|
||||
// level=panic msg="It's over 9000!" animal=orca size=9009
|
||||
// level=error msg="The ice breaks!" err_animal=orca err_level=panic err_message="It's over 9000!" err_size=9009 number=100 omg=true
|
||||
}
|
||||
35  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/example_hook_test.go  (generated, vendored, Normal file)
@@ -0,0 +1,35 @@
|
||||
package logrus_test
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example_hook() {
|
||||
var log = logrus.New()
|
||||
log.Formatter = new(logrus.TextFormatter) // default
|
||||
log.Formatter.(*logrus.TextFormatter).DisableTimestamp = true // remove timestamp from test output
|
||||
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development"))
|
||||
log.Out = os.Stdout
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"animal": "walrus",
|
||||
"size": 10,
|
||||
}).Info("A group of walrus emerges from the ocean")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 122,
|
||||
}).Warn("The group's number increased tremendously!")
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"omg": true,
|
||||
"number": 100,
|
||||
}).Error("The ice breaks!")
|
||||
|
||||
// Output:
|
||||
// level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
||||
// level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
||||
// level=error msg="The ice breaks!" number=100 omg=true
|
||||
}
|
||||
193  vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/exported.go  (generated, vendored, Normal file)
@@ -0,0 +1,193 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// std is the name of the standard logger in stdlib `log`
|
||||
std = New()
|
||||
)
|
||||
|
||||
func StandardLogger() *Logger {
|
||||
return std
|
||||
}
|
||||
|
||||
// SetOutput sets the standard logger output.
|
||||
func SetOutput(out io.Writer) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Out = out
|
||||
}
|
||||
|
||||
// SetFormatter sets the standard logger formatter.
|
||||
func SetFormatter(formatter Formatter) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Formatter = formatter
|
||||
}
|
||||
|
||||
// SetLevel sets the standard logger level.
|
||||
func SetLevel(level Level) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.SetLevel(level)
|
||||
}
|
||||
|
||||
// GetLevel returns the standard logger level.
|
||||
func GetLevel() Level {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
return std.level()
|
||||
}
|
||||
|
||||
// AddHook adds a hook to the standard logger hooks.
|
||||
func AddHook(hook Hook) {
|
||||
std.mu.Lock()
|
||||
defer std.mu.Unlock()
|
||||
std.Hooks.Add(hook)
|
||||
}
|
||||
|
||||
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
|
||||
func WithError(err error) *Entry {
|
||||
return std.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithField(key string, value interface{}) *Entry {
|
||||
return std.WithField(key, value)
|
||||
}
|
||||
|
||||
// WithFields creates an entry from the standard logger and adds multiple
|
||||
// fields to it. This is simply a helper for `WithField`, invoking it
|
||||
// once for each field.
|
||||
//
|
||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
// or Panic on the Entry it returns.
|
||||
func WithFields(fields Fields) *Entry {
|
||||
return std.WithFields(fields)
|
||||
}
|
||||
|
||||
// Debug logs a message at level Debug on the standard logger.
|
||||
func Debug(args ...interface{}) {
|
||||
std.Debug(args...)
|
||||
}
|
||||
|
||||
// Print logs a message at level Info on the standard logger.
|
||||
func Print(args ...interface{}) {
|
||||
std.Print(args...)
|
||||
}
|
||||
|
||||
// Info logs a message at level Info on the standard logger.
|
||||
func Info(args ...interface{}) {
|
||||
std.Info(args...)
|
||||
}
|
||||
|
||||
// Warn logs a message at level Warn on the standard logger.
|
||||
func Warn(args ...interface{}) {
|
||||
std.Warn(args...)
|
||||
}
|
||||
|
||||
// Warning logs a message at level Warn on the standard logger.
|
||||
func Warning(args ...interface{}) {
|
||||
std.Warning(args...)
|
||||
}
|
||||
|
||||
// Error logs a message at level Error on the standard logger.
|
||||
func Error(args ...interface{}) {
|
||||
std.Error(args...)
|
||||
}
|
||||
|
||||
// Panic logs a message at level Panic on the standard logger.
|
||||
func Panic(args ...interface{}) {
|
||||
std.Panic(args...)
|
||||
}
|
||||
|
||||
// Fatal logs a message at level Fatal on the standard logger.
|
||||
func Fatal(args ...interface{}) {
|
||||
std.Fatal(args...)
|
||||
}
|
||||
|
||||
// Debugf logs a message at level Debug on the standard logger.
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
std.Debugf(format, args...)
|
||||
}
|
||||
|
||||
// Printf logs a message at level Info on the standard logger.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
std.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Infof logs a message at level Info on the standard logger.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
std.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Warnf logs a message at level Warn on the standard logger.
|
||||
func Warnf(format string, args ...interface{}) {
|
||||
std.Warnf(format, args...)
|
||||
}
|
||||
|
||||
// Warningf logs a message at level Warn on the standard logger.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
std.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Errorf logs a message at level Error on the standard logger.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
std.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Panicf logs a message at level Panic on the standard logger.
|
||||
func Panicf(format string, args ...interface{}) {
|
||||
std.Panicf(format, args...)
|
||||
}
|
||||
|
||||
// Fatalf logs a message at level Fatal on the standard logger.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
std.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// Debugln logs a message at level Debug on the standard logger.
|
||||
func Debugln(args ...interface{}) {
|
||||
std.Debugln(args...)
|
||||
}
|
||||
|
||||
// Println logs a message at level Info on the standard logger.
|
||||
func Println(args ...interface{}) {
|
||||
std.Println(args...)
|
||||
}
|
||||
|
||||
// Infoln logs a message at level Info on the standard logger.
|
||||
func Infoln(args ...interface{}) {
|
||||
std.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warnln logs a message at level Warn on the standard logger.
|
||||
func Warnln(args ...interface{}) {
|
||||
std.Warnln(args...)
|
||||
}
|
||||
|
||||
// Warningln logs a message at level Warn on the standard logger.
|
||||
func Warningln(args ...interface{}) {
|
||||
std.Warningln(args...)
|
||||
}
|
||||
|
||||
// Errorln logs a message at level Error on the standard logger.
|
||||
func Errorln(args ...interface{}) {
|
||||
std.Errorln(args...)
|
||||
}
|
||||
|
||||
// Panicln logs a message at level Panic on the standard logger.
|
||||
func Panicln(args ...interface{}) {
|
||||
std.Panicln(args...)
|
||||
}
|
||||
|
||||
// Fatalln logs a message at level Fatal on the standard logger.
|
||||
func Fatalln(args ...interface{}) {
|
||||
std.Fatalln(args...)
|
||||
}
|
||||
45
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/formatter.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
package logrus
|
||||
|
||||
import "time"
|
||||
|
||||
const defaultTimestampFormat = time.RFC3339
|
||||
|
||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
// `Entry`. It exposes all the fields, including the default ones:
|
||||
//
|
||||
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
||||
// * `entry.Data["time"]`. The timestamp.
|
||||
// * `entry.Data["level"]`. The level the entry was logged at.
|
||||
//
|
||||
// Any additional fields added with `WithField` or `WithFields` are also in
|
||||
// `entry.Data`. Format is expected to return an array of bytes which are then
|
||||
// logged to `logger.Out`.
|
||||
type Formatter interface {
|
||||
Format(*Entry) ([]byte, error)
|
||||
}
|
||||
|
||||
// This is to avoid silently overwriting the `time`, `msg` and `level` fields
// when dumping user-supplied fields. Without this code, doing:
|
||||
//
|
||||
// logrus.WithField("level", 1).Info("hello")
|
||||
//
|
||||
// would just silently drop the user-provided level. With this code it is
// instead logged as:
|
||||
//
|
||||
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||
//
|
||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||
// avoid code duplication between the two default formatters.
|
||||
func prefixFieldClashes(data Fields) {
|
||||
if t, ok := data["time"]; ok {
|
||||
data["fields.time"] = t
|
||||
}
|
||||
|
||||
if m, ok := data["msg"]; ok {
|
||||
data["fields.msg"] = m
|
||||
}
|
||||
|
||||
if l, ok := data["level"]; ok {
|
||||
data["fields.level"] = l
|
||||
}
|
||||
}
|
||||
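The `Formatter` interface above is the extension point for custom output formats. As a rough, illustrative sketch (not part of this change), a minimal custom formatter might look like the following; the `plainFormatter` name and the output layout are assumptions for the example.

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// plainFormatter is a hypothetical Formatter that renders entries as
// "LEVEL message key=value ..." lines.
type plainFormatter struct{}

func (f *plainFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	// Start with the level and message.
	line := fmt.Sprintf("%s %s", entry.Level.String(), entry.Message)
	// Append any user-supplied fields (map order is not deterministic).
	for k, v := range entry.Data {
		line += fmt.Sprintf(" %s=%v", k, v)
	}
	return []byte(line + "\n"), nil
}

func main() {
	log := logrus.New()
	log.Formatter = &plainFormatter{}
	log.WithField("animal", "walrus").Info("hello")
}
```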
101
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/formatter_bench_test.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// smallFields is a small size data set for benchmarking
|
||||
var smallFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
}
|
||||
|
||||
// largeFields is a large size data set for benchmarking
|
||||
var largeFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
"five": "six",
|
||||
"seven": "eight",
|
||||
"nine": "ten",
|
||||
"eleven": "twelve",
|
||||
"thirteen": "fourteen",
|
||||
"fifteen": "sixteen",
|
||||
"seventeen": "eighteen",
|
||||
"nineteen": "twenty",
|
||||
"a": "b",
|
||||
"c": "d",
|
||||
"e": "f",
|
||||
"g": "h",
|
||||
"i": "j",
|
||||
"k": "l",
|
||||
"m": "n",
|
||||
"o": "p",
|
||||
"q": "r",
|
||||
"s": "t",
|
||||
"u": "v",
|
||||
"w": "x",
|
||||
"y": "z",
|
||||
"this": "will",
|
||||
"make": "thirty",
|
||||
"entries": "yeah",
|
||||
}
|
||||
|
||||
var errorFields = Fields{
|
||||
"foo": fmt.Errorf("bar"),
|
||||
"baz": fmt.Errorf("qux"),
|
||||
}
|
||||
|
||||
func BenchmarkErrorTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, largeFields)
|
||||
}
|
||||
|
||||
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
|
||||
logger := New()
|
||||
|
||||
entry := &Entry{
|
||||
Time: time.Time{},
|
||||
Level: InfoLevel,
|
||||
Message: "message",
|
||||
Data: fields,
|
||||
Logger: logger,
|
||||
}
|
||||
var d []byte
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
d, err = formatter.Format(entry)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(d)))
|
||||
}
|
||||
}
|
||||
144
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/hook_test.go
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *TestHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *TestHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookFires(t *testing.T) {
|
||||
hook := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ModifyHook struct {
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Fire(entry *Entry) error {
|
||||
entry.Data["wow"] = "whale"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookCanModifyEntry(t *testing.T) {
|
||||
hook := new(ModifyHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCanFireMultipleHooks(t *testing.T) {
|
||||
hook1 := new(ModifyHook)
|
||||
hook2 := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook1)
|
||||
log.Hooks.Add(hook2)
|
||||
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
assert.Equal(t, hook2.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ErrorHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Levels() []Level {
|
||||
return []Level{
|
||||
ErrorLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
})
|
||||
}
|
||||
|
||||
func TestErrorHookShouldFireOnError(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Error("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddHookRace(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
hook := new(ErrorHook)
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
log.AddHook(hook)
|
||||
}()
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
log.Error("test")
|
||||
}()
|
||||
wg.Wait()
|
||||
}, func(fields Fields) {
|
||||
// the line may have been logged
|
||||
// before the hook was added, so we can't
|
||||
// actually assert on the hook
|
||||
})
|
||||
}
|
||||
34
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/hooks.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
package logrus
|
||||
|
||||
// A hook to be fired when logging at one of the levels returned from
// `Levels()` on your implementation of the interface. Note that hooks are not
// fired in a goroutine or handed to a worker channel; if your hook may block
// and you don't want logging calls at the levels returned from `Levels()` to
// block, you must handle that concurrency yourself.
|
||||
type Hook interface {
|
||||
Levels() []Level
|
||||
Fire(*Entry) error
|
||||
}
|
||||
|
||||
// Internal type for storing the hooks on a logger instance.
|
||||
type LevelHooks map[Level][]Hook
|
||||
|
||||
// Add a hook to an instance of logger. This is called with
|
||||
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
||||
func (hooks LevelHooks) Add(hook Hook) {
|
||||
for _, level := range hook.Levels() {
|
||||
hooks[level] = append(hooks[level], hook)
|
||||
}
|
||||
}
|
||||
|
||||
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
||||
// appropriate hooks for a log entry.
|
||||
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
|
||||
for _, hook := range hooks[level] {
|
||||
if err := hook.Fire(entry); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
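To make the `Hook` interface above concrete, here is a rough sketch (not part of this change) of a hook that counts error-level entries; the `errorCounter` type and its behaviour are assumptions for illustration.

```go
package main

import "github.com/sirupsen/logrus"

// errorCounter is a hypothetical hook that counts Error/Fatal/Panic entries.
type errorCounter struct {
	count int
}

// Levels restricts the hook to the most severe levels.
func (h *errorCounter) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.FatalLevel, logrus.PanicLevel}
}

// Fire is called by the logger for every entry at a matching level.
func (h *errorCounter) Fire(entry *logrus.Entry) error {
	h.count++
	return nil
}

func main() {
	log := logrus.New()
	log.Hooks.Add(&errorCounter{}) // or log.AddHook(&errorCounter{})
	log.Error("something broke")
}
```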
39
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/hooks/syslog/README.md
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
import (
|
||||
"log/syslog"
|
||||
"github.com/sirupsen/logrus"
|
||||
lSyslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log := logrus.New()
|
||||
hook, err := lSyslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
|
||||
if err == nil {
|
||||
log.Hooks.Add(hook)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If you want to connect to the local syslog daemon (for example "/dev/log", "/var/run/syslog" or "/var/run/log"), just pass empty strings as the first two parameters of `NewSyslogHook`. It should look like the following.
|
||||
|
||||
```go
|
||||
import (
|
||||
"log/syslog"
|
||||
"github.com/sirupsen/logrus"
|
||||
lSyslog "github.com/sirupsen/logrus/hooks/syslog"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log := logrus.New()
|
||||
hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
|
||||
|
||||
if err == nil {
|
||||
log.Hooks.Add(hook)
|
||||
}
|
||||
}
|
||||
```
|
||||
55
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog.go
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
// +build !windows,!nacl,!plan9
|
||||
|
||||
package syslog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/syslog"
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SyslogHook to send logs via syslog.
|
||||
type SyslogHook struct {
|
||||
Writer *syslog.Writer
|
||||
SyslogNetwork string
|
||||
SyslogRaddr string
|
||||
}
|
||||
|
||||
// Creates a hook to be added to an instance of logger. This is called with
|
||||
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
|
||||
// `if err == nil { log.Hooks.Add(hook) }`
|
||||
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) {
|
||||
w, err := syslog.Dial(network, raddr, priority, tag)
|
||||
return &SyslogHook{w, network, raddr}, err
|
||||
}
|
||||
|
||||
func (hook *SyslogHook) Fire(entry *logrus.Entry) error {
|
||||
line, err := entry.String()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
switch entry.Level {
|
||||
case logrus.PanicLevel:
|
||||
return hook.Writer.Crit(line)
|
||||
case logrus.FatalLevel:
|
||||
return hook.Writer.Crit(line)
|
||||
case logrus.ErrorLevel:
|
||||
return hook.Writer.Err(line)
|
||||
case logrus.WarnLevel:
|
||||
return hook.Writer.Warning(line)
|
||||
case logrus.InfoLevel:
|
||||
return hook.Writer.Info(line)
|
||||
case logrus.DebugLevel:
|
||||
return hook.Writer.Debug(line)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (hook *SyslogHook) Levels() []logrus.Level {
|
||||
return logrus.AllLevels
|
||||
}
|
||||
27
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/hooks/syslog/syslog_test.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
package syslog
|
||||
|
||||
import (
|
||||
"log/syslog"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestLocalhostAddAndPrint(t *testing.T) {
|
||||
log := logrus.New()
|
||||
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("Unable to connect to local syslog.")
|
||||
}
|
||||
|
||||
log.Hooks.Add(hook)
|
||||
|
||||
for _, level := range hook.Levels() {
|
||||
if len(log.Hooks[level]) != 1 {
|
||||
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level]))
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("Congratulations!")
|
||||
}
|
||||
95
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/hooks/test/test.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
// The Test package is used for testing logrus. It is here for backwards
|
||||
// compatibility from when logrus' organization was upper-case. Please use
|
||||
// lower-case logrus and the `null` package instead of this one.
|
||||
package test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Hook is a hook designed for dealing with logs in test scenarios.
|
||||
type Hook struct {
|
||||
// Entries is an array of all entries that have been received by this hook.
|
||||
// For safe access, use the AllEntries() method, rather than reading this
|
||||
// value directly.
|
||||
Entries []*logrus.Entry
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// NewGlobal installs a test hook for the global logger.
|
||||
func NewGlobal() *Hook {
|
||||
|
||||
hook := new(Hook)
|
||||
logrus.AddHook(hook)
|
||||
|
||||
return hook
|
||||
|
||||
}
|
||||
|
||||
// NewLocal installs a test hook for a given local logger.
|
||||
func NewLocal(logger *logrus.Logger) *Hook {
|
||||
|
||||
hook := new(Hook)
|
||||
logger.Hooks.Add(hook)
|
||||
|
||||
return hook
|
||||
|
||||
}
|
||||
|
||||
// NewNullLogger creates a discarding logger and installs the test hook.
|
||||
func NewNullLogger() (*logrus.Logger, *Hook) {
|
||||
|
||||
logger := logrus.New()
|
||||
logger.Out = ioutil.Discard
|
||||
|
||||
return logger, NewLocal(logger)
|
||||
|
||||
}
|
||||
|
||||
func (t *Hook) Fire(e *logrus.Entry) error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.Entries = append(t.Entries, e)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Hook) Levels() []logrus.Level {
|
||||
return logrus.AllLevels
|
||||
}
|
||||
|
||||
// LastEntry returns the last entry that was logged or nil.
|
||||
func (t *Hook) LastEntry() *logrus.Entry {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
i := len(t.Entries) - 1
|
||||
if i < 0 {
|
||||
return nil
|
||||
}
|
||||
// Make a copy, for safety
|
||||
e := *t.Entries[i]
|
||||
return &e
|
||||
}
|
||||
|
||||
// AllEntries returns all entries that were logged.
|
||||
func (t *Hook) AllEntries() []*logrus.Entry {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
// Make a copy so the returned value won't race with future log requests
|
||||
entries := make([]*logrus.Entry, len(t.Entries))
|
||||
for i, entry := range t.Entries {
|
||||
// Make a copy, for safety
|
||||
e := *entry
|
||||
entries[i] = &e
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// Reset removes all Entries from this test hook.
|
||||
func (t *Hook) Reset() {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.Entries = make([]*logrus.Entry, 0)
|
||||
}
|
||||
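The test hook defined above is what `test_test.go` below exercises. As a sketch of how a consumer might use it in their own tests (package and message names are illustrative assumptions):

```go
package mypkg

import (
	"testing"

	"github.com/sirupsen/logrus"
	"github.com/sirupsen/logrus/hooks/test"
)

func TestLogsAnError(t *testing.T) {
	// NewNullLogger returns a logger that discards output plus a hook
	// that records every entry for inspection.
	logger, hook := test.NewNullLogger()

	logger.Error("boom")

	if got := hook.LastEntry().Message; got != "boom" {
		t.Fatalf("expected message %q, got %q", "boom", got)
	}
	if hook.LastEntry().Level != logrus.ErrorLevel {
		t.Fatalf("expected error level, got %v", hook.LastEntry().Level)
	}
}
```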
39
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/hooks/test/test_test.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAllHooks(t *testing.T) {
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
logger, hook := NewNullLogger()
|
||||
assert.Nil(hook.LastEntry())
|
||||
assert.Equal(0, len(hook.Entries))
|
||||
|
||||
logger.Error("Hello error")
|
||||
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal("Hello error", hook.LastEntry().Message)
|
||||
assert.Equal(1, len(hook.Entries))
|
||||
|
||||
logger.Warn("Hello warning")
|
||||
assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
|
||||
assert.Equal("Hello warning", hook.LastEntry().Message)
|
||||
assert.Equal(2, len(hook.Entries))
|
||||
|
||||
hook.Reset()
|
||||
assert.Nil(hook.LastEntry())
|
||||
assert.Equal(0, len(hook.Entries))
|
||||
|
||||
hook = NewGlobal()
|
||||
|
||||
logrus.Error("Hello error")
|
||||
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
|
||||
assert.Equal("Hello error", hook.LastEntry().Message)
|
||||
assert.Equal(1, len(hook.Entries))
|
||||
|
||||
}
|
||||
79
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/json_formatter.go
generated
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type fieldKey string
|
||||
|
||||
// FieldMap allows customization of the key names for default fields.
|
||||
type FieldMap map[fieldKey]string
|
||||
|
||||
// Default key names for the default fields
|
||||
const (
|
||||
FieldKeyMsg = "msg"
|
||||
FieldKeyLevel = "level"
|
||||
FieldKeyTime = "time"
|
||||
)
|
||||
|
||||
func (f FieldMap) resolve(key fieldKey) string {
|
||||
if k, ok := f[key]; ok {
|
||||
return k
|
||||
}
|
||||
|
||||
return string(key)
|
||||
}
|
||||
|
||||
// JSONFormatter formats logs into parsable json
|
||||
type JSONFormatter struct {
|
||||
// TimestampFormat sets the format used for marshaling timestamps.
|
||||
TimestampFormat string
|
||||
|
||||
// DisableTimestamp allows disabling automatic timestamps in output
|
||||
DisableTimestamp bool
|
||||
|
||||
// FieldMap allows users to customize the names of keys for default fields.
|
||||
// As an example:
|
||||
// formatter := &JSONFormatter{
|
||||
// FieldMap: FieldMap{
|
||||
// FieldKeyTime: "@timestamp",
|
||||
// FieldKeyLevel: "@level",
|
||||
// FieldKeyMsg: "@message",
|
||||
// },
|
||||
// }
|
||||
FieldMap FieldMap
|
||||
}
|
||||
|
||||
// Format renders a single log entry
|
||||
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
data := make(Fields, len(entry.Data)+3)
|
||||
for k, v := range entry.Data {
|
||||
switch v := v.(type) {
|
||||
case error:
|
||||
// Otherwise errors are ignored by `encoding/json`
|
||||
// https://github.com/sirupsen/logrus/issues/137
|
||||
data[k] = v.Error()
|
||||
default:
|
||||
data[k] = v
|
||||
}
|
||||
}
|
||||
prefixFieldClashes(data)
|
||||
|
||||
timestampFormat := f.TimestampFormat
|
||||
if timestampFormat == "" {
|
||||
timestampFormat = defaultTimestampFormat
|
||||
}
|
||||
|
||||
if !f.DisableTimestamp {
|
||||
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
|
||||
}
|
||||
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
|
||||
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
|
||||
|
||||
serialized, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
||||
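The `FieldMap` option documented above lets callers rename the default `time`/`level`/`msg` keys. A minimal usage sketch, with the replacement key names chosen purely for illustration:

```go
package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.New()
	// Emit JSON with Elastic-style key names instead of time/level/msg.
	log.Formatter = &logrus.JSONFormatter{
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
	}
	log.WithField("component", "startup").Info("hello")
}
```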
199
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/json_formatter_test.go
generated
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestErrorNotLost(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["error"] != "wild walrus" {
|
||||
t.Fatal("Error field not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["omg"] != "wild walrus" {
|
||||
t.Fatal("Error field not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithTime(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("time", "right now!"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.time"] != "right now!" {
|
||||
t.Fatal("fields.time not set to original time field")
|
||||
}
|
||||
|
||||
if entry["time"] != "0001-01-01T00:00:00Z" {
|
||||
t.Fatal("time field not set to current time, was: ", entry["time"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithMsg(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("msg", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.msg"] != "something" {
|
||||
t.Fatal("fields.msg not set to original msg field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithLevel(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.level"] != "something" {
|
||||
t.Fatal("fields.level not set to original level field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEntryEndsWithNewline(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
if b[len(b)-1] != '\n' {
|
||||
t.Fatal("Expected JSON log entry to end with a newline")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONMessageKey(t *testing.T) {
|
||||
formatter := &JSONFormatter{
|
||||
FieldMap: FieldMap{
|
||||
FieldKeyMsg: "message",
|
||||
},
|
||||
}
|
||||
|
||||
b, err := formatter.Format(&Entry{Message: "oh hai"})
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if !(strings.Contains(s, "message") && strings.Contains(s, "oh hai")) {
|
||||
t.Fatal("Expected JSON to format message key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONLevelKey(t *testing.T) {
|
||||
formatter := &JSONFormatter{
|
||||
FieldMap: FieldMap{
|
||||
FieldKeyLevel: "somelevel",
|
||||
},
|
||||
}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if !strings.Contains(s, "somelevel") {
|
||||
t.Fatal("Expected JSON to format level key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONTimeKey(t *testing.T) {
|
||||
formatter := &JSONFormatter{
|
||||
FieldMap: FieldMap{
|
||||
FieldKeyTime: "timeywimey",
|
||||
},
|
||||
}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if !strings.Contains(s, "timeywimey") {
|
||||
t.Fatal("Expected JSON to format time key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONDisableTimestamp(t *testing.T) {
|
||||
formatter := &JSONFormatter{
|
||||
DisableTimestamp: true,
|
||||
}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if strings.Contains(s, FieldKeyTime) {
|
||||
t.Error("Did not prevent timestamp", s)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEnableTimestamp(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
s := string(b)
|
||||
if !strings.Contains(s, FieldKeyTime) {
|
||||
t.Error("Timestamp not present", s)
|
||||
}
|
||||
}
|
||||
323
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/logger.go
generated
vendored
Normal file
@@ -0,0 +1,323 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it as the default, which is `os.Stderr`. You can also set this
// to something more adventurous, such as logging to Kafka.
|
||||
Out io.Writer
|
||||
// Hooks for the logger instance. These allow firing events based on logging
|
||||
// levels and log entries. For example, to send errors to an error tracking
|
||||
// service, log to StatsD or dump the core on fatal errors.
|
||||
Hooks LevelHooks
|
||||
// All log entries pass through the formatter before logged to Out. The
|
||||
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
||||
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but when writing to a file it won't. You can easily implement your
|
||||
// own that implements the `Formatter` interface, see the `README` or included
|
||||
// formatters for examples.
|
||||
Formatter Formatter
|
||||
// The logging level the logger should log at. This is typically (and defaults
|
||||
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
||||
// logged.
|
||||
Level Level
|
||||
// Used to sync writing to the log. Locking is enabled by default.
|
||||
mu MutexWrap
|
||||
// Reusable empty entry
|
||||
entryPool sync.Pool
|
||||
}
|
||||
|
||||
type MutexWrap struct {
|
||||
lock sync.Mutex
|
||||
disabled bool
|
||||
}
|
||||
|
||||
func (mw *MutexWrap) Lock() {
|
||||
if !mw.disabled {
|
||||
mw.lock.Lock()
|
||||
}
|
||||
}
|
||||
|
||||
func (mw *MutexWrap) Unlock() {
|
||||
if !mw.disabled {
|
||||
mw.lock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (mw *MutexWrap) Disable() {
|
||||
mw.disabled = true
|
||||
}
|
||||
|
||||
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
||||
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||
// instantiate your own:
|
||||
//
|
||||
// var log = &Logger{
|
||||
// Out: os.Stderr,
|
||||
// Formatter: new(JSONFormatter),
|
||||
// Hooks: make(LevelHooks),
|
||||
// Level: logrus.DebugLevel,
|
||||
// }
|
||||
//
|
||||
// It's recommended to make this a global instance called `log`.
|
||||
func New() *Logger {
|
||||
return &Logger{
|
||||
Out: os.Stderr,
|
||||
Formatter: new(TextFormatter),
|
||||
Hooks: make(LevelHooks),
|
||||
Level: InfoLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) newEntry() *Entry {
|
||||
entry, ok := logger.entryPool.Get().(*Entry)
|
||||
if ok {
|
||||
return entry
|
||||
}
|
||||
return NewEntry(logger)
|
||||
}
|
||||
|
||||
func (logger *Logger) releaseEntry(entry *Entry) {
|
||||
logger.entryPool.Put(entry)
|
||||
}
|
||||
|
||||
// Adds a field to the log entry, note that it doesn't log until you call
|
||||
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
||||
// If you want multiple fields, use `WithFields`.
|
||||
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
||||
entry := logger.newEntry()
|
||||
defer logger.releaseEntry(entry)
|
||||
return entry.WithField(key, value)
|
||||
}
|
||||
|
||||
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
||||
// each `Field`.
|
||||
func (logger *Logger) WithFields(fields Fields) *Entry {
|
||||
entry := logger.newEntry()
|
||||
defer logger.releaseEntry(entry)
|
||||
return entry.WithFields(fields)
|
||||
}
|
||||
|
||||
// Add an error as single field to the log entry. All it does is call
|
||||
// `WithError` for the given `error`.
|
||||
func (logger *Logger) WithError(err error) *Entry {
|
||||
entry := logger.newEntry()
|
||||
defer logger.releaseEntry(entry)
|
||||
return entry.WithError(err)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||
if logger.level() >= DebugLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Debugf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||
if logger.level() >= InfoLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Infof(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||
entry := logger.newEntry()
|
||||
entry.Printf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||
if logger.level() >= ErrorLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Errorf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||
if logger.level() >= FatalLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatalf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||
if logger.level() >= PanicLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Panicf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Debug(args ...interface{}) {
|
||||
if logger.level() >= DebugLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Debug(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Info(args ...interface{}) {
|
||||
if logger.level() >= InfoLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Info(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Print(args ...interface{}) {
|
||||
entry := logger.newEntry()
|
||||
entry.Info(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warn(args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warn(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warning(args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warn(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Error(args ...interface{}) {
|
||||
if logger.level() >= ErrorLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Error(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatal(args ...interface{}) {
|
||||
if logger.level() >= FatalLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatal(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panic(args ...interface{}) {
|
||||
if logger.level() >= PanicLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Panic(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugln(args ...interface{}) {
|
||||
if logger.level() >= DebugLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Debugln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Infoln(args ...interface{}) {
|
||||
if logger.level() >= InfoLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Infoln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Println(args ...interface{}) {
|
||||
entry := logger.newEntry()
|
||||
entry.Println(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnln(args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningln(args ...interface{}) {
|
||||
if logger.level() >= WarnLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorln(args ...interface{}) {
|
||||
if logger.level() >= ErrorLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Errorln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||
if logger.level() >= FatalLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatalln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicln(args ...interface{}) {
|
||||
if logger.level() >= PanicLevel {
|
||||
entry := logger.newEntry()
|
||||
entry.Panicln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
// When a file is opened in append mode, it is safe to write to it
// concurrently (for messages under 4k on Linux). In those cases the user
// can choose to disable the lock.
|
||||
func (logger *Logger) SetNoLock() {
|
||||
logger.mu.Disable()
|
||||
}
|
||||
|
||||
func (logger *Logger) level() Level {
|
||||
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
|
||||
}
|
||||
|
||||
func (logger *Logger) SetLevel(level Level) {
|
||||
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
|
||||
}
|
||||
|
||||
func (logger *Logger) AddHook(hook Hook) {
|
||||
logger.mu.Lock()
|
||||
defer logger.mu.Unlock()
|
||||
logger.Hooks.Add(hook)
|
||||
}
|
||||
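A brief illustrative sketch of how a consumer typically configures the `Logger` defined above; the output writer, level, and field names are assumptions for the example, not part of this change:

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New() // stderr, TextFormatter, InfoLevel by default
	log.Out = os.Stdout // redirect output
	log.SetLevel(logrus.DebugLevel)

	log.WithFields(logrus.Fields{
		"pid":  os.Getpid(),
		"args": os.Args,
	}).Debug("process details")
}
```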
61
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/logger_bench_test.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// loggerFields is a small data set for benchmarking
|
||||
var loggerFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
}
|
||||
|
||||
func BenchmarkDummyLogger(b *testing.B) {
|
||||
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
b.Fatalf("%v", err)
|
||||
}
|
||||
defer nullf.Close()
|
||||
doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkDummyLoggerNoLock(b *testing.B) {
|
||||
nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666)
|
||||
if err != nil {
|
||||
b.Fatalf("%v", err)
|
||||
}
|
||||
defer nullf.Close()
|
||||
doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
|
||||
logger := Logger{
|
||||
Out: out,
|
||||
Level: InfoLevel,
|
||||
Formatter: formatter,
|
||||
}
|
||||
entry := logger.WithFields(fields)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
entry.Info("aaa")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) {
|
||||
logger := Logger{
|
||||
Out: out,
|
||||
Level: InfoLevel,
|
||||
Formatter: formatter,
|
||||
}
|
||||
logger.SetNoLock()
|
||||
entry := logger.WithFields(fields)
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
for pb.Next() {
|
||||
entry.Info("aaa")
|
||||
}
|
||||
})
|
||||
}
|
||||
143
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/logrus.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Fields type, used to pass to `WithFields`.
|
||||
type Fields map[string]interface{}
|
||||
|
||||
// Level type
|
||||
type Level uint32
|
||||
|
||||
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
||||
func (level Level) String() string {
|
||||
switch level {
|
||||
case DebugLevel:
|
||||
return "debug"
|
||||
case InfoLevel:
|
||||
return "info"
|
||||
case WarnLevel:
|
||||
return "warning"
|
||||
case ErrorLevel:
|
||||
return "error"
|
||||
case FatalLevel:
|
||||
return "fatal"
|
||||
case PanicLevel:
|
||||
return "panic"
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// ParseLevel takes a string level and returns the Logrus log level constant.
|
||||
func ParseLevel(lvl string) (Level, error) {
|
||||
switch strings.ToLower(lvl) {
|
||||
case "panic":
|
||||
return PanicLevel, nil
|
||||
case "fatal":
|
||||
return FatalLevel, nil
|
||||
case "error":
|
||||
return ErrorLevel, nil
|
||||
case "warn", "warning":
|
||||
return WarnLevel, nil
|
||||
case "info":
|
||||
return InfoLevel, nil
|
||||
case "debug":
|
||||
return DebugLevel, nil
|
||||
}
|
||||
|
||||
var l Level
|
||||
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
|
||||
}
|
||||
|
||||
// A constant exposing all logging levels
|
||||
var AllLevels = []Level{
|
||||
PanicLevel,
|
||||
FatalLevel,
|
||||
ErrorLevel,
|
||||
WarnLevel,
|
||||
InfoLevel,
|
||||
DebugLevel,
|
||||
}
|
||||
|
||||
// These are the different logging levels. You can set the logging level to log
|
||||
// on your instance of logger, obtained with `logrus.New()`.
|
||||
const (
|
||||
// PanicLevel level, highest level of severity. Logs and then calls panic with the
|
||||
// message passed to Debug, Info, ...
|
||||
PanicLevel Level = iota
|
||||
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
|
||||
// logging level is set to Panic.
|
||||
FatalLevel
|
||||
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
|
||||
// Commonly used for hooks to send errors to an error tracking service.
|
||||
ErrorLevel
|
||||
// WarnLevel level. Non-critical entries that deserve eyes.
|
||||
WarnLevel
|
||||
// InfoLevel level. General operational entries about what's going on inside the
|
||||
// application.
|
||||
InfoLevel
|
||||
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
|
||||
DebugLevel
|
||||
)
|
||||
|
||||
// Won't compile if StdLogger can't be realized by a log.Logger
|
||||
var (
|
||||
_ StdLogger = &log.Logger{}
|
||||
_ StdLogger = &Entry{}
|
||||
_ StdLogger = &Logger{}
|
||||
)
|
||||
|
||||
// StdLogger is what your logrus-enabled library should take so that
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface; this is the closest we get, unfortunately.
|
||||
type StdLogger interface {
|
||||
Print(...interface{})
|
||||
Printf(string, ...interface{})
|
||||
Println(...interface{})
|
||||
|
||||
Fatal(...interface{})
|
||||
Fatalf(string, ...interface{})
|
||||
Fatalln(...interface{})
|
||||
|
||||
Panic(...interface{})
|
||||
Panicf(string, ...interface{})
|
||||
Panicln(...interface{})
|
||||
}
|
||||
|
||||
// The FieldLogger interface generalizes the Entry and Logger types
|
||||
type FieldLogger interface {
|
||||
WithField(key string, value interface{}) *Entry
|
||||
WithFields(fields Fields) *Entry
|
||||
WithError(err error) *Entry
|
||||
|
||||
Debugf(format string, args ...interface{})
|
||||
Infof(format string, args ...interface{})
|
||||
Printf(format string, args ...interface{})
|
||||
Warnf(format string, args ...interface{})
|
||||
Warningf(format string, args ...interface{})
|
||||
Errorf(format string, args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
Panicf(format string, args ...interface{})
|
||||
|
||||
Debug(args ...interface{})
|
||||
Info(args ...interface{})
|
||||
Print(args ...interface{})
|
||||
Warn(args ...interface{})
|
||||
Warning(args ...interface{})
|
||||
Error(args ...interface{})
|
||||
Fatal(args ...interface{})
|
||||
Panic(args ...interface{})
|
||||
|
||||
Debugln(args ...interface{})
|
||||
Infoln(args ...interface{})
|
||||
Println(args ...interface{})
|
||||
Warnln(args ...interface{})
|
||||
Warningln(args ...interface{})
|
||||
Errorln(args ...interface{})
|
||||
Fatalln(args ...interface{})
|
||||
Panicln(args ...interface{})
|
||||
}
|
||||
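`ParseLevel` above is commonly used to wire a log level in from configuration. A hedged sketch of that pattern (the `LOG_LEVEL` environment variable name is an assumption for the example):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	// Fall back to Info if LOG_LEVEL is unset or invalid.
	level, err := logrus.ParseLevel(os.Getenv("LOG_LEVEL"))
	if err != nil {
		level = logrus.InfoLevel
	}
	logrus.SetLevel(level)
	logrus.Infof("logging at %s", level)
}
```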
386
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/logrus_test.go
generated
vendored
Normal file
@@ -0,0 +1,386 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
log(logger)
|
||||
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assertions(fields)
|
||||
}
|
||||
|
||||
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = &TextFormatter{
|
||||
DisableColors: true,
|
||||
}
|
||||
|
||||
log(logger)
|
||||
|
||||
fields := make(map[string]string)
|
||||
for _, kv := range strings.Split(buffer.String(), " ") {
|
||||
if !strings.Contains(kv, "=") {
|
||||
continue
|
||||
}
|
||||
kvArr := strings.Split(kv, "=")
|
||||
key := strings.TrimSpace(kvArr[0])
|
||||
val := kvArr[1]
|
||||
if kvArr[1][0] == '"' {
|
||||
var err error
|
||||
val, err = strconv.Unquote(val)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
fields[key] = val
|
||||
}
|
||||
assertions(fields)
|
||||
}
|
||||
|
||||
func TestPrint(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfo(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWarn(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Warn("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "warning")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln("test", "test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test test")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln("test", 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test", 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test", "test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "testtest")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
localLog := logger.WithFields(Fields{
|
||||
"key1": "value1",
|
||||
})
|
||||
|
||||
localLog.WithField("key2", "value2").Info("test")
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, "value2", fields["key2"])
|
||||
assert.Equal(t, "value1", fields["key1"])
|
||||
|
||||
buffer = bytes.Buffer{}
|
||||
fields = Fields{}
|
||||
localLog.Info("test")
|
||||
err = json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
_, ok := fields["key2"]
|
||||
assert.Equal(t, false, ok)
|
||||
assert.Equal(t, "value1", fields["key1"])
|
||||
}
|
||||
|
||||
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("msg", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("msg", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["fields.msg"], "hello")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("time", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["fields.time"], "hello")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("level", 1).Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
|
||||
})
|
||||
}
|
||||
|
||||
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
|
||||
LogAndAssertText(t, func(log *Logger) {
|
||||
ll := log.WithField("herp", "derp")
|
||||
ll.Info("hello")
|
||||
ll.Info("bye")
|
||||
}, func(fields map[string]string) {
|
||||
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
|
||||
if _, ok := fields[fieldName]; ok {
|
||||
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
|
||||
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
llog := logger.WithField("context", "eating raw fish")
|
||||
|
||||
llog.Info("looks delicious")
|
||||
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.NoError(t, err, "should have decoded first message")
|
||||
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
|
||||
assert.Equal(t, fields["msg"], "looks delicious")
|
||||
assert.Equal(t, fields["context"], "eating raw fish")
|
||||
|
||||
buffer.Reset()
|
||||
|
||||
llog.Warn("omg it is!")
|
||||
|
||||
err = json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.NoError(t, err, "should have decoded second message")
|
||||
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
|
||||
assert.Equal(t, fields["msg"], "omg it is!")
|
||||
assert.Equal(t, fields["context"], "eating raw fish")
|
||||
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
|
||||
|
||||
}
|
||||
|
||||
func TestConvertLevelToString(t *testing.T) {
|
||||
assert.Equal(t, "debug", DebugLevel.String())
|
||||
assert.Equal(t, "info", InfoLevel.String())
|
||||
assert.Equal(t, "warning", WarnLevel.String())
|
||||
assert.Equal(t, "error", ErrorLevel.String())
|
||||
assert.Equal(t, "fatal", FatalLevel.String())
|
||||
assert.Equal(t, "panic", PanicLevel.String())
|
||||
}
|
||||
|
||||
func TestParseLevel(t *testing.T) {
|
||||
l, err := ParseLevel("panic")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, PanicLevel, l)
|
||||
|
||||
l, err = ParseLevel("PANIC")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, PanicLevel, l)
|
||||
|
||||
l, err = ParseLevel("fatal")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, FatalLevel, l)
|
||||
|
||||
l, err = ParseLevel("FATAL")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, FatalLevel, l)
|
||||
|
||||
l, err = ParseLevel("error")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ErrorLevel, l)
|
||||
|
||||
l, err = ParseLevel("ERROR")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ErrorLevel, l)
|
||||
|
||||
l, err = ParseLevel("warn")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("WARN")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("warning")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("WARNING")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("info")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, InfoLevel, l)
|
||||
|
||||
l, err = ParseLevel("INFO")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, InfoLevel, l)
|
||||
|
||||
l, err = ParseLevel("debug")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, DebugLevel, l)
|
||||
|
||||
l, err = ParseLevel("DEBUG")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, DebugLevel, l)
|
||||
|
||||
l, err = ParseLevel("invalid")
|
||||
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
|
||||
}
|
||||
|
||||
func TestGetSetLevelRace(t *testing.T) {
|
||||
wg := sync.WaitGroup{}
|
||||
for i := 0; i < 100; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
if i%2 == 0 {
|
||||
SetLevel(InfoLevel)
|
||||
} else {
|
||||
GetLevel()
|
||||
}
|
||||
}(i)
|
||||
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestLoggingRace(t *testing.T) {
|
||||
logger := New()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(100)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
go func() {
|
||||
logger.Info("info")
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Compile test
|
||||
func TestLogrusInterface(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
fn := func(l FieldLogger) {
|
||||
b := l.WithField("key", "value")
|
||||
b.Debug("Test")
|
||||
}
|
||||
// test logger
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
fn(logger)
|
||||
|
||||
// test Entry
|
||||
e := logger.WithField("another", "value")
|
||||
fn(e)
|
||||
}
|
||||
|
||||
// Implements io.Writer using channels for synchronization, so we can wait on
|
||||
// the Entry.Writer goroutine to write in a non-racy way. This does assume that
|
||||
// there is a single call to Logger.Out for each message.
|
||||
type channelWriter chan []byte
|
||||
|
||||
func (cw channelWriter) Write(p []byte) (int, error) {
|
||||
cw <- p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func TestEntryWriter(t *testing.T) {
|
||||
cw := channelWriter(make(chan []byte, 1))
|
||||
log := New()
|
||||
log.Out = cw
|
||||
log.Formatter = new(JSONFormatter)
|
||||
log.WithField("foo", "bar").WriterLevel(WarnLevel).Write([]byte("hello\n"))
|
||||
|
||||
bs := <-cw
|
||||
var fields Fields
|
||||
err := json.Unmarshal(bs, &fields)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, fields["foo"], "bar")
|
||||
assert.Equal(t, fields["level"], "warning")
|
||||
}
|
||||
10
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/terminal_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// +build darwin freebsd openbsd netbsd dragonfly
|
||||
// +build !appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
const ioctlReadTermios = unix.TIOCGETA
|
||||
|
||||
type Termios unix.Termios
|
||||
11
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// +build appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
func checkIfTerminal(w io.Writer) bool {
|
||||
return true
|
||||
}
|
||||
19
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// +build !appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
)
|
||||
|
||||
func checkIfTerminal(w io.Writer) bool {
|
||||
switch v := w.(type) {
|
||||
case *os.File:
|
||||
return terminal.IsTerminal(int(v.Fd()))
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
14
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/terminal_linux.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Based on ssh/terminal:
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package logrus
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
const ioctlReadTermios = unix.TCGETS
|
||||
|
||||
type Termios unix.Termios
|
||||
178
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/text_formatter.go
generated
vendored
Normal file
@@ -0,0 +1,178 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
nocolor = 0
|
||||
red = 31
|
||||
green = 32
|
||||
yellow = 33
|
||||
blue = 36
|
||||
gray = 37
|
||||
)
|
||||
|
||||
var (
|
||||
baseTimestamp time.Time
|
||||
)
|
||||
|
||||
func init() {
|
||||
baseTimestamp = time.Now()
|
||||
}
|
||||
|
||||
// TextFormatter formats logs into text
|
||||
type TextFormatter struct {
|
||||
// Set to true to bypass checking for a TTY before outputting colors.
|
||||
ForceColors bool
|
||||
|
||||
// Force disabling colors.
|
||||
DisableColors bool
|
||||
|
||||
// Disable timestamp logging. useful when output is redirected to logging
|
||||
// system that already adds timestamps.
|
||||
DisableTimestamp bool
|
||||
|
||||
// Enable logging the full timestamp when a TTY is attached instead of just
|
||||
// the time passed since beginning of execution.
|
||||
FullTimestamp bool
|
||||
|
||||
// TimestampFormat to use for display when a full timestamp is printed
|
||||
TimestampFormat string
|
||||
|
||||
// The fields are sorted by default for a consistent output. For applications
|
||||
// that log extremely frequently and don't use the JSON formatter this may not
|
||||
// be desired.
|
||||
DisableSorting bool
|
||||
|
||||
// QuoteEmptyFields will wrap empty fields in quotes if true
|
||||
QuoteEmptyFields bool
|
||||
|
||||
// Whether the logger's out is to a terminal
|
||||
isTerminal bool
|
||||
|
||||
sync.Once
|
||||
}
|
||||
|
||||
func (f *TextFormatter) init(entry *Entry) {
|
||||
if entry.Logger != nil {
|
||||
f.isTerminal = checkIfTerminal(entry.Logger.Out)
|
||||
}
|
||||
}
|
||||
|
||||
// Format renders a single log entry
|
||||
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
var b *bytes.Buffer
|
||||
keys := make([]string, 0, len(entry.Data))
|
||||
for k := range entry.Data {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
if !f.DisableSorting {
|
||||
sort.Strings(keys)
|
||||
}
|
||||
if entry.Buffer != nil {
|
||||
b = entry.Buffer
|
||||
} else {
|
||||
b = &bytes.Buffer{}
|
||||
}
|
||||
|
||||
prefixFieldClashes(entry.Data)
|
||||
|
||||
f.Do(func() { f.init(entry) })
|
||||
|
||||
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
|
||||
|
||||
timestampFormat := f.TimestampFormat
|
||||
if timestampFormat == "" {
|
||||
timestampFormat = defaultTimestampFormat
|
||||
}
|
||||
if isColored {
|
||||
f.printColored(b, entry, keys, timestampFormat)
|
||||
} else {
|
||||
if !f.DisableTimestamp {
|
||||
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
|
||||
}
|
||||
f.appendKeyValue(b, "level", entry.Level.String())
|
||||
if entry.Message != "" {
|
||||
f.appendKeyValue(b, "msg", entry.Message)
|
||||
}
|
||||
for _, key := range keys {
|
||||
f.appendKeyValue(b, key, entry.Data[key])
|
||||
}
|
||||
}
|
||||
|
||||
b.WriteByte('\n')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
|
||||
var levelColor int
|
||||
switch entry.Level {
|
||||
case DebugLevel:
|
||||
levelColor = gray
|
||||
case WarnLevel:
|
||||
levelColor = yellow
|
||||
case ErrorLevel, FatalLevel, PanicLevel:
|
||||
levelColor = red
|
||||
default:
|
||||
levelColor = blue
|
||||
}
|
||||
|
||||
levelText := strings.ToUpper(entry.Level.String())[0:4]
|
||||
|
||||
if f.DisableTimestamp {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
|
||||
} else if !f.FullTimestamp {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
|
||||
} else {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
|
||||
}
|
||||
for _, k := range keys {
|
||||
v := entry.Data[k]
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
|
||||
f.appendValue(b, v)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *TextFormatter) needsQuoting(text string) bool {
|
||||
if f.QuoteEmptyFields && len(text) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, ch := range text {
|
||||
if !((ch >= 'a' && ch <= 'z') ||
|
||||
(ch >= 'A' && ch <= 'Z') ||
|
||||
(ch >= '0' && ch <= '9') ||
|
||||
ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
|
||||
if b.Len() > 0 {
|
||||
b.WriteByte(' ')
|
||||
}
|
||||
b.WriteString(key)
|
||||
b.WriteByte('=')
|
||||
f.appendValue(b, value)
|
||||
}
|
||||
|
||||
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
|
||||
stringVal, ok := value.(string)
|
||||
if !ok {
|
||||
stringVal = fmt.Sprint(value)
|
||||
}
|
||||
|
||||
if !f.needsQuoting(stringVal) {
|
||||
b.WriteString(stringVal)
|
||||
} else {
|
||||
b.WriteString(fmt.Sprintf("%q", stringVal))
|
||||
}
|
||||
}
|
||||
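Note: TextFormatter above exposes the knobs exercised by the tests in this vendor drop (ForceColors, FullTimestamp, TimestampFormat, DisableSorting, QuoteEmptyFields). A minimal sketch of wiring it into a logger, with illustrative values:

package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Formatter = &logrus.TextFormatter{
		ForceColors:      true,         // color even when Out is not a TTY
		FullTimestamp:    true,         // absolute time instead of seconds since start
		TimestampFormat:  time.RFC3339, // used because FullTimestamp is set
		DisableSorting:   false,        // keep deterministic key order
		QuoteEmptyFields: true,         // render empty values as ""
	}

	log.WithField("addr", ":8080").Info("listening")
}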
141
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/text_formatter_test.go
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFormatting(t *testing.T) {
|
||||
tf := &TextFormatter{DisableColors: true}
|
||||
|
||||
testCases := []struct {
|
||||
value string
|
||||
expected string
|
||||
}{
|
||||
{`foo`, "time=\"0001-01-01T00:00:00Z\" level=panic test=foo\n"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
b, _ := tf.Format(WithField("test", tc.value))
|
||||
|
||||
if string(b) != tc.expected {
|
||||
t.Errorf("formatting expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestQuoting(t *testing.T) {
|
||||
tf := &TextFormatter{DisableColors: true}
|
||||
|
||||
checkQuoting := func(q bool, value interface{}) {
|
||||
b, _ := tf.Format(WithField("test", value))
|
||||
idx := bytes.Index(b, ([]byte)("test="))
|
||||
cont := bytes.Contains(b[idx+5:], []byte("\""))
|
||||
if cont != q {
|
||||
if q {
|
||||
t.Errorf("quoting expected for: %#v", value)
|
||||
} else {
|
||||
t.Errorf("quoting not expected for: %#v", value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkQuoting(false, "")
|
||||
checkQuoting(false, "abcd")
|
||||
checkQuoting(false, "v1.0")
|
||||
checkQuoting(false, "1234567890")
|
||||
checkQuoting(false, "/foobar")
|
||||
checkQuoting(false, "foo_bar")
|
||||
checkQuoting(false, "foo@bar")
|
||||
checkQuoting(false, "foobar^")
|
||||
checkQuoting(false, "+/-_^@f.oobar")
|
||||
checkQuoting(true, "foobar$")
|
||||
checkQuoting(true, "&foobar")
|
||||
checkQuoting(true, "x y")
|
||||
checkQuoting(true, "x,y")
|
||||
checkQuoting(false, errors.New("invalid"))
|
||||
checkQuoting(true, errors.New("invalid argument"))
|
||||
|
||||
// Test for quoting empty fields.
|
||||
tf.QuoteEmptyFields = true
|
||||
checkQuoting(true, "")
|
||||
checkQuoting(false, "abcd")
|
||||
checkQuoting(true, errors.New("invalid argument"))
|
||||
}
|
||||
|
||||
func TestEscaping(t *testing.T) {
|
||||
tf := &TextFormatter{DisableColors: true}
|
||||
|
||||
testCases := []struct {
|
||||
value string
|
||||
expected string
|
||||
}{
|
||||
{`ba"r`, `ba\"r`},
|
||||
{`ba'r`, `ba'r`},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
b, _ := tf.Format(WithField("test", tc.value))
|
||||
if !bytes.Contains(b, []byte(tc.expected)) {
|
||||
t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEscaping_Interface(t *testing.T) {
|
||||
tf := &TextFormatter{DisableColors: true}
|
||||
|
||||
ts := time.Now()
|
||||
|
||||
testCases := []struct {
|
||||
value interface{}
|
||||
expected string
|
||||
}{
|
||||
{ts, fmt.Sprintf("\"%s\"", ts.String())},
|
||||
{errors.New("error: something went wrong"), "\"error: something went wrong\""},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
b, _ := tf.Format(WithField("test", tc.value))
|
||||
if !bytes.Contains(b, []byte(tc.expected)) {
|
||||
t.Errorf("escaping expected for %q (result was %q instead of %q)", tc.value, string(b), tc.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampFormat(t *testing.T) {
|
||||
checkTimeStr := func(format string) {
|
||||
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
|
||||
customStr, _ := customFormatter.Format(WithField("test", "test"))
|
||||
timeStart := bytes.Index(customStr, ([]byte)("time="))
|
||||
timeEnd := bytes.Index(customStr, ([]byte)("level="))
|
||||
timeStr := customStr[timeStart+5+len("\"") : timeEnd-1-len("\"")]
|
||||
if format == "" {
|
||||
format = time.RFC3339
|
||||
}
|
||||
_, e := time.Parse(format, (string)(timeStr))
|
||||
if e != nil {
|
||||
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
|
||||
}
|
||||
}
|
||||
|
||||
checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
|
||||
checkTimeStr("Mon Jan _2 15:04:05 2006")
|
||||
checkTimeStr("")
|
||||
}
|
||||
|
||||
func TestDisableTimestampWithColoredOutput(t *testing.T) {
|
||||
tf := &TextFormatter{DisableTimestamp: true, ForceColors: true}
|
||||
|
||||
b, _ := tf.Format(WithField("test", "test"))
|
||||
if strings.Contains(string(b), "[0000]") {
|
||||
t.Error("timestamp not expected when DisableTimestamp is true")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO add tests for sorting etc., this requires a parser for the text
|
||||
// formatter output.
|
||||
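Note: the quoting tests above pin down needsQuoting: values made only of letters, digits and -._/@^+ are emitted bare, anything else is %q-quoted, and empty values are quoted only when QuoteEmptyFields is set. A small sketch of the observable difference (field names and values are illustrative):

package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	log.Out = os.Stdout
	log.Formatter = &logrus.TextFormatter{DisableColors: true, QuoteEmptyFields: true}

	// Expected shape of the output (time value elided, keys sorted):
	//   time="..." level=info msg=demo bare=v1.0 empty="" spaced="x y"
	log.WithFields(logrus.Fields{
		"bare":   "v1.0", // no quoting needed
		"spaced": "x y",  // contains a space, so quoted
		"empty":  "",     // quoted because QuoteEmptyFields is true
	}).Info("demo")
}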
62
vendor/github.com/genuinetools/amicontained/vendor/github.com/sirupsen/logrus/writer.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func (logger *Logger) Writer() *io.PipeWriter {
|
||||
return logger.WriterLevel(InfoLevel)
|
||||
}
|
||||
|
||||
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
|
||||
return NewEntry(logger).WriterLevel(level)
|
||||
}
|
||||
|
||||
func (entry *Entry) Writer() *io.PipeWriter {
|
||||
return entry.WriterLevel(InfoLevel)
|
||||
}
|
||||
|
||||
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
|
||||
reader, writer := io.Pipe()
|
||||
|
||||
var printFunc func(args ...interface{})
|
||||
|
||||
switch level {
|
||||
case DebugLevel:
|
||||
printFunc = entry.Debug
|
||||
case InfoLevel:
|
||||
printFunc = entry.Info
|
||||
case WarnLevel:
|
||||
printFunc = entry.Warn
|
||||
case ErrorLevel:
|
||||
printFunc = entry.Error
|
||||
case FatalLevel:
|
||||
printFunc = entry.Fatal
|
||||
case PanicLevel:
|
||||
printFunc = entry.Panic
|
||||
default:
|
||||
printFunc = entry.Print
|
||||
}
|
||||
|
||||
go entry.writerScanner(reader, printFunc)
|
||||
runtime.SetFinalizer(writer, writerFinalizer)
|
||||
|
||||
return writer
|
||||
}
|
||||
|
||||
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
printFunc(scanner.Text())
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
entry.Errorf("Error while reading from Writer: %s", err)
|
||||
}
|
||||
reader.Close()
|
||||
}
|
||||
|
||||
func writerFinalizer(writer *io.PipeWriter) {
|
||||
writer.Close()
|
||||
}
|
||||
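Note: writer.go above lets a Logger or Entry stand in for an io.Writer. A common use, sketched here with the standard library log package, is to funnel another component's plain-text output into logrus at a chosen level:

package main

import (
	stdlog "log"
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Every line the stdlib logger prints becomes a logrus entry at Warn level,
	// tagged with the "component" field.
	w := logger.WithField("component", "legacy").WriterLevel(logrus.WarnLevel)
	defer w.Close() // lets the background scanner goroutine finish

	stdlog.SetOutput(w)
	stdlog.SetFlags(0) // logrus adds its own timestamp
	stdlog.Println("falling back to default config")

	// Only needed in this short-lived demo so the scanner goroutine gets a
	// chance to emit the entry before the process exits.
	time.Sleep(100 * time.Millisecond)
}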
24
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
Copyright 2013 Suryandaru Triandana <syndtr@gmail.com>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
133
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/capability/capability.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package capability provides utilities for manipulating POSIX capabilities.
|
||||
package capability
|
||||
|
||||
type Capabilities interface {
|
||||
// Get check whether a capability present in the given
|
||||
// capabilities set. The 'which' value should be one of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Get(which CapType, what Cap) bool
|
||||
|
||||
// Empty check whether all capability bits of the given capabilities
|
||||
// set are zero. The 'which' value should be one of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Empty(which CapType) bool
|
||||
|
||||
// Full check whether all capability bits of the given capabilities
|
||||
// set are one. The 'which' value should be one of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Full(which CapType) bool
|
||||
|
||||
// Set sets capabilities of the given capabilities sets. The
|
||||
// 'which' value should be one or combination (OR'ed) of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Set(which CapType, caps ...Cap)
|
||||
|
||||
// Unset unsets capabilities of the given capabilities sets. The
|
||||
// 'which' value should be one or combination (OR'ed) of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE, BOUNDING or AMBIENT.
|
||||
Unset(which CapType, caps ...Cap)
|
||||
|
||||
// Fill sets all bits of the given capabilities kind to one. The
|
||||
// 'kind' value should be one or combination (OR'ed) of CAPS,
|
||||
// BOUNDS or AMBS.
|
||||
Fill(kind CapType)
|
||||
|
||||
// Clear sets all bits of the given capabilities kind to zero. The
|
||||
// 'kind' value should be one or combination (OR'ed) of CAPS,
|
||||
// BOUNDS or AMBS.
|
||||
Clear(kind CapType)
|
||||
|
||||
// String return current capabilities state of the given capabilities
|
||||
// set as string. The 'which' value should be one of EFFECTIVE,
|
||||
// PERMITTED, INHERITABLE BOUNDING or AMBIENT
|
||||
StringCap(which CapType) string
|
||||
|
||||
// String return current capabilities state as string.
|
||||
String() string
|
||||
|
||||
// Load load actual capabilities value. This will overwrite all
|
||||
// outstanding changes.
|
||||
Load() error
|
||||
|
||||
// Apply apply the capabilities settings, so all changes will take
|
||||
// effect.
|
||||
Apply(kind CapType) error
|
||||
}
|
||||
|
||||
// NewPid initializes a new Capabilities object for given pid when
|
||||
// it is nonzero, or for the current process if pid is 0.
|
||||
//
|
||||
// Deprecated: Replace with NewPid2. For example, replace:
|
||||
//
|
||||
// c, err := NewPid(0)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// with:
|
||||
//
|
||||
// c, err := NewPid2(0)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// err = c.Load()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
func NewPid(pid int) (Capabilities, error) {
|
||||
c, err := newPid(pid)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
err = c.Load()
|
||||
return c, err
|
||||
}
|
||||
|
||||
// NewPid2 initializes a new Capabilities object for given pid when
|
||||
// it is nonzero, or for the current process if pid is 0. This
|
||||
// does not load the process's current capabilities; to do that you
|
||||
// must call Load explicitly.
|
||||
func NewPid2(pid int) (Capabilities, error) {
|
||||
return newPid(pid)
|
||||
}
|
||||
|
||||
// NewFile initializes a new Capabilities object for given file path.
|
||||
//
|
||||
// Deprecated: Replace with NewFile2. For example, replace:
|
||||
//
|
||||
// c, err := NewFile(path)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// with:
|
||||
//
|
||||
// c, err := NewFile2(path)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// err = c.Load()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
func NewFile(path string) (Capabilities, error) {
|
||||
c, err := newFile(path)
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
err = c.Load()
|
||||
return c, err
|
||||
}
|
||||
|
||||
// NewFile2 creates a new initialized Capabilities object for given
|
||||
// file path. This does not load the process's current capabilities;
|
||||
// to do that you must call Load explicitly.
|
||||
func NewFile2(path string) (Capabilities, error) {
|
||||
return newFile(path)
|
||||
}
|
||||
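Note: the deprecation comments above recommend NewPid2 followed by an explicit Load. A minimal, Linux-only sketch that checks whether the current process holds CAP_NET_BIND_SERVICE in its effective set:

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	// pid 0 means "the current process"; with NewPid2, Load must be called explicitly.
	caps, err := capability.NewPid2(0)
	if err != nil {
		log.Fatal(err)
	}
	if err := caps.Load(); err != nil {
		log.Fatal(err)
	}

	canBindLow := caps.Get(capability.EFFECTIVE, capability.CAP_NET_BIND_SERVICE)
	fmt.Printf("effective CAP_NET_BIND_SERVICE: %v\n", canBindLow)
	fmt.Printf("all capability sets: %s\n", caps)
}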
642
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/capability/capability_linux.go
generated
vendored
Normal file
@@ -0,0 +1,642 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package capability
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var errUnknownVers = errors.New("unknown capability version")
|
||||
|
||||
const (
|
||||
linuxCapVer1 = 0x19980330
|
||||
linuxCapVer2 = 0x20071026
|
||||
linuxCapVer3 = 0x20080522
|
||||
)
|
||||
|
||||
var (
|
||||
capVers uint32
|
||||
capLastCap Cap
|
||||
)
|
||||
|
||||
func init() {
|
||||
var hdr capHeader
|
||||
capget(&hdr, nil)
|
||||
capVers = hdr.version
|
||||
|
||||
if initLastCap() == nil {
|
||||
CAP_LAST_CAP = capLastCap
|
||||
if capLastCap > 31 {
|
||||
capUpperMask = (uint32(1) << (uint(capLastCap) - 31)) - 1
|
||||
} else {
|
||||
capUpperMask = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func initLastCap() error {
|
||||
if capLastCap != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := os.Open("/proc/sys/kernel/cap_last_cap")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var b []byte = make([]byte, 11)
|
||||
_, err = f.Read(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Sscanf(string(b), "%d", &capLastCap)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mkStringCap(c Capabilities, which CapType) (ret string) {
|
||||
for i, first := Cap(0), true; i <= CAP_LAST_CAP; i++ {
|
||||
if !c.Get(which, i) {
|
||||
continue
|
||||
}
|
||||
if first {
|
||||
first = false
|
||||
} else {
|
||||
ret += ", "
|
||||
}
|
||||
ret += i.String()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func mkString(c Capabilities, max CapType) (ret string) {
|
||||
ret = "{"
|
||||
for i := CapType(1); i <= max; i <<= 1 {
|
||||
ret += " " + i.String() + "=\""
|
||||
if c.Empty(i) {
|
||||
ret += "empty"
|
||||
} else if c.Full(i) {
|
||||
ret += "full"
|
||||
} else {
|
||||
ret += c.StringCap(i)
|
||||
}
|
||||
ret += "\""
|
||||
}
|
||||
ret += " }"
|
||||
return
|
||||
}
|
||||
|
||||
func newPid(pid int) (c Capabilities, err error) {
|
||||
switch capVers {
|
||||
case linuxCapVer1:
|
||||
p := new(capsV1)
|
||||
p.hdr.version = capVers
|
||||
p.hdr.pid = pid
|
||||
c = p
|
||||
case linuxCapVer2, linuxCapVer3:
|
||||
p := new(capsV3)
|
||||
p.hdr.version = capVers
|
||||
p.hdr.pid = pid
|
||||
c = p
|
||||
default:
|
||||
err = errUnknownVers
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type capsV1 struct {
|
||||
hdr capHeader
|
||||
data capData
|
||||
}
|
||||
|
||||
func (c *capsV1) Get(which CapType, what Cap) bool {
|
||||
if what > 32 {
|
||||
return false
|
||||
}
|
||||
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
return (1<<uint(what))&c.data.effective != 0
|
||||
case PERMITTED:
|
||||
return (1<<uint(what))&c.data.permitted != 0
|
||||
case INHERITABLE:
|
||||
return (1<<uint(what))&c.data.inheritable != 0
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *capsV1) getData(which CapType) (ret uint32) {
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
ret = c.data.effective
|
||||
case PERMITTED:
|
||||
ret = c.data.permitted
|
||||
case INHERITABLE:
|
||||
ret = c.data.inheritable
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *capsV1) Empty(which CapType) bool {
|
||||
return c.getData(which) == 0
|
||||
}
|
||||
|
||||
func (c *capsV1) Full(which CapType) bool {
|
||||
return (c.getData(which) & 0x7fffffff) == 0x7fffffff
|
||||
}
|
||||
|
||||
func (c *capsV1) Set(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
if what > 32 {
|
||||
continue
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data.effective |= 1 << uint(what)
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data.permitted |= 1 << uint(what)
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data.inheritable |= 1 << uint(what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV1) Unset(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
if what > 32 {
|
||||
continue
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data.effective &= ^(1 << uint(what))
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data.permitted &= ^(1 << uint(what))
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data.inheritable &= ^(1 << uint(what))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV1) Fill(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data.effective = 0x7fffffff
|
||||
c.data.permitted = 0x7fffffff
|
||||
c.data.inheritable = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV1) Clear(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data.effective = 0
|
||||
c.data.permitted = 0
|
||||
c.data.inheritable = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV1) StringCap(which CapType) (ret string) {
|
||||
return mkStringCap(c, which)
|
||||
}
|
||||
|
||||
func (c *capsV1) String() (ret string) {
|
||||
return mkString(c, BOUNDING)
|
||||
}
|
||||
|
||||
func (c *capsV1) Load() (err error) {
|
||||
return capget(&c.hdr, &c.data)
|
||||
}
|
||||
|
||||
func (c *capsV1) Apply(kind CapType) error {
|
||||
if kind&CAPS == CAPS {
|
||||
return capset(&c.hdr, &c.data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type capsV3 struct {
|
||||
hdr capHeader
|
||||
data [2]capData
|
||||
bounds [2]uint32
|
||||
ambient [2]uint32
|
||||
}
|
||||
|
||||
func (c *capsV3) Get(which CapType, what Cap) bool {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
return (1<<uint(what))&c.data[i].effective != 0
|
||||
case PERMITTED:
|
||||
return (1<<uint(what))&c.data[i].permitted != 0
|
||||
case INHERITABLE:
|
||||
return (1<<uint(what))&c.data[i].inheritable != 0
|
||||
case BOUNDING:
|
||||
return (1<<uint(what))&c.bounds[i] != 0
|
||||
case AMBIENT:
|
||||
return (1<<uint(what))&c.ambient[i] != 0
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *capsV3) getData(which CapType, dest []uint32) {
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
dest[0] = c.data[0].effective
|
||||
dest[1] = c.data[1].effective
|
||||
case PERMITTED:
|
||||
dest[0] = c.data[0].permitted
|
||||
dest[1] = c.data[1].permitted
|
||||
case INHERITABLE:
|
||||
dest[0] = c.data[0].inheritable
|
||||
dest[1] = c.data[1].inheritable
|
||||
case BOUNDING:
|
||||
dest[0] = c.bounds[0]
|
||||
dest[1] = c.bounds[1]
|
||||
case AMBIENT:
|
||||
dest[0] = c.ambient[0]
|
||||
dest[1] = c.ambient[1]
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) Empty(which CapType) bool {
|
||||
var data [2]uint32
|
||||
c.getData(which, data[:])
|
||||
return data[0] == 0 && data[1] == 0
|
||||
}
|
||||
|
||||
func (c *capsV3) Full(which CapType) bool {
|
||||
var data [2]uint32
|
||||
c.getData(which, data[:])
|
||||
if (data[0] & 0xffffffff) != 0xffffffff {
|
||||
return false
|
||||
}
|
||||
return (data[1] & capUpperMask) == capUpperMask
|
||||
}
|
||||
|
||||
func (c *capsV3) Set(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data[i].effective |= 1 << uint(what)
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data[i].permitted |= 1 << uint(what)
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data[i].inheritable |= 1 << uint(what)
|
||||
}
|
||||
if which&BOUNDING != 0 {
|
||||
c.bounds[i] |= 1 << uint(what)
|
||||
}
|
||||
if which&AMBIENT != 0 {
|
||||
c.ambient[i] |= 1 << uint(what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) Unset(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data[i].effective &= ^(1 << uint(what))
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data[i].permitted &= ^(1 << uint(what))
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data[i].inheritable &= ^(1 << uint(what))
|
||||
}
|
||||
if which&BOUNDING != 0 {
|
||||
c.bounds[i] &= ^(1 << uint(what))
|
||||
}
|
||||
if which&AMBIENT != 0 {
|
||||
c.ambient[i] &= ^(1 << uint(what))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) Fill(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data[0].effective = 0xffffffff
|
||||
c.data[0].permitted = 0xffffffff
|
||||
c.data[0].inheritable = 0
|
||||
c.data[1].effective = 0xffffffff
|
||||
c.data[1].permitted = 0xffffffff
|
||||
c.data[1].inheritable = 0
|
||||
}
|
||||
|
||||
if kind&BOUNDS == BOUNDS {
|
||||
c.bounds[0] = 0xffffffff
|
||||
c.bounds[1] = 0xffffffff
|
||||
}
|
||||
if kind&AMBS == AMBS {
|
||||
c.ambient[0] = 0xffffffff
|
||||
c.ambient[1] = 0xffffffff
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) Clear(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data[0].effective = 0
|
||||
c.data[0].permitted = 0
|
||||
c.data[0].inheritable = 0
|
||||
c.data[1].effective = 0
|
||||
c.data[1].permitted = 0
|
||||
c.data[1].inheritable = 0
|
||||
}
|
||||
|
||||
if kind&BOUNDS == BOUNDS {
|
||||
c.bounds[0] = 0
|
||||
c.bounds[1] = 0
|
||||
}
|
||||
if kind&AMBS == AMBS {
|
||||
c.ambient[0] = 0
|
||||
c.ambient[1] = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsV3) StringCap(which CapType) (ret string) {
|
||||
return mkStringCap(c, which)
|
||||
}
|
||||
|
||||
func (c *capsV3) String() (ret string) {
|
||||
return mkString(c, BOUNDING)
|
||||
}
|
||||
|
||||
func (c *capsV3) Load() (err error) {
|
||||
err = capget(&c.hdr, &c.data[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var status_path string
|
||||
|
||||
if c.hdr.pid == 0 {
|
||||
status_path = fmt.Sprintf("/proc/self/status")
|
||||
} else {
|
||||
status_path = fmt.Sprintf("/proc/%d/status", c.hdr.pid)
|
||||
}
|
||||
|
||||
f, err := os.Open(status_path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
b := bufio.NewReader(f)
|
||||
for {
|
||||
line, e := b.ReadString('\n')
|
||||
if e != nil {
|
||||
if e != io.EOF {
|
||||
err = e
|
||||
}
|
||||
break
|
||||
}
|
||||
if strings.HasPrefix(line, "CapB") {
|
||||
fmt.Sscanf(line[4:], "nd: %08x%08x", &c.bounds[1], &c.bounds[0])
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(line, "CapA") {
|
||||
fmt.Sscanf(line[4:], "mb: %08x%08x", &c.ambient[1], &c.ambient[0])
|
||||
continue
|
||||
}
|
||||
}
|
||||
f.Close()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *capsV3) Apply(kind CapType) (err error) {
|
||||
if kind&BOUNDS == BOUNDS {
|
||||
var data [2]capData
|
||||
err = capget(&c.hdr, &data[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if (1<<uint(CAP_SETPCAP))&data[0].effective != 0 {
|
||||
for i := Cap(0); i <= CAP_LAST_CAP; i++ {
|
||||
if c.Get(BOUNDING, i) {
|
||||
continue
|
||||
}
|
||||
err = prctl(syscall.PR_CAPBSET_DROP, uintptr(i), 0, 0, 0)
|
||||
if err != nil {
|
||||
// Ignore EINVAL since the capability may not be supported in this system.
|
||||
if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if kind&CAPS == CAPS {
|
||||
err = capset(&c.hdr, &c.data[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if kind&AMBS == AMBS {
|
||||
for i := Cap(0); i <= CAP_LAST_CAP; i++ {
|
||||
action := pr_CAP_AMBIENT_LOWER
|
||||
if c.Get(AMBIENT, i) {
|
||||
action = pr_CAP_AMBIENT_RAISE
|
||||
}
|
||||
err := prctl(pr_CAP_AMBIENT, action, uintptr(i), 0, 0)
|
||||
// Ignore EINVAL as not supported on kernels before 4.3
|
||||
if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINVAL {
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func newFile(path string) (c Capabilities, err error) {
|
||||
c = &capsFile{path: path}
|
||||
return
|
||||
}
|
||||
|
||||
type capsFile struct {
|
||||
path string
|
||||
data vfscapData
|
||||
}
|
||||
|
||||
func (c *capsFile) Get(which CapType, what Cap) bool {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
if c.data.version == 1 {
|
||||
return false
|
||||
}
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
return (1<<uint(what))&c.data.effective[i] != 0
|
||||
case PERMITTED:
|
||||
return (1<<uint(what))&c.data.data[i].permitted != 0
|
||||
case INHERITABLE:
|
||||
return (1<<uint(what))&c.data.data[i].inheritable != 0
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *capsFile) getData(which CapType, dest []uint32) {
|
||||
switch which {
|
||||
case EFFECTIVE:
|
||||
dest[0] = c.data.effective[0]
|
||||
dest[1] = c.data.effective[1]
|
||||
case PERMITTED:
|
||||
dest[0] = c.data.data[0].permitted
|
||||
dest[1] = c.data.data[1].permitted
|
||||
case INHERITABLE:
|
||||
dest[0] = c.data.data[0].inheritable
|
||||
dest[1] = c.data.data[1].inheritable
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) Empty(which CapType) bool {
|
||||
var data [2]uint32
|
||||
c.getData(which, data[:])
|
||||
return data[0] == 0 && data[1] == 0
|
||||
}
|
||||
|
||||
func (c *capsFile) Full(which CapType) bool {
|
||||
var data [2]uint32
|
||||
c.getData(which, data[:])
|
||||
if c.data.version == 0 {
|
||||
return (data[0] & 0x7fffffff) == 0x7fffffff
|
||||
}
|
||||
if (data[0] & 0xffffffff) != 0xffffffff {
|
||||
return false
|
||||
}
|
||||
return (data[1] & capUpperMask) == capUpperMask
|
||||
}
|
||||
|
||||
func (c *capsFile) Set(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
if c.data.version == 1 {
|
||||
continue
|
||||
}
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data.effective[i] |= 1 << uint(what)
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data.data[i].permitted |= 1 << uint(what)
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data.data[i].inheritable |= 1 << uint(what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) Unset(which CapType, caps ...Cap) {
|
||||
for _, what := range caps {
|
||||
var i uint
|
||||
if what > 31 {
|
||||
if c.data.version == 1 {
|
||||
continue
|
||||
}
|
||||
i = uint(what) >> 5
|
||||
what %= 32
|
||||
}
|
||||
|
||||
if which&EFFECTIVE != 0 {
|
||||
c.data.effective[i] &= ^(1 << uint(what))
|
||||
}
|
||||
if which&PERMITTED != 0 {
|
||||
c.data.data[i].permitted &= ^(1 << uint(what))
|
||||
}
|
||||
if which&INHERITABLE != 0 {
|
||||
c.data.data[i].inheritable &= ^(1 << uint(what))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) Fill(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data.effective[0] = 0xffffffff
|
||||
c.data.data[0].permitted = 0xffffffff
|
||||
c.data.data[0].inheritable = 0
|
||||
if c.data.version == 2 {
|
||||
c.data.effective[1] = 0xffffffff
|
||||
c.data.data[1].permitted = 0xffffffff
|
||||
c.data.data[1].inheritable = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) Clear(kind CapType) {
|
||||
if kind&CAPS == CAPS {
|
||||
c.data.effective[0] = 0
|
||||
c.data.data[0].permitted = 0
|
||||
c.data.data[0].inheritable = 0
|
||||
if c.data.version == 2 {
|
||||
c.data.effective[1] = 0
|
||||
c.data.data[1].permitted = 0
|
||||
c.data.data[1].inheritable = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capsFile) StringCap(which CapType) (ret string) {
|
||||
return mkStringCap(c, which)
|
||||
}
|
||||
|
||||
func (c *capsFile) String() (ret string) {
|
||||
return mkString(c, INHERITABLE)
|
||||
}
|
||||
|
||||
func (c *capsFile) Load() (err error) {
|
||||
return getVfsCap(c.path, &c.data)
|
||||
}
|
||||
|
||||
func (c *capsFile) Apply(kind CapType) (err error) {
|
||||
if kind&CAPS == CAPS {
|
||||
return setVfsCap(c.path, &c.data)
|
||||
}
|
||||
return
|
||||
}
|
||||
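Note: capsV3 above stores each capability set as two 32-bit words (word index cap>>5, bit cap%32), and the /proc status parser fills the upper word (index 1) before the lower word (index 0). A tiny arithmetic-only sketch of that mapping:

package main

import "fmt"

// wordAndBit mirrors the indexing used by capsV3: capabilities 0-31 live in
// word 0, capabilities 32-63 in word 1.
func wordAndBit(c uint) (word uint, mask uint32) {
	word = c >> 5
	return word, uint32(1) << (c % 32)
}

func main() {
	for _, c := range []uint{0, 31, 32, 37} { // CAP_CHOWN, CAP_SETFCAP, CAP_MAC_OVERRIDE, CAP_AUDIT_READ
		w, m := wordAndBit(c)
		fmt.Printf("cap %2d -> word %d, bit mask %#010x\n", c, w, m)
	}
}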
19
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/capability/capability_noop.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// +build !linux
|
||||
|
||||
package capability
|
||||
|
||||
import "errors"
|
||||
|
||||
func newPid(pid int) (Capabilities, error) {
|
||||
return nil, errors.New("not supported")
|
||||
}
|
||||
|
||||
func newFile(path string) (Capabilities, error) {
|
||||
return nil, errors.New("not supported")
|
||||
}
|
||||
83
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/capability/capability_test.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package capability
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestState(t *testing.T) {
|
||||
testEmpty := func(name string, c Capabilities, whats CapType) {
|
||||
for i := CapType(1); i <= BOUNDING; i <<= 1 {
|
||||
if (i&whats) != 0 && !c.Empty(i) {
|
||||
t.Errorf(name+": capabilities set %q wasn't empty", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
testFull := func(name string, c Capabilities, whats CapType) {
|
||||
for i := CapType(1); i <= BOUNDING; i <<= 1 {
|
||||
if (i&whats) != 0 && !c.Full(i) {
|
||||
t.Errorf(name+": capabilities set %q wasn't full", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
testPartial := func(name string, c Capabilities, whats CapType) {
|
||||
for i := CapType(1); i <= BOUNDING; i <<= 1 {
|
||||
if (i&whats) != 0 && (c.Empty(i) || c.Full(i)) {
|
||||
t.Errorf(name+": capabilities set %q wasn't partial", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
testGet := func(name string, c Capabilities, whats CapType, max Cap) {
|
||||
for i := CapType(1); i <= BOUNDING; i <<= 1 {
|
||||
if (i & whats) == 0 {
|
||||
continue
|
||||
}
|
||||
for j := Cap(0); j <= max; j++ {
|
||||
if !c.Get(i, j) {
|
||||
t.Errorf(name+": capability %q wasn't found on %q", j, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
capf := new(capsFile)
|
||||
capf.data.version = 2
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
c Capabilities
|
||||
sets CapType
|
||||
max Cap
|
||||
}{
|
||||
{"v1", new(capsV1), EFFECTIVE | PERMITTED, CAP_AUDIT_CONTROL},
|
||||
{"v3", new(capsV3), EFFECTIVE | PERMITTED | BOUNDING, CAP_LAST_CAP},
|
||||
{"file_v1", new(capsFile), EFFECTIVE | PERMITTED, CAP_AUDIT_CONTROL},
|
||||
{"file_v2", capf, EFFECTIVE | PERMITTED, CAP_LAST_CAP},
|
||||
} {
|
||||
testEmpty(tc.name, tc.c, tc.sets)
|
||||
tc.c.Fill(CAPS | BOUNDS)
|
||||
testFull(tc.name, tc.c, tc.sets)
|
||||
testGet(tc.name, tc.c, tc.sets, tc.max)
|
||||
tc.c.Clear(CAPS | BOUNDS)
|
||||
testEmpty(tc.name, tc.c, tc.sets)
|
||||
for i := CapType(1); i <= BOUNDING; i <<= 1 {
|
||||
for j := Cap(0); j <= CAP_LAST_CAP; j++ {
|
||||
tc.c.Set(i, j)
|
||||
}
|
||||
}
|
||||
testFull(tc.name, tc.c, tc.sets)
|
||||
testGet(tc.name, tc.c, tc.sets, tc.max)
|
||||
for i := CapType(1); i <= BOUNDING; i <<= 1 {
|
||||
for j := Cap(0); j <= CAP_LAST_CAP; j++ {
|
||||
tc.c.Unset(i, j)
|
||||
}
|
||||
}
|
||||
testEmpty(tc.name, tc.c, tc.sets)
|
||||
tc.c.Set(PERMITTED, CAP_CHOWN)
|
||||
testPartial(tc.name, tc.c, PERMITTED)
|
||||
tc.c.Clear(CAPS | BOUNDS)
|
||||
testEmpty(tc.name, tc.c, tc.sets)
|
||||
}
|
||||
}
|
||||
268
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/capability/enum.go
generated
vendored
Normal file
@@ -0,0 +1,268 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package capability
|
||||
|
||||
type CapType uint
|
||||
|
||||
func (c CapType) String() string {
|
||||
switch c {
|
||||
case EFFECTIVE:
|
||||
return "effective"
|
||||
case PERMITTED:
|
||||
return "permitted"
|
||||
case INHERITABLE:
|
||||
return "inheritable"
|
||||
case BOUNDING:
|
||||
return "bounding"
|
||||
case CAPS:
|
||||
return "caps"
|
||||
case AMBIENT:
|
||||
return "ambient"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
const (
|
||||
EFFECTIVE CapType = 1 << iota
|
||||
PERMITTED
|
||||
INHERITABLE
|
||||
BOUNDING
|
||||
AMBIENT
|
||||
|
||||
CAPS = EFFECTIVE | PERMITTED | INHERITABLE
|
||||
BOUNDS = BOUNDING
|
||||
AMBS = AMBIENT
|
||||
)
|
||||
|
||||
//go:generate go run enumgen/gen.go
|
||||
type Cap int
|
||||
|
||||
// POSIX-draft defined capabilities.
|
||||
const (
|
||||
// In a system with the [_POSIX_CHOWN_RESTRICTED] option defined, this
|
||||
// overrides the restriction of changing file ownership and group
|
||||
// ownership.
|
||||
CAP_CHOWN = Cap(0)
|
||||
|
||||
// Override all DAC access, including ACL execute access if
|
||||
// [_POSIX_ACL] is defined. Excluding DAC access covered by
|
||||
// CAP_LINUX_IMMUTABLE.
|
||||
CAP_DAC_OVERRIDE = Cap(1)
|
||||
|
||||
// Overrides all DAC restrictions regarding read and search on files
|
||||
// and directories, including ACL restrictions if [_POSIX_ACL] is
|
||||
// defined. Excluding DAC access covered by CAP_LINUX_IMMUTABLE.
|
||||
CAP_DAC_READ_SEARCH = Cap(2)
|
||||
|
||||
// Overrides all restrictions about allowed operations on files, where
|
||||
// file owner ID must be equal to the user ID, except where CAP_FSETID
|
||||
// is applicable. It doesn't override MAC and DAC restrictions.
|
||||
CAP_FOWNER = Cap(3)
|
||||
|
||||
// Overrides the following restrictions that the effective user ID
|
||||
// shall match the file owner ID when setting the S_ISUID and S_ISGID
|
||||
// bits on that file; that the effective group ID (or one of the
|
||||
// supplementary group IDs) shall match the file owner ID when setting
|
||||
// the S_ISGID bit on that file; that the S_ISUID and S_ISGID bits are
|
||||
// cleared on successful return from chown(2) (not implemented).
|
||||
CAP_FSETID = Cap(4)
|
||||
|
||||
// Overrides the restriction that the real or effective user ID of a
|
||||
// process sending a signal must match the real or effective user ID
|
||||
// of the process receiving the signal.
|
||||
CAP_KILL = Cap(5)
|
||||
|
||||
// Allows setgid(2) manipulation
|
||||
// Allows setgroups(2)
|
||||
// Allows forged gids on socket credentials passing.
|
||||
CAP_SETGID = Cap(6)
|
||||
|
||||
// Allows set*uid(2) manipulation (including fsuid).
|
||||
// Allows forged pids on socket credentials passing.
|
||||
CAP_SETUID = Cap(7)
|
||||
|
||||
// Linux-specific capabilities
|
||||
|
||||
// Without VFS support for capabilities:
|
||||
// Transfer any capability in your permitted set to any pid,
|
||||
// remove any capability in your permitted set from any pid
|
||||
// With VFS support for capabilities (neither of above, but)
|
||||
// Add any capability from current's capability bounding set
|
||||
// to the current process' inheritable set
|
||||
// Allow taking bits out of capability bounding set
|
||||
// Allow modification of the securebits for a process
|
||||
CAP_SETPCAP = Cap(8)
|
||||
|
||||
// Allow modification of S_IMMUTABLE and S_APPEND file attributes
|
||||
CAP_LINUX_IMMUTABLE = Cap(9)
|
||||
|
||||
// Allows binding to TCP/UDP sockets below 1024
|
||||
// Allows binding to ATM VCIs below 32
|
||||
CAP_NET_BIND_SERVICE = Cap(10)
|
||||
|
||||
// Allow broadcasting, listen to multicast
|
||||
CAP_NET_BROADCAST = Cap(11)
|
||||
|
||||
// Allow interface configuration
|
||||
// Allow administration of IP firewall, masquerading and accounting
|
||||
// Allow setting debug option on sockets
|
||||
// Allow modification of routing tables
|
||||
// Allow setting arbitrary process / process group ownership on
|
||||
// sockets
|
||||
// Allow binding to any address for transparent proxying (also via NET_RAW)
|
||||
// Allow setting TOS (type of service)
|
||||
// Allow setting promiscuous mode
|
||||
// Allow clearing driver statistics
|
||||
// Allow multicasting
|
||||
// Allow read/write of device-specific registers
|
||||
// Allow activation of ATM control sockets
|
||||
CAP_NET_ADMIN = Cap(12)
|
||||
|
||||
// Allow use of RAW sockets
|
||||
// Allow use of PACKET sockets
|
||||
// Allow binding to any address for transparent proxying (also via NET_ADMIN)
|
||||
CAP_NET_RAW = Cap(13)
|
||||
|
||||
// Allow locking of shared memory segments
|
||||
// Allow mlock and mlockall (which doesn't really have anything to do
|
||||
// with IPC)
|
||||
CAP_IPC_LOCK = Cap(14)
|
||||
|
||||
// Override IPC ownership checks
|
||||
CAP_IPC_OWNER = Cap(15)
|
||||
|
||||
// Insert and remove kernel modules - modify kernel without limit
|
||||
CAP_SYS_MODULE = Cap(16)
|
||||
|
||||
// Allow ioperm/iopl access
|
||||
// Allow sending USB messages to any device via /proc/bus/usb
|
||||
CAP_SYS_RAWIO = Cap(17)
|
||||
|
||||
// Allow use of chroot()
|
||||
CAP_SYS_CHROOT = Cap(18)
|
||||
|
||||
// Allow ptrace() of any process
|
||||
CAP_SYS_PTRACE = Cap(19)
|
||||
|
||||
// Allow configuration of process accounting
|
||||
CAP_SYS_PACCT = Cap(20)
|
||||
|
||||
// Allow configuration of the secure attention key
|
||||
// Allow administration of the random device
|
||||
// Allow examination and configuration of disk quotas
|
||||
// Allow setting the domainname
|
||||
// Allow setting the hostname
|
||||
// Allow calling bdflush()
|
||||
// Allow mount() and umount(), setting up new smb connection
|
||||
// Allow some autofs root ioctls
|
||||
// Allow nfsservctl
|
||||
// Allow VM86_REQUEST_IRQ
|
||||
// Allow to read/write pci config on alpha
|
||||
// Allow irix_prctl on mips (setstacksize)
|
||||
// Allow flushing all cache on m68k (sys_cacheflush)
|
||||
// Allow removing semaphores
|
||||
// Used instead of CAP_CHOWN to "chown" IPC message queues, semaphores
|
||||
// and shared memory
|
||||
// Allow locking/unlocking of shared memory segment
|
||||
// Allow turning swap on/off
|
||||
// Allow forged pids on socket credentials passing
|
||||
// Allow setting readahead and flushing buffers on block devices
|
||||
// Allow setting geometry in floppy driver
|
||||
// Allow turning DMA on/off in xd driver
|
||||
// Allow administration of md devices (mostly the above, but some
|
||||
// extra ioctls)
|
||||
// Allow tuning the ide driver
|
||||
// Allow access to the nvram device
|
||||
// Allow administration of apm_bios, serial and bttv (TV) device
|
||||
// Allow manufacturer commands in isdn CAPI support driver
|
||||
// Allow reading non-standardized portions of pci configuration space
|
||||
// Allow DDI debug ioctl on sbpcd driver
|
||||
// Allow setting up serial ports
|
||||
// Allow sending raw qic-117 commands
|
||||
// Allow enabling/disabling tagged queuing on SCSI controllers and sending
|
||||
// arbitrary SCSI commands
|
||||
// Allow setting encryption key on loopback filesystem
|
||||
// Allow setting zone reclaim policy
|
||||
CAP_SYS_ADMIN = Cap(21)
|
||||
|
||||
// Allow use of reboot()
|
||||
CAP_SYS_BOOT = Cap(22)
|
||||
|
||||
// Allow raising priority and setting priority on other (different
|
||||
// UID) processes
|
||||
// Allow use of FIFO and round-robin (realtime) scheduling on own
|
||||
// processes and setting the scheduling algorithm used by another
|
||||
// process.
|
||||
// Allow setting cpu affinity on other processes
|
||||
CAP_SYS_NICE = Cap(23)
|
||||
|
||||
// Override resource limits. Set resource limits.
|
||||
// Override quota limits.
|
||||
// Override reserved space on ext2 filesystem
|
||||
// Modify data journaling mode on ext3 filesystem (uses journaling
|
||||
// resources)
|
||||
// NOTE: ext2 honors fsuid when checking for resource overrides, so
|
||||
// you can override using fsuid too
|
||||
// Override size restrictions on IPC message queues
|
||||
// Allow more than 64hz interrupts from the real-time clock
|
||||
// Override max number of consoles on console allocation
|
||||
// Override max number of keymaps
|
||||
CAP_SYS_RESOURCE = Cap(24)
|
||||
|
||||
// Allow manipulation of system clock
|
||||
// Allow irix_stime on mips
|
||||
// Allow setting the real-time clock
|
||||
CAP_SYS_TIME = Cap(25)
|
||||
|
||||
// Allow configuration of tty devices
|
||||
// Allow vhangup() of tty
|
||||
CAP_SYS_TTY_CONFIG = Cap(26)
|
||||
|
||||
// Allow the privileged aspects of mknod()
|
||||
CAP_MKNOD = Cap(27)
|
||||
|
||||
// Allow taking of leases on files
|
||||
CAP_LEASE = Cap(28)
|
||||
|
||||
CAP_AUDIT_WRITE = Cap(29)
|
||||
CAP_AUDIT_CONTROL = Cap(30)
|
||||
CAP_SETFCAP = Cap(31)
|
||||
|
||||
// Override MAC access.
|
||||
// The base kernel enforces no MAC policy.
|
||||
// An LSM may enforce a MAC policy, and if it does and it chooses
|
||||
// to implement capability based overrides of that policy, this is
|
||||
// the capability it should use to do so.
|
||||
CAP_MAC_OVERRIDE = Cap(32)
|
||||
|
||||
// Allow MAC configuration or state changes.
|
||||
// The base kernel requires no MAC configuration.
|
||||
// An LSM may enforce a MAC policy, and if it does and it chooses
|
||||
// to implement capability based checks on modifications to that
|
||||
// policy or the data required to maintain it, this is the
|
||||
// capability it should use to do so.
|
||||
CAP_MAC_ADMIN = Cap(33)
|
||||
|
||||
// Allow configuring the kernel's syslog (printk behaviour)
|
||||
CAP_SYSLOG = Cap(34)
|
||||
|
||||
// Allow triggering something that will wake the system
|
||||
CAP_WAKE_ALARM = Cap(35)
|
||||
|
||||
// Allow preventing system suspends
|
||||
CAP_BLOCK_SUSPEND = Cap(36)
|
||||
|
||||
// Allow reading audit messages from the kernel
|
||||
CAP_AUDIT_READ = Cap(37)
|
||||
)
|
||||
|
||||
var (
|
||||
// Highest valid capability of the running kernel.
|
||||
CAP_LAST_CAP = Cap(63)
|
||||
|
||||
capUpperMask = ^uint32(0)
|
||||
)
|
||||
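Note: the CapType constants above are single bits, so they can be OR-ed when calling Set/Unset/Fill, exactly as the CAPS alias does. A hedged, Linux-only sketch (Apply would still be needed for the change to reach the kernel):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	caps, err := capability.NewPid2(0)
	if err != nil {
		log.Fatal(err)
	}
	if err := caps.Load(); err != nil {
		log.Fatal(err)
	}

	// Stage CAP_KILL in both the effective and permitted sets in one call;
	// EFFECTIVE|PERMITTED is just a bitwise OR of the CapType flags.
	caps.Set(capability.EFFECTIVE|capability.PERMITTED, capability.CAP_KILL)

	fmt.Println("staged effective set:", caps.StringCap(capability.EFFECTIVE))
	// caps.Apply(capability.CAPS) would be required to push this to the kernel.
}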
129
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/capability/enum_gen.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
|
||||
// generated file; DO NOT EDIT - use go generate in directory with source
|
||||
|
||||
package capability
|
||||
|
||||
func (c Cap) String() string {
|
||||
switch c {
|
||||
case CAP_CHOWN:
|
||||
return "chown"
|
||||
case CAP_DAC_OVERRIDE:
|
||||
return "dac_override"
|
||||
case CAP_DAC_READ_SEARCH:
|
||||
return "dac_read_search"
|
||||
case CAP_FOWNER:
|
||||
return "fowner"
|
||||
case CAP_FSETID:
|
||||
return "fsetid"
|
||||
case CAP_KILL:
|
||||
return "kill"
|
||||
case CAP_SETGID:
|
||||
return "setgid"
|
||||
case CAP_SETUID:
|
||||
return "setuid"
|
||||
case CAP_SETPCAP:
|
||||
return "setpcap"
|
||||
case CAP_LINUX_IMMUTABLE:
|
||||
return "linux_immutable"
|
||||
case CAP_NET_BIND_SERVICE:
|
||||
return "net_bind_service"
|
||||
case CAP_NET_BROADCAST:
|
||||
return "net_broadcast"
|
||||
case CAP_NET_ADMIN:
|
||||
return "net_admin"
|
||||
case CAP_NET_RAW:
|
||||
return "net_raw"
|
||||
case CAP_IPC_LOCK:
|
||||
return "ipc_lock"
|
||||
case CAP_IPC_OWNER:
|
||||
return "ipc_owner"
|
||||
case CAP_SYS_MODULE:
|
||||
return "sys_module"
|
||||
case CAP_SYS_RAWIO:
|
||||
return "sys_rawio"
|
||||
case CAP_SYS_CHROOT:
|
||||
return "sys_chroot"
|
||||
case CAP_SYS_PTRACE:
|
||||
return "sys_ptrace"
|
||||
case CAP_SYS_PACCT:
|
||||
return "sys_pacct"
|
||||
case CAP_SYS_ADMIN:
|
||||
return "sys_admin"
|
||||
case CAP_SYS_BOOT:
|
||||
return "sys_boot"
|
||||
case CAP_SYS_NICE:
|
||||
return "sys_nice"
|
||||
case CAP_SYS_RESOURCE:
|
||||
return "sys_resource"
|
||||
case CAP_SYS_TIME:
|
||||
return "sys_time"
|
||||
case CAP_SYS_TTY_CONFIG:
|
||||
return "sys_tty_config"
|
||||
case CAP_MKNOD:
|
||||
return "mknod"
|
||||
case CAP_LEASE:
|
||||
return "lease"
|
||||
case CAP_AUDIT_WRITE:
|
||||
return "audit_write"
|
||||
case CAP_AUDIT_CONTROL:
|
||||
return "audit_control"
|
||||
case CAP_SETFCAP:
|
||||
return "setfcap"
|
||||
case CAP_MAC_OVERRIDE:
|
||||
return "mac_override"
|
||||
case CAP_MAC_ADMIN:
|
||||
return "mac_admin"
|
||||
case CAP_SYSLOG:
|
||||
return "syslog"
|
||||
case CAP_WAKE_ALARM:
|
||||
return "wake_alarm"
|
||||
case CAP_BLOCK_SUSPEND:
|
||||
return "block_suspend"
|
||||
case CAP_AUDIT_READ:
|
||||
return "audit_read"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// List returns list of all supported capabilities
|
||||
func List() []Cap {
|
||||
return []Cap{
|
||||
CAP_CHOWN,
|
||||
CAP_DAC_OVERRIDE,
|
||||
CAP_DAC_READ_SEARCH,
|
||||
CAP_FOWNER,
|
||||
CAP_FSETID,
|
||||
CAP_KILL,
|
||||
CAP_SETGID,
|
||||
CAP_SETUID,
|
||||
CAP_SETPCAP,
|
||||
CAP_LINUX_IMMUTABLE,
|
||||
CAP_NET_BIND_SERVICE,
|
||||
CAP_NET_BROADCAST,
|
||||
CAP_NET_ADMIN,
|
||||
CAP_NET_RAW,
|
||||
CAP_IPC_LOCK,
|
||||
CAP_IPC_OWNER,
|
||||
CAP_SYS_MODULE,
|
||||
CAP_SYS_RAWIO,
|
||||
CAP_SYS_CHROOT,
|
||||
CAP_SYS_PTRACE,
|
||||
CAP_SYS_PACCT,
|
||||
CAP_SYS_ADMIN,
|
||||
CAP_SYS_BOOT,
|
||||
CAP_SYS_NICE,
|
||||
CAP_SYS_RESOURCE,
|
||||
CAP_SYS_TIME,
|
||||
CAP_SYS_TTY_CONFIG,
|
||||
CAP_MKNOD,
|
||||
CAP_LEASE,
|
||||
CAP_AUDIT_WRITE,
|
||||
CAP_AUDIT_CONTROL,
|
||||
CAP_SETFCAP,
|
||||
CAP_MAC_OVERRIDE,
|
||||
CAP_MAC_ADMIN,
|
||||
CAP_SYSLOG,
|
||||
CAP_WAKE_ALARM,
|
||||
CAP_BLOCK_SUSPEND,
|
||||
CAP_AUDIT_READ,
|
||||
}
|
||||
}
|
||||
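Note: List above enumerates every capability the package knows about, while CAP_LAST_CAP is detected from the running kernel at init. A short sketch that prints only the capabilities the kernel actually supports:

package main

import (
	"fmt"

	"github.com/syndtr/gocapability/capability"
)

func main() {
	for _, c := range capability.List() {
		// Caps newer than the running kernel's cap_last_cap are skipped.
		if c > capability.CAP_LAST_CAP {
			continue
		}
		fmt.Println(c) // Cap.String() yields names like "chown", "sys_admin"
	}
}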
92
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/capability/enumgen/gen.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const fileName = "enum.go"
|
||||
const genName = "enum_gen.go"
|
||||
|
||||
type generator struct {
|
||||
buf bytes.Buffer
|
||||
caps []string
|
||||
}
|
||||
|
||||
func (g *generator) writeHeader() {
|
||||
g.buf.WriteString("// generated file; DO NOT EDIT - use go generate in directory with source\n")
|
||||
g.buf.WriteString("\n")
|
||||
g.buf.WriteString("package capability")
|
||||
}
|
||||
|
||||
func (g *generator) writeStringFunc() {
|
||||
g.buf.WriteString("\n")
|
||||
g.buf.WriteString("func (c Cap) String() string {\n")
|
||||
g.buf.WriteString("switch c {\n")
|
||||
for _, cap := range g.caps {
|
||||
fmt.Fprintf(&g.buf, "case %s:\n", cap)
|
||||
fmt.Fprintf(&g.buf, "return \"%s\"\n", strings.ToLower(cap[4:]))
|
||||
}
|
||||
g.buf.WriteString("}\n")
|
||||
g.buf.WriteString("return \"unknown\"\n")
|
||||
g.buf.WriteString("}\n")
|
||||
}
|
||||
|
||||
func (g *generator) writeListFunc() {
|
||||
g.buf.WriteString("\n")
|
||||
g.buf.WriteString("// List returns list of all supported capabilities\n")
|
||||
g.buf.WriteString("func List() []Cap {\n")
|
||||
g.buf.WriteString("return []Cap{\n")
|
||||
for _, cap := range g.caps {
|
||||
fmt.Fprintf(&g.buf, "%s,\n", cap)
|
||||
}
|
||||
g.buf.WriteString("}\n")
|
||||
g.buf.WriteString("}\n")
|
||||
}
|
||||
|
||||
func main() {
|
||||
fs := token.NewFileSet()
|
||||
parsedFile, err := parser.ParseFile(fs, fileName, nil, 0)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
var caps []string
|
||||
for _, decl := range parsedFile.Decls {
|
||||
decl, ok := decl.(*ast.GenDecl)
|
||||
if !ok || decl.Tok != token.CONST {
|
||||
continue
|
||||
}
|
||||
for _, spec := range decl.Specs {
|
||||
vspec := spec.(*ast.ValueSpec)
|
||||
name := vspec.Names[0].Name
|
||||
if strings.HasPrefix(name, "CAP_") {
|
||||
caps = append(caps, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
g := &generator{caps: caps}
|
||||
g.writeHeader()
|
||||
g.writeStringFunc()
|
||||
g.writeListFunc()
|
||||
src, err := format.Source(g.buf.Bytes())
|
||||
if err != nil {
|
||||
fmt.Println("generated invalid Go code")
|
||||
fmt.Println(g.buf.String())
|
||||
log.Fatal(err)
|
||||
}
|
||||
fi, err := os.Stat(fileName)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(genName, src, fi.Mode().Perm()); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
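gen.go reads the CAP_* constants out of enum.go and rewrites enum_gen.go with matching String and List implementations, which is where the generated code shown earlier in this diff comes from. A hedged sketch of how the generator is typically wired up (the exact directive lives in the upstream enum.go and may differ):

// In capability/enum.go (illustrative placement of the directive):
//go:generate go run enumgen/gen.go

// Regenerate from the capability package directory:
//   go generate
// This re-parses enum.go and overwrites enum_gen.go in place.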
|
||||
154
vendor/github.com/genuinetools/amicontained/vendor/github.com/syndtr/gocapability/capability/syscall_linux.go
generated
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package capability
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type capHeader struct {
|
||||
version uint32
|
||||
pid int
|
||||
}
|
||||
|
||||
type capData struct {
|
||||
effective uint32
|
||||
permitted uint32
|
||||
inheritable uint32
|
||||
}
|
||||
|
||||
func capget(hdr *capHeader, data *capData) (err error) {
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func capset(hdr *capHeader, data *capData) (err error) {
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// not yet in syscall
|
||||
const (
|
||||
pr_CAP_AMBIENT = 47
|
||||
pr_CAP_AMBIENT_IS_SET = uintptr(1)
|
||||
pr_CAP_AMBIENT_RAISE = uintptr(2)
|
||||
pr_CAP_AMBIENT_LOWER = uintptr(3)
|
||||
pr_CAP_AMBIENT_CLEAR_ALL = uintptr(4)
|
||||
)
|
||||
|
||||
func prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) {
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
vfsXattrName = "security.capability"
|
||||
|
||||
vfsCapVerMask = 0xff000000
|
||||
vfsCapVer1 = 0x01000000
|
||||
vfsCapVer2 = 0x02000000
|
||||
|
||||
vfsCapFlagMask = ^vfsCapVerMask
|
||||
vfsCapFlageffective = 0x000001
|
||||
|
||||
vfscapDataSizeV1 = 4 * (1 + 2*1)
|
||||
vfscapDataSizeV2 = 4 * (1 + 2*2)
|
||||
)
|
||||
|
||||
type vfscapData struct {
|
||||
magic uint32
|
||||
data [2]struct {
|
||||
permitted uint32
|
||||
inheritable uint32
|
||||
}
|
||||
effective [2]uint32
|
||||
version int8
|
||||
}
|
||||
|
||||
var (
|
||||
_vfsXattrName *byte
|
||||
)
|
||||
|
||||
func init() {
|
||||
_vfsXattrName, _ = syscall.BytePtrFromString(vfsXattrName)
|
||||
}
|
||||
|
||||
func getVfsCap(path string, dest *vfscapData) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(dest)), vfscapDataSizeV2, 0, 0)
|
||||
if e1 != 0 {
|
||||
if e1 == syscall.ENODATA {
|
||||
dest.version = 2
|
||||
return
|
||||
}
|
||||
err = e1
|
||||
}
|
||||
switch dest.magic & vfsCapVerMask {
|
||||
case vfsCapVer1:
|
||||
dest.version = 1
|
||||
if r0 != vfscapDataSizeV1 {
|
||||
return syscall.EINVAL
|
||||
}
|
||||
dest.data[1].permitted = 0
|
||||
dest.data[1].inheritable = 0
|
||||
case vfsCapVer2:
|
||||
dest.version = 2
|
||||
if r0 != vfscapDataSizeV2 {
|
||||
return syscall.EINVAL
|
||||
}
|
||||
default:
|
||||
return syscall.EINVAL
|
||||
}
|
||||
if dest.magic&vfsCapFlageffective != 0 {
|
||||
dest.effective[0] = dest.data[0].permitted | dest.data[0].inheritable
|
||||
dest.effective[1] = dest.data[1].permitted | dest.data[1].inheritable
|
||||
} else {
|
||||
dest.effective[0] = 0
|
||||
dest.effective[1] = 0
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func setVfsCap(path string, data *vfscapData) (err error) {
|
||||
var _p0 *byte
|
||||
_p0, err = syscall.BytePtrFromString(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var size uintptr
|
||||
if data.version == 1 {
|
||||
data.magic = vfsCapVer1
|
||||
size = vfscapDataSizeV1
|
||||
} else if data.version == 2 {
|
||||
data.magic = vfsCapVer2
|
||||
if data.effective[0] != 0 || data.effective[1] != 0 {
|
||||
data.magic |= vfsCapFlageffective
|
||||
}
|
||||
size = vfscapDataSizeV2
|
||||
} else {
|
||||
return syscall.EINVAL
|
||||
}
|
||||
_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_vfsXattrName)), uintptr(unsafe.Pointer(data)), size, 0, 0)
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
10
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/.gitattributes
generated
vendored
Normal file
@@ -0,0 +1,10 @@
# Treat all files in this repo as binary, with no git magic updating
# line endings. Windows users contributing to Go will need to use a
# modern version of git and editors capable of LF line endings.
#
# We'll prevent accidental CRLF line endings from entering the repo
# via the git-review gofmt checks.
#
# See golang.org/issue/9281

* -text
2
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,2 @@
# Add no patterns to .hgignore except for files generated by the build.
last-change
3
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.
31
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,31 @@
# Contributing to Go

Go is an open source project.

It is the work of hundreds of contributors. We appreciate your help!


## Filing issues

When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.

## Contributing code

Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.

**We do not accept GitHub pull requests**
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).

Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
3
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.
27
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
21
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/README.md
generated
vendored
Normal file
@@ -0,0 +1,21 @@
# Go Cryptography

This repository holds supplementary Go cryptography libraries.

## Download/Install

The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You
can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`.

## Report Issues / Send Patches

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.

The main issue tracker for the crypto repository is located at
https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the
subject line, so it is easy to find.

Note that contributions to the cryptography package receive additional scrutiny
due to their sensitive nature. Patches may take longer than normal to receive
feedback.
1065
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/acme.go
generated
vendored
Normal file
File diff suppressed because it is too large
1380
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/acme_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
962
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/autocert/autocert.go
generated
vendored
Normal file
@@ -0,0 +1,962 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package autocert provides automatic access to certificates from Let's Encrypt
|
||||
// and any other ACME-based CA.
|
||||
//
|
||||
// This package is a work in progress and makes no API stability promises.
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
mathrand "math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/acme"
|
||||
)
|
||||
|
||||
// createCertRetryAfter is how much time to wait before removing a failed state
|
||||
// entry due to an unsuccessful createCert call.
|
||||
// This is a variable instead of a const for testing.
|
||||
// TODO: Consider making it configurable or an exp backoff?
|
||||
var createCertRetryAfter = time.Minute
|
||||
|
||||
// pseudoRand is safe for concurrent use.
|
||||
var pseudoRand *lockedMathRand
|
||||
|
||||
func init() {
|
||||
src := mathrand.NewSource(timeNow().UnixNano())
|
||||
pseudoRand = &lockedMathRand{rnd: mathrand.New(src)}
|
||||
}
|
||||
|
||||
// AcceptTOS is a Manager.Prompt function that always returns true to
|
||||
// indicate acceptance of the CA's Terms of Service during account
|
||||
// registration.
|
||||
func AcceptTOS(tosURL string) bool { return true }
|
||||
|
||||
// HostPolicy specifies which host names the Manager is allowed to respond to.
|
||||
// It returns a non-nil error if the host should be rejected.
|
||||
// The returned error is accessible via tls.Conn.Handshake and its callers.
|
||||
// See Manager's HostPolicy field and GetCertificate method docs for more details.
|
||||
type HostPolicy func(ctx context.Context, host string) error
|
||||
|
||||
// HostWhitelist returns a policy where only the specified host names are allowed.
|
||||
// Only exact matches are currently supported. Subdomains, regexp or wildcard
|
||||
// will not match.
|
||||
func HostWhitelist(hosts ...string) HostPolicy {
|
||||
whitelist := make(map[string]bool, len(hosts))
|
||||
for _, h := range hosts {
|
||||
whitelist[h] = true
|
||||
}
|
||||
return func(_ context.Context, host string) error {
|
||||
if !whitelist[host] {
|
||||
return errors.New("acme/autocert: host not configured")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// defaultHostPolicy is used when Manager.HostPolicy is not set.
|
||||
func defaultHostPolicy(context.Context, string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Manager is a stateful certificate manager built on top of acme.Client.
|
||||
// It obtains and refreshes certificates automatically using "tls-sni-01",
|
||||
// "tls-sni-02" and "http-01" challenge types, as well as providing them
|
||||
// to a TLS server via tls.Config.
|
||||
//
|
||||
// You must specify a cache implementation, such as DirCache,
|
||||
// to reuse obtained certificates across program restarts.
|
||||
// Otherwise your server is very likely to exceed the certificate
|
||||
// issuer's request rate limits.
|
||||
type Manager struct {
|
||||
// Prompt specifies a callback function to conditionally accept a CA's Terms of Service (TOS).
|
||||
// The registration may require the caller to agree to the CA's TOS.
|
||||
// If so, Manager calls Prompt with a TOS URL provided by the CA. Prompt should report
|
||||
// whether the caller agrees to the terms.
|
||||
//
|
||||
// To always accept the terms, the callers can use AcceptTOS.
|
||||
Prompt func(tosURL string) bool
|
||||
|
||||
// Cache optionally stores and retrieves previously-obtained certificates.
|
||||
// If nil, certs will only be cached for the lifetime of the Manager.
|
||||
//
|
||||
// Manager passes the Cache certificates data encoded in PEM, with private/public
|
||||
// parts combined in a single Cache.Put call, private key first.
|
||||
Cache Cache
|
||||
|
||||
// HostPolicy controls which domains the Manager will attempt
|
||||
// to retrieve new certificates for. It does not affect cached certs.
|
||||
//
|
||||
// If non-nil, HostPolicy is called before requesting a new cert.
|
||||
// If nil, all hosts are currently allowed. This is not recommended,
|
||||
// as it opens a potential attack where clients connect to a server
|
||||
// by IP address and pretend to be asking for an incorrect host name.
|
||||
// Manager will attempt to obtain a certificate for that host, incorrectly,
|
||||
// eventually reaching the CA's rate limit for certificate requests
|
||||
// and making it impossible to obtain actual certificates.
|
||||
//
|
||||
// See GetCertificate for more details.
|
||||
HostPolicy HostPolicy
|
||||
|
||||
// RenewBefore optionally specifies how early certificates should
|
||||
// be renewed before they expire.
|
||||
//
|
||||
// If zero, they're renewed 30 days before expiration.
|
||||
RenewBefore time.Duration
|
||||
|
||||
// Client is used to perform low-level operations, such as account registration
|
||||
// and requesting new certificates.
|
||||
// If Client is nil, a zero-value acme.Client is used with acme.LetsEncryptURL
|
||||
// directory endpoint and a newly-generated ECDSA P-256 key.
|
||||
//
|
||||
// Mutating the field after the first call of GetCertificate method will have no effect.
|
||||
Client *acme.Client
|
||||
|
||||
// Email optionally specifies a contact email address.
|
||||
// This is used by CAs, such as Let's Encrypt, to notify about problems
|
||||
// with issued certificates.
|
||||
//
|
||||
// If the Client's account key is already registered, Email is not used.
|
||||
Email string
|
||||
|
||||
// ForceRSA makes the Manager generate certificates with 2048-bit RSA keys.
|
||||
//
|
||||
// If false, a default is used. Currently the default
|
||||
// is EC-based keys using the P-256 curve.
|
||||
ForceRSA bool
|
||||
|
||||
clientMu sync.Mutex
|
||||
client *acme.Client // initialized by acmeClient method
|
||||
|
||||
stateMu sync.Mutex
|
||||
state map[string]*certState // keyed by domain name
|
||||
|
||||
// renewal tracks the set of domains currently running renewal timers.
|
||||
// It is keyed by domain name.
|
||||
renewalMu sync.Mutex
|
||||
renewal map[string]*domainRenewal
|
||||
|
||||
// tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens.
|
||||
tokensMu sync.RWMutex
|
||||
// tryHTTP01 indicates whether the Manager should try "http-01" challenge type
|
||||
// during the authorization flow.
|
||||
tryHTTP01 bool
|
||||
// httpTokens contains response body values for http-01 challenges
|
||||
// and is keyed by the URL path at which a challenge response is expected
|
||||
// to be provisioned.
|
||||
// The entries are stored for the duration of the authorization flow.
|
||||
httpTokens map[string][]byte
|
||||
// certTokens contains temporary certificates for tls-sni challenges
|
||||
// and is keyed by token domain name, which matches server name of ClientHello.
|
||||
// Keys always have ".acme.invalid" suffix.
|
||||
// The entries are stored for the duration of the authorization flow.
|
||||
certTokens map[string]*tls.Certificate
|
||||
}
|
||||
|
||||
// GetCertificate implements the tls.Config.GetCertificate hook.
|
||||
// It provides a TLS certificate for hello.ServerName host, including answering
|
||||
// *.acme.invalid (TLS-SNI) challenges. All other fields of hello are ignored.
|
||||
//
|
||||
// If m.HostPolicy is non-nil, GetCertificate calls the policy before requesting
|
||||
// a new cert. A non-nil error returned from m.HostPolicy halts TLS negotiation.
|
||||
// The error is propagated back to the caller of GetCertificate and is user-visible.
|
||||
// This does not affect cached certs. See HostPolicy field description for more details.
|
||||
func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
if m.Prompt == nil {
|
||||
return nil, errors.New("acme/autocert: Manager.Prompt not set")
|
||||
}
|
||||
|
||||
name := hello.ServerName
|
||||
if name == "" {
|
||||
return nil, errors.New("acme/autocert: missing server name")
|
||||
}
|
||||
if !strings.Contains(strings.Trim(name, "."), ".") {
|
||||
return nil, errors.New("acme/autocert: server name component count invalid")
|
||||
}
|
||||
if strings.ContainsAny(name, `/\`) {
|
||||
return nil, errors.New("acme/autocert: server name contains invalid character")
|
||||
}
|
||||
|
||||
// In the worst-case scenario, the timeout needs to account for caching, host policy,
|
||||
// domain ownership verification and certificate issuance.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// check whether this is a token cert requested for TLS-SNI challenge
|
||||
if strings.HasSuffix(name, ".acme.invalid") {
|
||||
m.tokensMu.RLock()
|
||||
defer m.tokensMu.RUnlock()
|
||||
if cert := m.certTokens[name]; cert != nil {
|
||||
return cert, nil
|
||||
}
|
||||
if cert, err := m.cacheGet(ctx, name); err == nil {
|
||||
return cert, nil
|
||||
}
|
||||
// TODO: cache error results?
|
||||
return nil, fmt.Errorf("acme/autocert: no token cert for %q", name)
|
||||
}
|
||||
|
||||
// regular domain
|
||||
name = strings.TrimSuffix(name, ".") // golang.org/issue/18114
|
||||
cert, err := m.cert(ctx, name)
|
||||
if err == nil {
|
||||
return cert, nil
|
||||
}
|
||||
if err != ErrCacheMiss {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// first-time
|
||||
if err := m.hostPolicy()(ctx, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, err = m.createCert(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.cachePut(ctx, name, cert)
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses.
|
||||
// It returns an http.Handler that responds to the challenges and must be
|
||||
// running on port 80. If it receives a request that is not an ACME challenge,
|
||||
// it delegates the request to the optional fallback handler.
|
||||
//
|
||||
// If fallback is nil, the returned handler redirects all GET and HEAD requests
|
||||
// to the default TLS port 443 with 302 Found status code, preserving the original
|
||||
// request path and query. It responds with 400 Bad Request to all other HTTP methods.
|
||||
// The fallback is not protected by the optional HostPolicy.
|
||||
//
|
||||
// Because the fallback handler is run with unencrypted port 80 requests,
|
||||
// the fallback should not serve TLS-only requests.
|
||||
//
|
||||
// If HTTPHandler is never called, the Manager will only use TLS SNI
|
||||
// challenges for domain verification.
|
||||
func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
m.tryHTTP01 = true
|
||||
|
||||
if fallback == nil {
|
||||
fallback = http.HandlerFunc(handleHTTPRedirect)
|
||||
}
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") {
|
||||
fallback.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
// A reasonable context timeout for cache and host policy only,
|
||||
// because we don't wait for a new certificate issuance here.
|
||||
ctx, cancel := context.WithTimeout(r.Context(), time.Minute)
|
||||
defer cancel()
|
||||
if err := m.hostPolicy()(ctx, r.Host); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
data, err := m.httpToken(ctx, r.URL.Path)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Write(data)
|
||||
})
|
||||
}
|
||||
|
||||
func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != "GET" && r.Method != "HEAD" {
|
||||
http.Error(w, "Use HTTPS", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
target := "https://" + stripPort(r.Host) + r.URL.RequestURI()
|
||||
http.Redirect(w, r, target, http.StatusFound)
|
||||
}
|
||||
|
||||
func stripPort(hostport string) string {
|
||||
host, _, err := net.SplitHostPort(hostport)
|
||||
if err != nil {
|
||||
return hostport
|
||||
}
|
||||
return net.JoinHostPort(host, "443")
|
||||
}
|
||||
|
||||
// cert returns an existing certificate either from m.state or cache.
|
||||
// If a certificate is found in cache but not in m.state, the latter will be filled
|
||||
// with the cached value.
|
||||
func (m *Manager) cert(ctx context.Context, name string) (*tls.Certificate, error) {
|
||||
m.stateMu.Lock()
|
||||
if s, ok := m.state[name]; ok {
|
||||
m.stateMu.Unlock()
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
return s.tlscert()
|
||||
}
|
||||
defer m.stateMu.Unlock()
|
||||
cert, err := m.cacheGet(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signer, ok := cert.PrivateKey.(crypto.Signer)
|
||||
if !ok {
|
||||
return nil, errors.New("acme/autocert: private key cannot sign")
|
||||
}
|
||||
if m.state == nil {
|
||||
m.state = make(map[string]*certState)
|
||||
}
|
||||
s := &certState{
|
||||
key: signer,
|
||||
cert: cert.Certificate,
|
||||
leaf: cert.Leaf,
|
||||
}
|
||||
m.state[name] = s
|
||||
go m.renew(name, s.key, s.leaf.NotAfter)
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// cacheGet always returns a valid certificate, or an error otherwise.
|
||||
// If a cached certificate exists but is not valid, ErrCacheMiss is returned.
|
||||
func (m *Manager) cacheGet(ctx context.Context, domain string) (*tls.Certificate, error) {
|
||||
if m.Cache == nil {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
data, err := m.Cache.Get(ctx, domain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// private
|
||||
priv, pub := pem.Decode(data)
|
||||
if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
privKey, err := parsePrivateKey(priv.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// public
|
||||
var pubDER [][]byte
|
||||
for len(pub) > 0 {
|
||||
var b *pem.Block
|
||||
b, pub = pem.Decode(pub)
|
||||
if b == nil {
|
||||
break
|
||||
}
|
||||
pubDER = append(pubDER, b.Bytes)
|
||||
}
|
||||
if len(pub) > 0 {
|
||||
// Leftover content not consumed by pem.Decode. Corrupt. Ignore.
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
|
||||
// verify and create TLS cert
|
||||
leaf, err := validCert(domain, pubDER, privKey)
|
||||
if err != nil {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
tlscert := &tls.Certificate{
|
||||
Certificate: pubDER,
|
||||
PrivateKey: privKey,
|
||||
Leaf: leaf,
|
||||
}
|
||||
return tlscert, nil
|
||||
}
|
||||
|
||||
func (m *Manager) cachePut(ctx context.Context, domain string, tlscert *tls.Certificate) error {
|
||||
if m.Cache == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// contains PEM-encoded data
|
||||
var buf bytes.Buffer
|
||||
|
||||
// private
|
||||
switch key := tlscert.PrivateKey.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
if err := encodeECDSAKey(&buf, key); err != nil {
|
||||
return err
|
||||
}
|
||||
case *rsa.PrivateKey:
|
||||
b := x509.MarshalPKCS1PrivateKey(key)
|
||||
pb := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: b}
|
||||
if err := pem.Encode(&buf, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("acme/autocert: unknown private key type")
|
||||
}
|
||||
|
||||
// public
|
||||
for _, b := range tlscert.Certificate {
|
||||
pb := &pem.Block{Type: "CERTIFICATE", Bytes: b}
|
||||
if err := pem.Encode(&buf, pb); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return m.Cache.Put(ctx, domain, buf.Bytes())
|
||||
}
|
||||
|
||||
func encodeECDSAKey(w io.Writer, key *ecdsa.PrivateKey) error {
|
||||
b, err := x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pb := &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
|
||||
return pem.Encode(w, pb)
|
||||
}
|
||||
|
||||
// createCert starts the domain ownership verification and returns a certificate
|
||||
// for that domain upon success.
|
||||
//
|
||||
// If the domain is already being verified, it waits for the existing verification to complete.
|
||||
// Either way, createCert blocks for the duration of the whole process.
|
||||
func (m *Manager) createCert(ctx context.Context, domain string) (*tls.Certificate, error) {
|
||||
// TODO: maybe rewrite this whole piece using sync.Once
|
||||
state, err := m.certState(domain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// state may exist if another goroutine is already working on it
|
||||
// in which case just wait for it to finish
|
||||
if !state.locked {
|
||||
state.RLock()
|
||||
defer state.RUnlock()
|
||||
return state.tlscert()
|
||||
}
|
||||
|
||||
// We are the first; state is locked.
|
||||
// Unblock the readers when domain ownership is verified
|
||||
// and we got the cert or the process failed.
|
||||
defer state.Unlock()
|
||||
state.locked = false
|
||||
|
||||
der, leaf, err := m.authorizedCert(ctx, state.key, domain)
|
||||
if err != nil {
|
||||
// Remove the failed state after some time,
|
||||
// making the manager call createCert again on the following TLS hello.
|
||||
time.AfterFunc(createCertRetryAfter, func() {
|
||||
defer testDidRemoveState(domain)
|
||||
m.stateMu.Lock()
|
||||
defer m.stateMu.Unlock()
|
||||
// Verify the state hasn't changed and it's still invalid
|
||||
// before deleting.
|
||||
s, ok := m.state[domain]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if _, err := validCert(domain, s.cert, s.key); err == nil {
|
||||
return
|
||||
}
|
||||
delete(m.state, domain)
|
||||
})
|
||||
return nil, err
|
||||
}
|
||||
state.cert = der
|
||||
state.leaf = leaf
|
||||
go m.renew(domain, state.key, state.leaf.NotAfter)
|
||||
return state.tlscert()
|
||||
}
|
||||
|
||||
// certState returns a new or existing certState.
|
||||
// If a new certState is returned, state.exist is false and the state is locked.
|
||||
// The returned error is non-nil only in the case where a new state could not be created.
|
||||
func (m *Manager) certState(domain string) (*certState, error) {
|
||||
m.stateMu.Lock()
|
||||
defer m.stateMu.Unlock()
|
||||
if m.state == nil {
|
||||
m.state = make(map[string]*certState)
|
||||
}
|
||||
// existing state
|
||||
if state, ok := m.state[domain]; ok {
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// new locked state
|
||||
var (
|
||||
err error
|
||||
key crypto.Signer
|
||||
)
|
||||
if m.ForceRSA {
|
||||
key, err = rsa.GenerateKey(rand.Reader, 2048)
|
||||
} else {
|
||||
key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
state := &certState{
|
||||
key: key,
|
||||
locked: true,
|
||||
}
|
||||
state.Lock() // will be unlocked by m.certState caller
|
||||
m.state[domain] = state
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// authorizedCert starts the domain ownership verification process and requests a new cert upon success.
|
||||
// The key argument is the certificate private key.
|
||||
func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) {
|
||||
client, err := m.acmeClient(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if err := m.verify(ctx, client, domain); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
csr, err := certRequest(key, domain)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
der, _, err = client.CreateCert(ctx, csr, 0, true)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
leaf, err = validCert(domain, der, key)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return der, leaf, nil
|
||||
}
|
||||
|
||||
// verify runs the identifier (domain) authorization flow
|
||||
// using each applicable ACME challenge type.
|
||||
func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error {
|
||||
// The list of challenge types we'll try to fulfill
|
||||
// in this specific order.
|
||||
challengeTypes := []string{"tls-sni-02", "tls-sni-01"}
|
||||
m.tokensMu.RLock()
|
||||
if m.tryHTTP01 {
|
||||
challengeTypes = append(challengeTypes, "http-01")
|
||||
}
|
||||
m.tokensMu.RUnlock()
|
||||
|
||||
var nextTyp int // challengeType index of the next challenge type to try
|
||||
for {
|
||||
// Start domain authorization and get the challenge.
|
||||
authz, err := client.Authorize(ctx, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// No point in accepting challenges if the authorization status
|
||||
// is in a final state.
|
||||
switch authz.Status {
|
||||
case acme.StatusValid:
|
||||
return nil // already authorized
|
||||
case acme.StatusInvalid:
|
||||
return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI)
|
||||
}
|
||||
|
||||
// Pick the next preferred challenge.
|
||||
var chal *acme.Challenge
|
||||
for chal == nil && nextTyp < len(challengeTypes) {
|
||||
chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges)
|
||||
nextTyp++
|
||||
}
|
||||
if chal == nil {
|
||||
return fmt.Errorf("acme/autocert: unable to authorize %q; tried %q", domain, challengeTypes)
|
||||
}
|
||||
cleanup, err := m.fulfill(ctx, client, chal)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer cleanup()
|
||||
if _, err := client.Accept(ctx, chal); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// A challenge is fulfilled and accepted: wait for the CA to validate.
|
||||
if _, err := client.WaitAuthorization(ctx, authz.URI); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fulfill provisions a response to the challenge chal.
|
||||
// The cleanup is non-nil only if provisioning succeeded.
|
||||
func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge) (cleanup func(), err error) {
|
||||
switch chal.Type {
|
||||
case "tls-sni-01":
|
||||
cert, name, err := client.TLSSNI01ChallengeCert(chal.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.putCertToken(ctx, name, &cert)
|
||||
return func() { go m.deleteCertToken(name) }, nil
|
||||
case "tls-sni-02":
|
||||
cert, name, err := client.TLSSNI02ChallengeCert(chal.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.putCertToken(ctx, name, &cert)
|
||||
return func() { go m.deleteCertToken(name) }, nil
|
||||
case "http-01":
|
||||
resp, err := client.HTTP01ChallengeResponse(chal.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p := client.HTTP01ChallengePath(chal.Token)
|
||||
m.putHTTPToken(ctx, p, resp)
|
||||
return func() { go m.deleteHTTPToken(p) }, nil
|
||||
}
|
||||
return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type)
|
||||
}
|
||||
|
||||
func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge {
|
||||
for _, c := range chal {
|
||||
if c.Type == typ {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// putCertToken stores the cert under the named key in both m.certTokens map
|
||||
// and m.Cache.
|
||||
func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
if m.certTokens == nil {
|
||||
m.certTokens = make(map[string]*tls.Certificate)
|
||||
}
|
||||
m.certTokens[name] = cert
|
||||
m.cachePut(ctx, name, cert)
|
||||
}
|
||||
|
||||
// deleteCertToken removes the token certificate for the specified domain name
|
||||
// from both m.certTokens map and m.Cache.
|
||||
func (m *Manager) deleteCertToken(name string) {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
delete(m.certTokens, name)
|
||||
if m.Cache != nil {
|
||||
m.Cache.Delete(context.Background(), name)
|
||||
}
|
||||
}
|
||||
|
||||
// httpToken retrieves an existing http-01 token value from an in-memory map
|
||||
// or the optional cache.
|
||||
func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) {
|
||||
m.tokensMu.RLock()
|
||||
defer m.tokensMu.RUnlock()
|
||||
if v, ok := m.httpTokens[tokenPath]; ok {
|
||||
return v, nil
|
||||
}
|
||||
if m.Cache == nil {
|
||||
return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath)
|
||||
}
|
||||
return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath))
|
||||
}
|
||||
|
||||
// putHTTPToken stores an http-01 token value using tokenPath as key
|
||||
// in both in-memory map and the optional Cache.
|
||||
//
|
||||
// It ignores any error returned from Cache.Put.
|
||||
func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
if m.httpTokens == nil {
|
||||
m.httpTokens = make(map[string][]byte)
|
||||
}
|
||||
b := []byte(val)
|
||||
m.httpTokens[tokenPath] = b
|
||||
if m.Cache != nil {
|
||||
m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteHTTPToken removes an http-01 token value from both in-memory map
|
||||
// and the optional Cache, ignoring any error returned from the latter.
|
||||
//
|
||||
// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout.
|
||||
func (m *Manager) deleteHTTPToken(tokenPath string) {
|
||||
m.tokensMu.Lock()
|
||||
defer m.tokensMu.Unlock()
|
||||
delete(m.httpTokens, tokenPath)
|
||||
if m.Cache != nil {
|
||||
m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath))
|
||||
}
|
||||
}
|
||||
|
||||
// httpTokenCacheKey returns a key at which an http-01 token value may be stored
|
||||
// in the Manager's optional Cache.
|
||||
func httpTokenCacheKey(tokenPath string) string {
|
||||
return "http-01-" + path.Base(tokenPath)
|
||||
}
|
||||
|
||||
// renew starts a cert renewal timer loop, one per domain.
|
||||
//
|
||||
// The loop is scheduled in two cases:
|
||||
// - a cert was fetched from cache for the first time (wasn't in m.state)
|
||||
// - a new cert was created by m.createCert
|
||||
//
|
||||
// The key argument is a certificate private key.
|
||||
// The exp argument is the cert expiration time (NotAfter).
|
||||
func (m *Manager) renew(domain string, key crypto.Signer, exp time.Time) {
|
||||
m.renewalMu.Lock()
|
||||
defer m.renewalMu.Unlock()
|
||||
if m.renewal[domain] != nil {
|
||||
// another goroutine is already on it
|
||||
return
|
||||
}
|
||||
if m.renewal == nil {
|
||||
m.renewal = make(map[string]*domainRenewal)
|
||||
}
|
||||
dr := &domainRenewal{m: m, domain: domain, key: key}
|
||||
m.renewal[domain] = dr
|
||||
dr.start(exp)
|
||||
}
|
||||
|
||||
// stopRenew stops all currently running cert renewal timers.
|
||||
// The timers are not restarted during the lifetime of the Manager.
|
||||
func (m *Manager) stopRenew() {
|
||||
m.renewalMu.Lock()
|
||||
defer m.renewalMu.Unlock()
|
||||
for name, dr := range m.renewal {
|
||||
delete(m.renewal, name)
|
||||
dr.stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Manager) accountKey(ctx context.Context) (crypto.Signer, error) {
|
||||
const keyName = "acme_account.key"
|
||||
|
||||
genKey := func() (*ecdsa.PrivateKey, error) {
|
||||
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
}
|
||||
|
||||
if m.Cache == nil {
|
||||
return genKey()
|
||||
}
|
||||
|
||||
data, err := m.Cache.Get(ctx, keyName)
|
||||
if err == ErrCacheMiss {
|
||||
key, err := genKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := encodeECDSAKey(&buf, key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := m.Cache.Put(ctx, keyName, buf.Bytes()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
priv, _ := pem.Decode(data)
|
||||
if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
|
||||
return nil, errors.New("acme/autocert: invalid account key found in cache")
|
||||
}
|
||||
return parsePrivateKey(priv.Bytes)
|
||||
}
|
||||
|
||||
func (m *Manager) acmeClient(ctx context.Context) (*acme.Client, error) {
|
||||
m.clientMu.Lock()
|
||||
defer m.clientMu.Unlock()
|
||||
if m.client != nil {
|
||||
return m.client, nil
|
||||
}
|
||||
|
||||
client := m.Client
|
||||
if client == nil {
|
||||
client = &acme.Client{DirectoryURL: acme.LetsEncryptURL}
|
||||
}
|
||||
if client.Key == nil {
|
||||
var err error
|
||||
client.Key, err = m.accountKey(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var contact []string
|
||||
if m.Email != "" {
|
||||
contact = []string{"mailto:" + m.Email}
|
||||
}
|
||||
a := &acme.Account{Contact: contact}
|
||||
_, err := client.Register(ctx, a, m.Prompt)
|
||||
if ae, ok := err.(*acme.Error); err == nil || ok && ae.StatusCode == http.StatusConflict {
|
||||
// conflict indicates the key is already registered
|
||||
m.client = client
|
||||
err = nil
|
||||
}
|
||||
return m.client, err
|
||||
}
|
||||
|
||||
func (m *Manager) hostPolicy() HostPolicy {
|
||||
if m.HostPolicy != nil {
|
||||
return m.HostPolicy
|
||||
}
|
||||
return defaultHostPolicy
|
||||
}
|
||||
|
||||
func (m *Manager) renewBefore() time.Duration {
|
||||
if m.RenewBefore > renewJitter {
|
||||
return m.RenewBefore
|
||||
}
|
||||
return 720 * time.Hour // 30 days
|
||||
}
|
||||
|
||||
// certState is ready when its mutex is unlocked for reading.
|
||||
type certState struct {
|
||||
sync.RWMutex
|
||||
locked bool // locked for read/write
|
||||
key crypto.Signer // private key for cert
|
||||
cert [][]byte // DER encoding
|
||||
leaf *x509.Certificate // parsed cert[0]; always non-nil if cert != nil
|
||||
}
|
||||
|
||||
// tlscert creates a tls.Certificate from s.key and s.cert.
|
||||
// Callers should wrap it in s.RLock() and s.RUnlock().
|
||||
func (s *certState) tlscert() (*tls.Certificate, error) {
|
||||
if s.key == nil {
|
||||
return nil, errors.New("acme/autocert: missing signer")
|
||||
}
|
||||
if len(s.cert) == 0 {
|
||||
return nil, errors.New("acme/autocert: missing certificate")
|
||||
}
|
||||
return &tls.Certificate{
|
||||
PrivateKey: s.key,
|
||||
Certificate: s.cert,
|
||||
Leaf: s.leaf,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// certRequest creates a certificate request for the given common name cn
|
||||
// and optional SANs.
|
||||
func certRequest(key crypto.Signer, cn string, san ...string) ([]byte, error) {
|
||||
req := &x509.CertificateRequest{
|
||||
Subject: pkix.Name{CommonName: cn},
|
||||
DNSNames: san,
|
||||
}
|
||||
return x509.CreateCertificateRequest(rand.Reader, req, key)
|
||||
}
|
||||
|
||||
// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates
|
||||
// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys.
|
||||
// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three.
|
||||
//
|
||||
// Inspired by parsePrivateKey in crypto/tls/tls.go.
|
||||
func parsePrivateKey(der []byte) (crypto.Signer, error) {
|
||||
if key, err := x509.ParsePKCS1PrivateKey(der); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
if key, err := x509.ParsePKCS8PrivateKey(der); err == nil {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return key, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return key, nil
|
||||
default:
|
||||
return nil, errors.New("acme/autocert: unknown private key type in PKCS#8 wrapping")
|
||||
}
|
||||
}
|
||||
if key, err := x509.ParseECPrivateKey(der); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("acme/autocert: failed to parse private key")
|
||||
}
|
||||
|
||||
// validCert parses a cert chain provided as der argument and verifies the leaf, der[0],
|
||||
// corresponds to the private key, as well as the domain match and expiration dates.
|
||||
// It doesn't do any revocation checking.
|
||||
//
|
||||
// The returned value is the verified leaf cert.
|
||||
func validCert(domain string, der [][]byte, key crypto.Signer) (leaf *x509.Certificate, err error) {
|
||||
// parse public part(s)
|
||||
var n int
|
||||
for _, b := range der {
|
||||
n += len(b)
|
||||
}
|
||||
pub := make([]byte, n)
|
||||
n = 0
|
||||
for _, b := range der {
|
||||
n += copy(pub[n:], b)
|
||||
}
|
||||
x509Cert, err := x509.ParseCertificates(pub)
|
||||
if len(x509Cert) == 0 {
|
||||
return nil, errors.New("acme/autocert: no public key found")
|
||||
}
|
||||
// verify the leaf is not expired and matches the domain name
|
||||
leaf = x509Cert[0]
|
||||
now := timeNow()
|
||||
if now.Before(leaf.NotBefore) {
|
||||
return nil, errors.New("acme/autocert: certificate is not valid yet")
|
||||
}
|
||||
if now.After(leaf.NotAfter) {
|
||||
return nil, errors.New("acme/autocert: expired certificate")
|
||||
}
|
||||
if err := leaf.VerifyHostname(domain); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// ensure the leaf corresponds to the private key
|
||||
switch pub := leaf.PublicKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
prv, ok := key.(*rsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, errors.New("acme/autocert: private key type does not match public key type")
|
||||
}
|
||||
if pub.N.Cmp(prv.N) != 0 {
|
||||
return nil, errors.New("acme/autocert: private key does not match public key")
|
||||
}
|
||||
case *ecdsa.PublicKey:
|
||||
prv, ok := key.(*ecdsa.PrivateKey)
|
||||
if !ok {
|
||||
return nil, errors.New("acme/autocert: private key type does not match public key type")
|
||||
}
|
||||
if pub.X.Cmp(prv.X) != 0 || pub.Y.Cmp(prv.Y) != 0 {
|
||||
return nil, errors.New("acme/autocert: private key does not match public key")
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("acme/autocert: unknown public key algorithm")
|
||||
}
|
||||
return leaf, nil
|
||||
}
|
||||
|
||||
type lockedMathRand struct {
|
||||
sync.Mutex
|
||||
rnd *mathrand.Rand
|
||||
}
|
||||
|
||||
func (r *lockedMathRand) int63n(max int64) int64 {
|
||||
r.Lock()
|
||||
n := r.rnd.Int63n(max)
|
||||
r.Unlock()
|
||||
return n
|
||||
}
|
||||
|
||||
// For easier testing.
|
||||
var (
|
||||
timeNow = time.Now
|
||||
|
||||
// Called when a state is removed.
|
||||
testDidRemoveState = func(domain string) {}
|
||||
)
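autocert.go above defines the whole Manager workflow: AcceptTOS for the terms-of-service prompt, HostPolicy/HostWhitelist for allowed names, a Cache for persistence, and GetCertificate/HTTPHandler as the server hooks. A minimal, hedged sketch of how a server would wire the vendored package together (example.com and the cache directory are placeholders, not values taken from this commit):

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,                    // agree to the CA's terms
		HostPolicy: autocert.HostWhitelist("example.com"), // only issue for this host
		Cache:      autocert.DirCache("certs"),            // persist certs across restarts
	}

	srv := &http.Server{
		Addr:      ":443",
		TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
	}

	// Serve http-01 challenges on port 80 and redirect everything else to HTTPS.
	go http.ListenAndServe(":80", m.HTTPHandler(nil))

	// Certificates come from GetCertificate, so no cert/key files are passed here.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}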
|
||||
757
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go
generated
vendored
Normal file
@@ -0,0 +1,757 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/acme"
|
||||
)
|
||||
|
||||
var discoTmpl = template.Must(template.New("disco").Parse(`{
|
||||
"new-reg": "{{.}}/new-reg",
|
||||
"new-authz": "{{.}}/new-authz",
|
||||
"new-cert": "{{.}}/new-cert"
|
||||
}`))
|
||||
|
||||
var authzTmpl = template.Must(template.New("authz").Parse(`{
|
||||
"status": "pending",
|
||||
"challenges": [
|
||||
{
|
||||
"uri": "{{.}}/challenge/1",
|
||||
"type": "tls-sni-01",
|
||||
"token": "token-01"
|
||||
},
|
||||
{
|
||||
"uri": "{{.}}/challenge/2",
|
||||
"type": "tls-sni-02",
|
||||
"token": "token-02"
|
||||
},
|
||||
{
|
||||
"uri": "{{.}}/challenge/dns-01",
|
||||
"type": "dns-01",
|
||||
"token": "token-dns-01"
|
||||
},
|
||||
{
|
||||
"uri": "{{.}}/challenge/http-01",
|
||||
"type": "http-01",
|
||||
"token": "token-http-01"
|
||||
}
|
||||
]
|
||||
}`))
|
||||
|
||||
type memCache struct {
|
||||
mu sync.Mutex
|
||||
keyData map[string][]byte
|
||||
}
|
||||
|
||||
func (m *memCache) Get(ctx context.Context, key string) ([]byte, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
v, ok := m.keyData[key]
|
||||
if !ok {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func (m *memCache) Put(ctx context.Context, key string, data []byte) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.keyData[key] = data
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *memCache) Delete(ctx context.Context, key string) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
delete(m.keyData, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func newMemCache() *memCache {
|
||||
return &memCache{
|
||||
keyData: make(map[string][]byte),
|
||||
}
|
||||
}
|
||||
|
||||
func dummyCert(pub interface{}, san ...string) ([]byte, error) {
|
||||
return dateDummyCert(pub, time.Now(), time.Now().Add(90*24*time.Hour), san...)
|
||||
}
|
||||
|
||||
func dateDummyCert(pub interface{}, start, end time.Time, san ...string) ([]byte, error) {
|
||||
// use EC key to run faster on 386
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
NotBefore: start,
|
||||
NotAfter: end,
|
||||
BasicConstraintsValid: true,
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment,
|
||||
DNSNames: san,
|
||||
}
|
||||
if pub == nil {
|
||||
pub = &key.PublicKey
|
||||
}
|
||||
return x509.CreateCertificate(rand.Reader, t, t, pub, key)
|
||||
}
|
||||
|
||||
func decodePayload(v interface{}, r io.Reader) error {
|
||||
var req struct{ Payload string }
|
||||
if err := json.NewDecoder(r).Decode(&req); err != nil {
|
||||
return err
|
||||
}
|
||||
payload, err := base64.RawURLEncoding.DecodeString(req.Payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(payload, v)
|
||||
}
|
||||
|
||||
func TestGetCertificate(t *testing.T) {
|
||||
man := &Manager{Prompt: AcceptTOS}
|
||||
defer man.stopRenew()
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org"}
|
||||
testGetCertificate(t, man, "example.org", hello)
|
||||
}
|
||||
|
||||
func TestGetCertificate_trailingDot(t *testing.T) {
|
||||
man := &Manager{Prompt: AcceptTOS}
|
||||
defer man.stopRenew()
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org."}
|
||||
testGetCertificate(t, man, "example.org", hello)
|
||||
}
|
||||
|
||||
func TestGetCertificate_ForceRSA(t *testing.T) {
|
||||
man := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Cache: newMemCache(),
|
||||
ForceRSA: true,
|
||||
}
|
||||
defer man.stopRenew()
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org"}
|
||||
testGetCertificate(t, man, "example.org", hello)
|
||||
|
||||
cert, err := man.cacheGet(context.Background(), "example.org")
|
||||
if err != nil {
|
||||
t.Fatalf("man.cacheGet: %v", err)
|
||||
}
|
||||
if _, ok := cert.PrivateKey.(*rsa.PrivateKey); !ok {
|
||||
t.Errorf("cert.PrivateKey is %T; want *rsa.PrivateKey", cert.PrivateKey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCertificate_nilPrompt(t *testing.T) {
|
||||
man := &Manager{}
|
||||
defer man.stopRenew()
|
||||
url, finish := startACMEServerStub(t, man, "example.org")
|
||||
defer finish()
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man.Client = &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: url,
|
||||
}
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org"}
|
||||
if _, err := man.GetCertificate(hello); err == nil {
|
||||
t.Error("got certificate for example.org; wanted error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCertificate_expiredCache(t *testing.T) {
|
||||
// Make an expired cert and cache it.
|
||||
pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmpl := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{CommonName: "example.org"},
|
||||
NotAfter: time.Now(),
|
||||
}
|
||||
pub, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &pk.PublicKey, pk)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tlscert := &tls.Certificate{
|
||||
Certificate: [][]byte{pub},
|
||||
PrivateKey: pk,
|
||||
}
|
||||
|
||||
man := &Manager{Prompt: AcceptTOS, Cache: newMemCache()}
|
||||
defer man.stopRenew()
|
||||
if err := man.cachePut(context.Background(), "example.org", tlscert); err != nil {
|
||||
t.Fatalf("man.cachePut: %v", err)
|
||||
}
|
||||
|
||||
// The expired cached cert should trigger a new cert issuance
|
||||
// and return without an error.
|
||||
hello := &tls.ClientHelloInfo{ServerName: "example.org"}
|
||||
testGetCertificate(t, man, "example.org", hello)
|
||||
}
|
||||
|
||||
func TestGetCertificate_failedAttempt(t *testing.T) {
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
const example = "example.org"
|
||||
d := createCertRetryAfter
|
||||
f := testDidRemoveState
|
||||
defer func() {
|
||||
createCertRetryAfter = d
|
||||
testDidRemoveState = f
|
||||
}()
|
||||
createCertRetryAfter = 0
|
||||
done := make(chan struct{})
|
||||
testDidRemoveState = func(domain string) {
|
||||
if domain != example {
|
||||
t.Errorf("testDidRemoveState: domain = %q; want %q", domain, example)
|
||||
}
|
||||
close(done)
|
||||
}
|
||||
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Client: &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: ts.URL,
|
||||
},
|
||||
}
|
||||
defer man.stopRenew()
|
||||
hello := &tls.ClientHelloInfo{ServerName: example}
|
||||
if _, err := man.GetCertificate(hello); err == nil {
|
||||
t.Error("GetCertificate: err is nil")
|
||||
}
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("took too long to remove the %q state", example)
|
||||
case <-done:
|
||||
man.stateMu.Lock()
|
||||
defer man.stateMu.Unlock()
|
||||
if v, exist := man.state[example]; exist {
|
||||
t.Errorf("state exists for %q: %+v", example, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// startACMEServerStub runs a stub ACME server.
|
||||
// The domain argument is the expected domain name of a certificate request.
|
||||
func startACMEServerStub(t *testing.T, man *Manager, domain string) (url string, finish func()) {
|
||||
// echo token-02 | shasum -a 256
|
||||
// then split the result into two parts separated by a dot
|
||||
tokenCertName := "4e8eb87631187e9ff2153b56b13a4dec.13a35d002e485d60ff37354b32f665d9.token.acme.invalid"
|
||||
verifyTokenCert := func() {
|
||||
hello := &tls.ClientHelloInfo{ServerName: tokenCertName}
|
||||
_, err := man.GetCertificate(hello)
|
||||
if err != nil {
|
||||
t.Errorf("verifyTokenCert: GetCertificate(%q): %v", tokenCertName, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// ACME CA server stub
|
||||
var ca *httptest.Server
|
||||
ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Replay-Nonce", "nonce")
|
||||
if r.Method == "HEAD" {
|
||||
// a nonce request
|
||||
return
|
||||
}
|
||||
|
||||
switch r.URL.Path {
|
||||
// discovery
|
||||
case "/":
|
||||
if err := discoTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Errorf("discoTmpl: %v", err)
|
||||
}
|
||||
// client key registration
|
||||
case "/new-reg":
|
||||
w.Write([]byte("{}"))
|
||||
// domain authorization
|
||||
case "/new-authz":
|
||||
w.Header().Set("Location", ca.URL+"/authz/1")
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
if err := authzTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Errorf("authzTmpl: %v", err)
|
||||
}
|
||||
// accept tls-sni-02 challenge
|
||||
case "/challenge/2":
|
||||
verifyTokenCert()
|
||||
w.Write([]byte("{}"))
|
||||
// authorization status
|
||||
case "/authz/1":
|
||||
w.Write([]byte(`{"status": "valid"}`))
|
||||
// cert request
|
||||
case "/new-cert":
|
||||
var req struct {
|
||||
CSR string `json:"csr"`
|
||||
}
|
||||
decodePayload(&req, r.Body)
|
||||
b, _ := base64.RawURLEncoding.DecodeString(req.CSR)
|
||||
csr, err := x509.ParseCertificateRequest(b)
|
||||
if err != nil {
|
||||
t.Errorf("new-cert: CSR: %v", err)
|
||||
}
|
||||
if csr.Subject.CommonName != domain {
|
||||
t.Errorf("CommonName in CSR = %q; want %q", csr.Subject.CommonName, domain)
|
||||
}
|
||||
der, err := dummyCert(csr.PublicKey, domain)
|
||||
if err != nil {
|
||||
t.Errorf("new-cert: dummyCert: %v", err)
|
||||
}
|
||||
chainUp := fmt.Sprintf("<%s/ca-cert>; rel=up", ca.URL)
|
||||
w.Header().Set("Link", chainUp)
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
w.Write(der)
|
||||
// CA chain cert
|
||||
case "/ca-cert":
|
||||
der, err := dummyCert(nil, "ca")
|
||||
if err != nil {
|
||||
t.Errorf("ca-cert: dummyCert: %v", err)
|
||||
}
|
||||
w.Write(der)
|
||||
default:
|
||||
t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path)
|
||||
}
|
||||
}))
|
||||
finish = func() {
|
||||
ca.Close()
|
||||
|
||||
// make sure token cert was removed
|
||||
cancel := make(chan struct{})
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
defer close(done)
|
||||
tick := time.NewTicker(100 * time.Millisecond)
|
||||
defer tick.Stop()
|
||||
for {
|
||||
hello := &tls.ClientHelloInfo{ServerName: tokenCertName}
|
||||
if _, err := man.GetCertificate(hello); err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-tick.C:
|
||||
case <-cancel:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(5 * time.Second):
|
||||
close(cancel)
|
||||
t.Error("token cert was not removed")
|
||||
<-done
|
||||
}
|
||||
}
|
||||
return ca.URL, finish
|
||||
}
|
||||
|
||||
// tests man.GetCertificate flow using the provided hello argument.
|
||||
// The domain argument is the expected domain name of a certificate request.
|
||||
func testGetCertificate(t *testing.T, man *Manager, domain string, hello *tls.ClientHelloInfo) {
|
||||
url, finish := startACMEServerStub(t, man, domain)
|
||||
defer finish()
|
||||
|
||||
// use EC key to run faster on 386
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man.Client = &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: url,
|
||||
}
|
||||
|
||||
// simulate tls.Config.GetCertificate
|
||||
var tlscert *tls.Certificate
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
tlscert, err = man.GetCertificate(hello)
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-time.After(time.Minute):
|
||||
t.Fatal("man.GetCertificate took too long to return")
|
||||
case <-done:
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("man.GetCertificate: %v", err)
|
||||
}
|
||||
|
||||
// verify the tlscert is the same we responded with from the CA stub
|
||||
if len(tlscert.Certificate) == 0 {
|
||||
t.Fatal("len(tlscert.Certificate) is 0")
|
||||
}
|
||||
cert, err := x509.ParseCertificate(tlscert.Certificate[0])
|
||||
if err != nil {
|
||||
t.Fatalf("x509.ParseCertificate: %v", err)
|
||||
}
|
||||
if len(cert.DNSNames) == 0 || cert.DNSNames[0] != domain {
|
||||
t.Errorf("cert.DNSNames = %v; want %q", cert.DNSNames, domain)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestVerifyHTTP01(t *testing.T) {
|
||||
var (
|
||||
http01 http.Handler
|
||||
|
||||
authzCount int // num. of created authorizations
|
||||
didAcceptHTTP01 bool
|
||||
)
|
||||
|
||||
verifyHTTPToken := func() {
|
||||
r := httptest.NewRequest("GET", "/.well-known/acme-challenge/token-http-01", nil)
|
||||
w := httptest.NewRecorder()
|
||||
http01.ServeHTTP(w, r)
|
||||
if w.Code != http.StatusOK {
|
||||
t.Errorf("http token: w.Code = %d; want %d", w.Code, http.StatusOK)
|
||||
}
|
||||
if v := string(w.Body.Bytes()); !strings.HasPrefix(v, "token-http-01.") {
|
||||
t.Errorf("http token value = %q; want 'token-http-01.' prefix", v)
|
||||
}
|
||||
}
|
||||
|
||||
// ACME CA server stub, only the needed bits.
|
||||
// TODO: Merge this with startACMEServerStub, making it a configurable CA for testing.
|
||||
var ca *httptest.Server
|
||||
ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Replay-Nonce", "nonce")
|
||||
if r.Method == "HEAD" {
|
||||
// a nonce request
|
||||
return
|
||||
}
|
||||
|
||||
switch r.URL.Path {
|
||||
// Discovery.
|
||||
case "/":
|
||||
if err := discoTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Errorf("discoTmpl: %v", err)
|
||||
}
|
||||
// Client key registration.
|
||||
case "/new-reg":
|
||||
w.Write([]byte("{}"))
|
||||
// New domain authorization.
|
||||
case "/new-authz":
|
||||
authzCount++
|
||||
w.Header().Set("Location", fmt.Sprintf("%s/authz/%d", ca.URL, authzCount))
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
if err := authzTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Errorf("authzTmpl: %v", err)
|
||||
}
|
||||
// Accept tls-sni-02.
|
||||
case "/challenge/2":
|
||||
w.Write([]byte("{}"))
|
||||
// Reject tls-sni-01.
|
||||
case "/challenge/1":
|
||||
http.Error(w, "won't accept tls-sni-01", http.StatusBadRequest)
|
||||
// Should not accept dns-01.
|
||||
case "/challenge/dns-01":
|
||||
t.Errorf("dns-01 challenge was accepted")
|
||||
http.Error(w, "won't accept dns-01", http.StatusBadRequest)
|
||||
// Accept http-01.
|
||||
case "/challenge/http-01":
|
||||
didAcceptHTTP01 = true
|
||||
verifyHTTPToken()
|
||||
w.Write([]byte("{}"))
|
||||
// Authorization statuses.
|
||||
// Make tls-sni-xxx invalid.
|
||||
case "/authz/1", "/authz/2":
|
||||
w.Write([]byte(`{"status": "invalid"}`))
|
||||
case "/authz/3", "/authz/4":
|
||||
w.Write([]byte(`{"status": "valid"}`))
|
||||
default:
|
||||
http.NotFound(w, r)
|
||||
t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path)
|
||||
}
|
||||
}))
|
||||
defer ca.Close()
|
||||
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
m := &Manager{
|
||||
Client: &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: ca.URL,
|
||||
},
|
||||
}
|
||||
http01 = m.HTTPHandler(nil)
|
||||
if err := m.verify(context.Background(), m.Client, "example.org"); err != nil {
|
||||
t.Errorf("m.verify: %v", err)
|
||||
}
|
||||
// Only tls-sni-01, tls-sni-02 and http-01 must be accepted
|
||||
// The dns-01 challenge is unsupported.
|
||||
if authzCount != 3 {
|
||||
t.Errorf("authzCount = %d; want 3", authzCount)
|
||||
}
|
||||
if !didAcceptHTTP01 {
|
||||
t.Error("did not accept http-01 challenge")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTPHandlerDefaultFallback(t *testing.T) {
|
||||
tt := []struct {
|
||||
method, url string
|
||||
wantCode int
|
||||
wantLocation string
|
||||
}{
|
||||
{"GET", "http://example.org", 302, "https://example.org/"},
|
||||
{"GET", "http://example.org/foo", 302, "https://example.org/foo"},
|
||||
{"GET", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"},
|
||||
{"GET", "http://example.org/?a=b", 302, "https://example.org/?a=b"},
|
||||
{"GET", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"},
|
||||
{"GET", "http://example.org:80/foo?a=b", 302, "https://example.org:443/foo?a=b"},
|
||||
{"GET", "http://example.org:80/foo%20bar", 302, "https://example.org:443/foo%20bar"},
|
||||
{"GET", "http://[2602:d1:xxxx::c60a]:1234", 302, "https://[2602:d1:xxxx::c60a]:443/"},
|
||||
{"GET", "http://[2602:d1:xxxx::c60a]", 302, "https://[2602:d1:xxxx::c60a]/"},
|
||||
{"GET", "http://[2602:d1:xxxx::c60a]/foo?a=b", 302, "https://[2602:d1:xxxx::c60a]/foo?a=b"},
|
||||
{"HEAD", "http://example.org", 302, "https://example.org/"},
|
||||
{"HEAD", "http://example.org/foo", 302, "https://example.org/foo"},
|
||||
{"HEAD", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"},
|
||||
{"HEAD", "http://example.org/?a=b", 302, "https://example.org/?a=b"},
|
||||
{"HEAD", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"},
|
||||
{"POST", "http://example.org", 400, ""},
|
||||
{"PUT", "http://example.org", 400, ""},
|
||||
{"GET", "http://example.org/.well-known/acme-challenge/x", 404, ""},
|
||||
}
|
||||
var m Manager
|
||||
h := m.HTTPHandler(nil)
|
||||
for i, test := range tt {
|
||||
r := httptest.NewRequest(test.method, test.url, nil)
|
||||
w := httptest.NewRecorder()
|
||||
h.ServeHTTP(w, r)
|
||||
if w.Code != test.wantCode {
|
||||
t.Errorf("%d: w.Code = %d; want %d", i, w.Code, test.wantCode)
|
||||
t.Errorf("%d: body: %s", i, w.Body.Bytes())
|
||||
}
|
||||
if v := w.Header().Get("Location"); v != test.wantLocation {
|
||||
t.Errorf("%d: Location = %q; want %q", i, v, test.wantLocation)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAccountKeyCache(t *testing.T) {
|
||||
m := Manager{Cache: newMemCache()}
|
||||
ctx := context.Background()
|
||||
k1, err := m.accountKey(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
k2, err := m.accountKey(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(k1, k2) {
|
||||
t.Errorf("account keys don't match: k1 = %#v; k2 = %#v", k1, k2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCache(t *testing.T) {
|
||||
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmpl := &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
Subject: pkix.Name{CommonName: "example.org"},
|
||||
NotAfter: time.Now().Add(time.Hour),
|
||||
}
|
||||
pub, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &privKey.PublicKey, privKey)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tlscert := &tls.Certificate{
|
||||
Certificate: [][]byte{pub},
|
||||
PrivateKey: privKey,
|
||||
}
|
||||
|
||||
man := &Manager{Cache: newMemCache()}
|
||||
defer man.stopRenew()
|
||||
ctx := context.Background()
|
||||
if err := man.cachePut(ctx, "example.org", tlscert); err != nil {
|
||||
t.Fatalf("man.cachePut: %v", err)
|
||||
}
|
||||
res, err := man.cacheGet(ctx, "example.org")
|
||||
if err != nil {
|
||||
t.Fatalf("man.cacheGet: %v", err)
|
||||
}
|
||||
if res == nil {
|
||||
t.Fatal("res is nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHostWhitelist(t *testing.T) {
|
||||
policy := HostWhitelist("example.com", "example.org", "*.example.net")
|
||||
tt := []struct {
|
||||
host string
|
||||
allow bool
|
||||
}{
|
||||
{"example.com", true},
|
||||
{"example.org", true},
|
||||
{"one.example.com", false},
|
||||
{"two.example.org", false},
|
||||
{"three.example.net", false},
|
||||
{"dummy", false},
|
||||
}
|
||||
for i, test := range tt {
|
||||
err := policy(nil, test.host)
|
||||
if err != nil && test.allow {
|
||||
t.Errorf("%d: policy(%q): %v; want nil", i, test.host, err)
|
||||
}
|
||||
if err == nil && !test.allow {
|
||||
t.Errorf("%d: policy(%q): nil; want an error", i, test.host)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidCert(t *testing.T) {
|
||||
key1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
key2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
key3, err := rsa.GenerateKey(rand.Reader, 512)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cert1, err := dummyCert(key1.Public(), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cert2, err := dummyCert(key2.Public(), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
cert3, err := dummyCert(key3.Public(), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
now := time.Now()
|
||||
early, err := dateDummyCert(key1.Public(), now.Add(time.Hour), now.Add(2*time.Hour), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expired, err := dateDummyCert(key1.Public(), now.Add(-2*time.Hour), now.Add(-time.Hour), "example.org")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tt := []struct {
|
||||
domain string
|
||||
key crypto.Signer
|
||||
cert [][]byte
|
||||
ok bool
|
||||
}{
|
||||
{"example.org", key1, [][]byte{cert1}, true},
|
||||
{"example.org", key3, [][]byte{cert3}, true},
|
||||
{"example.org", key1, [][]byte{cert1, cert2, cert3}, true},
|
||||
{"example.org", key1, [][]byte{cert1, {1}}, false},
|
||||
{"example.org", key1, [][]byte{{1}}, false},
|
||||
{"example.org", key1, [][]byte{cert2}, false},
|
||||
{"example.org", key2, [][]byte{cert1}, false},
|
||||
{"example.org", key1, [][]byte{cert3}, false},
|
||||
{"example.org", key3, [][]byte{cert1}, false},
|
||||
{"example.net", key1, [][]byte{cert1}, false},
|
||||
{"example.org", key1, [][]byte{early}, false},
|
||||
{"example.org", key1, [][]byte{expired}, false},
|
||||
}
|
||||
for i, test := range tt {
|
||||
leaf, err := validCert(test.domain, test.cert, test.key)
|
||||
if err != nil && test.ok {
|
||||
t.Errorf("%d: err = %v", i, err)
|
||||
}
|
||||
if err == nil && !test.ok {
|
||||
t.Errorf("%d: err is nil", i)
|
||||
}
|
||||
if err == nil && test.ok && leaf == nil {
|
||||
t.Errorf("%d: leaf is nil", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type cacheGetFunc func(ctx context.Context, key string) ([]byte, error)
|
||||
|
||||
func (f cacheGetFunc) Get(ctx context.Context, key string) ([]byte, error) {
|
||||
return f(ctx, key)
|
||||
}
|
||||
|
||||
func (f cacheGetFunc) Put(ctx context.Context, key string, data []byte) error {
|
||||
return fmt.Errorf("unsupported Put of %q = %q", key, data)
|
||||
}
|
||||
|
||||
func (f cacheGetFunc) Delete(ctx context.Context, key string) error {
|
||||
return fmt.Errorf("unsupported Delete of %q", key)
|
||||
}
|
||||
|
||||
func TestManagerGetCertificateBogusSNI(t *testing.T) {
|
||||
m := Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Cache: cacheGetFunc(func(ctx context.Context, key string) ([]byte, error) {
|
||||
return nil, fmt.Errorf("cache.Get of %s", key)
|
||||
}),
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
wantErr string
|
||||
}{
|
||||
{"foo.com", "cache.Get of foo.com"},
|
||||
{"foo.com.", "cache.Get of foo.com"},
|
||||
{`a\b.com`, "acme/autocert: server name contains invalid character"},
|
||||
{`a/b.com`, "acme/autocert: server name contains invalid character"},
|
||||
{"", "acme/autocert: missing server name"},
|
||||
{"foo", "acme/autocert: server name component count invalid"},
|
||||
{".foo", "acme/autocert: server name component count invalid"},
|
||||
{"foo.", "acme/autocert: server name component count invalid"},
|
||||
{"fo.o", "cache.Get of fo.o"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
_, err := m.GetCertificate(&tls.ClientHelloInfo{ServerName: tt.name})
|
||||
got := fmt.Sprint(err)
|
||||
if got != tt.wantErr {
|
||||
t.Errorf("GetCertificate(SNI = %q) = %q; want %q", tt.name, got, tt.wantErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
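The cacheGetFunc helper above is a handy test-double pattern: a function type with stub Put/Delete methods satisfies the three-method Cache interface. A minimal, self-contained sketch of the same idea follows; the names in it are illustrative and not part of this diff.

package main

import (
	"context"
	"errors"
	"fmt"
)

// cache mirrors the shape of autocert.Cache: three context-aware methods.
type cache interface {
	Get(ctx context.Context, key string) ([]byte, error)
	Put(ctx context.Context, key string, data []byte) error
	Delete(ctx context.Context, key string) error
}

// getFunc adapts a single function into a read-only cache for tests.
type getFunc func(ctx context.Context, key string) ([]byte, error)

func (f getFunc) Get(ctx context.Context, key string) ([]byte, error) { return f(ctx, key) }

func (f getFunc) Put(ctx context.Context, key string, data []byte) error {
	return errors.New("read-only cache")
}

func (f getFunc) Delete(ctx context.Context, key string) error {
	return errors.New("read-only cache")
}

func main() {
	var c cache = getFunc(func(ctx context.Context, key string) ([]byte, error) {
		return []byte("stub data for " + key), nil
	})
	b, err := c.Get(context.Background(), "example.org")
	fmt.Println(string(b), err)
}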
130
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/autocert/cache.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// ErrCacheMiss is returned when a certificate is not found in cache.
|
||||
var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss")
|
||||
|
||||
// Cache is used by Manager to store and retrieve previously obtained certificates
|
||||
// as opaque data.
|
||||
//
|
||||
// The key argument of the methods refers to a domain name but need not be an FQDN.
|
||||
// Cache implementations should not rely on the key naming pattern.
|
||||
type Cache interface {
|
||||
// Get returns a certificate data for the specified key.
|
||||
// If there's no such key, Get returns ErrCacheMiss.
|
||||
Get(ctx context.Context, key string) ([]byte, error)
|
||||
|
||||
// Put stores the data in the cache under the specified key.
|
||||
// Underlying implementations may use any data storage format,
|
||||
// as long as the reverse operation, Get, results in the original data.
|
||||
Put(ctx context.Context, key string, data []byte) error
|
||||
|
||||
// Delete removes a certificate data from the cache under the specified key.
|
||||
// If there's no such key in the cache, Delete returns nil.
|
||||
Delete(ctx context.Context, key string) error
|
||||
}
|
||||
|
||||
// DirCache implements Cache using a directory on the local filesystem.
|
||||
// If the directory does not exist, it will be created with 0700 permissions.
|
||||
type DirCache string
|
||||
|
||||
// Get reads a certificate data from the specified file name.
|
||||
func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {
|
||||
name = filepath.Join(string(d), name)
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
done = make(chan struct{})
|
||||
)
|
||||
go func() {
|
||||
data, err = ioutil.ReadFile(name)
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
return data, err
|
||||
}
|
||||
|
||||
// Put writes the certificate data to the specified file name.
|
||||
// The file will be created with 0600 permissions.
|
||||
func (d DirCache) Put(ctx context.Context, name string, data []byte) error {
|
||||
if err := os.MkdirAll(string(d), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
var err error
|
||||
go func() {
|
||||
defer close(done)
|
||||
var tmp string
|
||||
if tmp, err = d.writeTempFile(name, data); err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Don't overwrite the file if the context was canceled.
|
||||
default:
|
||||
newName := filepath.Join(string(d), name)
|
||||
err = os.Rename(tmp, newName)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete removes the specified file name.
|
||||
func (d DirCache) Delete(ctx context.Context, name string) error {
|
||||
name = filepath.Join(string(d), name)
|
||||
var (
|
||||
err error
|
||||
done = make(chan struct{})
|
||||
)
|
||||
go func() {
|
||||
err = os.Remove(name)
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeTempFile writes b to a temporary file, closes the file and returns its path.
|
||||
func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) {
|
||||
// TempFile uses 0600 permissions
|
||||
f, err := ioutil.TempFile(string(d), prefix)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err := f.Write(b); err != nil {
|
||||
f.Close()
|
||||
return "", err
|
||||
}
|
||||
return f.Name(), f.Close()
|
||||
}
|
||||
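For orientation, a small usage sketch of the DirCache type added above; it assumes the standard golang.org/x/crypto/acme/autocert import path and a writable working directory, and is not part of this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	cache := autocert.DirCache("certs") // directory is created with 0700 on first Put

	ctx := context.Background()
	if err := cache.Put(ctx, "example.org", []byte("PEM-encoded cert+key")); err != nil {
		log.Fatal(err)
	}
	data, err := cache.Get(ctx, "example.org")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes cached\n", len(data))

	// A missing key is reported as autocert.ErrCacheMiss, not as an OS error.
	if _, err := cache.Get(ctx, "missing"); err == autocert.ErrCacheMiss {
		fmt.Println("cache miss")
	}
}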
58
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/autocert/cache_test.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// make sure DirCache satisfies Cache interface
|
||||
var _ Cache = DirCache("/")
|
||||
|
||||
func TestDirCache(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "autocert")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
dir = filepath.Join(dir, "certs") // a nonexistent dir
|
||||
cache := DirCache(dir)
|
||||
ctx := context.Background()
|
||||
|
||||
// test cache miss
|
||||
if _, err := cache.Get(ctx, "nonexistent"); err != ErrCacheMiss {
|
||||
t.Errorf("get: %v; want ErrCacheMiss", err)
|
||||
}
|
||||
|
||||
// test put/get
|
||||
b1 := []byte{1}
|
||||
if err := cache.Put(ctx, "dummy", b1); err != nil {
|
||||
t.Fatalf("put: %v", err)
|
||||
}
|
||||
b2, err := cache.Get(ctx, "dummy")
|
||||
if err != nil {
|
||||
t.Fatalf("get: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(b1, b2) {
|
||||
t.Errorf("b1 = %v; want %v", b1, b2)
|
||||
}
|
||||
name := filepath.Join(dir, "dummy")
|
||||
if _, err := os.Stat(name); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// test delete
|
||||
if err := cache.Delete(ctx, "dummy"); err != nil {
|
||||
t.Fatalf("delete: %v", err)
|
||||
}
|
||||
if _, err := cache.Get(ctx, "dummy"); err != ErrCacheMiss {
|
||||
t.Errorf("get: %v; want ErrCacheMiss", err)
|
||||
}
|
||||
}
|
||||
36
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/autocert/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert_test
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
)
|
||||
|
||||
func ExampleNewListener() {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintf(w, "Hello, TLS user! Your config: %+v", r.TLS)
|
||||
})
|
||||
log.Fatal(http.Serve(autocert.NewListener("example.com"), mux))
|
||||
}
|
||||
|
||||
func ExampleManager() {
|
||||
m := &autocert.Manager{
|
||||
Cache: autocert.DirCache("secret-dir"),
|
||||
Prompt: autocert.AcceptTOS,
|
||||
HostPolicy: autocert.HostWhitelist("example.org"),
|
||||
}
|
||||
go http.ListenAndServe(":http", m.HTTPHandler(nil))
|
||||
s := &http.Server{
|
||||
Addr: ":https",
|
||||
TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
|
||||
}
|
||||
s.ListenAndServeTLS("", "")
|
||||
}
|
||||
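ExampleManager above passes nil to HTTPHandler, which installs the default HTTP-to-HTTPS redirect exercised by TestHTTPHandlerDefaultFallback earlier. A brief sketch of supplying a custom fallback instead; the handler body is illustrative only and not part of this diff.

package main

import (
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("example.org"),
	}
	// Requests that are not ACME http-01 challenges fall through to this handler.
	fallback := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "https://example.org"+r.URL.RequestURI(), http.StatusFound)
	})
	log.Fatal(http.ListenAndServe(":http", m.HTTPHandler(fallback)))
}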
160
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/autocert/listener.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewListener returns a net.Listener that listens on the standard TLS
|
||||
// port (443) on all interfaces and returns *tls.Conn connections with
|
||||
// LetsEncrypt certificates for the provided domain or domains.
|
||||
//
|
||||
// It enables one-line HTTPS servers:
|
||||
//
|
||||
// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler))
|
||||
//
|
||||
// NewListener is a convenience function for a common configuration.
|
||||
// More complex or custom configurations can use the autocert.Manager
|
||||
// type instead.
|
||||
//
|
||||
// Use of this function implies acceptance of the LetsEncrypt Terms of
|
||||
// Service. If domains is not empty, the provided domains are passed
|
||||
// to HostWhitelist. If domains is empty, the listener will do
|
||||
// LetsEncrypt challenges for any requested domain, which is not
|
||||
// recommended.
|
||||
//
|
||||
// Certificates are cached in a "golang-autocert" directory under an
|
||||
// operating system-specific cache or temp directory. This may not
|
||||
// be suitable for servers spanning multiple machines.
|
||||
//
|
||||
// The returned listener uses a *tls.Config that enables HTTP/2, and
|
||||
// should only be used with servers that support HTTP/2.
|
||||
//
|
||||
// The returned Listener also enables TCP keep-alives on the accepted
|
||||
// connections. Each *tls.Conn is returned before its TLS
|
||||
// handshake has completed.
|
||||
func NewListener(domains ...string) net.Listener {
|
||||
m := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
}
|
||||
if len(domains) > 0 {
|
||||
m.HostPolicy = HostWhitelist(domains...)
|
||||
}
|
||||
dir := cacheDir()
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
log.Printf("warning: autocert.NewListener not using a cache: %v", err)
|
||||
} else {
|
||||
m.Cache = DirCache(dir)
|
||||
}
|
||||
return m.Listener()
|
||||
}
|
||||
|
||||
// Listener listens on the standard TLS port (443) on all interfaces
|
||||
// and returns a net.Listener returning *tls.Conn connections.
|
||||
//
|
||||
// The returned listener uses a *tls.Config that enables HTTP/2, and
|
||||
// should only be used with servers that support HTTP/2.
|
||||
//
|
||||
// The returned Listener also enables TCP keep-alives on the accepted
|
||||
// connections. Each *tls.Conn is returned before its TLS
|
||||
// handshake has completed.
|
||||
//
|
||||
// Unlike NewListener, it is the caller's responsibility to initialize
|
||||
// the Manager m's Prompt, Cache, HostPolicy, and other desired options.
|
||||
func (m *Manager) Listener() net.Listener {
|
||||
ln := &listener{
|
||||
m: m,
|
||||
conf: &tls.Config{
|
||||
GetCertificate: m.GetCertificate, // bonus: panic on nil m
|
||||
NextProtos: []string{"h2", "http/1.1"}, // Enable HTTP/2
|
||||
},
|
||||
}
|
||||
ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443")
|
||||
return ln
|
||||
}
|
||||
|
||||
type listener struct {
|
||||
m *Manager
|
||||
conf *tls.Config
|
||||
|
||||
tcpListener net.Listener
|
||||
tcpListenErr error
|
||||
}
|
||||
|
||||
func (ln *listener) Accept() (net.Conn, error) {
|
||||
if ln.tcpListenErr != nil {
|
||||
return nil, ln.tcpListenErr
|
||||
}
|
||||
conn, err := ln.tcpListener.Accept()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tcpConn := conn.(*net.TCPConn)
|
||||
|
||||
// Because Listener is a convenience function, help out with
|
||||
// this too. This is not possible for the caller to set once
|
||||
// we return a *tls.Conn wrapping an inaccessible net.Conn.
|
||||
// If callers don't want this, they can do things the manual
|
||||
// way and tweak as needed. But this is what net/http does
|
||||
// itself, so copy that. If net/http changes, we can change
|
||||
// here too.
|
||||
tcpConn.SetKeepAlive(true)
|
||||
tcpConn.SetKeepAlivePeriod(3 * time.Minute)
|
||||
|
||||
return tls.Server(tcpConn, ln.conf), nil
|
||||
}
|
||||
|
||||
func (ln *listener) Addr() net.Addr {
|
||||
if ln.tcpListener != nil {
|
||||
return ln.tcpListener.Addr()
|
||||
}
|
||||
// net.Listen failed. Return something non-nil in case callers
|
||||
// call Addr before Accept:
|
||||
return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443}
|
||||
}
|
||||
|
||||
func (ln *listener) Close() error {
|
||||
if ln.tcpListenErr != nil {
|
||||
return ln.tcpListenErr
|
||||
}
|
||||
return ln.tcpListener.Close()
|
||||
}
|
||||
|
||||
func homeDir() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
|
||||
}
|
||||
if h := os.Getenv("HOME"); h != "" {
|
||||
return h
|
||||
}
|
||||
return "/"
|
||||
}
|
||||
|
||||
func cacheDir() string {
|
||||
const base = "golang-autocert"
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
return filepath.Join(homeDir(), "Library", "Caches", base)
|
||||
case "windows":
|
||||
for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} {
|
||||
if v := os.Getenv(ev); v != "" {
|
||||
return filepath.Join(v, base)
|
||||
}
|
||||
}
|
||||
// Worst case:
|
||||
return filepath.Join(homeDir(), base)
|
||||
}
|
||||
if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
|
||||
return filepath.Join(xdg, base)
|
||||
}
|
||||
return filepath.Join(homeDir(), ".cache", base)
|
||||
}
|
||||
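A brief sketch of the Manager.Listener path defined above, for comparison with NewListener; it assumes ownership of example.org and an unused port 443, and is not part of this diff.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		Cache:      autocert.DirCache("certs"),
		HostPolicy: autocert.HostWhitelist("example.org"),
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello over TLS")
	})
	// m.Listener() binds :443 and hands back *tls.Conn values backed by m.GetCertificate.
	log.Fatal(http.Serve(m.Listener(), mux))
}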
126
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/autocert/renewal.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// renewJitter is the maximum deviation from Manager.RenewBefore.
|
||||
const renewJitter = time.Hour
|
||||
|
||||
// domainRenewal tracks the state used by the periodic timers
|
||||
// renewing a single domain's cert.
|
||||
type domainRenewal struct {
|
||||
m *Manager
|
||||
domain string
|
||||
key crypto.Signer
|
||||
|
||||
timerMu sync.Mutex
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
// start starts a cert renewal timer at the time
|
||||
// defined by the certificate expiration time exp.
|
||||
//
|
||||
// If the timer is already started, calling start is a noop.
|
||||
func (dr *domainRenewal) start(exp time.Time) {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer != nil {
|
||||
return
|
||||
}
|
||||
dr.timer = time.AfterFunc(dr.next(exp), dr.renew)
|
||||
}
|
||||
|
||||
// stop stops the cert renewal timer.
|
||||
// If the timer is already stopped, calling stop is a noop.
|
||||
func (dr *domainRenewal) stop() {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer == nil {
|
||||
return
|
||||
}
|
||||
dr.timer.Stop()
|
||||
dr.timer = nil
|
||||
}
|
||||
|
||||
// renew is called periodically by a timer.
|
||||
// The first renew call is kicked off by dr.start.
|
||||
func (dr *domainRenewal) renew() {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||
defer cancel()
|
||||
// TODO: rotate dr.key at some point?
|
||||
next, err := dr.do(ctx)
|
||||
if err != nil {
|
||||
next = renewJitter / 2
|
||||
next += time.Duration(pseudoRand.int63n(int64(next)))
|
||||
}
|
||||
dr.timer = time.AfterFunc(next, dr.renew)
|
||||
testDidRenewLoop(next, err)
|
||||
}
|
||||
|
||||
// do is similar to Manager.createCert but it doesn't lock a Manager.state item.
|
||||
// Instead, it requests a new certificate independently and, upon success,
|
||||
// replaces dr.m.state item with a new one and updates cache for the given domain.
|
||||
//
|
||||
// It may return immediately if the expiration date of the currently cached cert
|
||||
// is far enough in the future.
|
||||
//
|
||||
// The returned value is a time interval after which the renewal should occur again.
|
||||
func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
|
||||
// a race is likely unavoidable in a distributed environment
|
||||
// but we try nonetheless
|
||||
if tlscert, err := dr.m.cacheGet(ctx, dr.domain); err == nil {
|
||||
next := dr.next(tlscert.Leaf.NotAfter)
|
||||
if next > dr.m.renewBefore()+renewJitter {
|
||||
return next, nil
|
||||
}
|
||||
}
|
||||
|
||||
der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.domain)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
state := &certState{
|
||||
key: dr.key,
|
||||
cert: der,
|
||||
leaf: leaf,
|
||||
}
|
||||
tlscert, err := state.tlscert()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := dr.m.cachePut(ctx, dr.domain, tlscert); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
dr.m.stateMu.Lock()
|
||||
defer dr.m.stateMu.Unlock()
|
||||
// m.state is guaranteed to be non-nil at this point
|
||||
dr.m.state[dr.domain] = state
|
||||
return dr.next(leaf.NotAfter), nil
|
||||
}
|
||||
|
||||
func (dr *domainRenewal) next(expiry time.Time) time.Duration {
|
||||
d := expiry.Sub(timeNow()) - dr.m.renewBefore()
|
||||
// add a bit of randomness to renew deadline
|
||||
n := pseudoRand.int63n(int64(renewJitter))
|
||||
d -= time.Duration(n)
|
||||
if d < 0 {
|
||||
return 0
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
var testDidRenewLoop = func(next time.Duration, err error) {}
|
||||
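To make the scheduling in next concrete: a certificate expiring in 90 days, with a 30-day renew-before window (assumed here as the Manager default) and up to renewJitter of randomness, is renewed roughly 60 days out. The standalone sketch below mirrors that arithmetic; it is not the library code itself.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRenewal mirrors domainRenewal.next: time until expiry, minus the
// renew-before window, minus a random jitter so many domains don't renew at once.
func nextRenewal(expiry time.Time, renewBefore, jitter time.Duration) time.Duration {
	d := time.Until(expiry) - renewBefore
	d -= time.Duration(rand.Int63n(int64(jitter)))
	if d < 0 {
		return 0
	}
	return d
}

func main() {
	expiry := time.Now().Add(90 * 24 * time.Hour)
	// With a 30-day window and 1h jitter, this prints roughly 1440h (about 60 days).
	fmt.Println(nextRenewal(expiry, 30*24*time.Hour, time.Hour))
}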
191
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/autocert/renewal_test.go
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/acme"
|
||||
)
|
||||
|
||||
func TestRenewalNext(t *testing.T) {
|
||||
now := time.Now()
|
||||
timeNow = func() time.Time { return now }
|
||||
defer func() { timeNow = time.Now }()
|
||||
|
||||
man := &Manager{RenewBefore: 7 * 24 * time.Hour}
|
||||
defer man.stopRenew()
|
||||
tt := []struct {
|
||||
expiry time.Time
|
||||
min, max time.Duration
|
||||
}{
|
||||
{now.Add(90 * 24 * time.Hour), 83*24*time.Hour - renewJitter, 83 * 24 * time.Hour},
|
||||
{now.Add(time.Hour), 0, 1},
|
||||
{now, 0, 1},
|
||||
{now.Add(-time.Hour), 0, 1},
|
||||
}
|
||||
|
||||
dr := &domainRenewal{m: man}
|
||||
for i, test := range tt {
|
||||
next := dr.next(test.expiry)
|
||||
if next < test.min || test.max < next {
|
||||
t.Errorf("%d: next = %v; want between %v and %v", i, next, test.min, test.max)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRenewFromCache(t *testing.T) {
|
||||
const domain = "example.org"
|
||||
|
||||
// ACME CA server stub
|
||||
var ca *httptest.Server
|
||||
ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Replay-Nonce", "nonce")
|
||||
if r.Method == "HEAD" {
|
||||
// a nonce request
|
||||
return
|
||||
}
|
||||
|
||||
switch r.URL.Path {
|
||||
// discovery
|
||||
case "/":
|
||||
if err := discoTmpl.Execute(w, ca.URL); err != nil {
|
||||
t.Fatalf("discoTmpl: %v", err)
|
||||
}
|
||||
// client key registration
|
||||
case "/new-reg":
|
||||
w.Write([]byte("{}"))
|
||||
// domain authorization
|
||||
case "/new-authz":
|
||||
w.Header().Set("Location", ca.URL+"/authz/1")
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
w.Write([]byte(`{"status": "valid"}`))
|
||||
// cert request
|
||||
case "/new-cert":
|
||||
var req struct {
|
||||
CSR string `json:"csr"`
|
||||
}
|
||||
decodePayload(&req, r.Body)
|
||||
b, _ := base64.RawURLEncoding.DecodeString(req.CSR)
|
||||
csr, err := x509.ParseCertificateRequest(b)
|
||||
if err != nil {
|
||||
t.Fatalf("new-cert: CSR: %v", err)
|
||||
}
|
||||
der, err := dummyCert(csr.PublicKey, domain)
|
||||
if err != nil {
|
||||
t.Fatalf("new-cert: dummyCert: %v", err)
|
||||
}
|
||||
chainUp := fmt.Sprintf("<%s/ca-cert>; rel=up", ca.URL)
|
||||
w.Header().Set("Link", chainUp)
|
||||
w.WriteHeader(http.StatusCreated)
|
||||
w.Write(der)
|
||||
// CA chain cert
|
||||
case "/ca-cert":
|
||||
der, err := dummyCert(nil, "ca")
|
||||
if err != nil {
|
||||
t.Fatalf("ca-cert: dummyCert: %v", err)
|
||||
}
|
||||
w.Write(der)
|
||||
default:
|
||||
t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path)
|
||||
}
|
||||
}))
|
||||
defer ca.Close()
|
||||
|
||||
// use EC key to run faster on 386
|
||||
key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
man := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
Cache: newMemCache(),
|
||||
RenewBefore: 24 * time.Hour,
|
||||
Client: &acme.Client{
|
||||
Key: key,
|
||||
DirectoryURL: ca.URL,
|
||||
},
|
||||
}
|
||||
defer man.stopRenew()
|
||||
|
||||
// cache an almost expired cert
|
||||
now := time.Now()
|
||||
cert, err := dateDummyCert(key.Public(), now.Add(-2*time.Hour), now.Add(time.Minute), domain)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tlscert := &tls.Certificate{PrivateKey: key, Certificate: [][]byte{cert}}
|
||||
if err := man.cachePut(context.Background(), domain, tlscert); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// verify the renewal happened
|
||||
defer func() {
|
||||
testDidRenewLoop = func(next time.Duration, err error) {}
|
||||
}()
|
||||
done := make(chan struct{})
|
||||
testDidRenewLoop = func(next time.Duration, err error) {
|
||||
defer close(done)
|
||||
if err != nil {
|
||||
t.Errorf("testDidRenewLoop: %v", err)
|
||||
}
|
||||
// Next should be about 90 days:
|
||||
// dummyCert creates a 90-day expiry; account for man.RenewBefore.
|
||||
// Previous expiration was within 1 min.
|
||||
future := 88 * 24 * time.Hour
|
||||
if next < future {
|
||||
t.Errorf("testDidRenewLoop: next = %v; want >= %v", next, future)
|
||||
}
|
||||
|
||||
// ensure the new cert is cached
|
||||
after := time.Now().Add(future)
|
||||
tlscert, err := man.cacheGet(context.Background(), domain)
|
||||
if err != nil {
|
||||
t.Fatalf("man.cacheGet: %v", err)
|
||||
}
|
||||
if !tlscert.Leaf.NotAfter.After(after) {
|
||||
t.Errorf("cache leaf.NotAfter = %v; want > %v", tlscert.Leaf.NotAfter, after)
|
||||
}
|
||||
|
||||
// verify the old cert is also replaced in memory
|
||||
man.stateMu.Lock()
|
||||
defer man.stateMu.Unlock()
|
||||
s := man.state[domain]
|
||||
if s == nil {
|
||||
t.Fatalf("m.state[%q] is nil", domain)
|
||||
}
|
||||
tlscert, err = s.tlscert()
|
||||
if err != nil {
|
||||
t.Fatalf("s.tlscert: %v", err)
|
||||
}
|
||||
if !tlscert.Leaf.NotAfter.After(after) {
|
||||
t.Errorf("state leaf.NotAfter = %v; want > %v", tlscert.Leaf.NotAfter, after)
|
||||
}
|
||||
}
|
||||
|
||||
// trigger renew
|
||||
hello := &tls.ClientHelloInfo{ServerName: domain}
|
||||
if _, err := man.GetCertificate(hello); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// wait for renew loop
|
||||
select {
|
||||
case <-time.After(10 * time.Second):
|
||||
t.Fatal("renew took too long to occur")
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
153
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/jws.go
generated
vendored
Normal file
@@ -0,0 +1,153 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
_ "crypto/sha512" // need for EC keys
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// jwsEncodeJSON signs claimset using provided key and a nonce.
|
||||
// The result is serialized in JSON format.
|
||||
// See https://tools.ietf.org/html/rfc7515#section-7.
|
||||
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) {
|
||||
jwk, err := jwkEncode(key.Public())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
alg, sha := jwsHasher(key)
|
||||
if alg == "" || !sha.Available() {
|
||||
return nil, ErrUnsupportedKey
|
||||
}
|
||||
phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce)
|
||||
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
|
||||
cs, err := json.Marshal(claimset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload := base64.RawURLEncoding.EncodeToString(cs)
|
||||
hash := sha.New()
|
||||
hash.Write([]byte(phead + "." + payload))
|
||||
sig, err := jwsSign(key, sha, hash.Sum(nil))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
enc := struct {
|
||||
Protected string `json:"protected"`
|
||||
Payload string `json:"payload"`
|
||||
Sig string `json:"signature"`
|
||||
}{
|
||||
Protected: phead,
|
||||
Payload: payload,
|
||||
Sig: base64.RawURLEncoding.EncodeToString(sig),
|
||||
}
|
||||
return json.Marshal(&enc)
|
||||
}
|
||||
|
||||
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
|
||||
// The result is also suitable for creating a JWK thumbprint.
|
||||
// https://tools.ietf.org/html/rfc7517
|
||||
func jwkEncode(pub crypto.PublicKey) (string, error) {
|
||||
switch pub := pub.(type) {
|
||||
case *rsa.PublicKey:
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.3.1
|
||||
n := pub.N
|
||||
e := big.NewInt(int64(pub.E))
|
||||
// Field order is important.
|
||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
||||
return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`,
|
||||
base64.RawURLEncoding.EncodeToString(e.Bytes()),
|
||||
base64.RawURLEncoding.EncodeToString(n.Bytes()),
|
||||
), nil
|
||||
case *ecdsa.PublicKey:
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1
|
||||
p := pub.Curve.Params()
|
||||
n := p.BitSize / 8
|
||||
if p.BitSize%8 != 0 {
|
||||
n++
|
||||
}
|
||||
x := pub.X.Bytes()
|
||||
if n > len(x) {
|
||||
x = append(make([]byte, n-len(x)), x...)
|
||||
}
|
||||
y := pub.Y.Bytes()
|
||||
if n > len(y) {
|
||||
y = append(make([]byte, n-len(y)), y...)
|
||||
}
|
||||
// Field order is important.
|
||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
||||
return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`,
|
||||
p.Name,
|
||||
base64.RawURLEncoding.EncodeToString(x),
|
||||
base64.RawURLEncoding.EncodeToString(y),
|
||||
), nil
|
||||
}
|
||||
return "", ErrUnsupportedKey
|
||||
}
|
||||
|
||||
// jwsSign signs the digest using the given key.
|
||||
// It returns ErrUnsupportedKey if the key type is unknown.
|
||||
// The hash is used only for RSA keys.
|
||||
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return key.Sign(rand.Reader, digest, hash)
|
||||
case *ecdsa.PrivateKey:
|
||||
r, s, err := ecdsa.Sign(rand.Reader, key, digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rb, sb := r.Bytes(), s.Bytes()
|
||||
size := key.Params().BitSize / 8
|
||||
if size%8 > 0 {
|
||||
size++
|
||||
}
|
||||
sig := make([]byte, size*2)
|
||||
copy(sig[size-len(rb):], rb)
|
||||
copy(sig[size*2-len(sb):], sb)
|
||||
return sig, nil
|
||||
}
|
||||
return nil, ErrUnsupportedKey
|
||||
}
|
||||
|
||||
// jwsHasher indicates suitable JWS algorithm name and a hash function
|
||||
// to use for signing a digest with the provided key.
|
||||
// It returns ("", 0) if the key is not supported.
|
||||
func jwsHasher(key crypto.Signer) (string, crypto.Hash) {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return "RS256", crypto.SHA256
|
||||
case *ecdsa.PrivateKey:
|
||||
switch key.Params().Name {
|
||||
case "P-256":
|
||||
return "ES256", crypto.SHA256
|
||||
case "P-384":
|
||||
return "ES384", crypto.SHA384
|
||||
case "P-521":
|
||||
return "ES512", crypto.SHA512
|
||||
}
|
||||
}
|
||||
return "", 0
|
||||
}
|
||||
|
||||
// JWKThumbprint creates a JWK thumbprint out of pub
|
||||
// as specified in https://tools.ietf.org/html/rfc7638.
|
||||
func JWKThumbprint(pub crypto.PublicKey) (string, error) {
|
||||
jwk, err := jwkEncode(pub)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b := sha256.Sum256([]byte(jwk))
|
||||
return base64.RawURLEncoding.EncodeToString(b[:]), nil
|
||||
}
|
||||
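The exported JWKThumbprint above is also useful outside the client, for example when building key-authorization strings. A minimal sketch, assuming the standard golang.org/x/crypto/acme import path; not part of this diff.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/acme"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	// The thumbprint is base64url(SHA-256 of the canonical JWK), per RFC 7638.
	th, err := acme.JWKThumbprint(key.Public())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(th)
}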
319
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/jws_test.go
generated
vendored
Normal file
@@ -0,0 +1,319 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
testKeyPEM = `
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEA4xgZ3eRPkwoRvy7qeRUbmMDe0V+xH9eWLdu0iheeLlrmD2mq
|
||||
WXfP9IeSKApbn34g8TuAS9g5zhq8ELQ3kmjr+KV86GAMgI6VAcGlq3QrzpTCf/30
|
||||
Ab7+zawrfRaFONa1HwEzPY1KHnGVkxJc85gNkwYI9SY2RHXtvln3zs5wITNrdosq
|
||||
EXeaIkVYBEhbhNu54pp3kxo6TuWLi9e6pXeWetEwmlBwtWZlPoib2j3TxLBksKZf
|
||||
oyFyek380mHgJAumQ/I2fjj98/97mk3ihOY4AgVdCDj1z/GCoZkG5Rq7nbCGyosy
|
||||
KWyDX00Zs+nNqVhoLeIvXC4nnWdJMZ6rogxyQQIDAQABAoIBACIEZTOI1Kao9nmV
|
||||
9IeIsuaR1Y61b9neOF/MLmIVIZu+AAJFCMB4Iw11FV6sFodwpEyeZhx2WkpWVN+H
|
||||
r19eGiLX3zsL0DOdqBJoSIHDWCCMxgnYJ6nvS0nRxX3qVrBp8R2g12Ub+gNPbmFm
|
||||
ecf/eeERIVxfifd9VsyRu34eDEvcmKFuLYbElFcPh62xE3x12UZvV/sN7gXbawpP
|
||||
G+w255vbE5MoaKdnnO83cTFlcHvhn24M/78qP7Te5OAeelr1R89kYxQLpuGe4fbS
|
||||
zc6E3ym5Td6urDetGGrSY1Eu10/8sMusX+KNWkm+RsBRbkyKq72ks/qKpOxOa+c6
|
||||
9gm+Y8ECgYEA/iNUyg1ubRdH11p82l8KHtFC1DPE0V1gSZsX29TpM5jS4qv46K+s
|
||||
8Ym1zmrORM8x+cynfPx1VQZQ34EYeCMIX212ryJ+zDATl4NE0I4muMvSiH9vx6Xc
|
||||
7FmhNnaYzPsBL5Tm9nmtQuP09YEn8poiOJFiDs/4olnD5ogA5O4THGkCgYEA5MIL
|
||||
qWYBUuqbEWLRtMruUtpASclrBqNNsJEsMGbeqBJmoMxdHeSZckbLOrqm7GlMyNRJ
|
||||
Ne/5uWRGSzaMYuGmwsPpERzqEvYFnSrpjW5YtXZ+JtxFXNVfm9Z1gLLgvGpOUCIU
|
||||
RbpoDckDe1vgUuk3y5+DjZihs+rqIJ45XzXTzBkCgYBWuf3segruJZy5rEKhTv+o
|
||||
JqeUvRn0jNYYKFpLBeyTVBrbie6GkbUGNIWbrK05pC+c3K9nosvzuRUOQQL1tJbd
|
||||
4gA3oiD9U4bMFNr+BRTHyZ7OQBcIXdz3t1qhuHVKtnngIAN1p25uPlbRFUNpshnt
|
||||
jgeVoHlsBhApcs5DUc+pyQKBgDzeHPg/+g4z+nrPznjKnktRY1W+0El93kgi+J0Q
|
||||
YiJacxBKEGTJ1MKBb8X6sDurcRDm22wMpGfd9I5Cv2v4GsUsF7HD/cx5xdih+G73
|
||||
c4clNj/k0Ff5Nm1izPUno4C+0IOl7br39IPmfpSuR6wH/h6iHQDqIeybjxyKvT1G
|
||||
N0rRAoGBAKGD+4ZI/E1MoJ5CXB8cDDMHagbE3cq/DtmYzE2v1DFpQYu5I4PCm5c7
|
||||
EQeIP6dZtv8IMgtGIb91QX9pXvP0aznzQKwYIA8nZgoENCPfiMTPiEDT9e/0lObO
|
||||
9XWsXpbSTsRPj0sv1rB+UzBJ0PgjK4q2zOF0sNo7b1+6nlM3BWPx
|
||||
-----END RSA PRIVATE KEY-----
|
||||
`
|
||||
|
||||
// This thumbprint is for the testKey defined above.
|
||||
testKeyThumbprint = "6nicxzh6WETQlrvdchkz-U3e3DOQZ4heJKU63rfqMqQ"
|
||||
|
||||
// openssl ecparam -name secp256k1 -genkey -noout
|
||||
testKeyECPEM = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MHcCAQEEIK07hGLr0RwyUdYJ8wbIiBS55CjnkMD23DWr+ccnypWLoAoGCCqGSM49
|
||||
AwEHoUQDQgAE5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HThqIrvawF5
|
||||
QAaS/RNouybCiRhRjI3EaxLkQwgrCw0gqQ==
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
// openssl ecparam -name secp384r1 -genkey -noout
|
||||
testKeyEC384PEM = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDAQ4lNtXRORWr1bgKR1CGysr9AJ9SyEk4jiVnlUWWUChmSNL+i9SLSD
|
||||
Oe/naPqXJ6CgBwYFK4EEACKhZANiAAQzKtj+Ms0vHoTX5dzv3/L5YMXOWuI5UKRj
|
||||
JigpahYCqXD2BA1j0E/2xt5vlPf+gm0PL+UHSQsCokGnIGuaHCsJAp3ry0gHQEke
|
||||
WYXapUUFdvaK1R2/2hn5O+eiQM8YzCg=
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
// openssl ecparam -name secp521r1 -genkey -noout
|
||||
testKeyEC512PEM = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIHcAgEBBEIBSNZKFcWzXzB/aJClAb305ibalKgtDA7+70eEkdPt28/3LZMM935Z
|
||||
KqYHh/COcxuu3Kt8azRAUz3gyr4zZKhlKUSgBwYFK4EEACOhgYkDgYYABAHUNKbx
|
||||
7JwC7H6pa2sV0tERWhHhB3JmW+OP6SUgMWryvIKajlx73eS24dy4QPGrWO9/ABsD
|
||||
FqcRSkNVTXnIv6+0mAF25knqIBIg5Q8M9BnOu9GGAchcwt3O7RDHmqewnJJDrbjd
|
||||
GGnm6rb+NnWR9DIopM0nKNkToWoF/hzopxu4Ae/GsQ==
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
// 1. openssl ec -in key.pem -noout -text
|
||||
// 2. remove first byte, 04 (the header); the rest is X and Y
|
||||
// 3. convert each with: echo <val> | xxd -r -p | base64 -w 100 | tr -d '=' | tr '/+' '_-'
|
||||
testKeyECPubX = "5lhEug5xK4xBDZ2nAbaxLtaLiv85bxJ7ePd1dkO23HQ"
|
||||
testKeyECPubY = "4aiK72sBeUAGkv0TaLsmwokYUYyNxGsS5EMIKwsNIKk"
|
||||
testKeyEC384PubX = "MyrY_jLNLx6E1-Xc79_y-WDFzlriOVCkYyYoKWoWAqlw9gQNY9BP9sbeb5T3_oJt"
|
||||
testKeyEC384PubY = "Dy_lB0kLAqJBpyBrmhwrCQKd68tIB0BJHlmF2qVFBXb2itUdv9oZ-TvnokDPGMwo"
|
||||
testKeyEC512PubX = "AdQ0pvHsnALsfqlraxXS0RFaEeEHcmZb44_pJSAxavK8gpqOXHvd5Lbh3LhA8atY738AGwMWpxFKQ1VNeci_r7SY"
|
||||
testKeyEC512PubY = "AXbmSeogEiDlDwz0Gc670YYByFzC3c7tEMeap7CckkOtuN0Yaebqtv42dZH0MiikzSco2ROhagX-HOinG7gB78ax"
|
||||
|
||||
// echo -n '{"crv":"P-256","kty":"EC","x":"<testKeyECPubX>","y":"<testKeyECPubY>"}' | \
|
||||
// openssl dgst -binary -sha256 | base64 | tr -d '=' | tr '/+' '_-'
|
||||
testKeyECThumbprint = "zedj-Bd1Zshp8KLePv2MB-lJ_Hagp7wAwdkA0NUTniU"
|
||||
)
|
||||
|
||||
var (
|
||||
testKey *rsa.PrivateKey
|
||||
testKeyEC *ecdsa.PrivateKey
|
||||
testKeyEC384 *ecdsa.PrivateKey
|
||||
testKeyEC512 *ecdsa.PrivateKey
|
||||
)
|
||||
|
||||
func init() {
|
||||
testKey = parseRSA(testKeyPEM, "testKeyPEM")
|
||||
testKeyEC = parseEC(testKeyECPEM, "testKeyECPEM")
|
||||
testKeyEC384 = parseEC(testKeyEC384PEM, "testKeyEC384PEM")
|
||||
testKeyEC512 = parseEC(testKeyEC512PEM, "testKeyEC512PEM")
|
||||
}
|
||||
|
||||
func decodePEM(s, name string) []byte {
|
||||
d, _ := pem.Decode([]byte(s))
|
||||
if d == nil {
|
||||
panic("no block found in " + name)
|
||||
}
|
||||
return d.Bytes
|
||||
}
|
||||
|
||||
func parseRSA(s, name string) *rsa.PrivateKey {
|
||||
b := decodePEM(s, name)
|
||||
k, err := x509.ParsePKCS1PrivateKey(b)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("%s: %v", name, err))
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
func parseEC(s, name string) *ecdsa.PrivateKey {
|
||||
b := decodePEM(s, name)
|
||||
k, err := x509.ParseECPrivateKey(b)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("%s: %v", name, err))
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
func TestJWSEncodeJSON(t *testing.T) {
|
||||
claims := struct{ Msg string }{"Hello JWS"}
|
||||
// JWS signed with testKey and "nonce" as the nonce value
|
||||
// JSON-serialized JWS fields are split for easier testing
|
||||
const (
|
||||
// {"alg":"RS256","jwk":{"e":"AQAB","kty":"RSA","n":"..."},"nonce":"nonce"}
|
||||
protected = "eyJhbGciOiJSUzI1NiIsImp3ayI6eyJlIjoiQVFBQiIsImt0eSI6" +
|
||||
"IlJTQSIsIm4iOiI0eGdaM2VSUGt3b1J2eTdxZVJVYm1NRGUwVi14" +
|
||||
"SDllV0xkdTBpaGVlTGxybUQybXFXWGZQOUllU0tBcGJuMzRnOFR1" +
|
||||
"QVM5ZzV6aHE4RUxRM2ttanItS1Y4NkdBTWdJNlZBY0dscTNRcnpw" +
|
||||
"VENmXzMwQWI3LXphd3JmUmFGT05hMUh3RXpQWTFLSG5HVmt4SmM4" +
|
||||
"NWdOa3dZSTlTWTJSSFh0dmxuM3pzNXdJVE5yZG9zcUVYZWFJa1ZZ" +
|
||||
"QkVoYmhOdTU0cHAza3hvNlR1V0xpOWU2cFhlV2V0RXdtbEJ3dFda" +
|
||||
"bFBvaWIyajNUeExCa3NLWmZveUZ5ZWszODBtSGdKQXVtUV9JMmZq" +
|
||||
"ajk4Xzk3bWszaWhPWTRBZ1ZkQ0RqMXpfR0NvWmtHNVJxN25iQ0d5" +
|
||||
"b3N5S1d5RFgwMFpzLW5OcVZob0xlSXZYQzRubldkSk1aNnJvZ3h5" +
|
||||
"UVEifSwibm9uY2UiOiJub25jZSJ9"
|
||||
// {"Msg":"Hello JWS"}
|
||||
payload = "eyJNc2ciOiJIZWxsbyBKV1MifQ"
|
||||
signature = "eAGUikStX_UxyiFhxSLMyuyBcIB80GeBkFROCpap2sW3EmkU_ggF" +
|
||||
"knaQzxrTfItICSAXsCLIquZ5BbrSWA_4vdEYrwWtdUj7NqFKjHRa" +
|
||||
"zpLHcoR7r1rEHvkoP1xj49lS5fc3Wjjq8JUhffkhGbWZ8ZVkgPdC" +
|
||||
"4tMBWiQDoth-x8jELP_3LYOB_ScUXi2mETBawLgOT2K8rA0Vbbmx" +
|
||||
"hWNlOWuUf-8hL5YX4IOEwsS8JK_TrTq5Zc9My0zHJmaieqDV0UlP" +
|
||||
"k0onFjPFkGm7MrPSgd0MqRG-4vSAg2O4hDo7rKv4n8POjjXlNQvM" +
|
||||
"9IPLr8qZ7usYBKhEGwX3yq_eicAwBw"
|
||||
)
|
||||
|
||||
b, err := jwsEncodeJSON(claims, testKey, "nonce")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var jws struct{ Protected, Payload, Signature string }
|
||||
if err := json.Unmarshal(b, &jws); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if jws.Protected != protected {
|
||||
t.Errorf("protected:\n%s\nwant:\n%s", jws.Protected, protected)
|
||||
}
|
||||
if jws.Payload != payload {
|
||||
t.Errorf("payload:\n%s\nwant:\n%s", jws.Payload, payload)
|
||||
}
|
||||
if jws.Signature != signature {
|
||||
t.Errorf("signature:\n%s\nwant:\n%s", jws.Signature, signature)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJWSEncodeJSONEC(t *testing.T) {
|
||||
tt := []struct {
|
||||
key *ecdsa.PrivateKey
|
||||
x, y string
|
||||
alg, crv string
|
||||
}{
|
||||
{testKeyEC, testKeyECPubX, testKeyECPubY, "ES256", "P-256"},
|
||||
{testKeyEC384, testKeyEC384PubX, testKeyEC384PubY, "ES384", "P-384"},
|
||||
{testKeyEC512, testKeyEC512PubX, testKeyEC512PubY, "ES512", "P-521"},
|
||||
}
|
||||
for i, test := range tt {
|
||||
claims := struct{ Msg string }{"Hello JWS"}
|
||||
b, err := jwsEncodeJSON(claims, test.key, "nonce")
|
||||
if err != nil {
|
||||
t.Errorf("%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
var jws struct{ Protected, Payload, Signature string }
|
||||
if err := json.Unmarshal(b, &jws); err != nil {
|
||||
t.Errorf("%d: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
b, err = base64.RawURLEncoding.DecodeString(jws.Protected)
|
||||
if err != nil {
|
||||
t.Errorf("%d: jws.Protected: %v", i, err)
|
||||
}
|
||||
var head struct {
|
||||
Alg string
|
||||
Nonce string
|
||||
JWK struct {
|
||||
Crv string
|
||||
Kty string
|
||||
X string
|
||||
Y string
|
||||
} `json:"jwk"`
|
||||
}
|
||||
if err := json.Unmarshal(b, &head); err != nil {
|
||||
t.Errorf("%d: jws.Protected: %v", i, err)
|
||||
}
|
||||
if head.Alg != test.alg {
|
||||
t.Errorf("%d: head.Alg = %q; want %q", i, head.Alg, test.alg)
|
||||
}
|
||||
if head.Nonce != "nonce" {
|
||||
t.Errorf("%d: head.Nonce = %q; want nonce", i, head.Nonce)
|
||||
}
|
||||
if head.JWK.Crv != test.crv {
|
||||
t.Errorf("%d: head.JWK.Crv = %q; want %q", i, head.JWK.Crv, test.crv)
|
||||
}
|
||||
if head.JWK.Kty != "EC" {
|
||||
t.Errorf("%d: head.JWK.Kty = %q; want EC", i, head.JWK.Kty)
|
||||
}
|
||||
if head.JWK.X != test.x {
|
||||
t.Errorf("%d: head.JWK.X = %q; want %q", i, head.JWK.X, test.x)
|
||||
}
|
||||
if head.JWK.Y != test.y {
|
||||
t.Errorf("%d: head.JWK.Y = %q; want %q", i, head.JWK.Y, test.y)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestJWKThumbprintRSA(t *testing.T) {
|
||||
// Key example from RFC 7638
|
||||
const base64N = "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAt" +
|
||||
"VT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn6" +
|
||||
"4tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FD" +
|
||||
"W2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n9" +
|
||||
"1CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINH" +
|
||||
"aQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw"
|
||||
const base64E = "AQAB"
|
||||
const expected = "NzbLsXh8uDCcd-6MNwXF4W_7noWXFZAfHkxZsRGC9Xs"
|
||||
|
||||
b, err := base64.RawURLEncoding.DecodeString(base64N)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing example key N: %v", err)
|
||||
}
|
||||
n := new(big.Int).SetBytes(b)
|
||||
|
||||
b, err = base64.RawURLEncoding.DecodeString(base64E)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing example key E: %v", err)
|
||||
}
|
||||
e := new(big.Int).SetBytes(b)
|
||||
|
||||
pub := &rsa.PublicKey{N: n, E: int(e.Uint64())}
|
||||
th, err := JWKThumbprint(pub)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if th != expected {
|
||||
t.Errorf("thumbprint = %q; want %q", th, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJWKThumbprintEC(t *testing.T) {
|
||||
// Key example from RFC 7520
|
||||
// expected was computed with
|
||||
// echo -n '{"crv":"P-521","kty":"EC","x":"<base64X>","y":"<base64Y>"}' | \
|
||||
// openssl dgst -binary -sha256 | \
|
||||
// base64 | \
|
||||
// tr -d '=' | tr '/+' '_-'
|
||||
const (
|
||||
base64X = "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9A5RkT" +
|
||||
"KqjqvjyekWF-7ytDyRXYgCF5cj0Kt"
|
||||
base64Y = "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVySsUda" +
|
||||
"QkAgDPrwQrJmbnX9cwlGfP-HqHZR1"
|
||||
expected = "dHri3SADZkrush5HU_50AoRhcKFryN-PI6jPBtPL55M"
|
||||
)
|
||||
|
||||
b, err := base64.RawURLEncoding.DecodeString(base64X)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing example key X: %v", err)
|
||||
}
|
||||
x := new(big.Int).SetBytes(b)
|
||||
|
||||
b, err = base64.RawURLEncoding.DecodeString(base64Y)
|
||||
if err != nil {
|
||||
t.Fatalf("Error parsing example key Y: %v", err)
|
||||
}
|
||||
y := new(big.Int).SetBytes(b)
|
||||
|
||||
pub := &ecdsa.PublicKey{Curve: elliptic.P521(), X: x, Y: y}
|
||||
th, err := JWKThumbprint(pub)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if th != expected {
|
||||
t.Errorf("thumbprint = %q; want %q", th, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestJWKThumbprintErrUnsupportedKey(t *testing.T) {
|
||||
_, err := JWKThumbprint(struct{}{})
|
||||
if err != ErrUnsupportedKey {
|
||||
t.Errorf("err = %q; want %q", err, ErrUnsupportedKey)
|
||||
}
|
||||
}
|
||||
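The tests above pin JWKThumbprint to the RFC 7638 and RFC 7520 vectors. As a rough usage sketch — assuming the exported golang.org/x/crypto/acme API rather than this vendored copy — a client can compute the thumbprint of its own account key like this:

package main

import (
        "crypto/ecdsa"
        "crypto/elliptic"
        "crypto/rand"
        "fmt"
        "log"

        "golang.org/x/crypto/acme"
)

func main() {
        // Generate an ECDSA account key and print its RFC 7638 thumbprint,
        // the same kind of value the tests above compare against fixed vectors.
        key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
        if err != nil {
                log.Fatal(err)
        }
        th, err := acme.JWKThumbprint(key.Public())
        if err != nil {
                log.Fatal(err)
        }
        fmt.Println("thumbprint:", th)
}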
329
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/types.go
generated
vendored
Normal file
@@ -0,0 +1,329 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ACME server response statuses used to describe Authorization and Challenge states.
|
||||
const (
|
||||
StatusUnknown = "unknown"
|
||||
StatusPending = "pending"
|
||||
StatusProcessing = "processing"
|
||||
StatusValid = "valid"
|
||||
StatusInvalid = "invalid"
|
||||
StatusRevoked = "revoked"
|
||||
)
|
||||
|
||||
// CRLReasonCode identifies the reason for a certificate revocation.
|
||||
type CRLReasonCode int
|
||||
|
||||
// CRL reason codes as defined in RFC 5280.
|
||||
const (
|
||||
CRLReasonUnspecified CRLReasonCode = 0
|
||||
CRLReasonKeyCompromise CRLReasonCode = 1
|
||||
CRLReasonCACompromise CRLReasonCode = 2
|
||||
CRLReasonAffiliationChanged CRLReasonCode = 3
|
||||
CRLReasonSuperseded CRLReasonCode = 4
|
||||
CRLReasonCessationOfOperation CRLReasonCode = 5
|
||||
CRLReasonCertificateHold CRLReasonCode = 6
|
||||
CRLReasonRemoveFromCRL CRLReasonCode = 8
|
||||
CRLReasonPrivilegeWithdrawn CRLReasonCode = 9
|
||||
CRLReasonAACompromise CRLReasonCode = 10
|
||||
)
|
||||
|
||||
// ErrUnsupportedKey is returned when an unsupported key type is encountered.
|
||||
var ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported")
|
||||
|
||||
// Error is an ACME error, defined in Problem Details for HTTP APIs doc
|
||||
// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem.
|
||||
type Error struct {
|
||||
// StatusCode is the HTTP status code generated by the origin server.
|
||||
StatusCode int
|
||||
// ProblemType is a URI reference that identifies the problem type,
|
||||
// typically in a "urn:acme:error:xxx" form.
|
||||
ProblemType string
|
||||
// Detail is a human-readable explanation specific to this occurrence of the problem.
|
||||
Detail string
|
||||
// Header is the original server error response headers.
|
||||
// It may be nil.
|
||||
Header http.Header
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail)
|
||||
}
|
||||
|
||||
// AuthorizationError indicates that an authorization for an identifier
|
||||
// did not succeed.
|
||||
// It contains all errors from Challenge items of the failed Authorization.
|
||||
type AuthorizationError struct {
|
||||
// URI uniquely identifies the failed Authorization.
|
||||
URI string
|
||||
|
||||
// Identifier is an AuthzID.Value of the failed Authorization.
|
||||
Identifier string
|
||||
|
||||
// Errors is a collection of non-nil error values of Challenge items
|
||||
// of the failed Authorization.
|
||||
Errors []error
|
||||
}
|
||||
|
||||
func (a *AuthorizationError) Error() string {
|
||||
e := make([]string, len(a.Errors))
|
||||
for i, err := range a.Errors {
|
||||
e[i] = err.Error()
|
||||
}
|
||||
return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; "))
|
||||
}
|
||||
|
||||
// RateLimit reports whether err represents a rate limit error and
|
||||
// any Retry-After duration returned by the server.
|
||||
//
|
||||
// See the following for more details on rate limiting:
|
||||
// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6
|
||||
func RateLimit(err error) (time.Duration, bool) {
|
||||
e, ok := err.(*Error)
|
||||
if !ok {
|
||||
return 0, false
|
||||
}
|
||||
// Some CA implementations may return incorrect values.
|
||||
// Use case-insensitive comparison.
|
||||
if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") {
|
||||
return 0, false
|
||||
}
|
||||
if e.Header == nil {
|
||||
return 0, true
|
||||
}
|
||||
return retryAfter(e.Header.Get("Retry-After"), 0), true
|
||||
}
|
||||
|
||||
// Account is a user account. It is associated with a private key.
|
||||
type Account struct {
|
||||
// URI is the account unique ID, which is also a URL used to retrieve
|
||||
// account data from the CA.
|
||||
URI string
|
||||
|
||||
// Contact is a slice of contact info used during registration.
|
||||
Contact []string
|
||||
|
||||
// The terms the user has agreed to.
|
||||
// A value not matching CurrentTerms indicates that the user hasn't agreed
|
||||
// to the actual Terms of Service of the CA.
|
||||
AgreedTerms string
|
||||
|
||||
// Actual terms of a CA.
|
||||
CurrentTerms string
|
||||
|
||||
// Authz is the authorization URL used to initiate a new authz flow.
|
||||
Authz string
|
||||
|
||||
// Authorizations is a URI from which a list of authorizations
|
||||
// granted to this account can be fetched via a GET request.
|
||||
Authorizations string
|
||||
|
||||
// Certificates is a URI from which a list of certificates
|
||||
// issued for this account can be fetched via a GET request.
|
||||
Certificates string
|
||||
}
|
||||
|
||||
// Directory is ACME server discovery data.
|
||||
type Directory struct {
|
||||
// RegURL is an account endpoint URL, allowing for creating new
|
||||
// and modifying existing accounts.
|
||||
RegURL string
|
||||
|
||||
// AuthzURL is used to initiate Identifier Authorization flow.
|
||||
AuthzURL string
|
||||
|
||||
// CertURL is a new certificate issuance endpoint URL.
|
||||
CertURL string
|
||||
|
||||
// RevokeURL is used to initiate a certificate revocation flow.
|
||||
RevokeURL string
|
||||
|
||||
// Term is a URI identifying the current terms of service.
|
||||
Terms string
|
||||
|
||||
// Website is an HTTP or HTTPS URL locating a website
|
||||
// providing more information about the ACME server.
|
||||
Website string
|
||||
|
||||
// CAA consists of lowercase hostname elements, which the ACME server
|
||||
// recognises as referring to itself for the purposes of CAA record validation
|
||||
// as defined in RFC6844.
|
||||
CAA []string
|
||||
}
|
||||
|
||||
// Challenge encodes a returned CA challenge.
|
||||
// Its Error field may be non-nil if the challenge is part of an Authorization
|
||||
// with StatusInvalid.
|
||||
type Challenge struct {
|
||||
// Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01".
|
||||
Type string
|
||||
|
||||
// URI is where a challenge response can be posted to.
|
||||
URI string
|
||||
|
||||
// Token is a random value that uniquely identifies the challenge.
|
||||
Token string
|
||||
|
||||
// Status identifies the status of this challenge.
|
||||
Status string
|
||||
|
||||
// Error indicates the reason for an authorization failure
|
||||
// when this challenge was used.
|
||||
// The type of a non-nil value is *Error.
|
||||
Error error
|
||||
}
|
||||
|
||||
// Authorization encodes an authorization response.
|
||||
type Authorization struct {
|
||||
// URI uniquely identifies an authorization.
|
||||
URI string
|
||||
|
||||
// Status identifies the status of an authorization.
|
||||
Status string
|
||||
|
||||
// Identifier is what the account is authorized to represent.
|
||||
Identifier AuthzID
|
||||
|
||||
// Challenges that the client needs to fulfill in order to prove possession
|
||||
// of the identifier (for pending authorizations).
|
||||
// For final authorizations, the challenges that were used.
|
||||
Challenges []*Challenge
|
||||
|
||||
// A collection of sets of challenges, each of which would be sufficient
|
||||
// to prove possession of the identifier.
|
||||
// Clients must complete a set of challenges that covers at least one set.
|
||||
// Challenges are identified by their indices in the challenges array.
|
||||
// If this field is empty, the client needs to complete all challenges.
|
||||
Combinations [][]int
|
||||
}
|
||||
|
||||
// AuthzID is an identifier that an account is authorized to represent.
|
||||
type AuthzID struct {
|
||||
Type string // The type of identifier, e.g. "dns".
|
||||
Value string // The identifier itself, e.g. "example.org".
|
||||
}
|
||||
|
||||
// wireAuthz is ACME JSON representation of Authorization objects.
|
||||
type wireAuthz struct {
|
||||
Status string
|
||||
Challenges []wireChallenge
|
||||
Combinations [][]int
|
||||
Identifier struct {
|
||||
Type string
|
||||
Value string
|
||||
}
|
||||
}
|
||||
|
||||
func (z *wireAuthz) authorization(uri string) *Authorization {
|
||||
a := &Authorization{
|
||||
URI: uri,
|
||||
Status: z.Status,
|
||||
Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value},
|
||||
Combinations: z.Combinations, // shallow copy
|
||||
Challenges: make([]*Challenge, len(z.Challenges)),
|
||||
}
|
||||
for i, v := range z.Challenges {
|
||||
a.Challenges[i] = v.challenge()
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func (z *wireAuthz) error(uri string) *AuthorizationError {
|
||||
err := &AuthorizationError{
|
||||
URI: uri,
|
||||
Identifier: z.Identifier.Value,
|
||||
}
|
||||
for _, raw := range z.Challenges {
|
||||
if raw.Error != nil {
|
||||
err.Errors = append(err.Errors, raw.Error.error(nil))
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// wireChallenge is ACME JSON challenge representation.
|
||||
type wireChallenge struct {
|
||||
URI string `json:"uri"`
|
||||
Type string
|
||||
Token string
|
||||
Status string
|
||||
Error *wireError
|
||||
}
|
||||
|
||||
func (c *wireChallenge) challenge() *Challenge {
|
||||
v := &Challenge{
|
||||
URI: c.URI,
|
||||
Type: c.Type,
|
||||
Token: c.Token,
|
||||
Status: c.Status,
|
||||
}
|
||||
if v.Status == "" {
|
||||
v.Status = StatusPending
|
||||
}
|
||||
if c.Error != nil {
|
||||
v.Error = c.Error.error(nil)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// wireError is a subset of fields of the Problem Details object
|
||||
// as described in https://tools.ietf.org/html/rfc7807#section-3.1.
|
||||
type wireError struct {
|
||||
Status int
|
||||
Type string
|
||||
Detail string
|
||||
}
|
||||
|
||||
func (e *wireError) error(h http.Header) *Error {
|
||||
return &Error{
|
||||
StatusCode: e.Status,
|
||||
ProblemType: e.Type,
|
||||
Detail: e.Detail,
|
||||
Header: h,
|
||||
}
|
||||
}
|
||||
|
||||
// CertOption is an optional argument type for the TLSSNIxChallengeCert methods for
|
||||
// customizing a temporary certificate for TLS-SNI challenges.
|
||||
type CertOption interface {
|
||||
privateCertOpt()
|
||||
}
|
||||
|
||||
// WithKey creates an option holding a private/public key pair.
|
||||
// The private part signs a certificate, and the public part represents the signee.
|
||||
func WithKey(key crypto.Signer) CertOption {
|
||||
return &certOptKey{key}
|
||||
}
|
||||
|
||||
type certOptKey struct {
|
||||
key crypto.Signer
|
||||
}
|
||||
|
||||
func (*certOptKey) privateCertOpt() {}
|
||||
|
||||
// WithTemplate creates an option for specifying a certificate template.
|
||||
// See x509.CreateCertificate for template usage details.
|
||||
//
|
||||
// In TLSSNIxChallengeCert methods, the template is also used as parent,
|
||||
// resulting in a self-signed certificate.
|
||||
// The DNSNames field of t is always overwritten for tls-sni challenge certs.
|
||||
func WithTemplate(t *x509.Certificate) CertOption {
|
||||
return (*certOptTemplate)(t)
|
||||
}
|
||||
|
||||
type certOptTemplate x509.Certificate
|
||||
|
||||
func (*certOptTemplate) privateCertOpt() {}
|
||||
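RateLimit above only recognizes *Error values carrying a rateLimited problem type, so callers usually wrap their client call in a retry loop. A hedged sketch of that pattern, assuming the exported golang.org/x/crypto/acme client API (Client.Register, AcceptTOS) rather than this vendored copy:

package acmeutil

import (
        "context"
        "log"
        "time"

        "golang.org/x/crypto/acme"
)

// registerWithBackoff retries Register while the CA reports a rateLimited
// problem, sleeping for the Retry-After duration that RateLimit extracts
// from the error.
func registerWithBackoff(ctx context.Context, client *acme.Client, acct *acme.Account) (*acme.Account, error) {
        for {
                a, err := client.Register(ctx, acct, acme.AcceptTOS)
                if err == nil {
                        return a, nil
                }
                d, ok := acme.RateLimit(err)
                if !ok {
                        return nil, err // not a rate limit problem
                }
                if d == 0 {
                        d = time.Minute // no Retry-After header; the fallback is an assumption
                }
                log.Printf("rate limited; retrying in %v", d)
                select {
                case <-time.After(d):
                case <-ctx.Done():
                        return nil, ctx.Err()
                }
        }
}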
63
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/acme/types_test.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRateLimit(t *testing.T) {
|
||||
now := time.Date(2017, 04, 27, 10, 0, 0, 0, time.UTC)
|
||||
f := timeNow
|
||||
defer func() { timeNow = f }()
|
||||
timeNow = func() time.Time { return now }
|
||||
|
||||
h120, hTime := http.Header{}, http.Header{}
|
||||
h120.Set("Retry-After", "120")
|
||||
hTime.Set("Retry-After", "Tue Apr 27 11:00:00 2017")
|
||||
|
||||
err1 := &Error{
|
||||
ProblemType: "urn:ietf:params:acme:error:nolimit",
|
||||
Header: h120,
|
||||
}
|
||||
err2 := &Error{
|
||||
ProblemType: "urn:ietf:params:acme:error:rateLimited",
|
||||
Header: h120,
|
||||
}
|
||||
err3 := &Error{
|
||||
ProblemType: "urn:ietf:params:acme:error:rateLimited",
|
||||
Header: nil,
|
||||
}
|
||||
err4 := &Error{
|
||||
ProblemType: "urn:ietf:params:acme:error:rateLimited",
|
||||
Header: hTime,
|
||||
}
|
||||
|
||||
tt := []struct {
|
||||
err error
|
||||
res time.Duration
|
||||
ok bool
|
||||
}{
|
||||
{nil, 0, false},
|
||||
{errors.New("dummy"), 0, false},
|
||||
{err1, 0, false},
|
||||
{err2, 2 * time.Minute, true},
|
||||
{err3, 0, true},
|
||||
{err4, time.Hour, true},
|
||||
}
|
||||
for i, test := range tt {
|
||||
res, ok := RateLimit(test.err)
|
||||
if ok != test.ok {
|
||||
t.Errorf("%d: RateLimit(%+v): ok = %v; want %v", i, test.err, ok, test.ok)
|
||||
continue
|
||||
}
|
||||
if res != test.res {
|
||||
t.Errorf("%d: RateLimit(%+v) = %v; want %v", i, test.err, res, test.res)
|
||||
}
|
||||
}
|
||||
}
|
||||
285
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/argon2/argon2.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package argon2 implements the key derivation function Argon2.
|
||||
// Argon2 was selected as the winner of the Password Hashing Competition and can
|
||||
// be used to derive cryptographic keys from passwords.
|
||||
//
|
||||
// For a detailed specification of Argon2 see [1].
|
||||
//
|
||||
// If you aren't sure which function you need, use Argon2id (IDKey) and
|
||||
// the parameter recommendations for your scenario.
|
||||
//
|
||||
//
|
||||
// Argon2i
|
||||
//
|
||||
// Argon2i (implemented by Key) is the side-channel resistant version of Argon2.
|
||||
// It uses data-independent memory access, which is preferred for password
|
||||
// hashing and password-based key derivation. Argon2i requires more passes over
|
||||
// memory than Argon2id to protect from trade-off attacks. The recommended
|
||||
// parameters (taken from [2]) for non-interactive operations are time=3 and to
|
||||
// use the maximum available memory.
|
||||
//
|
||||
//
|
||||
// Argon2id
|
||||
//
|
||||
// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining
|
||||
// Argon2i and Argon2d. It uses data-independent memory access for the first
|
||||
// half of the first iteration over the memory and data-dependent memory access
|
||||
// for the rest. Argon2id is side-channel resistant and provides better brute-
|
||||
// force cost savings due to time-memory tradeoffs than Argon2i. The recommended
|
||||
// parameters for non-interactive operations (taken from [2]) are time=1 and to
|
||||
// use the maximum available memory.
|
||||
//
|
||||
// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
|
||||
// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3
|
||||
package argon2
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/crypto/blake2b"
|
||||
)
|
||||
|
||||
// The Argon2 version implemented by this package.
|
||||
const Version = 0x13
|
||||
|
||||
const (
|
||||
argon2d = iota
|
||||
argon2i
|
||||
argon2id
|
||||
)
|
||||
|
||||
// Key derives a key from the password, salt, and cost parameters using Argon2i
|
||||
// returning a byte slice of length keyLen that can be used as a cryptographic
|
||||
// key. The CPU cost and parallelism degree must be greater than zero.
|
||||
//
|
||||
// For example, you can get a derived key for e.g. AES-256 (which needs a
|
||||
// 32-byte key) by doing:
|
||||
//
|
||||
// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32)
|
||||
//
|
||||
// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number.
|
||||
// If using that amount of memory (32 MB) is not possible in some contexts then
|
||||
// the time parameter can be increased to compensate.
|
||||
//
|
||||
// The time parameter specifies the number of passes over the memory and the
|
||||
// memory parameter specifies the size of the memory in KiB. For example
|
||||
// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be
|
||||
// adjusted to the number of available CPUs. The cost parameters should be
|
||||
// increased as memory latency and CPU parallelism increases. Remember to get a
|
||||
// good random salt.
|
||||
func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
|
||||
return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen)
|
||||
}
|
||||
|
||||
// IDKey derives a key from the password, salt, and cost parameters using
|
||||
// Argon2id returning a byte slice of length keyLen that can be used as a
|
||||
// cryptographic key. The CPU cost and parallelism degree must be greater than
|
||||
// zero.
|
||||
//
|
||||
// For example, you can get a derived key for e.g. AES-256 (which needs a
|
||||
// 32-byte key) by doing:
|
||||
//
|
||||
// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
|
||||
//
|
||||
// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number.
|
||||
// If using that amount of memory (64 MB) is not possible in some contexts then
|
||||
// the time parameter can be increased to compensate.
|
||||
//
|
||||
// The time parameter specifies the number of passes over the memory and the
|
||||
// memory parameter specifies the size of the memory in KiB. For example
|
||||
// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be
|
||||
// adjusted to the number of available CPUs. The cost parameters should be
|
||||
// increased as memory latency and CPU parallelism increases. Remember to get a
|
||||
// good random salt.
|
||||
func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
|
||||
return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen)
|
||||
}
|
||||
|
||||
func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte {
|
||||
if time < 1 {
|
||||
panic("argon2: number of rounds too small")
|
||||
}
|
||||
if threads < 1 {
|
||||
panic("argon2: parallelism degree too low")
|
||||
}
|
||||
h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode)
|
||||
|
||||
memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads))
|
||||
if memory < 2*syncPoints*uint32(threads) {
|
||||
memory = 2 * syncPoints * uint32(threads)
|
||||
}
|
||||
B := initBlocks(&h0, memory, uint32(threads))
|
||||
processBlocks(B, time, memory, uint32(threads), mode)
|
||||
return extractKey(B, memory, uint32(threads), keyLen)
|
||||
}
|
||||
|
||||
const (
|
||||
blockLength = 128
|
||||
syncPoints = 4
|
||||
)
|
||||
|
||||
type block [blockLength]uint64
|
||||
|
||||
func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte {
|
||||
var (
|
||||
h0 [blake2b.Size + 8]byte
|
||||
params [24]byte
|
||||
tmp [4]byte
|
||||
)
|
||||
|
||||
b2, _ := blake2b.New512(nil)
|
||||
binary.LittleEndian.PutUint32(params[0:4], threads)
|
||||
binary.LittleEndian.PutUint32(params[4:8], keyLen)
|
||||
binary.LittleEndian.PutUint32(params[8:12], memory)
|
||||
binary.LittleEndian.PutUint32(params[12:16], time)
|
||||
binary.LittleEndian.PutUint32(params[16:20], uint32(Version))
|
||||
binary.LittleEndian.PutUint32(params[20:24], uint32(mode))
|
||||
b2.Write(params[:])
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(len(password)))
|
||||
b2.Write(tmp[:])
|
||||
b2.Write(password)
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt)))
|
||||
b2.Write(tmp[:])
|
||||
b2.Write(salt)
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(len(key)))
|
||||
b2.Write(tmp[:])
|
||||
b2.Write(key)
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(len(data)))
|
||||
b2.Write(tmp[:])
|
||||
b2.Write(data)
|
||||
b2.Sum(h0[:0])
|
||||
return h0
|
||||
}
|
||||
|
||||
func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block {
|
||||
var block0 [1024]byte
|
||||
B := make([]block, memory)
|
||||
for lane := uint32(0); lane < threads; lane++ {
|
||||
j := lane * (memory / threads)
|
||||
binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane)
|
||||
|
||||
binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0)
|
||||
blake2bHash(block0[:], h0[:])
|
||||
for i := range B[j+0] {
|
||||
B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:])
|
||||
}
|
||||
|
||||
binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1)
|
||||
blake2bHash(block0[:], h0[:])
|
||||
for i := range B[j+1] {
|
||||
B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:])
|
||||
}
|
||||
}
|
||||
return B
|
||||
}
|
||||
|
||||
func processBlocks(B []block, time, memory, threads uint32, mode int) {
|
||||
lanes := memory / threads
|
||||
segments := lanes / syncPoints
|
||||
|
||||
processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) {
|
||||
var addresses, in, zero block
|
||||
if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
|
||||
in[0] = uint64(n)
|
||||
in[1] = uint64(lane)
|
||||
in[2] = uint64(slice)
|
||||
in[3] = uint64(memory)
|
||||
in[4] = uint64(time)
|
||||
in[5] = uint64(mode)
|
||||
}
|
||||
|
||||
index := uint32(0)
|
||||
if n == 0 && slice == 0 {
|
||||
index = 2 // we have already generated the first two blocks
|
||||
if mode == argon2i || mode == argon2id {
|
||||
in[6]++
|
||||
processBlock(&addresses, &in, &zero)
|
||||
processBlock(&addresses, &addresses, &zero)
|
||||
}
|
||||
}
|
||||
|
||||
offset := lane*lanes + slice*segments + index
|
||||
var random uint64
|
||||
for index < segments {
|
||||
prev := offset - 1
|
||||
if index == 0 && slice == 0 {
|
||||
prev += lanes // last block in lane
|
||||
}
|
||||
if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) {
|
||||
if index%blockLength == 0 {
|
||||
in[6]++
|
||||
processBlock(&addresses, &in, &zero)
|
||||
processBlock(&addresses, &addresses, &zero)
|
||||
}
|
||||
random = addresses[index%blockLength]
|
||||
} else {
|
||||
random = B[prev][0]
|
||||
}
|
||||
newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index)
|
||||
processBlockXOR(&B[offset], &B[prev], &B[newOffset])
|
||||
index, offset = index+1, offset+1
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
for n := uint32(0); n < time; n++ {
|
||||
for slice := uint32(0); slice < syncPoints; slice++ {
|
||||
var wg sync.WaitGroup
|
||||
for lane := uint32(0); lane < threads; lane++ {
|
||||
wg.Add(1)
|
||||
go processSegment(n, slice, lane, &wg)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func extractKey(B []block, memory, threads, keyLen uint32) []byte {
|
||||
lanes := memory / threads
|
||||
for lane := uint32(0); lane < threads-1; lane++ {
|
||||
for i, v := range B[(lane*lanes)+lanes-1] {
|
||||
B[memory-1][i] ^= v
|
||||
}
|
||||
}
|
||||
|
||||
var block [1024]byte
|
||||
for i, v := range B[memory-1] {
|
||||
binary.LittleEndian.PutUint64(block[i*8:], v)
|
||||
}
|
||||
key := make([]byte, keyLen)
|
||||
blake2bHash(key, block[:])
|
||||
return key
|
||||
}
|
||||
|
||||
func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 {
|
||||
refLane := uint32(rand>>32) % threads
|
||||
if n == 0 && slice == 0 {
|
||||
refLane = lane
|
||||
}
|
||||
m, s := 3*segments, ((slice+1)%syncPoints)*segments
|
||||
if lane == refLane {
|
||||
m += index
|
||||
}
|
||||
if n == 0 {
|
||||
m, s = slice*segments, 0
|
||||
if slice == 0 || lane == refLane {
|
||||
m += index
|
||||
}
|
||||
}
|
||||
if index == 0 || lane == refLane {
|
||||
m--
|
||||
}
|
||||
return phi(rand, uint64(m), uint64(s), refLane, lanes)
|
||||
}
|
||||
|
||||
func phi(rand, m, s uint64, lane, lanes uint32) uint32 {
|
||||
p := rand & 0xFFFFFFFF
|
||||
p = (p * p) >> 32
|
||||
p = (p * m) >> 32
|
||||
return lane*lanes + uint32((s+m-(p+1))%uint64(lanes))
|
||||
}
|
||||
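Following the IDKey doc comment above, a short sketch of deriving a 32-byte key with the Argon2id variant. The parameter choices (time=1, 64 MiB of memory, 4 threads) simply restate the comment's suggestions, and the 16-byte salt length is an assumption:

package main

import (
        "crypto/rand"
        "fmt"
        "log"

        "golang.org/x/crypto/argon2"
)

func main() {
        password := []byte("correct horse battery staple")

        // Argon2 needs a fresh random salt per password.
        salt := make([]byte, 16)
        if _, err := rand.Read(salt); err != nil {
                log.Fatal(err)
        }

        // time=1 pass, memory=64*1024 KiB (64 MiB), threads=4, 32-byte key.
        key := argon2.IDKey(password, salt, 1, 64*1024, 4, 32)
        fmt.Printf("derived %d-byte key: %x\n", len(key), key)
}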
233
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/argon2/argon2_test.go
generated
vendored
Normal file
@@ -0,0 +1,233 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package argon2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
genKatPassword = []byte{
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
|
||||
}
|
||||
genKatSalt = []byte{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}
|
||||
genKatSecret = []byte{0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}
|
||||
genKatAAD = []byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}
|
||||
)
|
||||
|
||||
func TestArgon2(t *testing.T) {
|
||||
defer func(sse4 bool) { useSSE4 = sse4 }(useSSE4)
|
||||
|
||||
if useSSE4 {
|
||||
t.Log("SSE4.1 version")
|
||||
testArgon2i(t)
|
||||
testArgon2d(t)
|
||||
testArgon2id(t)
|
||||
useSSE4 = false
|
||||
}
|
||||
t.Log("generic version")
|
||||
testArgon2i(t)
|
||||
testArgon2d(t)
|
||||
testArgon2id(t)
|
||||
}
|
||||
|
||||
func testArgon2d(t *testing.T) {
|
||||
want := []byte{
|
||||
0x51, 0x2b, 0x39, 0x1b, 0x6f, 0x11, 0x62, 0x97,
|
||||
0x53, 0x71, 0xd3, 0x09, 0x19, 0x73, 0x42, 0x94,
|
||||
0xf8, 0x68, 0xe3, 0xbe, 0x39, 0x84, 0xf3, 0xc1,
|
||||
0xa1, 0x3a, 0x4d, 0xb9, 0xfa, 0xbe, 0x4a, 0xcb,
|
||||
}
|
||||
hash := deriveKey(argon2d, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32)
|
||||
if !bytes.Equal(hash, want) {
|
||||
t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want))
|
||||
}
|
||||
}
|
||||
|
||||
func testArgon2i(t *testing.T) {
|
||||
want := []byte{
|
||||
0xc8, 0x14, 0xd9, 0xd1, 0xdc, 0x7f, 0x37, 0xaa,
|
||||
0x13, 0xf0, 0xd7, 0x7f, 0x24, 0x94, 0xbd, 0xa1,
|
||||
0xc8, 0xde, 0x6b, 0x01, 0x6d, 0xd3, 0x88, 0xd2,
|
||||
0x99, 0x52, 0xa4, 0xc4, 0x67, 0x2b, 0x6c, 0xe8,
|
||||
}
|
||||
hash := deriveKey(argon2i, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32)
|
||||
if !bytes.Equal(hash, want) {
|
||||
t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want))
|
||||
}
|
||||
}
|
||||
|
||||
func testArgon2id(t *testing.T) {
|
||||
want := []byte{
|
||||
0x0d, 0x64, 0x0d, 0xf5, 0x8d, 0x78, 0x76, 0x6c,
|
||||
0x08, 0xc0, 0x37, 0xa3, 0x4a, 0x8b, 0x53, 0xc9,
|
||||
0xd0, 0x1e, 0xf0, 0x45, 0x2d, 0x75, 0xb6, 0x5e,
|
||||
0xb5, 0x25, 0x20, 0xe9, 0x6b, 0x01, 0xe6, 0x59,
|
||||
}
|
||||
hash := deriveKey(argon2id, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32)
|
||||
if !bytes.Equal(hash, want) {
|
||||
t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want))
|
||||
}
|
||||
}
|
||||
|
||||
func TestVectors(t *testing.T) {
|
||||
password, salt := []byte("password"), []byte("somesalt")
|
||||
for i, v := range testVectors {
|
||||
want, err := hex.DecodeString(v.hash)
|
||||
if err != nil {
|
||||
t.Fatalf("Test %d: failed to decode hash: %v", i, err)
|
||||
}
|
||||
hash := deriveKey(v.mode, password, salt, nil, nil, v.time, v.memory, v.threads, uint32(len(want)))
|
||||
if !bytes.Equal(hash, want) {
|
||||
t.Errorf("Test %d - got: %s want: %s", i, hex.EncodeToString(hash), hex.EncodeToString(want))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkArgon2(mode int, time, memory uint32, threads uint8, keyLen uint32, b *testing.B) {
|
||||
password := []byte("password")
|
||||
salt := []byte("choosing random salts is hard")
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
deriveKey(mode, password, salt, nil, nil, time, memory, threads, keyLen)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkArgon2i(b *testing.B) {
|
||||
b.Run(" Time: 3 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 3, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 4 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 4, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 5 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 5, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 3 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 3, 64*1024, 4, 32, b) })
|
||||
b.Run(" Time: 4 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 4, 64*1024, 4, 32, b) })
|
||||
b.Run(" Time: 5 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 5, 64*1024, 4, 32, b) })
|
||||
}
|
||||
|
||||
func BenchmarkArgon2d(b *testing.B) {
|
||||
b.Run(" Time: 3, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 3, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 4, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 4, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 5, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 5, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 3, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 3, 64*1024, 4, 32, b) })
|
||||
b.Run(" Time: 4, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 4, 64*1024, 4, 32, b) })
|
||||
b.Run(" Time: 5, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 5, 64*1024, 4, 32, b) })
|
||||
}
|
||||
|
||||
func BenchmarkArgon2id(b *testing.B) {
|
||||
b.Run(" Time: 3, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 3, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 4, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 4, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 5, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 5, 32*1024, 1, 32, b) })
|
||||
b.Run(" Time: 3, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 3, 64*1024, 4, 32, b) })
|
||||
b.Run(" Time: 4, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 4, 64*1024, 4, 32, b) })
|
||||
b.Run(" Time: 5, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 5, 64*1024, 4, 32, b) })
|
||||
}
|
||||
|
||||
// Generated with the CLI of https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf
|
||||
var testVectors = []struct {
|
||||
mode int
|
||||
time, memory uint32
|
||||
threads uint8
|
||||
hash string
|
||||
}{
|
||||
{
|
||||
mode: argon2i, time: 1, memory: 64, threads: 1,
|
||||
hash: "b9c401d1844a67d50eae3967dc28870b22e508092e861a37",
|
||||
},
|
||||
{
|
||||
mode: argon2d, time: 1, memory: 64, threads: 1,
|
||||
hash: "8727405fd07c32c78d64f547f24150d3f2e703a89f981a19",
|
||||
},
|
||||
{
|
||||
mode: argon2id, time: 1, memory: 64, threads: 1,
|
||||
hash: "655ad15eac652dc59f7170a7332bf49b8469be1fdb9c28bb",
|
||||
},
|
||||
{
|
||||
mode: argon2i, time: 2, memory: 64, threads: 1,
|
||||
hash: "8cf3d8f76a6617afe35fac48eb0b7433a9a670ca4a07ed64",
|
||||
},
|
||||
{
|
||||
mode: argon2d, time: 2, memory: 64, threads: 1,
|
||||
hash: "3be9ec79a69b75d3752acb59a1fbb8b295a46529c48fbb75",
|
||||
},
|
||||
{
|
||||
mode: argon2id, time: 2, memory: 64, threads: 1,
|
||||
hash: "068d62b26455936aa6ebe60060b0a65870dbfa3ddf8d41f7",
|
||||
},
|
||||
{
|
||||
mode: argon2i, time: 2, memory: 64, threads: 2,
|
||||
hash: "2089f3e78a799720f80af806553128f29b132cafe40d059f",
|
||||
},
|
||||
{
|
||||
mode: argon2d, time: 2, memory: 64, threads: 2,
|
||||
hash: "68e2462c98b8bc6bb60ec68db418ae2c9ed24fc6748a40e9",
|
||||
},
|
||||
{
|
||||
mode: argon2id, time: 2, memory: 64, threads: 2,
|
||||
hash: "350ac37222f436ccb5c0972f1ebd3bf6b958bf2071841362",
|
||||
},
|
||||
{
|
||||
mode: argon2i, time: 3, memory: 256, threads: 2,
|
||||
hash: "f5bbf5d4c3836af13193053155b73ec7476a6a2eb93fd5e6",
|
||||
},
|
||||
{
|
||||
mode: argon2d, time: 3, memory: 256, threads: 2,
|
||||
hash: "f4f0669218eaf3641f39cc97efb915721102f4b128211ef2",
|
||||
},
|
||||
{
|
||||
mode: argon2id, time: 3, memory: 256, threads: 2,
|
||||
hash: "4668d30ac4187e6878eedeacf0fd83c5a0a30db2cc16ef0b",
|
||||
},
|
||||
{
|
||||
mode: argon2i, time: 4, memory: 4096, threads: 4,
|
||||
hash: "a11f7b7f3f93f02ad4bddb59ab62d121e278369288a0d0e7",
|
||||
},
|
||||
{
|
||||
mode: argon2d, time: 4, memory: 4096, threads: 4,
|
||||
hash: "935598181aa8dc2b720914aa6435ac8d3e3a4210c5b0fb2d",
|
||||
},
|
||||
{
|
||||
mode: argon2id, time: 4, memory: 4096, threads: 4,
|
||||
hash: "145db9733a9f4ee43edf33c509be96b934d505a4efb33c5a",
|
||||
},
|
||||
{
|
||||
mode: argon2i, time: 4, memory: 1024, threads: 8,
|
||||
hash: "0cdd3956aa35e6b475a7b0c63488822f774f15b43f6e6e17",
|
||||
},
|
||||
{
|
||||
mode: argon2d, time: 4, memory: 1024, threads: 8,
|
||||
hash: "83604fc2ad0589b9d055578f4d3cc55bc616df3578a896e9",
|
||||
},
|
||||
{
|
||||
mode: argon2id, time: 4, memory: 1024, threads: 8,
|
||||
hash: "8dafa8e004f8ea96bf7c0f93eecf67a6047476143d15577f",
|
||||
},
|
||||
{
|
||||
mode: argon2i, time: 2, memory: 64, threads: 3,
|
||||
hash: "5cab452fe6b8479c8661def8cd703b611a3905a6d5477fe6",
|
||||
},
|
||||
{
|
||||
mode: argon2d, time: 2, memory: 64, threads: 3,
|
||||
hash: "22474a423bda2ccd36ec9afd5119e5c8949798cadf659f51",
|
||||
},
|
||||
{
|
||||
mode: argon2id, time: 2, memory: 64, threads: 3,
|
||||
hash: "4a15b31aec7c2590b87d1f520be7d96f56658172deaa3079",
|
||||
},
|
||||
{
|
||||
mode: argon2i, time: 3, memory: 1024, threads: 6,
|
||||
hash: "d236b29c2b2a09babee842b0dec6aa1e83ccbdea8023dced",
|
||||
},
|
||||
{
|
||||
mode: argon2d, time: 3, memory: 1024, threads: 6,
|
||||
hash: "a3351b0319a53229152023d9206902f4ef59661cdca89481",
|
||||
},
|
||||
{
|
||||
mode: argon2id, time: 3, memory: 1024, threads: 6,
|
||||
hash: "1640b932f4b60e272f5d2207b9a9c626ffa1bd88d2349016",
|
||||
},
|
||||
}
|
||||
53
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/argon2/blake2b.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package argon2
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
|
||||
"golang.org/x/crypto/blake2b"
|
||||
)
|
||||
|
||||
// blake2bHash computes an arbitrary long hash value of in
|
||||
// and writes the hash to out.
|
||||
func blake2bHash(out []byte, in []byte) {
|
||||
var b2 hash.Hash
|
||||
if n := len(out); n < blake2b.Size {
|
||||
b2, _ = blake2b.New(n, nil)
|
||||
} else {
|
||||
b2, _ = blake2b.New512(nil)
|
||||
}
|
||||
|
||||
var buffer [blake2b.Size]byte
|
||||
binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out)))
|
||||
b2.Write(buffer[:4])
|
||||
b2.Write(in)
|
||||
|
||||
if len(out) <= blake2b.Size {
|
||||
b2.Sum(out[:0])
|
||||
return
|
||||
}
|
||||
|
||||
outLen := len(out)
|
||||
b2.Sum(buffer[:0])
|
||||
b2.Reset()
|
||||
copy(out, buffer[:32])
|
||||
out = out[32:]
|
||||
for len(out) > blake2b.Size {
|
||||
b2.Write(buffer[:])
|
||||
b2.Sum(buffer[:0])
|
||||
copy(out, buffer[:32])
|
||||
out = out[32:]
|
||||
b2.Reset()
|
||||
}
|
||||
|
||||
if outLen%blake2b.Size > 0 { // outLen > 64
|
||||
r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2
|
||||
b2, _ = blake2b.New(outLen-32*r, nil)
|
||||
}
|
||||
b2.Write(buffer[:])
|
||||
b2.Sum(out[:0])
|
||||
}
|
||||
61
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/argon2/blamka_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
package argon2
|
||||
|
||||
func init() {
|
||||
useSSE4 = supportsSSE4()
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
func supportsSSE4() bool
|
||||
|
||||
//go:noescape
|
||||
func mixBlocksSSE2(out, a, b, c *block)
|
||||
|
||||
//go:noescape
|
||||
func xorBlocksSSE2(out, a, b, c *block)
|
||||
|
||||
//go:noescape
|
||||
func blamkaSSE4(b *block)
|
||||
|
||||
func processBlockSSE(out, in1, in2 *block, xor bool) {
|
||||
var t block
|
||||
mixBlocksSSE2(&t, in1, in2, &t)
|
||||
if useSSE4 {
|
||||
blamkaSSE4(&t)
|
||||
} else {
|
||||
for i := 0; i < blockLength; i += 16 {
|
||||
blamkaGeneric(
|
||||
&t[i+0], &t[i+1], &t[i+2], &t[i+3],
|
||||
&t[i+4], &t[i+5], &t[i+6], &t[i+7],
|
||||
&t[i+8], &t[i+9], &t[i+10], &t[i+11],
|
||||
&t[i+12], &t[i+13], &t[i+14], &t[i+15],
|
||||
)
|
||||
}
|
||||
for i := 0; i < blockLength/8; i += 2 {
|
||||
blamkaGeneric(
|
||||
&t[i], &t[i+1], &t[16+i], &t[16+i+1],
|
||||
&t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
|
||||
&t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
|
||||
&t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
|
||||
)
|
||||
}
|
||||
}
|
||||
if xor {
|
||||
xorBlocksSSE2(out, in1, in2, &t)
|
||||
} else {
|
||||
mixBlocksSSE2(out, in1, in2, &t)
|
||||
}
|
||||
}
|
||||
|
||||
func processBlock(out, in1, in2 *block) {
|
||||
processBlockSSE(out, in1, in2, false)
|
||||
}
|
||||
|
||||
func processBlockXOR(out, in1, in2 *block) {
|
||||
processBlockSSE(out, in1, in2, true)
|
||||
}
|
||||
252
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/argon2/blamka_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,252 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
|
||||
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
|
||||
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
|
||||
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
|
||||
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
|
||||
|
||||
#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
|
||||
MOVO v4, t1; \
|
||||
MOVO v5, v4; \
|
||||
MOVO t1, v5; \
|
||||
MOVO v6, t1; \
|
||||
PUNPCKLQDQ v6, t2; \
|
||||
PUNPCKHQDQ v7, v6; \
|
||||
PUNPCKHQDQ t2, v6; \
|
||||
PUNPCKLQDQ v7, t2; \
|
||||
MOVO t1, v7; \
|
||||
MOVO v2, t1; \
|
||||
PUNPCKHQDQ t2, v7; \
|
||||
PUNPCKLQDQ v3, t2; \
|
||||
PUNPCKHQDQ t2, v2; \
|
||||
PUNPCKLQDQ t1, t2; \
|
||||
PUNPCKHQDQ t2, v3
|
||||
|
||||
#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
|
||||
MOVO v4, t1; \
|
||||
MOVO v5, v4; \
|
||||
MOVO t1, v5; \
|
||||
MOVO v2, t1; \
|
||||
PUNPCKLQDQ v2, t2; \
|
||||
PUNPCKHQDQ v3, v2; \
|
||||
PUNPCKHQDQ t2, v2; \
|
||||
PUNPCKLQDQ v3, t2; \
|
||||
MOVO t1, v3; \
|
||||
MOVO v6, t1; \
|
||||
PUNPCKHQDQ t2, v3; \
|
||||
PUNPCKLQDQ v7, t2; \
|
||||
PUNPCKHQDQ t2, v6; \
|
||||
PUNPCKLQDQ t1, t2; \
|
||||
PUNPCKHQDQ t2, v7
|
||||
|
||||
#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \
|
||||
MOVO v0, t0; \
|
||||
PMULULQ v2, t0; \
|
||||
PADDQ v2, v0; \
|
||||
PADDQ t0, v0; \
|
||||
PADDQ t0, v0; \
|
||||
PXOR v0, v6; \
|
||||
PSHUFD $0xB1, v6, v6; \
|
||||
MOVO v4, t0; \
|
||||
PMULULQ v6, t0; \
|
||||
PADDQ v6, v4; \
|
||||
PADDQ t0, v4; \
|
||||
PADDQ t0, v4; \
|
||||
PXOR v4, v2; \
|
||||
PSHUFB c40, v2; \
|
||||
MOVO v0, t0; \
|
||||
PMULULQ v2, t0; \
|
||||
PADDQ v2, v0; \
|
||||
PADDQ t0, v0; \
|
||||
PADDQ t0, v0; \
|
||||
PXOR v0, v6; \
|
||||
PSHUFB c48, v6; \
|
||||
MOVO v4, t0; \
|
||||
PMULULQ v6, t0; \
|
||||
PADDQ v6, v4; \
|
||||
PADDQ t0, v4; \
|
||||
PADDQ t0, v4; \
|
||||
PXOR v4, v2; \
|
||||
MOVO v2, t0; \
|
||||
PADDQ v2, t0; \
|
||||
PSRLQ $63, v2; \
|
||||
PXOR t0, v2; \
|
||||
MOVO v1, t0; \
|
||||
PMULULQ v3, t0; \
|
||||
PADDQ v3, v1; \
|
||||
PADDQ t0, v1; \
|
||||
PADDQ t0, v1; \
|
||||
PXOR v1, v7; \
|
||||
PSHUFD $0xB1, v7, v7; \
|
||||
MOVO v5, t0; \
|
||||
PMULULQ v7, t0; \
|
||||
PADDQ v7, v5; \
|
||||
PADDQ t0, v5; \
|
||||
PADDQ t0, v5; \
|
||||
PXOR v5, v3; \
|
||||
PSHUFB c40, v3; \
|
||||
MOVO v1, t0; \
|
||||
PMULULQ v3, t0; \
|
||||
PADDQ v3, v1; \
|
||||
PADDQ t0, v1; \
|
||||
PADDQ t0, v1; \
|
||||
PXOR v1, v7; \
|
||||
PSHUFB c48, v7; \
|
||||
MOVO v5, t0; \
|
||||
PMULULQ v7, t0; \
|
||||
PADDQ v7, v5; \
|
||||
PADDQ t0, v5; \
|
||||
PADDQ t0, v5; \
|
||||
PXOR v5, v3; \
|
||||
MOVO v3, t0; \
|
||||
PADDQ v3, t0; \
|
||||
PSRLQ $63, v3; \
|
||||
PXOR t0, v3
|
||||
|
||||
#define LOAD_MSG_0(block, off) \
|
||||
MOVOU 8*(off+0)(block), X0; \
|
||||
MOVOU 8*(off+2)(block), X1; \
|
||||
MOVOU 8*(off+4)(block), X2; \
|
||||
MOVOU 8*(off+6)(block), X3; \
|
||||
MOVOU 8*(off+8)(block), X4; \
|
||||
MOVOU 8*(off+10)(block), X5; \
|
||||
MOVOU 8*(off+12)(block), X6; \
|
||||
MOVOU 8*(off+14)(block), X7
|
||||
|
||||
#define STORE_MSG_0(block, off) \
|
||||
MOVOU X0, 8*(off+0)(block); \
|
||||
MOVOU X1, 8*(off+2)(block); \
|
||||
MOVOU X2, 8*(off+4)(block); \
|
||||
MOVOU X3, 8*(off+6)(block); \
|
||||
MOVOU X4, 8*(off+8)(block); \
|
||||
MOVOU X5, 8*(off+10)(block); \
|
||||
MOVOU X6, 8*(off+12)(block); \
|
||||
MOVOU X7, 8*(off+14)(block)
|
||||
|
||||
#define LOAD_MSG_1(block, off) \
|
||||
MOVOU 8*off+0*8(block), X0; \
|
||||
MOVOU 8*off+16*8(block), X1; \
|
||||
MOVOU 8*off+32*8(block), X2; \
|
||||
MOVOU 8*off+48*8(block), X3; \
|
||||
MOVOU 8*off+64*8(block), X4; \
|
||||
MOVOU 8*off+80*8(block), X5; \
|
||||
MOVOU 8*off+96*8(block), X6; \
|
||||
MOVOU 8*off+112*8(block), X7
|
||||
|
||||
#define STORE_MSG_1(block, off) \
|
||||
MOVOU X0, 8*off+0*8(block); \
|
||||
MOVOU X1, 8*off+16*8(block); \
|
||||
MOVOU X2, 8*off+32*8(block); \
|
||||
MOVOU X3, 8*off+48*8(block); \
|
||||
MOVOU X4, 8*off+64*8(block); \
|
||||
MOVOU X5, 8*off+80*8(block); \
|
||||
MOVOU X6, 8*off+96*8(block); \
|
||||
MOVOU X7, 8*off+112*8(block)
|
||||
|
||||
#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \
|
||||
LOAD_MSG_0(block, off); \
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
|
||||
STORE_MSG_0(block, off)
|
||||
|
||||
#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \
|
||||
LOAD_MSG_1(block, off); \
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
|
||||
SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
|
||||
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
|
||||
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
|
||||
STORE_MSG_1(block, off)
|
||||
|
||||
// func blamkaSSE4(b *block)
|
||||
TEXT ·blamkaSSE4(SB), 4, $0-8
|
||||
MOVQ b+0(FP), AX
|
||||
|
||||
MOVOU ·c40<>(SB), X10
|
||||
MOVOU ·c48<>(SB), X11
|
||||
|
||||
BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11)
|
||||
|
||||
BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11)
|
||||
BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11)
|
||||
RET
|
||||
|
||||
// func mixBlocksSSE2(out, a, b, c *block)
|
||||
TEXT ·mixBlocksSSE2(SB), 4, $0-32
|
||||
MOVQ out+0(FP), DX
|
||||
MOVQ a+8(FP), AX
|
||||
MOVQ b+16(FP), BX
|
||||
MOVQ a+24(FP), CX
|
||||
MOVQ $128, BP
|
||||
|
||||
loop:
|
||||
MOVOU 0(AX), X0
|
||||
MOVOU 0(BX), X1
|
||||
MOVOU 0(CX), X2
|
||||
PXOR X1, X0
|
||||
PXOR X2, X0
|
||||
MOVOU X0, 0(DX)
|
||||
ADDQ $16, AX
|
||||
ADDQ $16, BX
|
||||
ADDQ $16, CX
|
||||
ADDQ $16, DX
|
||||
SUBQ $2, BP
|
||||
JA loop
|
||||
RET
|
||||
|
||||
// func xorBlocksSSE2(out, a, b, c *block)
|
||||
TEXT ·xorBlocksSSE2(SB), 4, $0-32
|
||||
MOVQ out+0(FP), DX
|
||||
MOVQ a+8(FP), AX
|
||||
MOVQ b+16(FP), BX
|
||||
MOVQ a+24(FP), CX
|
||||
MOVQ $128, BP
|
||||
|
||||
loop:
|
||||
MOVOU 0(AX), X0
|
||||
MOVOU 0(BX), X1
|
||||
MOVOU 0(CX), X2
|
||||
MOVOU 0(DX), X3
|
||||
PXOR X1, X0
|
||||
PXOR X2, X0
|
||||
PXOR X3, X0
|
||||
MOVOU X0, 0(DX)
|
||||
ADDQ $16, AX
|
||||
ADDQ $16, BX
|
||||
ADDQ $16, CX
|
||||
ADDQ $16, DX
|
||||
SUBQ $2, BP
|
||||
JA loop
|
||||
RET
|
||||
|
||||
// func supportsSSE4() bool
|
||||
TEXT ·supportsSSE4(SB), 4, $0-1
|
||||
MOVL $1, AX
|
||||
CPUID
|
||||
SHRL $19, CX // Bit 19 indicates SSE4 support
|
||||
ANDL $1, CX // CX != 0 if support SSE4
|
||||
MOVB CX, ret+0(FP)
|
||||
RET
|
||||
163
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/argon2/blamka_generic.go
generated
vendored
Normal file
@@ -0,0 +1,163 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package argon2
|
||||
|
||||
var useSSE4 bool
|
||||
|
||||
func processBlockGeneric(out, in1, in2 *block, xor bool) {
|
||||
var t block
|
||||
for i := range t {
|
||||
t[i] = in1[i] ^ in2[i]
|
||||
}
|
||||
for i := 0; i < blockLength; i += 16 {
|
||||
blamkaGeneric(
|
||||
&t[i+0], &t[i+1], &t[i+2], &t[i+3],
|
||||
&t[i+4], &t[i+5], &t[i+6], &t[i+7],
|
||||
&t[i+8], &t[i+9], &t[i+10], &t[i+11],
|
||||
&t[i+12], &t[i+13], &t[i+14], &t[i+15],
|
||||
)
|
||||
}
|
||||
for i := 0; i < blockLength/8; i += 2 {
|
||||
blamkaGeneric(
|
||||
&t[i], &t[i+1], &t[16+i], &t[16+i+1],
|
||||
&t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1],
|
||||
&t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1],
|
||||
&t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1],
|
||||
)
|
||||
}
|
||||
if xor {
|
||||
for i := range t {
|
||||
out[i] ^= in1[i] ^ in2[i] ^ t[i]
|
||||
}
|
||||
} else {
|
||||
for i := range t {
|
||||
out[i] = in1[i] ^ in2[i] ^ t[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) {
|
||||
v00, v01, v02, v03 := *t00, *t01, *t02, *t03
|
||||
v04, v05, v06, v07 := *t04, *t05, *t06, *t07
|
||||
v08, v09, v10, v11 := *t08, *t09, *t10, *t11
|
||||
v12, v13, v14, v15 := *t12, *t13, *t14, *t15
|
||||
|
||||
v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
|
||||
v12 ^= v00
|
||||
v12 = v12>>32 | v12<<32
|
||||
v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
|
||||
v04 ^= v08
|
||||
v04 = v04>>24 | v04<<40
|
||||
|
||||
v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04))
|
||||
v12 ^= v00
|
||||
v12 = v12>>16 | v12<<48
|
||||
v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12))
|
||||
v04 ^= v08
|
||||
v04 = v04>>63 | v04<<1
|
||||
|
||||
v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
|
||||
v13 ^= v01
|
||||
v13 = v13>>32 | v13<<32
|
||||
v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
|
||||
v05 ^= v09
|
||||
v05 = v05>>24 | v05<<40
|
||||
|
||||
v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05))
|
||||
v13 ^= v01
|
||||
v13 = v13>>16 | v13<<48
|
||||
v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13))
|
||||
v05 ^= v09
|
||||
v05 = v05>>63 | v05<<1
|
||||
|
||||
v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
|
||||
v14 ^= v02
|
||||
v14 = v14>>32 | v14<<32
|
||||
v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
|
||||
v06 ^= v10
|
||||
v06 = v06>>24 | v06<<40
|
||||
|
||||
v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06))
|
||||
v14 ^= v02
|
||||
v14 = v14>>16 | v14<<48
|
||||
v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14))
|
||||
v06 ^= v10
|
||||
v06 = v06>>63 | v06<<1
|
||||
|
||||
v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
|
||||
v15 ^= v03
|
||||
v15 = v15>>32 | v15<<32
|
||||
v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
|
||||
v07 ^= v11
|
||||
v07 = v07>>24 | v07<<40
|
||||
|
||||
v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07))
|
||||
v15 ^= v03
|
||||
v15 = v15>>16 | v15<<48
|
||||
v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15))
|
||||
v07 ^= v11
|
||||
v07 = v07>>63 | v07<<1
|
||||
|
||||
v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
|
||||
v15 ^= v00
|
||||
v15 = v15>>32 | v15<<32
|
||||
v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
|
||||
v05 ^= v10
|
||||
v05 = v05>>24 | v05<<40
|
||||
|
||||
v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05))
|
||||
v15 ^= v00
|
||||
v15 = v15>>16 | v15<<48
|
||||
v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15))
|
||||
v05 ^= v10
|
||||
v05 = v05>>63 | v05<<1
|
||||
|
||||
v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
|
||||
v12 ^= v01
|
||||
v12 = v12>>32 | v12<<32
|
||||
v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
|
||||
v06 ^= v11
|
||||
v06 = v06>>24 | v06<<40
|
||||
|
||||
v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06))
|
||||
v12 ^= v01
|
||||
v12 = v12>>16 | v12<<48
|
||||
v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12))
|
||||
v06 ^= v11
|
||||
v06 = v06>>63 | v06<<1
|
||||
|
||||
v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
|
||||
v13 ^= v02
|
||||
v13 = v13>>32 | v13<<32
|
||||
v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
|
||||
v07 ^= v08
|
||||
v07 = v07>>24 | v07<<40
|
||||
|
||||
v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07))
|
||||
v13 ^= v02
|
||||
v13 = v13>>16 | v13<<48
|
||||
v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13))
|
||||
v07 ^= v08
|
||||
v07 = v07>>63 | v07<<1
|
||||
|
||||
v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
|
||||
v14 ^= v03
|
||||
v14 = v14>>32 | v14<<32
|
||||
v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
|
||||
v04 ^= v09
|
||||
v04 = v04>>24 | v04<<40
|
||||
|
||||
v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04))
|
||||
v14 ^= v03
|
||||
v14 = v14>>16 | v14<<48
|
||||
v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14))
|
||||
v04 ^= v09
|
||||
v04 = v04>>63 | v04<<1
|
||||
|
||||
*t00, *t01, *t02, *t03 = v00, v01, v02, v03
|
||||
*t04, *t05, *t06, *t07 = v04, v05, v06, v07
|
||||
*t08, *t09, *t10, *t11 = v08, v09, v10, v11
|
||||
*t12, *t13, *t14, *t15 = v12, v13, v14, v15
|
||||
}
|
||||
15
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/argon2/blamka_ref.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64 appengine gccgo
|
||||
|
||||
package argon2
|
||||
|
||||
func processBlock(out, in1, in2 *block) {
|
||||
processBlockGeneric(out, in1, in2, false)
|
||||
}
|
||||
|
||||
func processBlockXOR(out, in1, in2 *block) {
|
||||
processBlockGeneric(out, in1, in2, true)
|
||||
}
|
||||
35
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/bcrypt/base64.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bcrypt

import "encoding/base64"

const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

var bcEncoding = base64.NewEncoding(alphabet)

func base64Encode(src []byte) []byte {
n := bcEncoding.EncodedLen(len(src))
dst := make([]byte, n)
bcEncoding.Encode(dst, src)
for dst[n-1] == '=' {
n--
}
return dst[:n]
}

func base64Decode(src []byte) ([]byte, error) {
numOfEquals := 4 - (len(src) % 4)
for i := 0; i < numOfEquals; i++ {
src = append(src, '=')
}

dst := make([]byte, bcEncoding.DecodedLen(len(src)))
n, err := bcEncoding.Decode(dst, src)
if err != nil {
return nil, err
}
return dst[:n], nil
}
295
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
generated
vendored
Normal file
@@ -0,0 +1,295 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
package bcrypt // import "golang.org/x/crypto/bcrypt"

// The code is a port of Provos and Mazières's C implementation.
import (
"crypto/rand"
"crypto/subtle"
"errors"
"fmt"
"io"
"strconv"

"golang.org/x/crypto/blowfish"
)

const (
MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
)

// The error returned from CompareHashAndPassword when a password and hash do
// not match.
var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")

// The error returned from CompareHashAndPassword when a hash is too short to
// be a bcrypt hash.
var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")

// The error returned from CompareHashAndPassword when a hash was created with
// a bcrypt algorithm newer than this implementation.
type HashVersionTooNewError byte

func (hv HashVersionTooNewError) Error() string {
return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
}

// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
type InvalidHashPrefixError byte

func (ih InvalidHashPrefixError) Error() string {
return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
}

type InvalidCostError int

func (ic InvalidCostError) Error() string {
return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
}

const (
majorVersion = '2'
minorVersion = 'a'
maxSaltSize = 16
maxCryptedHashSize = 23
encodedSaltSize = 22
encodedHashSize = 31
minHashSize = 59
)

// magicCipherData is an IV for the 64 Blowfish encryption calls in
// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
var magicCipherData = []byte{
0x4f, 0x72, 0x70, 0x68,
0x65, 0x61, 0x6e, 0x42,
0x65, 0x68, 0x6f, 0x6c,
0x64, 0x65, 0x72, 0x53,
0x63, 0x72, 0x79, 0x44,
0x6f, 0x75, 0x62, 0x74,
}

type hashed struct {
hash []byte
salt []byte
cost int // allowed range is MinCost to MaxCost
major byte
minor byte
}

// GenerateFromPassword returns the bcrypt hash of the password at the given
// cost. If the cost given is less than MinCost, the cost will be set to
// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
// to compare the returned hashed password with its cleartext version.
func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
p, err := newFromPassword(password, cost)
if err != nil {
return nil, err
}
return p.Hash(), nil
}

// CompareHashAndPassword compares a bcrypt hashed password with its possible
// plaintext equivalent. Returns nil on success, or an error on failure.
func CompareHashAndPassword(hashedPassword, password []byte) error {
p, err := newFromHash(hashedPassword)
if err != nil {
return err
}

otherHash, err := bcrypt(password, p.cost, p.salt)
if err != nil {
return err
}

otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
return nil
}

return ErrMismatchedHashAndPassword
}

// Cost returns the hashing cost used to create the given hashed
// password. When, in the future, the hashing cost of a password system needs
// to be increased in order to adjust for greater computational power, this
// function allows one to establish which passwords need to be updated.
func Cost(hashedPassword []byte) (int, error) {
p, err := newFromHash(hashedPassword)
if err != nil {
return 0, err
}
return p.cost, nil
}

func newFromPassword(password []byte, cost int) (*hashed, error) {
if cost < MinCost {
cost = DefaultCost
}
p := new(hashed)
p.major = majorVersion
p.minor = minorVersion

err := checkCost(cost)
if err != nil {
return nil, err
}
p.cost = cost

unencodedSalt := make([]byte, maxSaltSize)
_, err = io.ReadFull(rand.Reader, unencodedSalt)
if err != nil {
return nil, err
}

p.salt = base64Encode(unencodedSalt)
hash, err := bcrypt(password, p.cost, p.salt)
if err != nil {
return nil, err
}
p.hash = hash
return p, err
}

func newFromHash(hashedSecret []byte) (*hashed, error) {
if len(hashedSecret) < minHashSize {
return nil, ErrHashTooShort
}
p := new(hashed)
n, err := p.decodeVersion(hashedSecret)
if err != nil {
return nil, err
}
hashedSecret = hashedSecret[n:]
n, err = p.decodeCost(hashedSecret)
if err != nil {
return nil, err
}
hashedSecret = hashedSecret[n:]

// The "+2" is here because we'll have to append at most 2 '=' to the salt
// when base64 decoding it in expensiveBlowfishSetup().
p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
copy(p.salt, hashedSecret[:encodedSaltSize])

hashedSecret = hashedSecret[encodedSaltSize:]
p.hash = make([]byte, len(hashedSecret))
copy(p.hash, hashedSecret)

return p, nil
}

func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
cipherData := make([]byte, len(magicCipherData))
copy(cipherData, magicCipherData)

c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
if err != nil {
return nil, err
}

for i := 0; i < 24; i += 8 {
for j := 0; j < 64; j++ {
c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
}
}

// Bug compatibility with C bcrypt implementations. We only encode 23 of
// the 24 bytes encrypted.
hsh := base64Encode(cipherData[:maxCryptedHashSize])
return hsh, nil
}

func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
csalt, err := base64Decode(salt)
if err != nil {
return nil, err
}

// Bug compatibility with C bcrypt implementations. They use the trailing
// NULL in the key string during expansion.
// We copy the key to prevent changing the underlying array.
ckey := append(key[:len(key):len(key)], 0)

c, err := blowfish.NewSaltedCipher(ckey, csalt)
if err != nil {
return nil, err
}

var i, rounds uint64
rounds = 1 << cost
for i = 0; i < rounds; i++ {
blowfish.ExpandKey(ckey, c)
blowfish.ExpandKey(csalt, c)
}

return c, nil
}

func (p *hashed) Hash() []byte {
arr := make([]byte, 60)
arr[0] = '$'
arr[1] = p.major
n := 2
if p.minor != 0 {
arr[2] = p.minor
n = 3
}
arr[n] = '$'
n++
copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
n += 2
arr[n] = '$'
n++
copy(arr[n:], p.salt)
n += encodedSaltSize
copy(arr[n:], p.hash)
n += encodedHashSize
return arr[:n]
}

func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
if sbytes[0] != '$' {
return -1, InvalidHashPrefixError(sbytes[0])
}
if sbytes[1] > majorVersion {
return -1, HashVersionTooNewError(sbytes[1])
}
p.major = sbytes[1]
n := 3
if sbytes[2] != '$' {
p.minor = sbytes[2]
n++
}
return n, nil
}

// sbytes should begin where decodeVersion left off.
func (p *hashed) decodeCost(sbytes []byte) (int, error) {
cost, err := strconv.Atoi(string(sbytes[0:2]))
if err != nil {
return -1, err
}
err = checkCost(cost)
if err != nil {
return -1, err
}
p.cost = cost
return 3, nil
}

func (p *hashed) String() string {
return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
}

func checkCost(cost int) error {
if cost < MinCost || cost > MaxCost {
return InvalidCostError(cost)
}
return nil
}
243
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/bcrypt/bcrypt_test.go
generated
vendored
Normal file
@@ -0,0 +1,243 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bcrypt

import (
"bytes"
"fmt"
"testing"
)

func TestBcryptingIsEasy(t *testing.T) {
pass := []byte("mypassword")
hp, err := GenerateFromPassword(pass, 0)
if err != nil {
t.Fatalf("GenerateFromPassword error: %s", err)
}

if CompareHashAndPassword(hp, pass) != nil {
t.Errorf("%v should hash %s correctly", hp, pass)
}

notPass := "notthepass"
err = CompareHashAndPassword(hp, []byte(notPass))
if err != ErrMismatchedHashAndPassword {
t.Errorf("%v and %s should be mismatched", hp, notPass)
}
}

func TestBcryptingIsCorrect(t *testing.T) {
pass := []byte("allmine")
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")

hash, err := bcrypt(pass, 10, salt)
if err != nil {
t.Fatalf("bcrypt blew up: %v", err)
}
if !bytes.HasSuffix(expectedHash, hash) {
t.Errorf("%v should be the suffix of %v", hash, expectedHash)
}

h, err := newFromHash(expectedHash)
if err != nil {
t.Errorf("Unable to parse %s: %v", string(expectedHash), err)
}

// This is not the safe way to compare these hashes. We do this only for
// testing clarity. Use bcrypt.CompareHashAndPassword()
if err == nil && !bytes.Equal(expectedHash, h.Hash()) {
t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash)
}
}

func TestVeryShortPasswords(t *testing.T) {
key := []byte("k")
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
_, err := bcrypt(key, 10, salt)
if err != nil {
t.Errorf("One byte key resulted in error: %s", err)
}
}

func TestTooLongPasswordsWork(t *testing.T) {
salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
// One byte over the usual 56 byte limit that blowfish has
tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456")
tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C")
hash, err := bcrypt(tooLongPass, 10, salt)
if err != nil {
t.Fatalf("bcrypt blew up on long password: %v", err)
}
if !bytes.HasSuffix(tooLongExpected, hash) {
t.Errorf("%v should be the suffix of %v", hash, tooLongExpected)
}
}

type InvalidHashTest struct {
err error
hash []byte
}

var invalidTests = []InvalidHashTest{
{ErrHashTooShort, []byte("$2a$10$fooo")},
{ErrHashTooShort, []byte("$2a")},
{HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
{InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
{InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
}

func TestInvalidHashErrors(t *testing.T) {
check := func(name string, expected, err error) {
if err == nil {
t.Errorf("%s: Should have returned an error", name)
}
if err != nil && err != expected {
t.Errorf("%s gave err %v but should have given %v", name, err, expected)
}
}
for _, iht := range invalidTests {
_, err := newFromHash(iht.hash)
check("newFromHash", iht.err, err)
err = CompareHashAndPassword(iht.hash, []byte("anything"))
check("CompareHashAndPassword", iht.err, err)
}
}

func TestUnpaddedBase64Encoding(t *testing.T) {
original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe")

encoded := base64Encode(original)

if !bytes.Equal(encodedOriginal, encoded) {
t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal)
}

decoded, err := base64Decode(encodedOriginal)
if err != nil {
t.Fatalf("base64Decode blew up: %s", err)
}

if !bytes.Equal(decoded, original) {
t.Errorf("Decoded %v should have equaled %v", decoded, original)
}
}

func TestCost(t *testing.T) {
suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C"
for _, vers := range []string{"2a", "2"} {
for _, cost := range []int{4, 10} {
s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix)
h := []byte(s)
actual, err := Cost(h)
if err != nil {
t.Errorf("Cost, error: %s", err)
continue
}
if actual != cost {
t.Errorf("Cost, expected: %d, actual: %d", cost, actual)
}
}
}
_, err := Cost([]byte("$a$a$" + suffix))
if err == nil {
t.Errorf("Cost, malformed but no error returned")
}
}

func TestCostValidationInHash(t *testing.T) {
if testing.Short() {
return
}

pass := []byte("mypassword")

for c := 0; c < MinCost; c++ {
p, _ := newFromPassword(pass, c)
if p.cost != DefaultCost {
t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost)
}
}

p, _ := newFromPassword(pass, 14)
if p.cost != 14 {
t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost)
}

hp, _ := newFromHash(p.Hash())
if p.cost != hp.cost {
t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost)
}

_, err := newFromPassword(pass, 32)
if err == nil {
t.Fatalf("newFromPassword: should return a cost error")
}
if err != InvalidCostError(32) {
t.Errorf("newFromPassword: should return cost error, got %#v", err)
}
}

func TestCostReturnsWithLeadingZeroes(t *testing.T) {
hp, _ := newFromPassword([]byte("abcdefgh"), 7)
cost := hp.Hash()[4:7]
expected := []byte("07$")

if !bytes.Equal(expected, cost) {
t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected)
}
}

func TestMinorNotRequired(t *testing.T) {
noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
h, err := newFromHash(noMinorHash)
if err != nil {
t.Fatalf("No minor hash blew up: %s", err)
}
if h.minor != 0 {
t.Errorf("Should leave minor version at 0, but was %d", h.minor)
}

if !bytes.Equal(noMinorHash, h.Hash()) {
t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash())
}
}

func BenchmarkEqual(b *testing.B) {
b.StopTimer()
passwd := []byte("somepasswordyoulike")
hash, _ := GenerateFromPassword(passwd, 10)
b.StartTimer()
for i := 0; i < b.N; i++ {
CompareHashAndPassword(hash, passwd)
}
}

func BenchmarkGeneration(b *testing.B) {
b.StopTimer()
passwd := []byte("mylongpassword1234")
b.StartTimer()
for i := 0; i < b.N; i++ {
GenerateFromPassword(passwd, 10)
}
}

// See Issue https://github.com/golang/go/issues/20425.
func TestNoSideEffectsFromCompare(t *testing.T) {
source := []byte("passw0rd123456")
password := source[:len(source)-6]
token := source[len(source)-6:]
want := make([]byte, len(source))
copy(want, source)

wantHash := []byte("$2a$10$LK9XRuhNxHHCvjX3tdkRKei1QiCDUKrJRhZv7WWZPuQGRUM92rOUa")
_ = CompareHashAndPassword(wantHash, password)

got := bytes.Join([][]byte{password, token}, []byte(""))
if !bytes.Equal(got, want) {
t.Errorf("got=%q want=%q", got, want)
}
}
221
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/blake2b/blake2b.go
generated
vendored
Normal file
@@ -0,0 +1,221 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693
// and the extendable output function (XOF) BLAKE2Xb.
//
// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf
// and for BLAKE2Xb see https://blake2.net/blake2x.pdf
//
// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512).
// If you need a secret-key MAC (message authentication code), use the New512
// function with a non-nil key.
//
// BLAKE2X is a construction to compute hash values larger than 64 bytes. It
// can produce hash values between 0 and 4 GiB.
package blake2b

import (
"encoding/binary"
"errors"
"hash"
)

const (
// The blocksize of BLAKE2b in bytes.
BlockSize = 128
// The hash size of BLAKE2b-512 in bytes.
Size = 64
// The hash size of BLAKE2b-384 in bytes.
Size384 = 48
// The hash size of BLAKE2b-256 in bytes.
Size256 = 32
)

var (
useAVX2 bool
useAVX bool
useSSE4 bool
)

var (
errKeySize = errors.New("blake2b: invalid key size")
errHashSize = errors.New("blake2b: invalid hash size")
)

var iv = [8]uint64{
0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
}

// Sum512 returns the BLAKE2b-512 checksum of the data.
func Sum512(data []byte) [Size]byte {
var sum [Size]byte
checkSum(&sum, Size, data)
return sum
}

// Sum384 returns the BLAKE2b-384 checksum of the data.
func Sum384(data []byte) [Size384]byte {
var sum [Size]byte
var sum384 [Size384]byte
checkSum(&sum, Size384, data)
copy(sum384[:], sum[:Size384])
return sum384
}

// Sum256 returns the BLAKE2b-256 checksum of the data.
func Sum256(data []byte) [Size256]byte {
var sum [Size]byte
var sum256 [Size256]byte
checkSum(&sum, Size256, data)
copy(sum256[:], sum[:Size256])
return sum256
}

// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil
// key turns the hash into a MAC. The key must between zero and 64 bytes long.
func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) }

// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil
// key turns the hash into a MAC. The key must between zero and 64 bytes long.
func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) }

// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil
// key turns the hash into a MAC. The key must between zero and 64 bytes long.
func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) }

// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length.
// A non-nil key turns the hash into a MAC. The key must between zero and 64 bytes long.
// The hash size can be a value between 1 and 64 but it is highly recommended to use
// values equal or greater than:
// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long).
// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long).
func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) }

func newDigest(hashSize int, key []byte) (*digest, error) {
if hashSize < 1 || hashSize > Size {
return nil, errHashSize
}
if len(key) > Size {
return nil, errKeySize
}
d := &digest{
size: hashSize,
keyLen: len(key),
}
copy(d.key[:], key)
d.Reset()
return d, nil
}

func checkSum(sum *[Size]byte, hashSize int, data []byte) {
h := iv
h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24)
var c [2]uint64

if length := len(data); length > BlockSize {
n := length &^ (BlockSize - 1)
if length == n {
n -= BlockSize
}
hashBlocks(&h, &c, 0, data[:n])
data = data[n:]
}

var block [BlockSize]byte
offset := copy(block[:], data)
remaining := uint64(BlockSize - offset)
if c[0] < remaining {
c[1]--
}
c[0] -= remaining

hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])

for i, v := range h[:(hashSize+7)/8] {
binary.LittleEndian.PutUint64(sum[8*i:], v)
}
}

type digest struct {
h [8]uint64
c [2]uint64
size int
block [BlockSize]byte
offset int

key [BlockSize]byte
keyLen int
}

func (d *digest) BlockSize() int { return BlockSize }

func (d *digest) Size() int { return d.size }

func (d *digest) Reset() {
d.h = iv
d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24)
d.offset, d.c[0], d.c[1] = 0, 0, 0
if d.keyLen > 0 {
d.block = d.key
d.offset = BlockSize
}
}

func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)

if d.offset > 0 {
remaining := BlockSize - d.offset
if n <= remaining {
d.offset += copy(d.block[d.offset:], p)
return
}
copy(d.block[d.offset:], p[:remaining])
hashBlocks(&d.h, &d.c, 0, d.block[:])
d.offset = 0
p = p[remaining:]
}

if length := len(p); length > BlockSize {
nn := length &^ (BlockSize - 1)
if length == nn {
nn -= BlockSize
}
hashBlocks(&d.h, &d.c, 0, p[:nn])
p = p[nn:]
}

if len(p) > 0 {
d.offset += copy(d.block[:], p)
}

return
}

func (d *digest) Sum(sum []byte) []byte {
var hash [Size]byte
d.finalize(&hash)
return append(sum, hash[:d.size]...)
}

func (d *digest) finalize(hash *[Size]byte) {
var block [BlockSize]byte
copy(block[:], d.block[:d.offset])
remaining := uint64(BlockSize - d.offset)

c := d.c
if c[0] < remaining {
c[1]--
}
c[0] -= remaining

h := d.h
hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])

for i, v := range h {
binary.LittleEndian.PutUint64(hash[8*i:], v)
}
}
43
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7,amd64,!gccgo,!appengine

package blake2b

func init() {
useAVX2 = supportsAVX2()
useAVX = supportsAVX()
useSSE4 = supportsSSE4()
}

//go:noescape
func supportsSSE4() bool

//go:noescape
func supportsAVX() bool

//go:noescape
func supportsAVX2() bool

//go:noescape
func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)

//go:noescape
func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)

//go:noescape
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)

func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
if useAVX2 {
hashBlocksAVX2(h, c, flag, blocks)
} else if useAVX {
hashBlocksAVX(h, c, flag, blocks)
} else if useSSE4 {
hashBlocksSSE4(h, c, flag, blocks)
} else {
hashBlocksGeneric(h, c, flag, blocks)
}
}
762
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
generated
vendored
Normal file
@@ -0,0 +1,762 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7,amd64,!gccgo,!appengine

#include "textflag.h"

DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32

DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32

DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16

DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16

#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39

#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
VPADDQ m0, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m1, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y1_Y1; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y3_Y3; \
VPADDQ m2, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m3, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y3_Y3; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y1_Y1

#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E

#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n

#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01

#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01

#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01

// load msg: Y12 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y12, Y12

// load msg: Y13 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
VMOVQ_SI_X13(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X13(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y13, Y13

// load msg: Y14 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
VMOVQ_SI_X14(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X14(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y14, Y14

// load msg: Y15 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
VMOVQ_SI_X15(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X15(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X11(6*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
LOAD_MSG_AVX2_Y15(9, 11, 13, 15)

#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
VMOVQ_SI_X11(11*8); \
VPSHUFD $0x4E, 0*8(SI), X14; \
VPINSRQ_1_SI_X11(5*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(12, 2, 7, 3)

#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
VMOVQ_SI_X11(5*8); \
VMOVDQU 11*8(SI), X12; \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y12, Y12; \
VMOVQ_SI_X13(8*8); \
VMOVQ_SI_X11(2*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X11(13*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
LOAD_MSG_AVX2_Y15(14, 6, 1, 4)

#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
VMOVQ_SI_X15(6*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X15(10*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X13(7*8); \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
LOAD_MSG_AVX2_Y15(1, 12, 8, 13)

#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
LOAD_MSG_AVX2_Y15(13, 5, 14, 9)

#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
VMOVQ_SI_X14_0; \
VPSHUFD $0x4E, 8*8(SI), X11; \
VPINSRQ_1_SI_X14(6*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(7, 3, 2, 11)

#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
VMOVQ_SI_X15_0; \
VMOVQ_SI_X11(6*8); \
VPINSRQ_1_SI_X15(4*8); \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
VMOVQ_SI_X12(6*8); \
VMOVQ_SI_X11(11*8); \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
VMOVQ_SI_X11(1*8); \
VMOVDQU 12*8(SI), X14; \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y14, Y14; \
VMOVQ_SI_X15(2*8); \
VMOVDQU 4*8(SI), X11; \
VPINSRQ_1_SI_X15(7*8); \
VINSERTI128 $1, X11, Y15, Y15

#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
VMOVQ_SI_X13(2*8); \
VPSHUFD $0x4E, 5*8(SI), X11; \
VPINSRQ_1_SI_X13(4*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
VMOVQ_SI_X15(11*8); \
VMOVQ_SI_X11(12*8); \
VPINSRQ_1_SI_X15(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y15, Y15

// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
MOVQ h+0(FP), AX
MOVQ c+8(FP), BX
MOVQ flag+16(FP), CX
MOVQ blocks_base+24(FP), SI
MOVQ blocks_len+32(FP), DI

MOVQ SP, DX
MOVQ SP, R9
ADDQ $31, R9
ANDQ $~31, R9
MOVQ R9, SP

MOVQ CX, 16(SP)
XORQ CX, CX
MOVQ CX, 24(SP)

VMOVDQU ·AVX2_c40<>(SB), Y4
VMOVDQU ·AVX2_c48<>(SB), Y5

VMOVDQU 0(AX), Y8
VMOVDQU 32(AX), Y9
VMOVDQU ·AVX2_iv0<>(SB), Y6
VMOVDQU ·AVX2_iv1<>(SB), Y7

MOVQ 0(BX), R8
MOVQ 8(BX), R9
MOVQ R9, 8(SP)

loop:
ADDQ $128, R8
MOVQ R8, 0(SP)
CMPQ R8, $128
JGE noinc
INCQ R9
MOVQ R9, 8(SP)

noinc:
VMOVDQA Y8, Y0
VMOVDQA Y9, Y1
VMOVDQA Y6, Y2
VPXOR 0(SP), Y7, Y3

LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
VMOVDQA Y12, 32(SP)
VMOVDQA Y13, 64(SP)
VMOVDQA Y14, 96(SP)
VMOVDQA Y15, 128(SP)
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
VMOVDQA Y12, 160(SP)
VMOVDQA Y13, 192(SP)
VMOVDQA Y14, 224(SP)
VMOVDQA Y15, 256(SP)

ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)

ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)

VPXOR Y0, Y8, Y8
VPXOR Y1, Y9, Y9
VPXOR Y2, Y8, Y8
VPXOR Y3, Y9, Y9

LEAQ 128(SI), SI
SUBQ $128, DI
JNE loop

MOVQ R8, 0(BX)
MOVQ R9, 8(BX)

VMOVDQU Y8, 0(AX)
VMOVDQU Y9, 32(AX)
VZEROUPPER

MOVQ DX, SP
RET

#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE

#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF

#define SHUFFLE_AVX() \
VMOVDQA X6, X13; \
VMOVDQA X2, X14; \
VMOVDQA X4, X6; \
VPUNPCKLQDQ_X13_X13_X15; \
VMOVDQA X5, X4; \
VMOVDQA X6, X5; \
VPUNPCKHQDQ_X15_X7_X6; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X13_X7; \
VPUNPCKLQDQ_X3_X3_X15; \
VPUNPCKHQDQ_X15_X2_X2; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X3_X3; \

#define SHUFFLE_AVX_INV() \
VMOVDQA X2, X13; \
VMOVDQA X4, X14; \
VPUNPCKLQDQ_X2_X2_X15; \
VMOVDQA X5, X4; \
VPUNPCKHQDQ_X15_X3_X2; \
VMOVDQA X14, X5; \
VPUNPCKLQDQ_X3_X3_X15; \
VMOVDQA X6, X14; \
VPUNPCKHQDQ_X15_X13_X3; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X6_X6; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X7_X7; \

#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
VPADDQ m0, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m1, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFD $-79, v6, v6; \
VPSHUFD $-79, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPSHUFB c40, v2, v2; \
VPSHUFB c40, v3, v3; \
VPADDQ m2, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m3, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFB c48, v6, v6; \
VPSHUFB c48, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPADDQ v2, v2, t0; \
VPSRLQ $63, v2, v2; \
VPXOR t0, v2, v2; \
VPADDQ v3, v3, t0; \
VPSRLQ $63, v3, v3; \
VPXOR t0, v3, v3

// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X13(i2*8); \
VMOVQ_SI_X14(i4*8); \
VMOVQ_SI_X15(i6*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X13(i3*8); \
VPINSRQ_1_SI_X14(i5*8); \
VPINSRQ_1_SI_X15(i7*8)

// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(1*8); \
VMOVQ_SI_X15(5*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X13(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(7*8)

// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
VPSHUFD $0x4E, 0*8(SI), X12; \
VMOVQ_SI_X13(11*8); \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(7*8); \
VPINSRQ_1_SI_X13(5*8); \
VPINSRQ_1_SI_X14(2*8); \
VPINSRQ_1_SI_X15(3*8)

// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
VMOVDQU 11*8(SI), X12; \
VMOVQ_SI_X13(5*8); \
VMOVQ_SI_X14(8*8); \
VMOVQ_SI_X15(2*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14_0; \
VPINSRQ_1_SI_X15(13*8)

// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(6*8); \
VMOVQ_SI_X15_0; \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(8*8)

// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
VMOVQ_SI_X12(9*8); \
VMOVQ_SI_X13(2*8); \
VMOVQ_SI_X14_0; \
VMOVQ_SI_X15(4*8); \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VPINSRQ_1_SI_X15(15*8)

// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(11*8); \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X13(8*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(3*8)

// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
MOVQ 0*8(SI), X12; \
VPSHUFD $0x4E, 8*8(SI), X13; \
MOVQ 7*8(SI), X14; \
MOVQ 2*8(SI), X15; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(11*8)

// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
MOVQ 6*8(SI), X12; \
MOVQ 11*8(SI), X13; \
MOVQ 15*8(SI), X14; \
MOVQ 3*8(SI), X15; \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X14(9*8); \
VPINSRQ_1_SI_X15(8*8)

// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
MOVQ 5*8(SI), X12; \
MOVQ 8*8(SI), X13; \
MOVQ 0*8(SI), X14; \
MOVQ 6*8(SI), X15; \
VPINSRQ_1_SI_X12(15*8); \
VPINSRQ_1_SI_X13(2*8); \
VPINSRQ_1_SI_X14(4*8); \
VPINSRQ_1_SI_X15(10*8)

// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
VMOVDQU 12*8(SI), X12; \
MOVQ 1*8(SI), X13; \
MOVQ 2*8(SI), X14; \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VMOVDQU 4*8(SI), X15

// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
MOVQ 15*8(SI), X12; \
MOVQ 3*8(SI), X13; \
MOVQ 11*8(SI), X14; \
MOVQ 12*8(SI), X15; \
VPINSRQ_1_SI_X12(9*8); \
VPINSRQ_1_SI_X13(13*8); \
VPINSRQ_1_SI_X14(14*8); \
VPINSRQ_1_SI_X15_0

// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ c+8(FP), BX
MOVQ flag+16(FP), CX
MOVQ blocks_base+24(FP), SI
MOVQ blocks_len+32(FP), DI

MOVQ SP, BP
MOVQ SP, R9
ADDQ $15, R9
ANDQ $~15, R9
MOVQ R9, SP

VMOVDQU ·AVX_c40<>(SB), X0
VMOVDQU ·AVX_c48<>(SB), X1
VMOVDQA X0, X8
VMOVDQA X1, X9

VMOVDQU ·AVX_iv3<>(SB), X0
VMOVDQA X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)

VMOVDQU 0(AX), X10
VMOVDQU 16(AX), X11
VMOVDQU 32(AX), X2
VMOVDQU 48(AX), X3

MOVQ 0(BX), R8
MOVQ 8(BX), R9

loop:
ADDQ $128, R8
CMPQ R8, $128
JGE noinc
INCQ R9

noinc:
VMOVQ_R8_X15
VPINSRQ_1_R9_X15

VMOVDQA X10, X0
VMOVDQA X11, X1
VMOVDQU ·AVX_iv0<>(SB), X4
VMOVDQU ·AVX_iv1<>(SB), X5
VMOVDQU ·AVX_iv2<>(SB), X6

VPXOR X15, X6, X6
VMOVDQA 0(SP), X7

LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
VMOVDQA X12, 16(SP)
VMOVDQA X13, 32(SP)
VMOVDQA X14, 48(SP)
VMOVDQA X15, 64(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
VMOVDQA X12, 80(SP)
VMOVDQA X13, 96(SP)
VMOVDQA X14, 112(SP)
VMOVDQA X15, 128(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
VMOVDQA X12, 144(SP)
VMOVDQA X13, 160(SP)
VMOVDQA X14, 176(SP)
VMOVDQA X15, 192(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
VMOVDQA X12, 208(SP)
VMOVDQA X13, 224(SP)
VMOVDQA X14, 240(SP)
VMOVDQA X15, 256(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()

HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9)
SHUFFLE_AVX()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9)
SHUFFLE_AVX_INV()

HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9)
SHUFFLE_AVX()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9)
SHUFFLE_AVX_INV()

VMOVDQU 32(AX), X14
VMOVDQU 48(AX), X15
VPXOR X0, X10, X10
VPXOR X1, X11, X11
VPXOR X2, X14, X14
VPXOR X3, X15, X15
VPXOR X4, X10, X10
VPXOR X5, X11, X11
VPXOR X6, X14, X2
VPXOR X7, X15, X3
VMOVDQU X2, 32(AX)
VMOVDQU X3, 48(AX)

LEAQ 128(SI), SI
SUBQ $128, DI
JNE loop

VMOVDQU X10, 0(AX)
VMOVDQU X11, 16(AX)

MOVQ R8, 0(BX)
MOVQ R9, 8(BX)
VZEROUPPER

MOVQ BP, SP
RET

// func supportsAVX2() bool
TEXT ·supportsAVX2(SB), 4, $0-1
MOVQ runtime·support_avx2(SB), AX
MOVB AX, ret+0(FP)
RET

// func supportsAVX() bool
TEXT ·supportsAVX(SB), 4, $0-1
MOVQ runtime·support_avx(SB), AX
MOVB AX, ret+0(FP)
RET
25
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.7,amd64,!gccgo,!appengine

package blake2b

func init() {
	useSSE4 = supportsSSE4()
}

//go:noescape
func supportsSSE4() bool

//go:noescape
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)

func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
	if useSSE4 {
		hashBlocksSSE4(h, c, flag, blocks)
	} else {
		hashBlocksGeneric(h, c, flag, blocks)
	}
}
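Aside (illustrative sketch, not part of the vendored file): the init/useSSE4 code above shows the common Go pattern of probing the CPU once at startup and dispatching to an accelerated or generic implementation afterwards. A minimal, self-contained sketch of that pattern, using hypothetical hasFastPath/fastSum/slowSum names rather than the package's real internals:

package main

import "fmt"

// hasFastPath stands in for a CPU-feature probe such as supportsSSE4.
// It is hard-coded here so the sketch runs anywhere.
func hasFastPath() bool { return false }

// useFast is decided once, at program start, just like useSSE4 above.
var useFast = hasFastPath()

// sum dispatches to whichever implementation was selected at startup.
func sum(data []byte) byte {
	if useFast {
		return fastSum(data)
	}
	return slowSum(data)
}

// fastSum would be the hand-tuned path; here it simply reuses slowSum.
func fastSum(data []byte) byte { return slowSum(data) }

func slowSum(data []byte) byte {
	var s byte
	for _, b := range data {
		s += b
	}
	return s
}

func main() {
	fmt.Println(sum([]byte("abc")))
}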
290
vendor/github.com/genuinetools/amicontained/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
generated vendored Normal file
@@ -0,0 +1,290 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64,!gccgo,!appengine

#include "textflag.h"

DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16

DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16

DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16

DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16

DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16

DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16

#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
	MOVO v4, t1; \
	MOVO v5, v4; \
	MOVO t1, v5; \
	MOVO v6, t1; \
	PUNPCKLQDQ v6, t2; \
	PUNPCKHQDQ v7, v6; \
	PUNPCKHQDQ t2, v6; \
	PUNPCKLQDQ v7, t2; \
	MOVO t1, v7; \
	MOVO v2, t1; \
	PUNPCKHQDQ t2, v7; \
	PUNPCKLQDQ v3, t2; \
	PUNPCKHQDQ t2, v2; \
	PUNPCKLQDQ t1, t2; \
	PUNPCKHQDQ t2, v3

#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
	MOVO v4, t1; \
	MOVO v5, v4; \
	MOVO t1, v5; \
	MOVO v2, t1; \
	PUNPCKLQDQ v2, t2; \
	PUNPCKHQDQ v3, v2; \
	PUNPCKHQDQ t2, v2; \
	PUNPCKLQDQ v3, t2; \
	MOVO t1, v3; \
	MOVO v6, t1; \
	PUNPCKHQDQ t2, v3; \
	PUNPCKLQDQ v7, t2; \
	PUNPCKHQDQ t2, v6; \
	PUNPCKLQDQ t1, t2; \
	PUNPCKHQDQ t2, v7

#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
	PADDQ m0, v0; \
	PADDQ m1, v1; \
	PADDQ v2, v0; \
	PADDQ v3, v1; \
	PXOR v0, v6; \
	PXOR v1, v7; \
	PSHUFD $0xB1, v6, v6; \
	PSHUFD $0xB1, v7, v7; \
	PADDQ v6, v4; \
	PADDQ v7, v5; \
	PXOR v4, v2; \
	PXOR v5, v3; \
	PSHUFB c40, v2; \
	PSHUFB c40, v3; \
	PADDQ m2, v0; \
	PADDQ m3, v1; \
	PADDQ v2, v0; \
	PADDQ v3, v1; \
	PXOR v0, v6; \
	PXOR v1, v7; \
	PSHUFB c48, v6; \
	PSHUFB c48, v7; \
	PADDQ v6, v4; \
	PADDQ v7, v5; \
	PXOR v4, v2; \
	PXOR v5, v3; \
	MOVOU v2, t0; \
	PADDQ v2, t0; \
	PSRLQ $63, v2; \
	PXOR t0, v2; \
	MOVOU v3, t0; \
	PADDQ v3, t0; \
	PSRLQ $63, v3; \
	PXOR t0, v3

#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
	MOVQ i0*8(src), m0; \
	PINSRQ $1, i1*8(src), m0; \
	MOVQ i2*8(src), m1; \
	PINSRQ $1, i3*8(src), m1; \
	MOVQ i4*8(src), m2; \
	PINSRQ $1, i5*8(src), m2; \
	MOVQ i6*8(src), m3; \
	PINSRQ $1, i7*8(src), m3

// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
	MOVQ h+0(FP), AX
	MOVQ c+8(FP), BX
	MOVQ flag+16(FP), CX
	MOVQ blocks_base+24(FP), SI
	MOVQ blocks_len+32(FP), DI

	MOVQ SP, BP
	MOVQ SP, R9
	ADDQ $15, R9
	ANDQ $~15, R9
	MOVQ R9, SP

	MOVOU ·iv3<>(SB), X0
	MOVO X0, 0(SP)
	XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0)

	MOVOU ·c40<>(SB), X13
	MOVOU ·c48<>(SB), X14

	MOVOU 0(AX), X12
	MOVOU 16(AX), X15

	MOVQ 0(BX), R8
	MOVQ 8(BX), R9

loop:
	ADDQ $128, R8
	CMPQ R8, $128
	JGE noinc
	INCQ R9

noinc:
	MOVQ R8, X8
	PINSRQ $1, R9, X8

	MOVO X12, X0
	MOVO X15, X1
	MOVOU 32(AX), X2
	MOVOU 48(AX), X3
	MOVOU ·iv0<>(SB), X4
	MOVOU ·iv1<>(SB), X5
	MOVOU ·iv2<>(SB), X6

	PXOR X8, X6
	MOVO 0(SP), X7

	LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
	MOVO X8, 16(SP)
	MOVO X9, 32(SP)
	MOVO X10, 48(SP)
	MOVO X11, 64(SP)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
	MOVO X8, 80(SP)
	MOVO X9, 96(SP)
	MOVO X10, 112(SP)
	MOVO X11, 128(SP)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
	MOVO X8, 144(SP)
	MOVO X9, 160(SP)
	MOVO X10, 176(SP)
	MOVO X11, 192(SP)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
	MOVO X8, 208(SP)
	MOVO X9, 224(SP)
	MOVO X10, 240(SP)
	MOVO X11, 256(SP)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14)
	SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
	HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14)
	SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)

	MOVOU 32(AX), X10
	MOVOU 48(AX), X11
	PXOR X0, X12
	PXOR X1, X15
	PXOR X2, X10
	PXOR X3, X11
	PXOR X4, X12
	PXOR X5, X15
	PXOR X6, X10
	PXOR X7, X11
	MOVOU X10, 32(AX)
	MOVOU X11, 48(AX)

	LEAQ 128(SI), SI
	SUBQ $128, DI
	JNE loop

	MOVOU X12, 0(AX)
	MOVOU X15, 16(AX)

	MOVQ R8, 0(BX)
	MOVQ R9, 8(BX)

	MOVQ BP, SP
	RET

// func supportsSSE4() bool
TEXT ·supportsSSE4(SB), 4, $0-1
	MOVL $1, AX
	CPUID
	SHRL $19, CX // Bit 19 indicates SSE4 support
	ANDL $1, CX  // CX != 0 if support SSE4
	MOVB CX, ret+0(FP)
	RET
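Aside (illustrative sketch, not part of the vendored file): supportsSSE4 above issues CPUID with EAX=1 and tests ECX bit 19, which reports SSE4.1. Outside hand-written assembly the same probe is normally done through golang.org/x/sys/cpu; a minimal sketch, assuming that dependency is available:

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// cpu.X86.HasSSE41 reflects CPUID.(EAX=1):ECX bit 19, the same bit the
	// assembly above shifts down and masks; HasAVX and HasAVX2 cover the
	// other code paths in this package.
	fmt.Println("SSE4.1:", cpu.X86.HasSSE41)
	fmt.Println("AVX:   ", cpu.X86.HasAVX)
	fmt.Println("AVX2:  ", cpu.X86.HasAVX2)
}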
Some files were not shown because too many files have changed in this diff.