mirror of
https://github.com/TECHNOFAB11/zfs-localpv.git
synced 2025-12-11 22:10:11 +01:00
test(zfspv): vendor for ginkgo test code
Signed-off-by: Pawan <pawan@mayadata.io>
This commit is contained in:
parent
d933b47c75
commit
c3c5eb1794
305 changed files with 194450 additions and 1812 deletions
175
Gopkg.lock
generated
175
Gopkg.lock
generated
|
|
@ -1,6 +1,14 @@
|
||||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||||
|
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:26ee1e365ea8f312ee11e170fc6675bac0dd3d4adf2406e753d0a43527e1afb8"
|
||||||
|
name = "cloud.google.com/go"
|
||||||
|
packages = ["compute/metadata"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "d1ee711ee996fa74abaffbdb572963f368f215a9"
|
||||||
|
version = "v0.49.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:04457f9f6f3ffc5fea48e71d62f2ca256637dee0a04d710288e27e05c8b41976"
|
digest = "1:04457f9f6f3ffc5fea48e71d62f2ca256637dee0a04d710288e27e05c8b41976"
|
||||||
name = "github.com/Sirupsen/logrus"
|
name = "github.com/Sirupsen/logrus"
|
||||||
|
|
@ -25,6 +33,17 @@
|
||||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
||||||
version = "v1.1.1"
|
version = "v1.1.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:ecdc8e0fe3bc7d549af1c9c36acf3820523b707d6c071b6d0c3860882c6f7b42"
|
||||||
|
name = "github.com/docker/spdystream"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"spdy",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:ac425d784b13d49b37a5bbed3ce022677f8f3073b216f05d6adcb9303e27fa0f"
|
digest = "1:ac425d784b13d49b37a5bbed3ce022677f8f3073b216f05d6adcb9303e27fa0f"
|
||||||
name = "github.com/evanphx/json-patch"
|
name = "github.com/evanphx/json-patch"
|
||||||
|
|
@ -33,6 +52,14 @@
|
||||||
revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f"
|
revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f"
|
||||||
version = "v4.5.0"
|
version = "v4.5.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda"
|
||||||
|
name = "github.com/ghodss/yaml"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:8a85f428bc6ebfa87f53216b6e43b52b30eccbcffcbd6b057a69ee16718a2248"
|
digest = "1:8a85f428bc6ebfa87f53216b6e43b52b30eccbcffcbd6b057a69ee16718a2248"
|
||||||
name = "github.com/gogo/protobuf"
|
name = "github.com/gogo/protobuf"
|
||||||
|
|
@ -44,14 +71,6 @@
|
||||||
revision = "0ca988a254f991240804bf9821f3450d87ccbb1b"
|
revision = "0ca988a254f991240804bf9821f3450d87ccbb1b"
|
||||||
version = "v1.3.0"
|
version = "v1.3.0"
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467"
|
|
||||||
name = "github.com/golang/glog"
|
|
||||||
packages = ["."]
|
|
||||||
pruneopts = "UT"
|
|
||||||
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
|
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
branch = "master"
|
branch = "master"
|
||||||
digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
|
digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
|
||||||
|
|
@ -108,6 +127,20 @@
|
||||||
revision = "7f827b33c0f158ec5dfbba01bb0b14a4541fd81d"
|
revision = "7f827b33c0f158ec5dfbba01bb0b14a4541fd81d"
|
||||||
version = "v0.5.3"
|
version = "v0.5.3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:a1038ef593beb4771c8f0f9c26e8b00410acd800af5c6864651d9bf160ea1813"
|
||||||
|
name = "github.com/hpcloud/tail"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"ratelimiter",
|
||||||
|
"util",
|
||||||
|
"watch",
|
||||||
|
"winfile",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:a0cefd27d12712af4b5018dc7046f245e1e3b5760e2e848c30b171b570708f9b"
|
digest = "1:a0cefd27d12712af4b5018dc7046f245e1e3b5760e2e848c30b171b570708f9b"
|
||||||
name = "github.com/imdario/mergo"
|
name = "github.com/imdario/mergo"
|
||||||
|
|
@ -164,6 +197,54 @@
|
||||||
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
|
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
|
||||||
version = "1.0.1"
|
version = "1.0.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:51766ddcba65b7f0eece2906249d7603970959ba0f1011b72037485044339ece"
|
||||||
|
name = "github.com/onsi/ginkgo"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"config",
|
||||||
|
"internal/codelocation",
|
||||||
|
"internal/containernode",
|
||||||
|
"internal/failer",
|
||||||
|
"internal/leafnodes",
|
||||||
|
"internal/remote",
|
||||||
|
"internal/spec",
|
||||||
|
"internal/spec_iterator",
|
||||||
|
"internal/specrunner",
|
||||||
|
"internal/suite",
|
||||||
|
"internal/testingtproxy",
|
||||||
|
"internal/writer",
|
||||||
|
"reporters",
|
||||||
|
"reporters/stenographer",
|
||||||
|
"reporters/stenographer/support/go-colorable",
|
||||||
|
"reporters/stenographer/support/go-isatty",
|
||||||
|
"types",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "ce5d301e555bb672c693c099ba6ca5087b06c0b4"
|
||||||
|
version = "v1.10.3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:7e57cd10c5424b2abf91f29354796a2468720396419585fef5a2d346c5a0f24d"
|
||||||
|
name = "github.com/onsi/gomega"
|
||||||
|
packages = [
|
||||||
|
".",
|
||||||
|
"format",
|
||||||
|
"internal/assertion",
|
||||||
|
"internal/asyncassertion",
|
||||||
|
"internal/oraclematcher",
|
||||||
|
"internal/testingtsupport",
|
||||||
|
"matchers",
|
||||||
|
"matchers/support/goraph/bipartitegraph",
|
||||||
|
"matchers/support/goraph/edge",
|
||||||
|
"matchers/support/goraph/node",
|
||||||
|
"matchers/support/goraph/util",
|
||||||
|
"types",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "f9a52764bd5a0fd2d201bcca584351d03b72f8da"
|
||||||
|
version = "v1.7.1"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
|
digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
|
||||||
name = "github.com/pkg/errors"
|
name = "github.com/pkg/errors"
|
||||||
|
|
@ -198,11 +279,14 @@
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
branch = "master"
|
branch = "master"
|
||||||
digest = "1:e93fe09ca93cf16f8b2dc48053f56c2f91ed4f3fd16bfaf9596b6548c7b48a7f"
|
digest = "1:ff1e32f635eb98d4e9949af197106c5d326f50eae9f908f0ff8c873c6035bcef"
|
||||||
name = "golang.org/x/net"
|
name = "golang.org/x/net"
|
||||||
packages = [
|
packages = [
|
||||||
"context",
|
"context",
|
||||||
"context/ctxhttp",
|
"context/ctxhttp",
|
||||||
|
"html",
|
||||||
|
"html/atom",
|
||||||
|
"html/charset",
|
||||||
"http/httpguts",
|
"http/httpguts",
|
||||||
"http2",
|
"http2",
|
||||||
"http2/hpack",
|
"http2/hpack",
|
||||||
|
|
@ -215,11 +299,14 @@
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
branch = "master"
|
branch = "master"
|
||||||
digest = "1:8d1c112fb1679fa097e9a9255a786ee47383fa2549a3da71bcb1334a693ebcfe"
|
digest = "1:31e33f76456ccf54819ab4a646cf01271d1a99d7712ab84bf1a9e7b61cd2031b"
|
||||||
name = "golang.org/x/oauth2"
|
name = "golang.org/x/oauth2"
|
||||||
packages = [
|
packages = [
|
||||||
".",
|
".",
|
||||||
|
"google",
|
||||||
"internal",
|
"internal",
|
||||||
|
"jws",
|
||||||
|
"jwt",
|
||||||
]
|
]
|
||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33"
|
revision = "0f29369cfe4552d0e4bcddc57cc75f4d7e672a33"
|
||||||
|
|
@ -236,11 +323,21 @@
|
||||||
revision = "9109b7679e13aa34a54834cfb4949cac4b96e576"
|
revision = "9109b7679e13aa34a54834cfb4949cac4b96e576"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
|
digest = "1:28deae5fe892797ff37a317b5bcda96d11d1c90dadd89f1337651df3bc4c586e"
|
||||||
name = "golang.org/x/text"
|
name = "golang.org/x/text"
|
||||||
packages = [
|
packages = [
|
||||||
"collate",
|
"collate",
|
||||||
"collate/build",
|
"collate/build",
|
||||||
|
"encoding",
|
||||||
|
"encoding/charmap",
|
||||||
|
"encoding/htmlindex",
|
||||||
|
"encoding/internal",
|
||||||
|
"encoding/internal/identifier",
|
||||||
|
"encoding/japanese",
|
||||||
|
"encoding/korean",
|
||||||
|
"encoding/simplifiedchinese",
|
||||||
|
"encoding/traditionalchinese",
|
||||||
|
"encoding/unicode",
|
||||||
"internal/colltab",
|
"internal/colltab",
|
||||||
"internal/gen",
|
"internal/gen",
|
||||||
"internal/language",
|
"internal/language",
|
||||||
|
|
@ -248,7 +345,9 @@
|
||||||
"internal/tag",
|
"internal/tag",
|
||||||
"internal/triegen",
|
"internal/triegen",
|
||||||
"internal/ucd",
|
"internal/ucd",
|
||||||
|
"internal/utf8internal",
|
||||||
"language",
|
"language",
|
||||||
|
"runes",
|
||||||
"secure/bidirule",
|
"secure/bidirule",
|
||||||
"transform",
|
"transform",
|
||||||
"unicode/bidi",
|
"unicode/bidi",
|
||||||
|
|
@ -290,13 +389,16 @@
|
||||||
revision = "afe7f8212f0d48598f7ba258eba2127cbfb7c3e9"
|
revision = "afe7f8212f0d48598f7ba258eba2127cbfb7c3e9"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:6eb6e3b6d9fffb62958cf7f7d88dbbe1dd6839436b0802e194c590667a40412a"
|
digest = "1:498b722d33dde4471e7d6e5d88a5e7132d2a8306fea5ff5ee82d1f418b4f41ed"
|
||||||
name = "google.golang.org/appengine"
|
name = "google.golang.org/appengine"
|
||||||
packages = [
|
packages = [
|
||||||
|
".",
|
||||||
"internal",
|
"internal",
|
||||||
|
"internal/app_identity",
|
||||||
"internal/base",
|
"internal/base",
|
||||||
"internal/datastore",
|
"internal/datastore",
|
||||||
"internal/log",
|
"internal/log",
|
||||||
|
"internal/modules",
|
||||||
"internal/remote_api",
|
"internal/remote_api",
|
||||||
"internal/urlfetch",
|
"internal/urlfetch",
|
||||||
"urlfetch",
|
"urlfetch",
|
||||||
|
|
@ -355,6 +457,15 @@
|
||||||
revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb"
|
revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb"
|
||||||
version = "v1.23.0"
|
version = "v1.23.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
|
||||||
|
name = "gopkg.in/fsnotify.v1"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
|
||||||
|
source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz"
|
||||||
|
version = "v1.4.7"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
|
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
|
||||||
name = "gopkg.in/inf.v0"
|
name = "gopkg.in/inf.v0"
|
||||||
|
|
@ -363,6 +474,14 @@
|
||||||
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
|
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
|
||||||
version = "v0.9.1"
|
version = "v0.9.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "v1"
|
||||||
|
digest = "1:0caa92e17bc0b65a98c63e5bc76a9e844cd5e56493f8fdbb28fad101a16254d9"
|
||||||
|
name = "gopkg.in/tomb.v1"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
|
digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96"
|
||||||
name = "gopkg.in/yaml.v2"
|
name = "gopkg.in/yaml.v2"
|
||||||
|
|
@ -417,7 +536,7 @@
|
||||||
version = "kubernetes-1.14.0"
|
version = "kubernetes-1.14.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:05e778704c75489c318be7673685d848eb22b7e8ec47204c82a399b19b38bdcd"
|
digest = "1:beba29e406bec6b412c64627c3181b343784b8ecd4ce5e71d36808968b28dd84"
|
||||||
name = "k8s.io/apimachinery"
|
name = "k8s.io/apimachinery"
|
||||||
packages = [
|
packages = [
|
||||||
"pkg/api/errors",
|
"pkg/api/errors",
|
||||||
|
|
@ -446,12 +565,14 @@
|
||||||
"pkg/util/diff",
|
"pkg/util/diff",
|
||||||
"pkg/util/errors",
|
"pkg/util/errors",
|
||||||
"pkg/util/framer",
|
"pkg/util/framer",
|
||||||
|
"pkg/util/httpstream",
|
||||||
|
"pkg/util/httpstream/spdy",
|
||||||
"pkg/util/intstr",
|
"pkg/util/intstr",
|
||||||
"pkg/util/json",
|
"pkg/util/json",
|
||||||
"pkg/util/mergepatch",
|
"pkg/util/mergepatch",
|
||||||
"pkg/util/naming",
|
"pkg/util/naming",
|
||||||
"pkg/util/net",
|
"pkg/util/net",
|
||||||
"pkg/util/rand",
|
"pkg/util/remotecommand",
|
||||||
"pkg/util/runtime",
|
"pkg/util/runtime",
|
||||||
"pkg/util/sets",
|
"pkg/util/sets",
|
||||||
"pkg/util/strategicpatch",
|
"pkg/util/strategicpatch",
|
||||||
|
|
@ -462,6 +583,7 @@
|
||||||
"pkg/version",
|
"pkg/version",
|
||||||
"pkg/watch",
|
"pkg/watch",
|
||||||
"third_party/forked/golang/json",
|
"third_party/forked/golang/json",
|
||||||
|
"third_party/forked/golang/netutil",
|
||||||
"third_party/forked/golang/reflect",
|
"third_party/forked/golang/reflect",
|
||||||
]
|
]
|
||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
|
|
@ -469,7 +591,7 @@
|
||||||
version = "kubernetes-1.14.0"
|
version = "kubernetes-1.14.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:551a9be727bf4521e11bd51ad6e972d7f937a5962e1fdcfe322f5686298aa3a9"
|
digest = "1:7f4cee860ccff5d172625b3a03fe72b6c3adafd35cf7812fe27d4a1cb7f856e8"
|
||||||
name = "k8s.io/client-go"
|
name = "k8s.io/client-go"
|
||||||
packages = [
|
packages = [
|
||||||
"discovery",
|
"discovery",
|
||||||
|
|
@ -601,9 +723,11 @@
|
||||||
"pkg/apis/clientauthentication/v1beta1",
|
"pkg/apis/clientauthentication/v1beta1",
|
||||||
"pkg/version",
|
"pkg/version",
|
||||||
"plugin/pkg/client/auth/exec",
|
"plugin/pkg/client/auth/exec",
|
||||||
|
"plugin/pkg/client/auth/gcp",
|
||||||
"rest",
|
"rest",
|
||||||
"rest/watch",
|
"rest/watch",
|
||||||
"testing",
|
"testing",
|
||||||
|
"third_party/forked/golang/template",
|
||||||
"tools/auth",
|
"tools/auth",
|
||||||
"tools/cache",
|
"tools/cache",
|
||||||
"tools/clientcmd",
|
"tools/clientcmd",
|
||||||
|
|
@ -615,11 +739,15 @@
|
||||||
"tools/record",
|
"tools/record",
|
||||||
"tools/record/util",
|
"tools/record/util",
|
||||||
"tools/reference",
|
"tools/reference",
|
||||||
|
"tools/remotecommand",
|
||||||
"transport",
|
"transport",
|
||||||
|
"transport/spdy",
|
||||||
"util/cert",
|
"util/cert",
|
||||||
"util/connrotation",
|
"util/connrotation",
|
||||||
|
"util/exec",
|
||||||
"util/flowcontrol",
|
"util/flowcontrol",
|
||||||
"util/homedir",
|
"util/homedir",
|
||||||
|
"util/jsonpath",
|
||||||
"util/keyutil",
|
"util/keyutil",
|
||||||
"util/retry",
|
"util/retry",
|
||||||
"util/workqueue",
|
"util/workqueue",
|
||||||
|
|
@ -691,12 +819,9 @@
|
||||||
revision = "743ec37842bffe49dd4221d9026f30fb1d5adbc4"
|
revision = "743ec37842bffe49dd4221d9026f30fb1d5adbc4"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:943dfbf554e6c66394ca3320f7e495237e16afd0ded7048da57d89d91a16c1e8"
|
digest = "1:955f1e20ed4c93fa1a1065670b224fbbe43bcf8ef732c63c47d3424ea438d89a"
|
||||||
name = "k8s.io/kubernetes"
|
name = "k8s.io/kubernetes"
|
||||||
packages = [
|
packages = ["pkg/util/mount"]
|
||||||
"pkg/util/mount",
|
|
||||||
"pkg/util/slice",
|
|
||||||
]
|
|
||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "2d3c76f9091b6bec110a5e63777c332469e0cba2"
|
revision = "2d3c76f9091b6bec110a5e63777c332469e0cba2"
|
||||||
version = "v1.15.3"
|
version = "v1.15.3"
|
||||||
|
|
@ -742,16 +867,21 @@
|
||||||
input-imports = [
|
input-imports = [
|
||||||
"github.com/Sirupsen/logrus",
|
"github.com/Sirupsen/logrus",
|
||||||
"github.com/container-storage-interface/spec/lib/go/csi",
|
"github.com/container-storage-interface/spec/lib/go/csi",
|
||||||
"github.com/golang/glog",
|
"github.com/ghodss/yaml",
|
||||||
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer",
|
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer",
|
||||||
|
"github.com/onsi/ginkgo",
|
||||||
|
"github.com/onsi/gomega",
|
||||||
"github.com/pkg/errors",
|
"github.com/pkg/errors",
|
||||||
"github.com/spf13/cobra",
|
"github.com/spf13/cobra",
|
||||||
"golang.org/x/net/context",
|
"golang.org/x/net/context",
|
||||||
"google.golang.org/grpc",
|
"google.golang.org/grpc",
|
||||||
"google.golang.org/grpc/codes",
|
"google.golang.org/grpc/codes",
|
||||||
"google.golang.org/grpc/status",
|
"google.golang.org/grpc/status",
|
||||||
|
"k8s.io/api/apps/v1",
|
||||||
"k8s.io/api/core/v1",
|
"k8s.io/api/core/v1",
|
||||||
|
"k8s.io/api/storage/v1",
|
||||||
"k8s.io/apimachinery/pkg/api/errors",
|
"k8s.io/apimachinery/pkg/api/errors",
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource",
|
||||||
"k8s.io/apimachinery/pkg/apis/meta/v1",
|
"k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||||
"k8s.io/apimachinery/pkg/labels",
|
"k8s.io/apimachinery/pkg/labels",
|
||||||
"k8s.io/apimachinery/pkg/runtime",
|
"k8s.io/apimachinery/pkg/runtime",
|
||||||
|
|
@ -768,11 +898,13 @@
|
||||||
"k8s.io/client-go/kubernetes",
|
"k8s.io/client-go/kubernetes",
|
||||||
"k8s.io/client-go/kubernetes/scheme",
|
"k8s.io/client-go/kubernetes/scheme",
|
||||||
"k8s.io/client-go/kubernetes/typed/core/v1",
|
"k8s.io/client-go/kubernetes/typed/core/v1",
|
||||||
|
"k8s.io/client-go/plugin/pkg/client/auth/gcp",
|
||||||
"k8s.io/client-go/rest",
|
"k8s.io/client-go/rest",
|
||||||
"k8s.io/client-go/testing",
|
"k8s.io/client-go/testing",
|
||||||
"k8s.io/client-go/tools/cache",
|
"k8s.io/client-go/tools/cache",
|
||||||
"k8s.io/client-go/tools/clientcmd",
|
"k8s.io/client-go/tools/clientcmd",
|
||||||
"k8s.io/client-go/tools/record",
|
"k8s.io/client-go/tools/record",
|
||||||
|
"k8s.io/client-go/tools/remotecommand",
|
||||||
"k8s.io/client-go/util/flowcontrol",
|
"k8s.io/client-go/util/flowcontrol",
|
||||||
"k8s.io/client-go/util/workqueue",
|
"k8s.io/client-go/util/workqueue",
|
||||||
"k8s.io/code-generator/cmd/client-gen",
|
"k8s.io/code-generator/cmd/client-gen",
|
||||||
|
|
@ -781,7 +913,6 @@
|
||||||
"k8s.io/code-generator/cmd/informer-gen",
|
"k8s.io/code-generator/cmd/informer-gen",
|
||||||
"k8s.io/code-generator/cmd/lister-gen",
|
"k8s.io/code-generator/cmd/lister-gen",
|
||||||
"k8s.io/kubernetes/pkg/util/mount",
|
"k8s.io/kubernetes/pkg/util/mount",
|
||||||
"k8s.io/kubernetes/pkg/util/slice",
|
|
||||||
"sigs.k8s.io/controller-runtime/pkg/runtime/signals",
|
"sigs.k8s.io/controller-runtime/pkg/runtime/signals",
|
||||||
]
|
]
|
||||||
solver-name = "gps-cdcl"
|
solver-name = "gps-cdcl"
|
||||||
|
|
|
||||||
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
Normal file
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
12
vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json
generated
vendored
Normal file
12
vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json
generated
vendored
Normal file
|
|
@ -0,0 +1,12 @@
|
||||||
|
{
|
||||||
|
"name": "metadata",
|
||||||
|
"name_pretty": "Google Compute Engine Metadata API",
|
||||||
|
"product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata",
|
||||||
|
"client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata",
|
||||||
|
"release_level": "ga",
|
||||||
|
"language": "go",
|
||||||
|
"repo": "googleapis/google-cloud-go",
|
||||||
|
"distribution_name": "cloud.google.com/go/compute/metadata",
|
||||||
|
"api_id": "compute:metadata",
|
||||||
|
"requires_billing": false
|
||||||
|
}
|
||||||
526
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
Normal file
526
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
Normal file
|
|
@ -0,0 +1,526 @@
|
||||||
|
// Copyright 2014 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package metadata provides access to Google Compute Engine (GCE)
|
||||||
|
// metadata and API service accounts.
|
||||||
|
//
|
||||||
|
// This package is a wrapper around the GCE metadata service,
|
||||||
|
// as documented at https://developers.google.com/compute/docs/metadata.
|
||||||
|
package metadata // import "cloud.google.com/go/compute/metadata"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// metadataIP is the documented metadata server IP address.
|
||||||
|
metadataIP = "169.254.169.254"
|
||||||
|
|
||||||
|
// metadataHostEnv is the environment variable specifying the
|
||||||
|
// GCE metadata hostname. If empty, the default value of
|
||||||
|
// metadataIP ("169.254.169.254") is used instead.
|
||||||
|
// This is variable name is not defined by any spec, as far as
|
||||||
|
// I know; it was made up for the Go package.
|
||||||
|
metadataHostEnv = "GCE_METADATA_HOST"
|
||||||
|
|
||||||
|
userAgent = "gcloud-golang/0.1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type cachedValue struct {
|
||||||
|
k string
|
||||||
|
trim bool
|
||||||
|
mu sync.Mutex
|
||||||
|
v string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
projID = &cachedValue{k: "project/project-id", trim: true}
|
||||||
|
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
||||||
|
instID = &cachedValue{k: "instance/id", trim: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultClient = &Client{hc: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
ResponseHeaderTimeout: 2 * time.Second,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
subscribeClient = &Client{hc: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
)
|
||||||
|
|
||||||
|
// NotDefinedError is returned when requested metadata is not defined.
|
||||||
|
//
|
||||||
|
// The underlying string is the suffix after "/computeMetadata/v1/".
|
||||||
|
//
|
||||||
|
// This error is not returned if the value is defined to be the empty
|
||||||
|
// string.
|
||||||
|
type NotDefinedError string
|
||||||
|
|
||||||
|
func (suffix NotDefinedError) Error() string {
|
||||||
|
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
c.mu.Lock()
|
||||||
|
if c.v != "" {
|
||||||
|
return c.v, nil
|
||||||
|
}
|
||||||
|
if c.trim {
|
||||||
|
v, err = cl.getTrimmed(c.k)
|
||||||
|
} else {
|
||||||
|
v, err = cl.Get(c.k)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
c.v = v
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
onGCEOnce sync.Once
|
||||||
|
onGCE bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// OnGCE reports whether this process is running on Google Compute Engine.
|
||||||
|
func OnGCE() bool {
|
||||||
|
onGCEOnce.Do(initOnGCE)
|
||||||
|
return onGCE
|
||||||
|
}
|
||||||
|
|
||||||
|
func initOnGCE() {
|
||||||
|
onGCE = testOnGCE()
|
||||||
|
}
|
||||||
|
|
||||||
|
func testOnGCE() bool {
|
||||||
|
// The user explicitly said they're on GCE, so trust them.
|
||||||
|
if os.Getenv(metadataHostEnv) != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
resc := make(chan bool, 2)
|
||||||
|
|
||||||
|
// Try two strategies in parallel.
|
||||||
|
// See https://github.com/googleapis/google-cloud-go/issues/194
|
||||||
|
go func() {
|
||||||
|
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||||
|
req.Header.Set("User-Agent", userAgent)
|
||||||
|
res, err := defaultClient.hc.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
resc <- false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
addrs, err := net.LookupHost("metadata.google.internal")
|
||||||
|
if err != nil || len(addrs) == 0 {
|
||||||
|
resc <- false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resc <- strsContains(addrs, metadataIP)
|
||||||
|
}()
|
||||||
|
|
||||||
|
tryHarder := systemInfoSuggestsGCE()
|
||||||
|
if tryHarder {
|
||||||
|
res := <-resc
|
||||||
|
if res {
|
||||||
|
// The first strategy succeeded, so let's use it.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Wait for either the DNS or metadata server probe to
|
||||||
|
// contradict the other one and say we are running on
|
||||||
|
// GCE. Give it a lot of time to do so, since the system
|
||||||
|
// info already suggests we're running on a GCE BIOS.
|
||||||
|
timer := time.NewTimer(5 * time.Second)
|
||||||
|
defer timer.Stop()
|
||||||
|
select {
|
||||||
|
case res = <-resc:
|
||||||
|
return res
|
||||||
|
case <-timer.C:
|
||||||
|
// Too slow. Who knows what this system is.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// There's no hint from the system info that we're running on
|
||||||
|
// GCE, so use the first probe's result as truth, whether it's
|
||||||
|
// true or false. The goal here is to optimize for speed for
|
||||||
|
// users who are NOT running on GCE. We can't assume that
|
||||||
|
// either a DNS lookup or an HTTP request to a blackholed IP
|
||||||
|
// address is fast. Worst case this should return when the
|
||||||
|
// metaClient's Transport.ResponseHeaderTimeout or
|
||||||
|
// Transport.Dial.Timeout fires (in two seconds).
|
||||||
|
return <-resc
|
||||||
|
}
|
||||||
|
|
||||||
|
// systemInfoSuggestsGCE reports whether the local system (without
|
||||||
|
// doing network requests) suggests that we're running on GCE. If this
|
||||||
|
// returns true, testOnGCE tries a bit harder to reach its metadata
|
||||||
|
// server.
|
||||||
|
func systemInfoSuggestsGCE() bool {
|
||||||
|
if runtime.GOOS != "linux" {
|
||||||
|
// We don't have any non-Linux clues available, at least yet.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
||||||
|
name := strings.TrimSpace(string(slurp))
|
||||||
|
return name == "Google" || name == "Google Compute Engine"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
|
||||||
|
// ResponseHeaderTimeout).
|
||||||
|
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||||
|
return subscribeClient.Subscribe(suffix, fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get calls Client.Get on the default client.
|
||||||
|
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
|
||||||
|
|
||||||
|
// ProjectID returns the current instance's project ID string.
|
||||||
|
func ProjectID() (string, error) { return defaultClient.ProjectID() }
|
||||||
|
|
||||||
|
// NumericProjectID returns the current instance's numeric project ID.
|
||||||
|
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
|
||||||
|
|
||||||
|
// InternalIP returns the instance's primary internal IP address.
|
||||||
|
func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
||||||
|
|
||||||
|
// ExternalIP returns the instance's primary external (public) IP address.
|
||||||
|
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
||||||
|
|
||||||
|
// Email calls Client.Email on the default client.
|
||||||
|
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
|
||||||
|
|
||||||
|
// Hostname returns the instance's hostname. This will be of the form
|
||||||
|
// "<instanceID>.c.<projID>.internal".
|
||||||
|
func Hostname() (string, error) { return defaultClient.Hostname() }
|
||||||
|
|
||||||
|
// InstanceTags returns the list of user-defined instance tags,
|
||||||
|
// assigned when initially creating a GCE instance.
|
||||||
|
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
|
||||||
|
|
||||||
|
// InstanceID returns the current VM's numeric instance ID.
|
||||||
|
func InstanceID() (string, error) { return defaultClient.InstanceID() }
|
||||||
|
|
||||||
|
// InstanceName returns the current VM's instance ID string.
|
||||||
|
func InstanceName() (string, error) { return defaultClient.InstanceName() }
|
||||||
|
|
||||||
|
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||||
|
func Zone() (string, error) { return defaultClient.Zone() }
|
||||||
|
|
||||||
|
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
||||||
|
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
|
||||||
|
|
||||||
|
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
||||||
|
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
|
||||||
|
|
||||||
|
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
||||||
|
func InstanceAttributeValue(attr string) (string, error) {
|
||||||
|
return defaultClient.InstanceAttributeValue(attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
||||||
|
func ProjectAttributeValue(attr string) (string, error) {
|
||||||
|
return defaultClient.ProjectAttributeValue(attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scopes calls Client.Scopes on the default client.
|
||||||
|
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
||||||
|
|
||||||
|
func strsContains(ss []string, s string) bool {
|
||||||
|
for _, v := range ss {
|
||||||
|
if v == s {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Client provides metadata.
|
||||||
|
type Client struct {
|
||||||
|
hc *http.Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
|
||||||
|
// will use the given http.Client instead of the default client.
|
||||||
|
func NewClient(c *http.Client) *Client {
|
||||||
|
return &Client{hc: c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getETag returns a value from the metadata service as well as the associated ETag.
|
||||||
|
// This func is otherwise equivalent to Get.
|
||||||
|
func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
||||||
|
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
||||||
|
// a container, which is an important use-case for local testing of cloud
|
||||||
|
// deployments. To enable spoofing of the metadata service, the environment
|
||||||
|
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
||||||
|
// requests shall go.
|
||||||
|
host := os.Getenv(metadataHostEnv)
|
||||||
|
if host == "" {
|
||||||
|
// Using 169.254.169.254 instead of "metadata" here because Go
|
||||||
|
// binaries built with the "netgo" tag and without cgo won't
|
||||||
|
// know the search suffix for "metadata" is
|
||||||
|
// ".google.internal", and this IP address is documented as
|
||||||
|
// being stable anyway.
|
||||||
|
host = metadataIP
|
||||||
|
}
|
||||||
|
u := "http://" + host + "/computeMetadata/v1/" + suffix
|
||||||
|
req, _ := http.NewRequest("GET", u, nil)
|
||||||
|
req.Header.Set("Metadata-Flavor", "Google")
|
||||||
|
req.Header.Set("User-Agent", userAgent)
|
||||||
|
res, err := c.hc.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode == http.StatusNotFound {
|
||||||
|
return "", "", NotDefinedError(suffix)
|
||||||
|
}
|
||||||
|
all, err := ioutil.ReadAll(res.Body)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
if res.StatusCode != 200 {
|
||||||
|
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
|
||||||
|
}
|
||||||
|
return string(all), res.Header.Get("Etag"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns a value from the metadata service.
|
||||||
|
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||||
|
//
|
||||||
|
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
||||||
|
// 169.254.169.254 will be used instead.
|
||||||
|
//
|
||||||
|
// If the requested metadata is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
func (c *Client) Get(suffix string) (string, error) {
|
||||||
|
val, _, err := c.getETag(suffix)
|
||||||
|
return val, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) getTrimmed(suffix string) (s string, err error) {
|
||||||
|
s, err = c.Get(suffix)
|
||||||
|
s = strings.TrimSpace(s)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) lines(suffix string) ([]string, error) {
|
||||||
|
j, err := c.Get(suffix)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||||
|
for i := range s {
|
||||||
|
s[i] = strings.TrimSpace(s[i])
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectID returns the current instance's project ID string.
|
||||||
|
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
|
||||||
|
|
||||||
|
// NumericProjectID returns the current instance's numeric project ID.
|
||||||
|
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
|
||||||
|
|
||||||
|
// InstanceID returns the current VM's numeric instance ID.
|
||||||
|
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
|
||||||
|
|
||||||
|
// InternalIP returns the instance's primary internal IP address.
|
||||||
|
func (c *Client) InternalIP() (string, error) {
|
||||||
|
return c.getTrimmed("instance/network-interfaces/0/ip")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Email returns the email address associated with the service account.
|
||||||
|
// The account may be empty or the string "default" to use the instance's
|
||||||
|
// main account.
|
||||||
|
func (c *Client) Email(serviceAccount string) (string, error) {
|
||||||
|
if serviceAccount == "" {
|
||||||
|
serviceAccount = "default"
|
||||||
|
}
|
||||||
|
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExternalIP returns the instance's primary external (public) IP address.
|
||||||
|
func (c *Client) ExternalIP() (string, error) {
|
||||||
|
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hostname returns the instance's hostname. This will be of the form
|
||||||
|
// "<instanceID>.c.<projID>.internal".
|
||||||
|
func (c *Client) Hostname() (string, error) {
|
||||||
|
return c.getTrimmed("instance/hostname")
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceTags returns the list of user-defined instance tags,
|
||||||
|
// assigned when initially creating a GCE instance.
|
||||||
|
func (c *Client) InstanceTags() ([]string, error) {
|
||||||
|
var s []string
|
||||||
|
j, err := c.Get("instance/tags")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceName returns the current VM's instance ID string.
|
||||||
|
func (c *Client) InstanceName() (string, error) {
|
||||||
|
host, err := c.Hostname()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strings.Split(host, ".")[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||||
|
func (c *Client) Zone() (string, error) {
|
||||||
|
zone, err := c.getTrimmed("instance/zone")
|
||||||
|
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceAttributes returns the list of user-defined attributes,
|
||||||
|
// assigned when initially creating a GCE VM instance. The value of an
|
||||||
|
// attribute can be obtained with InstanceAttributeValue.
|
||||||
|
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
|
||||||
|
|
||||||
|
// ProjectAttributes returns the list of user-defined attributes
|
||||||
|
// applying to the project as a whole, not just this VM. The value of
|
||||||
|
// an attribute can be obtained with ProjectAttributeValue.
|
||||||
|
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|
||||||
|
|
||||||
|
// InstanceAttributeValue returns the value of the provided VM
|
||||||
|
// instance attribute.
|
||||||
|
//
|
||||||
|
// If the requested attribute is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
//
|
||||||
|
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||||
|
// defined to be the empty string.
|
||||||
|
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
|
||||||
|
return c.Get("instance/attributes/" + attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectAttributeValue returns the value of the provided
|
||||||
|
// project attribute.
|
||||||
|
//
|
||||||
|
// If the requested attribute is not defined, the returned error will
|
||||||
|
// be of type NotDefinedError.
|
||||||
|
//
|
||||||
|
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||||
|
// defined to be the empty string.
|
||||||
|
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
|
||||||
|
return c.Get("project/attributes/" + attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scopes returns the service account scopes for the given account.
|
||||||
|
// The account may be empty or the string "default" to use the instance's
|
||||||
|
// main account.
|
||||||
|
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
|
||||||
|
if serviceAccount == "" {
|
||||||
|
serviceAccount = "default"
|
||||||
|
}
|
||||||
|
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe subscribes to a value from the metadata service.
|
||||||
|
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||||
|
// The suffix may contain query parameters.
|
||||||
|
//
|
||||||
|
// Subscribe calls fn with the latest metadata value indicated by the provided
|
||||||
|
// suffix. If the metadata value is deleted, fn is called with the empty string
|
||||||
|
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
||||||
|
// is deleted. Subscribe returns the error value returned from the last call to
|
||||||
|
// fn, which may be nil when ok == false.
|
||||||
|
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||||
|
const failedSubscribeSleep = time.Second * 5
|
||||||
|
|
||||||
|
// First check to see if the metadata value exists at all.
|
||||||
|
val, lastETag, err := c.getETag(suffix)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := fn(val, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := true
|
||||||
|
if strings.ContainsRune(suffix, '?') {
|
||||||
|
suffix += "&wait_for_change=true&last_etag="
|
||||||
|
} else {
|
||||||
|
suffix += "?wait_for_change=true&last_etag="
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
|
||||||
|
if err != nil {
|
||||||
|
if _, deleted := err.(NotDefinedError); !deleted {
|
||||||
|
time.Sleep(failedSubscribeSleep)
|
||||||
|
continue // Retry on other errors.
|
||||||
|
}
|
||||||
|
ok = false
|
||||||
|
}
|
||||||
|
lastETag = etag
|
||||||
|
|
||||||
|
if err := fn(val, ok); err != nil || !ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error contains an error response from the server.
|
||||||
|
type Error struct {
|
||||||
|
// Code is the HTTP response status code.
|
||||||
|
Code int
|
||||||
|
// Message is the server response message.
|
||||||
|
Message string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
|
||||||
|
}
|
||||||
13
vendor/github.com/docker/spdystream/CONTRIBUTING.md
generated
vendored
Normal file
13
vendor/github.com/docker/spdystream/CONTRIBUTING.md
generated
vendored
Normal file
|
|
@ -0,0 +1,13 @@
|
||||||
|
# Contributing to SpdyStream
|
||||||
|
|
||||||
|
Want to hack on spdystream? Awesome! Here are instructions to get you
|
||||||
|
started.
|
||||||
|
|
||||||
|
SpdyStream is a part of the [Docker](https://docker.io) project, and follows
|
||||||
|
the same rules and principles. If you're already familiar with the way
|
||||||
|
Docker does things, you'll feel right at home.
|
||||||
|
|
||||||
|
Otherwise, go read
|
||||||
|
[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
|
||||||
|
|
||||||
|
Happy hacking!
|
||||||
191
vendor/github.com/docker/spdystream/LICENSE
generated
vendored
Normal file
191
vendor/github.com/docker/spdystream/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,191 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2014-2015 Docker, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
425
vendor/github.com/docker/spdystream/LICENSE.docs
generated
vendored
Normal file
425
vendor/github.com/docker/spdystream/LICENSE.docs
generated
vendored
Normal file
|
|
@ -0,0 +1,425 @@
|
||||||
|
Attribution-ShareAlike 4.0 International
|
||||||
|
|
||||||
|
=======================================================================
|
||||||
|
|
||||||
|
Creative Commons Corporation ("Creative Commons") is not a law firm and
|
||||||
|
does not provide legal services or legal advice. Distribution of
|
||||||
|
Creative Commons public licenses does not create a lawyer-client or
|
||||||
|
other relationship. Creative Commons makes its licenses and related
|
||||||
|
information available on an "as-is" basis. Creative Commons gives no
|
||||||
|
warranties regarding its licenses, any material licensed under their
|
||||||
|
terms and conditions, or any related information. Creative Commons
|
||||||
|
disclaims all liability for damages resulting from their use to the
|
||||||
|
fullest extent possible.
|
||||||
|
|
||||||
|
Using Creative Commons Public Licenses
|
||||||
|
|
||||||
|
Creative Commons public licenses provide a standard set of terms and
|
||||||
|
conditions that creators and other rights holders may use to share
|
||||||
|
original works of authorship and other material subject to copyright
|
||||||
|
and certain other rights specified in the public license below. The
|
||||||
|
following considerations are for informational purposes only, are not
|
||||||
|
exhaustive, and do not form part of our licenses.
|
||||||
|
|
||||||
|
Considerations for licensors: Our public licenses are
|
||||||
|
intended for use by those authorized to give the public
|
||||||
|
permission to use material in ways otherwise restricted by
|
||||||
|
copyright and certain other rights. Our licenses are
|
||||||
|
irrevocable. Licensors should read and understand the terms
|
||||||
|
and conditions of the license they choose before applying it.
|
||||||
|
Licensors should also secure all rights necessary before
|
||||||
|
applying our licenses so that the public can reuse the
|
||||||
|
material as expected. Licensors should clearly mark any
|
||||||
|
material not subject to the license. This includes other CC-
|
||||||
|
licensed material, or material used under an exception or
|
||||||
|
limitation to copyright. More considerations for licensors:
|
||||||
|
wiki.creativecommons.org/Considerations_for_licensors
|
||||||
|
|
||||||
|
Considerations for the public: By using one of our public
|
||||||
|
licenses, a licensor grants the public permission to use the
|
||||||
|
licensed material under specified terms and conditions. If
|
||||||
|
the licensor's permission is not necessary for any reason--for
|
||||||
|
example, because of any applicable exception or limitation to
|
||||||
|
copyright--then that use is not regulated by the license. Our
|
||||||
|
licenses grant only permissions under copyright and certain
|
||||||
|
other rights that a licensor has authority to grant. Use of
|
||||||
|
the licensed material may still be restricted for other
|
||||||
|
reasons, including because others have copyright or other
|
||||||
|
rights in the material. A licensor may make special requests,
|
||||||
|
such as asking that all changes be marked or described.
|
||||||
|
Although not required by our licenses, you are encouraged to
|
||||||
|
respect those requests where reasonable. More_considerations
|
||||||
|
for the public:
|
||||||
|
wiki.creativecommons.org/Considerations_for_licensees
|
||||||
|
|
||||||
|
=======================================================================
|
||||||
|
|
||||||
|
Creative Commons Attribution-ShareAlike 4.0 International Public
|
||||||
|
License
|
||||||
|
|
||||||
|
By exercising the Licensed Rights (defined below), You accept and agree
|
||||||
|
to be bound by the terms and conditions of this Creative Commons
|
||||||
|
Attribution-ShareAlike 4.0 International Public License ("Public
|
||||||
|
License"). To the extent this Public License may be interpreted as a
|
||||||
|
contract, You are granted the Licensed Rights in consideration of Your
|
||||||
|
acceptance of these terms and conditions, and the Licensor grants You
|
||||||
|
such rights in consideration of benefits the Licensor receives from
|
||||||
|
making the Licensed Material available under these terms and
|
||||||
|
conditions.
|
||||||
|
|
||||||
|
|
||||||
|
Section 1 -- Definitions.
|
||||||
|
|
||||||
|
a. Adapted Material means material subject to Copyright and Similar
|
||||||
|
Rights that is derived from or based upon the Licensed Material
|
||||||
|
and in which the Licensed Material is translated, altered,
|
||||||
|
arranged, transformed, or otherwise modified in a manner requiring
|
||||||
|
permission under the Copyright and Similar Rights held by the
|
||||||
|
Licensor. For purposes of this Public License, where the Licensed
|
||||||
|
Material is a musical work, performance, or sound recording,
|
||||||
|
Adapted Material is always produced where the Licensed Material is
|
||||||
|
synched in timed relation with a moving image.
|
||||||
|
|
||||||
|
b. Adapter's License means the license You apply to Your Copyright
|
||||||
|
and Similar Rights in Your contributions to Adapted Material in
|
||||||
|
accordance with the terms and conditions of this Public License.
|
||||||
|
|
||||||
|
c. BY-SA Compatible License means a license listed at
|
||||||
|
creativecommons.org/compatiblelicenses, approved by Creative
|
||||||
|
Commons as essentially the equivalent of this Public License.
|
||||||
|
|
||||||
|
d. Copyright and Similar Rights means copyright and/or similar rights
|
||||||
|
closely related to copyright including, without limitation,
|
||||||
|
performance, broadcast, sound recording, and Sui Generis Database
|
||||||
|
Rights, without regard to how the rights are labeled or
|
||||||
|
categorized. For purposes of this Public License, the rights
|
||||||
|
specified in Section 2(b)(1)-(2) are not Copyright and Similar
|
||||||
|
Rights.
|
||||||
|
|
||||||
|
e. Effective Technological Measures means those measures that, in the
|
||||||
|
absence of proper authority, may not be circumvented under laws
|
||||||
|
fulfilling obligations under Article 11 of the WIPO Copyright
|
||||||
|
Treaty adopted on December 20, 1996, and/or similar international
|
||||||
|
agreements.
|
||||||
|
|
||||||
|
f. Exceptions and Limitations means fair use, fair dealing, and/or
|
||||||
|
any other exception or limitation to Copyright and Similar Rights
|
||||||
|
that applies to Your use of the Licensed Material.
|
||||||
|
|
||||||
|
g. License Elements means the license attributes listed in the name
|
||||||
|
of a Creative Commons Public License. The License Elements of this
|
||||||
|
Public License are Attribution and ShareAlike.
|
||||||
|
|
||||||
|
h. Licensed Material means the artistic or literary work, database,
|
||||||
|
or other material to which the Licensor applied this Public
|
||||||
|
License.
|
||||||
|
|
||||||
|
i. Licensed Rights means the rights granted to You subject to the
|
||||||
|
terms and conditions of this Public License, which are limited to
|
||||||
|
all Copyright and Similar Rights that apply to Your use of the
|
||||||
|
Licensed Material and that the Licensor has authority to license.
|
||||||
|
|
||||||
|
j. Licensor means the individual(s) or entity(ies) granting rights
|
||||||
|
under this Public License.
|
||||||
|
|
||||||
|
k. Share means to provide material to the public by any means or
|
||||||
|
process that requires permission under the Licensed Rights, such
|
||||||
|
as reproduction, public display, public performance, distribution,
|
||||||
|
dissemination, communication, or importation, and to make material
|
||||||
|
available to the public including in ways that members of the
|
||||||
|
public may access the material from a place and at a time
|
||||||
|
individually chosen by them.
|
||||||
|
|
||||||
|
l. Sui Generis Database Rights means rights other than copyright
|
||||||
|
resulting from Directive 96/9/EC of the European Parliament and of
|
||||||
|
the Council of 11 March 1996 on the legal protection of databases,
|
||||||
|
as amended and/or succeeded, as well as other essentially
|
||||||
|
equivalent rights anywhere in the world.
|
||||||
|
|
||||||
|
m. You means the individual or entity exercising the Licensed Rights
|
||||||
|
under this Public License. Your has a corresponding meaning.
|
||||||
|
|
||||||
|
|
||||||
|
Section 2 -- Scope.
|
||||||
|
|
||||||
|
a. License grant.
|
||||||
|
|
||||||
|
1. Subject to the terms and conditions of this Public License,
|
||||||
|
the Licensor hereby grants You a worldwide, royalty-free,
|
||||||
|
non-sublicensable, non-exclusive, irrevocable license to
|
||||||
|
exercise the Licensed Rights in the Licensed Material to:
|
||||||
|
|
||||||
|
a. reproduce and Share the Licensed Material, in whole or
|
||||||
|
in part; and
|
||||||
|
|
||||||
|
b. produce, reproduce, and Share Adapted Material.
|
||||||
|
|
||||||
|
2. Exceptions and Limitations. For the avoidance of doubt, where
|
||||||
|
Exceptions and Limitations apply to Your use, this Public
|
||||||
|
License does not apply, and You do not need to comply with
|
||||||
|
its terms and conditions.
|
||||||
|
|
||||||
|
3. Term. The term of this Public License is specified in Section
|
||||||
|
6(a).
|
||||||
|
|
||||||
|
4. Media and formats; technical modifications allowed. The
|
||||||
|
Licensor authorizes You to exercise the Licensed Rights in
|
||||||
|
all media and formats whether now known or hereafter created,
|
||||||
|
and to make technical modifications necessary to do so. The
|
||||||
|
Licensor waives and/or agrees not to assert any right or
|
||||||
|
authority to forbid You from making technical modifications
|
||||||
|
necessary to exercise the Licensed Rights, including
|
||||||
|
technical modifications necessary to circumvent Effective
|
||||||
|
Technological Measures. For purposes of this Public License,
|
||||||
|
simply making modifications authorized by this Section 2(a)
|
||||||
|
(4) never produces Adapted Material.
|
||||||
|
|
||||||
|
5. Downstream recipients.
|
||||||
|
|
||||||
|
a. Offer from the Licensor -- Licensed Material. Every
|
||||||
|
recipient of the Licensed Material automatically
|
||||||
|
receives an offer from the Licensor to exercise the
|
||||||
|
Licensed Rights under the terms and conditions of this
|
||||||
|
Public License.
|
||||||
|
|
||||||
|
b. Additional offer from the Licensor -- Adapted Material.
|
||||||
|
Every recipient of Adapted Material from You
|
||||||
|
automatically receives an offer from the Licensor to
|
||||||
|
exercise the Licensed Rights in the Adapted Material
|
||||||
|
under the conditions of the Adapter's License You apply.
|
||||||
|
|
||||||
|
c. No downstream restrictions. You may not offer or impose
|
||||||
|
any additional or different terms or conditions on, or
|
||||||
|
apply any Effective Technological Measures to, the
|
||||||
|
Licensed Material if doing so restricts exercise of the
|
||||||
|
Licensed Rights by any recipient of the Licensed
|
||||||
|
Material.
|
||||||
|
|
||||||
|
6. No endorsement. Nothing in this Public License constitutes or
|
||||||
|
may be construed as permission to assert or imply that You
|
||||||
|
are, or that Your use of the Licensed Material is, connected
|
||||||
|
with, or sponsored, endorsed, or granted official status by,
|
||||||
|
the Licensor or others designated to receive attribution as
|
||||||
|
provided in Section 3(a)(1)(A)(i).
|
||||||
|
|
||||||
|
b. Other rights.
|
||||||
|
|
||||||
|
1. Moral rights, such as the right of integrity, are not
|
||||||
|
licensed under this Public License, nor are publicity,
|
||||||
|
privacy, and/or other similar personality rights; however, to
|
||||||
|
the extent possible, the Licensor waives and/or agrees not to
|
||||||
|
assert any such rights held by the Licensor to the limited
|
||||||
|
extent necessary to allow You to exercise the Licensed
|
||||||
|
Rights, but not otherwise.
|
||||||
|
|
||||||
|
2. Patent and trademark rights are not licensed under this
|
||||||
|
Public License.
|
||||||
|
|
||||||
|
3. To the extent possible, the Licensor waives any right to
|
||||||
|
collect royalties from You for the exercise of the Licensed
|
||||||
|
Rights, whether directly or through a collecting society
|
||||||
|
under any voluntary or waivable statutory or compulsory
|
||||||
|
licensing scheme. In all other cases the Licensor expressly
|
||||||
|
reserves any right to collect such royalties.
|
||||||
|
|
||||||
|
|
||||||
|
Section 3 -- License Conditions.
|
||||||
|
|
||||||
|
Your exercise of the Licensed Rights is expressly made subject to the
|
||||||
|
following conditions.
|
||||||
|
|
||||||
|
a. Attribution.
|
||||||
|
|
||||||
|
1. If You Share the Licensed Material (including in modified
|
||||||
|
form), You must:
|
||||||
|
|
||||||
|
a. retain the following if it is supplied by the Licensor
|
||||||
|
with the Licensed Material:
|
||||||
|
|
||||||
|
i. identification of the creator(s) of the Licensed
|
||||||
|
Material and any others designated to receive
|
||||||
|
attribution, in any reasonable manner requested by
|
||||||
|
the Licensor (including by pseudonym if
|
||||||
|
designated);
|
||||||
|
|
||||||
|
ii. a copyright notice;
|
||||||
|
|
||||||
|
iii. a notice that refers to this Public License;
|
||||||
|
|
||||||
|
iv. a notice that refers to the disclaimer of
|
||||||
|
warranties;
|
||||||
|
|
||||||
|
v. a URI or hyperlink to the Licensed Material to the
|
||||||
|
extent reasonably practicable;
|
||||||
|
|
||||||
|
b. indicate if You modified the Licensed Material and
|
||||||
|
retain an indication of any previous modifications; and
|
||||||
|
|
||||||
|
c. indicate the Licensed Material is licensed under this
|
||||||
|
Public License, and include the text of, or the URI or
|
||||||
|
hyperlink to, this Public License.
|
||||||
|
|
||||||
|
2. You may satisfy the conditions in Section 3(a)(1) in any
|
||||||
|
reasonable manner based on the medium, means, and context in
|
||||||
|
which You Share the Licensed Material. For example, it may be
|
||||||
|
reasonable to satisfy the conditions by providing a URI or
|
||||||
|
hyperlink to a resource that includes the required
|
||||||
|
information.
|
||||||
|
|
||||||
|
3. If requested by the Licensor, You must remove any of the
|
||||||
|
information required by Section 3(a)(1)(A) to the extent
|
||||||
|
reasonably practicable.
|
||||||
|
|
||||||
|
b. ShareAlike.
|
||||||
|
|
||||||
|
In addition to the conditions in Section 3(a), if You Share
|
||||||
|
Adapted Material You produce, the following conditions also apply.
|
||||||
|
|
||||||
|
1. The Adapter's License You apply must be a Creative Commons
|
||||||
|
license with the same License Elements, this version or
|
||||||
|
later, or a BY-SA Compatible License.
|
||||||
|
|
||||||
|
2. You must include the text of, or the URI or hyperlink to, the
|
||||||
|
Adapter's License You apply. You may satisfy this condition
|
||||||
|
in any reasonable manner based on the medium, means, and
|
||||||
|
context in which You Share Adapted Material.
|
||||||
|
|
||||||
|
3. You may not offer or impose any additional or different terms
|
||||||
|
or conditions on, or apply any Effective Technological
|
||||||
|
Measures to, Adapted Material that restrict exercise of the
|
||||||
|
rights granted under the Adapter's License You apply.
|
||||||
|
|
||||||
|
|
||||||
|
Section 4 -- Sui Generis Database Rights.
|
||||||
|
|
||||||
|
Where the Licensed Rights include Sui Generis Database Rights that
|
||||||
|
apply to Your use of the Licensed Material:
|
||||||
|
|
||||||
|
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
|
||||||
|
to extract, reuse, reproduce, and Share all or a substantial
|
||||||
|
portion of the contents of the database;
|
||||||
|
|
||||||
|
b. if You include all or a substantial portion of the database
|
||||||
|
contents in a database in which You have Sui Generis Database
|
||||||
|
Rights, then the database in which You have Sui Generis Database
|
||||||
|
Rights (but not its individual contents) is Adapted Material,
|
||||||
|
|
||||||
|
including for purposes of Section 3(b); and
|
||||||
|
c. You must comply with the conditions in Section 3(a) if You Share
|
||||||
|
all or a substantial portion of the contents of the database.
|
||||||
|
|
||||||
|
For the avoidance of doubt, this Section 4 supplements and does not
|
||||||
|
replace Your obligations under this Public License where the Licensed
|
||||||
|
Rights include other Copyright and Similar Rights.
|
||||||
|
|
||||||
|
|
||||||
|
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
|
||||||
|
|
||||||
|
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
|
||||||
|
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
|
||||||
|
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
|
||||||
|
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
|
||||||
|
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
|
||||||
|
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
|
||||||
|
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
|
||||||
|
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
|
||||||
|
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
|
||||||
|
|
||||||
|
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
|
||||||
|
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
|
||||||
|
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
|
||||||
|
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
|
||||||
|
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
|
||||||
|
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
|
||||||
|
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
|
||||||
|
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
|
||||||
|
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
|
||||||
|
|
||||||
|
c. The disclaimer of warranties and limitation of liability provided
|
||||||
|
above shall be interpreted in a manner that, to the extent
|
||||||
|
possible, most closely approximates an absolute disclaimer and
|
||||||
|
waiver of all liability.
|
||||||
|
|
||||||
|
|
||||||
|
Section 6 -- Term and Termination.
|
||||||
|
|
||||||
|
a. This Public License applies for the term of the Copyright and
|
||||||
|
Similar Rights licensed here. However, if You fail to comply with
|
||||||
|
this Public License, then Your rights under this Public License
|
||||||
|
terminate automatically.
|
||||||
|
|
||||||
|
b. Where Your right to use the Licensed Material has terminated under
|
||||||
|
Section 6(a), it reinstates:
|
||||||
|
|
||||||
|
1. automatically as of the date the violation is cured, provided
|
||||||
|
it is cured within 30 days of Your discovery of the
|
||||||
|
violation; or
|
||||||
|
|
||||||
|
2. upon express reinstatement by the Licensor.
|
||||||
|
|
||||||
|
For the avoidance of doubt, this Section 6(b) does not affect any
|
||||||
|
right the Licensor may have to seek remedies for Your violations
|
||||||
|
of this Public License.
|
||||||
|
|
||||||
|
c. For the avoidance of doubt, the Licensor may also offer the
|
||||||
|
Licensed Material under separate terms or conditions or stop
|
||||||
|
distributing the Licensed Material at any time; however, doing so
|
||||||
|
will not terminate this Public License.
|
||||||
|
|
||||||
|
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
|
||||||
|
License.
|
||||||
|
|
||||||
|
|
||||||
|
Section 7 -- Other Terms and Conditions.
|
||||||
|
|
||||||
|
a. The Licensor shall not be bound by any additional or different
|
||||||
|
terms or conditions communicated by You unless expressly agreed.
|
||||||
|
|
||||||
|
b. Any arrangements, understandings, or agreements regarding the
|
||||||
|
Licensed Material not stated herein are separate from and
|
||||||
|
independent of the terms and conditions of this Public License.
|
||||||
|
|
||||||
|
|
||||||
|
Section 8 -- Interpretation.
|
||||||
|
|
||||||
|
a. For the avoidance of doubt, this Public License does not, and
|
||||||
|
shall not be interpreted to, reduce, limit, restrict, or impose
|
||||||
|
conditions on any use of the Licensed Material that could lawfully
|
||||||
|
be made without permission under this Public License.
|
||||||
|
|
||||||
|
b. To the extent possible, if any provision of this Public License is
|
||||||
|
deemed unenforceable, it shall be automatically reformed to the
|
||||||
|
minimum extent necessary to make it enforceable. If the provision
|
||||||
|
cannot be reformed, it shall be severed from this Public License
|
||||||
|
without affecting the enforceability of the remaining terms and
|
||||||
|
conditions.
|
||||||
|
|
||||||
|
c. No term or condition of this Public License will be waived and no
|
||||||
|
failure to comply consented to unless expressly agreed to by the
|
||||||
|
Licensor.
|
||||||
|
|
||||||
|
d. Nothing in this Public License constitutes or may be interpreted
|
||||||
|
as a limitation upon, or waiver of, any privileges and immunities
|
||||||
|
that apply to the Licensor or You, including from the legal
|
||||||
|
processes of any jurisdiction or authority.
|
||||||
|
|
||||||
|
|
||||||
|
=======================================================================
|
||||||
|
|
||||||
|
Creative Commons is not a party to its public licenses.
|
||||||
|
Notwithstanding, Creative Commons may elect to apply one of its public
|
||||||
|
licenses to material it publishes and in those instances will be
|
||||||
|
considered the "Licensor." Except for the limited purpose of indicating
|
||||||
|
that material is shared under a Creative Commons public license or as
|
||||||
|
otherwise permitted by the Creative Commons policies published at
|
||||||
|
creativecommons.org/policies, Creative Commons does not authorize the
|
||||||
|
use of the trademark "Creative Commons" or any other trademark or logo
|
||||||
|
of Creative Commons without its prior written consent including,
|
||||||
|
without limitation, in connection with any unauthorized modifications
|
||||||
|
to any of its public licenses or any other arrangements,
|
||||||
|
understandings, or agreements concerning use of licensed material. For
|
||||||
|
the avoidance of doubt, this paragraph does not form part of the public
|
||||||
|
licenses.
|
||||||
|
|
||||||
|
Creative Commons may be contacted at creativecommons.org.
|
||||||
28
vendor/github.com/docker/spdystream/MAINTAINERS
generated
vendored
Normal file
28
vendor/github.com/docker/spdystream/MAINTAINERS
generated
vendored
Normal file
|
|
@ -0,0 +1,28 @@
|
||||||
|
# Spdystream maintainers file
|
||||||
|
#
|
||||||
|
# This file describes who runs the docker/spdystream project and how.
|
||||||
|
# This is a living document - if you see something out of date or missing, speak up!
|
||||||
|
#
|
||||||
|
# It is structured to be consumable by both humans and programs.
|
||||||
|
# To extract its contents programmatically, use any TOML-compliant parser.
|
||||||
|
#
|
||||||
|
# This file is compiled into the MAINTAINERS file in docker/opensource.
|
||||||
|
#
|
||||||
|
[Org]
|
||||||
|
[Org."Core maintainers"]
|
||||||
|
people = [
|
||||||
|
"dmcgowan",
|
||||||
|
]
|
||||||
|
|
||||||
|
[people]
|
||||||
|
|
||||||
|
# A reference list of all people associated with the project.
|
||||||
|
# All other sections should refer to people by their canonical key
|
||||||
|
# in the people section.
|
||||||
|
|
||||||
|
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
|
||||||
|
|
||||||
|
[people.dmcgowan]
|
||||||
|
Name = "Derek McGowan"
|
||||||
|
Email = "derek@docker.com"
|
||||||
|
GitHub = "dmcgowan"
|
||||||
77
vendor/github.com/docker/spdystream/README.md
generated
vendored
Normal file
77
vendor/github.com/docker/spdystream/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,77 @@
|
||||||
|
# SpdyStream
|
||||||
|
|
||||||
|
A multiplexed stream library using spdy
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Client example (connecting to mirroring server without auth)
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"github.com/docker/spdystream"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
conn, err := net.Dial("tcp", "localhost:8080")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
spdyConn, err := spdystream.NewConnection(conn, false)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
go spdyConn.Serve(spdystream.NoOpStreamHandler)
|
||||||
|
stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
stream.Wait()
|
||||||
|
|
||||||
|
fmt.Fprint(stream, "Writing to stream")
|
||||||
|
|
||||||
|
buf := make([]byte, 25)
|
||||||
|
stream.Read(buf)
|
||||||
|
fmt.Println(string(buf))
|
||||||
|
|
||||||
|
stream.Close()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Server example (mirroring server without auth)
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/docker/spdystream"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
listener, err := net.Listen("tcp", "localhost:8080")
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
conn, err := listener.Accept()
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
spdyConn, err := spdystream.NewConnection(conn, true)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
go spdyConn.Serve(spdystream.MirrorStreamHandler)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Copyright and license
|
||||||
|
|
||||||
|
Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
|
||||||
959
vendor/github.com/docker/spdystream/connection.go
generated
vendored
Normal file
959
vendor/github.com/docker/spdystream/connection.go
generated
vendored
Normal file
|
|
@ -0,0 +1,959 @@
|
||||||
|
package spdystream
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/spdystream/spdy"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidStreamId = errors.New("Invalid stream id")
|
||||||
|
ErrTimeout = errors.New("Timeout occurred")
|
||||||
|
ErrReset = errors.New("Stream reset")
|
||||||
|
ErrWriteClosedStream = errors.New("Write on closed stream")
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
FRAME_WORKERS = 5
|
||||||
|
QUEUE_SIZE = 50
|
||||||
|
)
|
||||||
|
|
||||||
|
type StreamHandler func(stream *Stream)
|
||||||
|
|
||||||
|
type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
|
||||||
|
|
||||||
|
type idleAwareFramer struct {
|
||||||
|
f *spdy.Framer
|
||||||
|
conn *Connection
|
||||||
|
writeLock sync.Mutex
|
||||||
|
resetChan chan struct{}
|
||||||
|
setTimeoutLock sync.Mutex
|
||||||
|
setTimeoutChan chan time.Duration
|
||||||
|
timeout time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
|
||||||
|
iaf := &idleAwareFramer{
|
||||||
|
f: framer,
|
||||||
|
resetChan: make(chan struct{}, 2),
|
||||||
|
// setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
|
||||||
|
// the same time the connection is being closed
|
||||||
|
setTimeoutChan: make(chan time.Duration, 1),
|
||||||
|
}
|
||||||
|
return iaf
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *idleAwareFramer) monitor() {
|
||||||
|
var (
|
||||||
|
timer *time.Timer
|
||||||
|
expired <-chan time.Time
|
||||||
|
resetChan = i.resetChan
|
||||||
|
setTimeoutChan = i.setTimeoutChan
|
||||||
|
)
|
||||||
|
Loop:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case timeout := <-i.setTimeoutChan:
|
||||||
|
i.timeout = timeout
|
||||||
|
if timeout == 0 {
|
||||||
|
if timer != nil {
|
||||||
|
timer.Stop()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if timer == nil {
|
||||||
|
timer = time.NewTimer(timeout)
|
||||||
|
expired = timer.C
|
||||||
|
} else {
|
||||||
|
timer.Reset(timeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case <-resetChan:
|
||||||
|
if timer != nil && i.timeout > 0 {
|
||||||
|
timer.Reset(i.timeout)
|
||||||
|
}
|
||||||
|
case <-expired:
|
||||||
|
i.conn.streamCond.L.Lock()
|
||||||
|
streams := i.conn.streams
|
||||||
|
i.conn.streams = make(map[spdy.StreamId]*Stream)
|
||||||
|
i.conn.streamCond.Broadcast()
|
||||||
|
i.conn.streamCond.L.Unlock()
|
||||||
|
go func() {
|
||||||
|
for _, stream := range streams {
|
||||||
|
stream.resetStream()
|
||||||
|
}
|
||||||
|
i.conn.Close()
|
||||||
|
}()
|
||||||
|
case <-i.conn.closeChan:
|
||||||
|
if timer != nil {
|
||||||
|
timer.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start a goroutine to drain resetChan. This is needed because we've seen
|
||||||
|
// some unit tests with large numbers of goroutines get into a situation
|
||||||
|
// where resetChan fills up, at least 1 call to Write() is still trying to
|
||||||
|
// send to resetChan, the connection gets closed, and this case statement
|
||||||
|
// attempts to grab the write lock that Write() already has, causing a
|
||||||
|
// deadlock.
|
||||||
|
//
|
||||||
|
// See https://github.com/docker/spdystream/issues/49 for more details.
|
||||||
|
go func() {
|
||||||
|
for _ = range resetChan {
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for _ = range setTimeoutChan {
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
i.writeLock.Lock()
|
||||||
|
close(resetChan)
|
||||||
|
i.resetChan = nil
|
||||||
|
i.writeLock.Unlock()
|
||||||
|
|
||||||
|
i.setTimeoutLock.Lock()
|
||||||
|
close(i.setTimeoutChan)
|
||||||
|
i.setTimeoutChan = nil
|
||||||
|
i.setTimeoutLock.Unlock()
|
||||||
|
|
||||||
|
break Loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drain resetChan
|
||||||
|
for _ = range resetChan {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
|
||||||
|
i.writeLock.Lock()
|
||||||
|
defer i.writeLock.Unlock()
|
||||||
|
if i.resetChan == nil {
|
||||||
|
return io.EOF
|
||||||
|
}
|
||||||
|
err := i.f.WriteFrame(frame)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
i.resetChan <- struct{}{}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
|
||||||
|
frame, err := i.f.ReadFrame()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// resetChan should never be closed since it is only closed
|
||||||
|
// when the connection has closed its closeChan. This closure
|
||||||
|
// only occurs after all Reads have finished
|
||||||
|
// TODO (dmcgowan): refactor relationship into connection
|
||||||
|
i.resetChan <- struct{}{}
|
||||||
|
|
||||||
|
return frame, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
|
||||||
|
i.setTimeoutLock.Lock()
|
||||||
|
defer i.setTimeoutLock.Unlock()
|
||||||
|
|
||||||
|
if i.setTimeoutChan == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
i.setTimeoutChan <- timeout
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connection is a multiplexed SPDY session over a single network connection.
type Connection struct {
	conn   net.Conn         // underlying transport
	framer *idleAwareFramer // frame reader/writer with idle-timeout tracking

	closeChan      chan bool      // closed when Serve's read loop exits
	goneAway       bool           // set once a GOAWAY has been sent or received
	lastStreamChan chan<- *Stream // receives the last good stream on remote GOAWAY (see NotifyClose)
	goAwayTimeout  time.Duration  // shutdown grace period after receiving GOAWAY
	closeTimeout   time.Duration  // shutdown grace period after Close

	streamLock *sync.RWMutex
	streamCond *sync.Cond // broadcast on stream add/remove; FindStream waits on it
	streams    map[spdy.StreamId]*Stream

	nextIdLock       sync.Mutex
	receiveIdLock    sync.Mutex
	nextStreamId     spdy.StreamId // next locally-initiated stream id (odd/even by role)
	receivedStreamId spdy.StreamId // lowest acceptable remote stream id

	pingIdLock sync.Mutex
	pingId     uint32
	pingChans  map[uint32]chan error // outstanding pings keyed by ping id

	shutdownLock sync.Mutex
	shutdownChan chan error // carries a shutdown error, closed when shutdown completes
	hasShutdown  bool

	// for testing https://github.com/docker/spdystream/pull/56
	dataFrameHandler func(*spdy.DataFrame) error
}
|
||||||
|
|
||||||
|
// NewConnection creates a new spdy connection from an existing
// network connection. The server flag selects the stream/ping id
// parity for this side (servers use even ids, clients odd).
func NewConnection(conn net.Conn, server bool) (*Connection, error) {
	framer, framerErr := spdy.NewFramer(conn, conn)
	if framerErr != nil {
		return nil, framerErr
	}
	idleAwareFramer := newIdleAwareFramer(framer)
	var sid spdy.StreamId
	var rid spdy.StreamId
	var pid uint32
	if server {
		// Server-initiated streams/pings are even; expect odd from the peer.
		sid = 2
		rid = 1
		pid = 2
	} else {
		sid = 1
		rid = 2
		pid = 1
	}

	streamLock := new(sync.RWMutex)
	streamCond := sync.NewCond(streamLock)

	session := &Connection{
		conn:   conn,
		framer: idleAwareFramer,

		closeChan:     make(chan bool),
		goAwayTimeout: time.Duration(0),
		closeTimeout:  time.Duration(0),

		streamLock:       streamLock,
		streamCond:       streamCond,
		streams:          make(map[spdy.StreamId]*Stream),
		nextStreamId:     sid,
		receivedStreamId: rid,

		pingId:    pid,
		pingChans: make(map[uint32]chan error),

		shutdownChan: make(chan error),
	}
	session.dataFrameHandler = session.handleDataFrame
	idleAwareFramer.conn = session
	// Start the idle-timeout monitor for this session.
	go idleAwareFramer.monitor()

	return session, nil
}
|
||||||
|
|
||||||
|
// Ping sends a ping frame across the connection and
|
||||||
|
// returns the response time
|
||||||
|
func (s *Connection) Ping() (time.Duration, error) {
|
||||||
|
pid := s.pingId
|
||||||
|
s.pingIdLock.Lock()
|
||||||
|
if s.pingId > 0x7ffffffe {
|
||||||
|
s.pingId = s.pingId - 0x7ffffffe
|
||||||
|
} else {
|
||||||
|
s.pingId = s.pingId + 2
|
||||||
|
}
|
||||||
|
s.pingIdLock.Unlock()
|
||||||
|
pingChan := make(chan error)
|
||||||
|
s.pingChans[pid] = pingChan
|
||||||
|
defer delete(s.pingChans, pid)
|
||||||
|
|
||||||
|
frame := &spdy.PingFrame{Id: pid}
|
||||||
|
startTime := time.Now()
|
||||||
|
writeErr := s.framer.WriteFrame(frame)
|
||||||
|
if writeErr != nil {
|
||||||
|
return time.Duration(0), writeErr
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-s.closeChan:
|
||||||
|
return time.Duration(0), errors.New("connection closed")
|
||||||
|
case err, ok := <-pingChan:
|
||||||
|
if ok && err != nil {
|
||||||
|
return time.Duration(0), err
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return time.Now().Sub(startTime), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serve handles frames sent from the server, including reply frames
// which are needed to fully initiate connections. Both clients and servers
// should call Serve in a separate goroutine before creating streams.
func (s *Connection) Serve(newHandler StreamHandler) {
	// use a WaitGroup to wait for all frames to be drained after receiving
	// go-away.
	var wg sync.WaitGroup

	// Partition queues to ensure stream frames are handled
	// by the same worker, ensuring order is maintained
	frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
	for i := 0; i < FRAME_WORKERS; i++ {
		frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)

		// Ensure frame queue is drained when connection is closed
		go func(frameQueue *PriorityFrameQueue) {
			<-s.closeChan
			frameQueue.Drain()
		}(frameQueues[i])

		wg.Add(1)
		go func(frameQueue *PriorityFrameQueue) {
			// let the WaitGroup know this worker is done
			defer wg.Done()

			s.frameHandler(frameQueue, newHandler)
		}(frameQueues[i])
	}

	var (
		partitionRoundRobin int
		goAwayFrame         *spdy.GoAwayFrame
	)
Loop:
	for {
		readFrame, err := s.framer.ReadFrame()
		if err != nil {
			if err != io.EOF {
				debugMessage("frame read error: %s", err)
			} else {
				debugMessage("(%p) EOF received", s)
			}
			break
		}
		var priority uint8
		var partition int
		// Stream-bound frames are partitioned by stream id so a single
		// worker sees all frames of a stream, preserving per-stream order.
		// Frames without a stream go round-robin across workers.
		switch frame := readFrame.(type) {
		case *spdy.SynStreamFrame:
			if s.checkStreamFrame(frame) {
				priority = frame.Priority
				partition = int(frame.StreamId % FRAME_WORKERS)
				debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
				// Register the stream synchronously so later frames for it
				// (possibly handled by another worker) can find it.
				s.addStreamFrame(frame)
			} else {
				debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
				continue
			}
		case *spdy.SynReplyFrame:
			priority = s.getStreamPriority(frame.StreamId)
			partition = int(frame.StreamId % FRAME_WORKERS)
		case *spdy.DataFrame:
			priority = s.getStreamPriority(frame.StreamId)
			partition = int(frame.StreamId % FRAME_WORKERS)
		case *spdy.RstStreamFrame:
			priority = s.getStreamPriority(frame.StreamId)
			partition = int(frame.StreamId % FRAME_WORKERS)
		case *spdy.HeadersFrame:
			priority = s.getStreamPriority(frame.StreamId)
			partition = int(frame.StreamId % FRAME_WORKERS)
		case *spdy.PingFrame:
			// Pings get top priority (0) so latency measurement is accurate.
			priority = 0
			partition = partitionRoundRobin
			partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
		case *spdy.GoAwayFrame:
			// hold on to the go away frame and exit the loop
			goAwayFrame = frame
			break Loop
		default:
			priority = 7
			partition = partitionRoundRobin
			partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
		}
		frameQueues[partition].Push(readFrame, priority)
	}
	// Signal shutdown: drains worker queues and unblocks CloseChan waiters.
	close(s.closeChan)

	// wait for all frame handler workers to indicate they've drained their queues
	// before handling the go away frame
	wg.Wait()

	if goAwayFrame != nil {
		s.handleGoAwayFrame(goAwayFrame)
	}

	// now it's safe to close remote channels and empty s.streams
	s.streamCond.L.Lock()
	// notify streams that they're now closed, which will
	// unblock any stream Read() calls
	for _, stream := range s.streams {
		stream.closeRemoteChannels()
	}
	s.streams = make(map[spdy.StreamId]*Stream)
	s.streamCond.Broadcast()
	s.streamCond.L.Unlock()
}
|
||||||
|
|
||||||
|
func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
|
||||||
|
for {
|
||||||
|
popFrame := frameQueue.Pop()
|
||||||
|
if popFrame == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var frameErr error
|
||||||
|
switch frame := popFrame.(type) {
|
||||||
|
case *spdy.SynStreamFrame:
|
||||||
|
frameErr = s.handleStreamFrame(frame, newHandler)
|
||||||
|
case *spdy.SynReplyFrame:
|
||||||
|
frameErr = s.handleReplyFrame(frame)
|
||||||
|
case *spdy.DataFrame:
|
||||||
|
frameErr = s.dataFrameHandler(frame)
|
||||||
|
case *spdy.RstStreamFrame:
|
||||||
|
frameErr = s.handleResetFrame(frame)
|
||||||
|
case *spdy.HeadersFrame:
|
||||||
|
frameErr = s.handleHeaderFrame(frame)
|
||||||
|
case *spdy.PingFrame:
|
||||||
|
frameErr = s.handlePingFrame(frame)
|
||||||
|
case *spdy.GoAwayFrame:
|
||||||
|
frameErr = s.handleGoAwayFrame(frame)
|
||||||
|
default:
|
||||||
|
frameErr = fmt.Errorf("unhandled frame type: %T", frame)
|
||||||
|
}
|
||||||
|
|
||||||
|
if frameErr != nil {
|
||||||
|
debugMessage("frame handling error: %s", frameErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
|
||||||
|
stream, streamOk := s.getStream(streamId)
|
||||||
|
if !streamOk {
|
||||||
|
return 7
|
||||||
|
}
|
||||||
|
return stream.priority
|
||||||
|
}
|
||||||
|
|
||||||
|
// addStreamFrame builds a Stream from a remote SYN_STREAM frame and
// registers it in the connection's stream map.
func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
	var parent *Stream
	if frame.AssociatedToStreamId != spdy.StreamId(0) {
		parent, _ = s.getStream(frame.AssociatedToStreamId)
	}

	stream := &Stream{
		streamId:  frame.StreamId,
		parent:    parent,
		conn:      s,
		startChan: make(chan error),
		headers:   frame.Headers,
		// A unidirectional stream is already finished from our side.
		finished:   (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
		replyCond:  sync.NewCond(new(sync.Mutex)),
		dataChan:   make(chan []byte),
		headerChan: make(chan http.Header),
		closeChan:  make(chan bool),
		priority:   frame.Priority,
	}
	if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
		// Remote half-closed immediately; no data/headers will follow.
		stream.closeRemoteChannels()
	}

	s.addStream(stream)
}
|
||||||
|
|
||||||
|
// checkStreamFrame checks to see if a stream frame is allowed.
// If the stream is invalid, then a reset frame with protocol error
// will be returned.
func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
	s.receiveIdLock.Lock()
	defer s.receiveIdLock.Unlock()
	if s.goneAway {
		// No new streams are accepted after GOAWAY.
		return false
	}
	validationErr := s.validateStreamId(frame.StreamId)
	if validationErr != nil {
		// Send the reset asynchronously so the read loop is not blocked
		// by the write.
		go func() {
			resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
			if resetErr != nil {
				debugMessage("reset error: %s", resetErr)
			}
		}()
		return false
	}
	return true
}
|
||||||
|
|
||||||
|
func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
|
||||||
|
stream, ok := s.getStream(frame.StreamId)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Missing stream: %d", frame.StreamId)
|
||||||
|
}
|
||||||
|
|
||||||
|
newHandler(stream)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleReplyFrame processes a SYN_REPLY for a locally-initiated stream,
// marking it replied and unblocking the stream's Wait via startChan.
func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
	debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
	stream, streamOk := s.getStream(frame.StreamId)
	if !streamOk {
		debugMessage("Reply frame gone away for %d", frame.StreamId)
		// Stream has already gone away
		return nil
	}
	if stream.replied {
		// Stream has already received reply
		return nil
	}
	stream.replied = true

	// TODO Check for error
	if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
		// FIN on the reply: remote side is done sending.
		s.remoteStreamFinish(stream)
	}

	// Unblock stream.Wait()/WaitTimeout() callers.
	close(stream.startChan)

	return nil
}
|
||||||
|
|
||||||
|
// handleResetFrame tears down a stream in response to a remote RST_STREAM:
// removes it, closes its remote channels, fails any pending Wait with
// ErrReset, and marks it finished.
func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
	stream, streamOk := s.getStream(frame.StreamId)
	if !streamOk {
		// Stream has already been removed
		return nil
	}
	s.removeStream(stream)
	stream.closeRemoteChannels()

	if !stream.replied {
		stream.replied = true
		// Deliver the reset error to a blocked Wait, then close so
		// later waiters see the channel closed.
		stream.startChan <- ErrReset
		close(stream.startChan)
	}

	stream.finishLock.Lock()
	stream.finished = true
	stream.finishLock.Unlock()

	return nil
}
|
||||||
|
|
||||||
|
// handleHeaderFrame delivers trailing HEADERS to the stream's header
// channel, unless the stream is gone or unreplied.
func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
	stream, streamOk := s.getStream(frame.StreamId)
	if !streamOk {
		// Stream has already gone away
		return nil
	}
	if !stream.replied {
		// No reply received...Protocol error?
		return nil
	}

	// TODO limit headers while not blocking (use buffered chan or goroutine?)
	select {
	case <-stream.closeChan:
		// Stream shut down before the headers could be delivered.
		return nil
	case stream.headerChan <- frame.Headers:
	}

	if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
		s.remoteStreamFinish(stream)
	}

	return nil
}
|
||||||
|
|
||||||
|
// handleDataFrame delivers a DATA frame's payload to the stream's data
// channel, and finishes the remote side of the stream on FIN.
func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
	debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
	stream, streamOk := s.getStream(frame.StreamId)
	if !streamOk {
		debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
		// Stream has already gone away
		return nil
	}
	if !stream.replied {
		debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
		// No reply received...Protocol error?
		return nil
	}

	debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
	if len(frame.Data) > 0 {
		// dataLock guards against the channel being torn down while we
		// attempt the send; closeChan aborts if the stream shuts down.
		stream.dataLock.RLock()
		select {
		case <-stream.closeChan:
			debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
		case stream.dataChan <- frame.Data:
			debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
		}
		stream.dataLock.RUnlock()
	}
	if (frame.Flags & spdy.DataFlagFin) != 0x00 {
		s.remoteStreamFinish(stream)
	}
	return nil
}
|
||||||
|
|
||||||
|
func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
|
||||||
|
if s.pingId&0x01 != frame.Id&0x01 {
|
||||||
|
return s.framer.WriteFrame(frame)
|
||||||
|
}
|
||||||
|
pingChan, pingOk := s.pingChans[frame.Id]
|
||||||
|
if pingOk {
|
||||||
|
close(pingChan)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleGoAwayFrame marks the connection as gone away, notifies the
// NotifyClose channel (if registered) with the last good stream, and
// starts an asynchronous shutdown using the go-away timeout.
func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
	debugMessage("(%p) Go away received", s)
	s.receiveIdLock.Lock()
	if s.goneAway {
		// Already processed a GOAWAY; ignore duplicates.
		s.receiveIdLock.Unlock()
		return nil
	}
	s.goneAway = true
	s.receiveIdLock.Unlock()

	if s.lastStreamChan != nil {
		stream, _ := s.getStream(frame.LastGoodStreamId)
		// Send asynchronously so a slow NotifyClose consumer cannot
		// block frame handling.
		go func() {
			s.lastStreamChan <- stream
		}()
	}

	// Do not block frame handler waiting for closure
	go s.shutdown(s.goAwayTimeout)

	return nil
}
|
||||||
|
|
||||||
|
// remoteStreamFinish closes the stream's remote (incoming) channels and,
// if the local side has also finished, removes the stream entirely.
func (s *Connection) remoteStreamFinish(stream *Stream) {
	stream.closeRemoteChannels()

	stream.finishLock.Lock()
	if stream.finished {
		// Stream is fully closed, cleanup
		s.removeStream(stream)
	}
	stream.finishLock.Unlock()
}
|
||||||
|
|
||||||
|
// CreateStream creates a new spdy stream using the parameters for
// creating the stream frame. The stream frame will be sent upon
// calling this function, however this function does not wait for
// the reply frame. If waiting for the reply is desired, use
// the stream Wait or WaitTimeout function on the stream returned
// by this function.
func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
	// MUST synchronize stream creation (all the way to writing the frame)
	// as stream IDs **MUST** increase monotonically.
	s.nextIdLock.Lock()
	defer s.nextIdLock.Unlock()

	streamId := s.getNextStreamId()
	if streamId == 0 {
		// Stream id space exhausted (id would exceed 31 bits).
		return nil, fmt.Errorf("Unable to get new stream id")
	}

	stream := &Stream{
		streamId:   streamId,
		parent:     parent,
		conn:       s,
		startChan:  make(chan error),
		headers:    headers,
		dataChan:   make(chan []byte),
		headerChan: make(chan http.Header),
		closeChan:  make(chan bool),
	}

	debugMessage("(%p) (%p) Create stream", s, stream)

	// Register before sending so an immediate reply can find the stream.
	s.addStream(stream)

	return stream, s.sendStream(stream, fin)
}
|
||||||
|
|
||||||
|
// shutdown closes the underlying network connection once all streams have
// finished, or forcefully after closeTimeout (0 means wait forever). Any
// close error is delivered on shutdownChan, which is closed at the end.
// Safe to call more than once; only the first call proceeds.
func (s *Connection) shutdown(closeTimeout time.Duration) {
	// TODO Ensure this isn't called multiple times
	s.shutdownLock.Lock()
	if s.hasShutdown {
		s.shutdownLock.Unlock()
		return
	}
	s.hasShutdown = true
	s.shutdownLock.Unlock()

	// A nil timeout channel blocks forever in the select below, which
	// implements the "wait indefinitely" behaviour for a zero timeout.
	var timeout <-chan time.Time
	if closeTimeout > time.Duration(0) {
		timeout = time.After(closeTimeout)
	}
	streamsClosed := make(chan bool)

	// Wait (on the stream condition variable) for the stream map to empty.
	go func() {
		s.streamCond.L.Lock()
		for len(s.streams) > 0 {
			debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
			s.streamCond.Wait()
		}
		s.streamCond.L.Unlock()
		close(streamsClosed)
	}()

	var err error
	select {
	case <-streamsClosed:
		// No active streams, close should be safe
		err = s.conn.Close()
	case <-timeout:
		// Force ungraceful close
		err = s.conn.Close()
		// Wait for cleanup to clear active streams
		<-streamsClosed
	}

	if err != nil {
		// If nobody consumes the error within 10 minutes, log it so it
		// isn't silently lost, then the send below can complete.
		duration := 10 * time.Minute
		time.AfterFunc(duration, func() {
			select {
			case err, ok := <-s.shutdownChan:
				if ok {
					debugMessage("Unhandled close error after %s: %s", duration, err)
				}
			default:
			}
		})
		s.shutdownChan <- err
	}
	// Closing signals Wait/CloseWait that shutdown has completed.
	close(s.shutdownChan)

	return
}
|
||||||
|
|
||||||
|
// Close closes the spdy connection by sending a GoAway frame and
// initiating shutdown. Idempotent: returns nil if already gone away.
func (s *Connection) Close() error {
	s.receiveIdLock.Lock()
	if s.goneAway {
		s.receiveIdLock.Unlock()
		return nil
	}
	s.goneAway = true
	s.receiveIdLock.Unlock()

	// receivedStreamId is the NEXT acceptable remote id, so the last
	// good stream is two below it; 0 if no remote stream was accepted.
	var lastStreamId spdy.StreamId
	if s.receivedStreamId > 2 {
		lastStreamId = s.receivedStreamId - 2
	}

	goAwayFrame := &spdy.GoAwayFrame{
		LastGoodStreamId: lastStreamId,
		Status:           spdy.GoAwayOK,
	}

	err := s.framer.WriteFrame(goAwayFrame)
	if err != nil {
		return err
	}

	// Shutdown asynchronously; use Wait/CloseWait to observe completion.
	go s.shutdown(s.closeTimeout)

	return nil
}
|
||||||
|
|
||||||
|
// CloseWait closes the connection and waits for shutdown
|
||||||
|
// to finish. Note the underlying network Connection
|
||||||
|
// is not closed until the end of shutdown.
|
||||||
|
func (s *Connection) CloseWait() error {
|
||||||
|
closeErr := s.Close()
|
||||||
|
if closeErr != nil {
|
||||||
|
return closeErr
|
||||||
|
}
|
||||||
|
shutdownErr, ok := <-s.shutdownChan
|
||||||
|
if ok {
|
||||||
|
return shutdownErr
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait waits for the connection to finish shutdown or for
// the wait timeout duration to expire. This needs to be
// called either after Close has been called or the GOAWAYFRAME
// has been received. If the wait timeout is 0, this function
// will block until shutdown finishes. If wait is never called
// and a shutdown error occurs, that error will be logged as an
// unhandled error.
func (s *Connection) Wait(waitTimeout time.Duration) error {
	// A nil timeout channel never fires, so a zero waitTimeout blocks
	// the select until shutdownChan yields.
	var timeout <-chan time.Time
	if waitTimeout > time.Duration(0) {
		timeout = time.After(waitTimeout)
	}

	select {
	case err, ok := <-s.shutdownChan:
		if ok {
			return err
		}
	case <-timeout:
		return ErrTimeout
	}
	return nil
}
|
||||||
|
|
||||||
|
// NotifyClose registers a channel to be called when the remote
|
||||||
|
// peer inidicates connection closure. The last stream to be
|
||||||
|
// received by the remote will be sent on the channel. The notify
|
||||||
|
// timeout will determine the duration between go away received
|
||||||
|
// and the connection being closed.
|
||||||
|
func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
|
||||||
|
s.goAwayTimeout = timeout
|
||||||
|
s.lastStreamChan = c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCloseTimeout sets the amount of time close will wait for
// streams to finish before terminating the underlying network
// connection. Setting the timeout to 0 will cause close to
// wait forever, which is the default.
func (s *Connection) SetCloseTimeout(timeout time.Duration) {
	s.closeTimeout = timeout
}
|
||||||
|
|
||||||
|
// SetIdleTimeout sets the amount of time the connection may sit idle before
// it is forcefully terminated.
func (s *Connection) SetIdleTimeout(timeout time.Duration) {
	// Delegates to the idle-aware framer's monitor goroutine.
	s.framer.setIdleTimeout(timeout)
}
|
||||||
|
|
||||||
|
func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
|
||||||
|
var flags spdy.ControlFlags
|
||||||
|
if fin {
|
||||||
|
flags = spdy.ControlFlagFin
|
||||||
|
}
|
||||||
|
|
||||||
|
headerFrame := &spdy.HeadersFrame{
|
||||||
|
StreamId: stream.streamId,
|
||||||
|
Headers: headers,
|
||||||
|
CFHeader: spdy.ControlFrameHeader{Flags: flags},
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.framer.WriteFrame(headerFrame)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
|
||||||
|
var flags spdy.ControlFlags
|
||||||
|
if fin {
|
||||||
|
flags = spdy.ControlFlagFin
|
||||||
|
}
|
||||||
|
|
||||||
|
replyFrame := &spdy.SynReplyFrame{
|
||||||
|
StreamId: stream.streamId,
|
||||||
|
Headers: headers,
|
||||||
|
CFHeader: spdy.ControlFrameHeader{Flags: flags},
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.framer.WriteFrame(replyFrame)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
|
||||||
|
resetFrame := &spdy.RstStreamFrame{
|
||||||
|
StreamId: streamId,
|
||||||
|
Status: status,
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.framer.WriteFrame(resetFrame)
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendReset sends a RST_STREAM frame for the given stream.
func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
	return s.sendResetFrame(status, stream.streamId)
}
|
||||||
|
|
||||||
|
// sendStream writes the SYN_STREAM frame that opens the stream on the
// wire. When fin is true the local side is marked finished and the FIN
// control flag is set on the frame.
func (s *Connection) sendStream(stream *Stream, fin bool) error {
	var flags spdy.ControlFlags
	if fin {
		flags = spdy.ControlFlagFin
		// Local half-close: no further frames will be sent on this stream.
		stream.finished = true
	}

	var parentId spdy.StreamId
	if stream.parent != nil {
		parentId = stream.parent.streamId
	}

	streamFrame := &spdy.SynStreamFrame{
		StreamId:             spdy.StreamId(stream.streamId),
		AssociatedToStreamId: spdy.StreamId(parentId),
		Headers:              stream.headers,
		CFHeader:             spdy.ControlFrameHeader{Flags: flags},
	}

	return s.framer.WriteFrame(streamFrame)
}
|
||||||
|
|
||||||
|
// getNextStreamId returns the next sequential id
|
||||||
|
// every call should produce a unique value or an error
|
||||||
|
func (s *Connection) getNextStreamId() spdy.StreamId {
|
||||||
|
sid := s.nextStreamId
|
||||||
|
if sid > 0x7fffffff {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
s.nextStreamId = s.nextStreamId + 2
|
||||||
|
return sid
|
||||||
|
}
|
||||||
|
|
||||||
|
// PeekNextStreamId returns the next sequential id and keeps the next id untouched
|
||||||
|
func (s *Connection) PeekNextStreamId() spdy.StreamId {
|
||||||
|
sid := s.nextStreamId
|
||||||
|
return sid
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateStreamId checks that a remote stream id is in range and
// monotonically increasing, then advances receivedStreamId past it.
// Callers must hold receiveIdLock.
func (s *Connection) validateStreamId(rid spdy.StreamId) error {
	if rid > 0x7fffffff || rid < s.receivedStreamId {
		return ErrInvalidStreamId
	}
	// Next acceptable remote id keeps the peer's odd/even parity.
	s.receivedStreamId = rid + 2
	return nil
}
|
||||||
|
|
||||||
|
// addStream registers a stream in the connection map and broadcasts on
// the stream condition so blocked FindStream callers re-check.
func (s *Connection) addStream(stream *Stream) {
	s.streamCond.L.Lock()
	s.streams[stream.streamId] = stream
	debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
	s.streamCond.Broadcast()
	s.streamCond.L.Unlock()
}
|
||||||
|
|
||||||
|
// removeStream deletes a stream from the connection map and broadcasts
// on the stream condition so shutdown's empty-map wait can re-check.
func (s *Connection) removeStream(stream *Stream) {
	s.streamCond.L.Lock()
	delete(s.streams, stream.streamId)
	debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
	s.streamCond.Broadcast()
	s.streamCond.L.Unlock()
}
|
||||||
|
|
||||||
|
func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
|
||||||
|
s.streamLock.RLock()
|
||||||
|
stream, ok = s.streams[streamId]
|
||||||
|
s.streamLock.RUnlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindStream looks up the given stream id and either waits for the
// stream to be found or returns nil if the stream id is no longer
// valid.
func (s *Connection) FindStream(streamId uint32) *Stream {
	var stream *Stream
	var ok bool
	s.streamCond.L.Lock()
	stream, ok = s.streams[spdy.StreamId(streamId)]
	debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
	// While the id is still ahead of what the peer has sent, wait for
	// addStream's broadcast; once receivedStreamId passes it, the id
	// can never appear and we give up.
	for !ok && streamId >= uint32(s.receivedStreamId) {
		s.streamCond.Wait()
		stream, ok = s.streams[spdy.StreamId(streamId)]
	}
	s.streamCond.L.Unlock()
	return stream
}
|
||||||
|
|
||||||
|
// CloseChan returns a channel that is closed when the connection's
// read loop exits.
func (s *Connection) CloseChan() <-chan bool {
	return s.closeChan
}
|
||||||
36
vendor/github.com/docker/spdystream/handlers.go
generated
vendored
Normal file
36
vendor/github.com/docker/spdystream/handlers.go
generated
vendored
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
package spdystream
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MirrorStreamHandler mirrors all streams: it replies to the stream,
// echoes all received data back to the sender, and reflects any
// received headers.
func MirrorStreamHandler(stream *Stream) {
	replyErr := stream.SendReply(http.Header{}, false)
	if replyErr != nil {
		return
	}

	// Echo data: read from the stream and write straight back.
	go func() {
		io.Copy(stream, stream)
		stream.Close()
	}()
	// Echo headers until the stream ends.
	go func() {
		for {
			header, receiveErr := stream.ReceiveHeader()
			if receiveErr != nil {
				return
			}
			sendErr := stream.SendHeader(header, false)
			if sendErr != nil {
				return
			}
		}
	}()
}
|
||||||
|
|
||||||
|
// NoOpStreamHandler replies to the stream and does nothing else when a
// stream connects.
func NoOpStreamHandler(stream *Stream) {
	stream.SendReply(http.Header{}, false)
}
|
||||||
98
vendor/github.com/docker/spdystream/priority.go
generated
vendored
Normal file
98
vendor/github.com/docker/spdystream/priority.go
generated
vendored
Normal file
|
|
@ -0,0 +1,98 @@
|
||||||
|
package spdystream
|
||||||
|
|
||||||
|
import (
|
||||||
|
"container/heap"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/docker/spdystream/spdy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// prioritizedFrame pairs a frame with its priority and an insertion
// sequence number used to keep FIFO order among equal priorities.
type prioritizedFrame struct {
	frame    spdy.Frame
	priority uint8
	insertId uint64 // monotonically increasing insertion counter
}
|
||||||
|
|
||||||
|
// frameQueue is a min-heap of prioritized frames (implements heap.Interface).
type frameQueue []*prioritizedFrame
|
||||||
|
|
||||||
|
// Len implements heap.Interface.
func (fq frameQueue) Len() int {
	return len(fq)
}
|
||||||
|
|
||||||
|
func (fq frameQueue) Less(i, j int) bool {
|
||||||
|
if fq[i].priority == fq[j].priority {
|
||||||
|
return fq[i].insertId < fq[j].insertId
|
||||||
|
}
|
||||||
|
return fq[i].priority < fq[j].priority
|
||||||
|
}
|
||||||
|
|
||||||
|
// Swap implements heap.Interface.
func (fq frameQueue) Swap(i, j int) {
	fq[i], fq[j] = fq[j], fq[i]
}
|
||||||
|
|
||||||
|
// Push implements heap.Interface; called only via heap.Push.
func (fq *frameQueue) Push(x interface{}) {
	*fq = append(*fq, x.(*prioritizedFrame))
}
|
||||||
|
|
||||||
|
func (fq *frameQueue) Pop() interface{} {
|
||||||
|
old := *fq
|
||||||
|
n := len(old)
|
||||||
|
*fq = old[0 : n-1]
|
||||||
|
return old[n-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// PriorityFrameQueue is a bounded, blocking priority queue of frames.
// Push blocks when full; Pop blocks when empty until Drain is called.
type PriorityFrameQueue struct {
	queue        *frameQueue
	c            *sync.Cond // guards all fields; signals space/items/drain
	size         int        // maximum number of queued frames
	nextInsertId uint64     // next FIFO tiebreaker for equal priorities
	drain        bool       // once set, Pop returns nil when the queue empties
}
|
||||||
|
|
||||||
|
// NewPriorityFrameQueue creates a priority frame queue holding at most
// size frames.
func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
	queue := make(frameQueue, 0, size)
	heap.Init(&queue)

	return &PriorityFrameQueue{
		queue: &queue,
		size:  size,
		c:     sync.NewCond(&sync.Mutex{}),
	}
}
|
||||||
|
|
||||||
|
// Push enqueues a frame with the given priority, blocking while the
// queue is at capacity.
func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
	q.c.L.Lock()
	defer q.c.L.Unlock()
	for q.queue.Len() >= q.size {
		// Wait for Pop to make room.
		q.c.Wait()
	}
	pFrame := &prioritizedFrame{
		frame:    frame,
		priority: priority,
		insertId: q.nextInsertId,
	}
	q.nextInsertId = q.nextInsertId + 1
	heap.Push(q.queue, pFrame)
	// Wake a waiting Pop.
	q.c.Signal()
}
|
||||||
|
|
||||||
|
// Pop dequeues the highest-priority frame, blocking while the queue is
// empty. Returns nil once the queue is empty and draining.
func (q *PriorityFrameQueue) Pop() spdy.Frame {
	q.c.L.Lock()
	defer q.c.L.Unlock()
	for q.queue.Len() == 0 {
		if q.drain {
			// Drained and empty: signal the consumer to stop.
			return nil
		}
		q.c.Wait()
	}
	frame := heap.Pop(q.queue).(*prioritizedFrame).frame
	// Wake a Push blocked on capacity.
	q.c.Signal()
	return frame
}
|
||||||
|
|
||||||
|
// Drain puts the queue into drain mode: once empty, Pop returns nil.
// All waiters are woken so they observe the state change.
func (q *PriorityFrameQueue) Drain() {
	q.c.L.Lock()
	defer q.c.L.Unlock()
	q.drain = true
	q.c.Broadcast()
}
|
||||||
187
vendor/github.com/docker/spdystream/spdy/dictionary.go
generated
vendored
Normal file
187
vendor/github.com/docker/spdystream/spdy/dictionary.go
generated
vendored
Normal file
|
|
@ -0,0 +1,187 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package spdy
|
||||||
|
|
||||||
|
// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
|
||||||
|
var headerDictionary = []byte{
|
||||||
|
0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
|
||||||
|
0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
|
||||||
|
0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
|
||||||
|
0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
|
||||||
|
0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
|
||||||
|
0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
|
||||||
|
0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
|
||||||
|
0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
|
||||||
|
0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
|
||||||
|
0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
|
||||||
|
0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
|
||||||
|
0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
|
||||||
|
0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
|
||||||
|
0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
|
||||||
|
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
|
||||||
|
0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
|
||||||
|
0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
|
||||||
|
0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
|
||||||
|
0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
|
||||||
|
0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
|
||||||
|
0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
|
||||||
|
0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
|
||||||
|
0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
|
||||||
|
0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
|
||||||
|
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
|
||||||
|
0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
|
||||||
|
0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
|
||||||
|
0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
|
||||||
|
0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
|
||||||
|
0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
|
||||||
|
0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
|
||||||
|
0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
|
||||||
|
0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
|
||||||
|
0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
|
||||||
|
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
|
||||||
|
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
|
||||||
|
0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
|
||||||
|
0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
|
||||||
|
0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
|
||||||
|
0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
|
||||||
|
0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
|
||||||
|
0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
|
||||||
|
0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
|
||||||
|
0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
|
||||||
|
0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
|
||||||
|
0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
|
||||||
|
0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
|
||||||
|
0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
|
||||||
|
0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
|
||||||
|
0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
|
||||||
|
0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
|
||||||
|
0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
|
||||||
|
0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
|
||||||
|
0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
|
||||||
|
0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
|
||||||
|
0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
|
||||||
|
0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
|
||||||
|
0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
|
||||||
|
0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
|
||||||
|
0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
|
||||||
|
0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
|
||||||
|
0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
|
||||||
|
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
|
||||||
|
0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
|
||||||
|
0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
|
||||||
|
0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
|
||||||
|
0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
|
||||||
|
0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
|
||||||
|
0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
|
||||||
|
0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
|
||||||
|
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
|
||||||
|
0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
|
||||||
|
0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
|
||||||
|
0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
|
||||||
|
0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
|
||||||
|
0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
|
||||||
|
0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
|
||||||
|
0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
|
||||||
|
0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
|
||||||
|
0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
|
||||||
|
0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
|
||||||
|
0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
|
||||||
|
0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
|
||||||
|
0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
|
||||||
|
0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
|
||||||
|
0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
|
||||||
|
0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
|
||||||
|
0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
|
||||||
|
0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
|
||||||
|
0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
|
||||||
|
0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
|
||||||
|
0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
|
||||||
|
0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
|
||||||
|
0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
|
||||||
|
0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
|
||||||
|
0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
|
||||||
|
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
|
||||||
|
0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
|
||||||
|
0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
|
||||||
|
0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
|
||||||
|
0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
|
||||||
|
0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
|
||||||
|
0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
|
||||||
|
0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
|
||||||
|
0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
|
||||||
|
0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
|
||||||
|
0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
|
||||||
|
0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
|
||||||
|
0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
|
||||||
|
0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
|
||||||
|
0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
|
||||||
|
0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
|
||||||
|
0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
|
||||||
|
0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
|
||||||
|
0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
|
||||||
|
0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
|
||||||
|
0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
|
||||||
|
0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
|
||||||
|
0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
|
||||||
|
0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
|
||||||
|
0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
|
||||||
|
0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
|
||||||
|
0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
|
||||||
|
0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
|
||||||
|
0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
|
||||||
|
0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
|
||||||
|
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
|
||||||
|
0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
|
||||||
|
0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
|
||||||
|
0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
|
||||||
|
0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
|
||||||
|
0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
|
||||||
|
0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
|
||||||
|
0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
|
||||||
|
0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
|
||||||
|
0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
|
||||||
|
0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
|
||||||
|
0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
|
||||||
|
0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
|
||||||
|
0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
|
||||||
|
0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
|
||||||
|
0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
|
||||||
|
0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
|
||||||
|
0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
|
||||||
|
0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
|
||||||
|
0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
|
||||||
|
0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
|
||||||
|
0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
|
||||||
|
0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
|
||||||
|
0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
|
||||||
|
0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
|
||||||
|
0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
|
||||||
|
0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
|
||||||
|
0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
|
||||||
|
0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
|
||||||
|
0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
|
||||||
|
0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
|
||||||
|
0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
|
||||||
|
0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
|
||||||
|
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
|
||||||
|
0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
|
||||||
|
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
|
||||||
|
0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
|
||||||
|
0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
|
||||||
|
0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
|
||||||
|
0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
|
||||||
|
0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
|
||||||
|
0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
|
||||||
|
0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
|
||||||
|
0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
|
||||||
|
0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
|
||||||
|
0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
|
||||||
|
0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
|
||||||
|
0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
|
||||||
|
0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
|
||||||
|
0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
|
||||||
|
0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
|
||||||
|
}
|
||||||
348
vendor/github.com/docker/spdystream/spdy/read.go
generated
vendored
Normal file
348
vendor/github.com/docker/spdystream/spdy/read.go
generated
vendored
Normal file
|
|
@ -0,0 +1,348 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package spdy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/zlib"
|
||||||
|
"encoding/binary"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
|
||||||
|
return f.readSynStreamFrame(h, frame)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
|
||||||
|
return f.readSynReplyFrame(h, frame)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
|
||||||
|
frame.CFHeader = h
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if frame.Status == 0 {
|
||||||
|
return &Error{InvalidControlFrame, frame.StreamId}
|
||||||
|
}
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
|
||||||
|
frame.CFHeader = h
|
||||||
|
var numSettings uint32
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
|
||||||
|
for i := uint32(0); i < numSettings; i++ {
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
|
||||||
|
frame.FlagIdValues[i].Id &= 0xffffff
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
|
||||||
|
frame.CFHeader = h
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if frame.Id == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
if frame.CFHeader.Flags != 0 {
|
||||||
|
return &Error{InvalidControlFrame, StreamId(frame.Id)}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
|
||||||
|
frame.CFHeader = h
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if frame.CFHeader.Flags != 0 {
|
||||||
|
return &Error{InvalidControlFrame, frame.LastGoodStreamId}
|
||||||
|
}
|
||||||
|
if frame.CFHeader.length != 8 {
|
||||||
|
return &Error{InvalidControlFrame, frame.LastGoodStreamId}
|
||||||
|
}
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
|
||||||
|
return f.readHeadersFrame(h, frame)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
|
||||||
|
frame.CFHeader = h
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if frame.CFHeader.Flags != 0 {
|
||||||
|
return &Error{InvalidControlFrame, frame.StreamId}
|
||||||
|
}
|
||||||
|
if frame.CFHeader.length != 8 {
|
||||||
|
return &Error{InvalidControlFrame, frame.StreamId}
|
||||||
|
}
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
|
||||||
|
ctor, ok := cframeCtor[frameType]
|
||||||
|
if !ok {
|
||||||
|
return nil, &Error{Err: InvalidControlFrame}
|
||||||
|
}
|
||||||
|
return ctor(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var cframeCtor = map[ControlFrameType]func() controlFrame{
|
||||||
|
TypeSynStream: func() controlFrame { return new(SynStreamFrame) },
|
||||||
|
TypeSynReply: func() controlFrame { return new(SynReplyFrame) },
|
||||||
|
TypeRstStream: func() controlFrame { return new(RstStreamFrame) },
|
||||||
|
TypeSettings: func() controlFrame { return new(SettingsFrame) },
|
||||||
|
TypePing: func() controlFrame { return new(PingFrame) },
|
||||||
|
TypeGoAway: func() controlFrame { return new(GoAwayFrame) },
|
||||||
|
TypeHeaders: func() controlFrame { return new(HeadersFrame) },
|
||||||
|
TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
|
||||||
|
if f.headerDecompressor != nil {
|
||||||
|
f.headerReader.N = payloadSize
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
|
||||||
|
decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.headerDecompressor = decompressor
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
|
||||||
|
func (f *Framer) ReadFrame() (Frame, error) {
|
||||||
|
var firstWord uint32
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if firstWord&0x80000000 != 0 {
|
||||||
|
frameType := ControlFrameType(firstWord & 0xffff)
|
||||||
|
version := uint16(firstWord >> 16 & 0x7fff)
|
||||||
|
return f.parseControlFrame(version, frameType)
|
||||||
|
}
|
||||||
|
return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
|
||||||
|
var length uint32
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
flags := ControlFlags((length & 0xff000000) >> 24)
|
||||||
|
length &= 0xffffff
|
||||||
|
header := ControlFrameHeader{version, frameType, flags, length}
|
||||||
|
cframe, err := newControlFrame(frameType)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err = cframe.read(header, f); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return cframe, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
|
||||||
|
var numHeaders uint32
|
||||||
|
if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var e error
|
||||||
|
h := make(http.Header, int(numHeaders))
|
||||||
|
for i := 0; i < int(numHeaders); i++ {
|
||||||
|
var length uint32
|
||||||
|
if err := binary.Read(r, binary.BigEndian, &length); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
nameBytes := make([]byte, length)
|
||||||
|
if _, err := io.ReadFull(r, nameBytes); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
name := string(nameBytes)
|
||||||
|
if name != strings.ToLower(name) {
|
||||||
|
e = &Error{UnlowercasedHeaderName, streamId}
|
||||||
|
name = strings.ToLower(name)
|
||||||
|
}
|
||||||
|
if h[name] != nil {
|
||||||
|
e = &Error{DuplicateHeaders, streamId}
|
||||||
|
}
|
||||||
|
if err := binary.Read(r, binary.BigEndian, &length); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
value := make([]byte, length)
|
||||||
|
if _, err := io.ReadFull(r, value); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
valueList := strings.Split(string(value), headerValueSeparator)
|
||||||
|
for _, v := range valueList {
|
||||||
|
h.Add(name, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if e != nil {
|
||||||
|
return h, e
|
||||||
|
}
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
|
||||||
|
frame.CFHeader = h
|
||||||
|
var err error
|
||||||
|
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
frame.Priority >>= 5
|
||||||
|
if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
reader := f.r
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
err := f.uncorkHeaderDecompressor(int64(h.length - 10))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
reader = f.headerDecompressor
|
||||||
|
}
|
||||||
|
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
|
||||||
|
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
|
||||||
|
err = &Error{WrongCompressedPayloadSize, 0}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for h := range frame.Headers {
|
||||||
|
if invalidReqHeaders[h] {
|
||||||
|
return &Error{InvalidHeaderPresent, frame.StreamId}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
|
||||||
|
frame.CFHeader = h
|
||||||
|
var err error
|
||||||
|
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
reader := f.r
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
err := f.uncorkHeaderDecompressor(int64(h.length - 4))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
reader = f.headerDecompressor
|
||||||
|
}
|
||||||
|
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
|
||||||
|
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
|
||||||
|
err = &Error{WrongCompressedPayloadSize, 0}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for h := range frame.Headers {
|
||||||
|
if invalidRespHeaders[h] {
|
||||||
|
return &Error{InvalidHeaderPresent, frame.StreamId}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
|
||||||
|
frame.CFHeader = h
|
||||||
|
var err error
|
||||||
|
if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
reader := f.r
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
err := f.uncorkHeaderDecompressor(int64(h.length - 4))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
reader = f.headerDecompressor
|
||||||
|
}
|
||||||
|
frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
|
||||||
|
if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
|
||||||
|
err = &Error{WrongCompressedPayloadSize, 0}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var invalidHeaders map[string]bool
|
||||||
|
if frame.StreamId%2 == 0 {
|
||||||
|
invalidHeaders = invalidReqHeaders
|
||||||
|
} else {
|
||||||
|
invalidHeaders = invalidRespHeaders
|
||||||
|
}
|
||||||
|
for h := range frame.Headers {
|
||||||
|
if invalidHeaders[h] {
|
||||||
|
return &Error{InvalidHeaderPresent, frame.StreamId}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
|
||||||
|
var length uint32
|
||||||
|
if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var frame DataFrame
|
||||||
|
frame.StreamId = streamId
|
||||||
|
frame.Flags = DataFlags(length >> 24)
|
||||||
|
length &= 0xffffff
|
||||||
|
frame.Data = make([]byte, length)
|
||||||
|
if _, err := io.ReadFull(f.r, frame.Data); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return nil, &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
return &frame, nil
|
||||||
|
}
|
||||||
275
vendor/github.com/docker/spdystream/spdy/types.go
generated
vendored
Normal file
275
vendor/github.com/docker/spdystream/spdy/types.go
generated
vendored
Normal file
|
|
@ -0,0 +1,275 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package spdy implements the SPDY protocol (currently SPDY/3), described in
|
||||||
|
// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
|
||||||
|
package spdy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/zlib"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Version is the protocol version number that this package implements.
|
||||||
|
const Version = 3
|
||||||
|
|
||||||
|
// ControlFrameType stores the type field in a control frame header.
|
||||||
|
type ControlFrameType uint16
|
||||||
|
|
||||||
|
const (
|
||||||
|
TypeSynStream ControlFrameType = 0x0001
|
||||||
|
TypeSynReply = 0x0002
|
||||||
|
TypeRstStream = 0x0003
|
||||||
|
TypeSettings = 0x0004
|
||||||
|
TypePing = 0x0006
|
||||||
|
TypeGoAway = 0x0007
|
||||||
|
TypeHeaders = 0x0008
|
||||||
|
TypeWindowUpdate = 0x0009
|
||||||
|
)
|
||||||
|
|
||||||
|
// ControlFlags are the flags that can be set on a control frame.
|
||||||
|
type ControlFlags uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
ControlFlagFin ControlFlags = 0x01
|
||||||
|
ControlFlagUnidirectional = 0x02
|
||||||
|
ControlFlagSettingsClearSettings = 0x01
|
||||||
|
)
|
||||||
|
|
||||||
|
// DataFlags are the flags that can be set on a data frame.
|
||||||
|
type DataFlags uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
DataFlagFin DataFlags = 0x01
|
||||||
|
)
|
||||||
|
|
||||||
|
// MaxDataLength is the maximum number of bytes that can be stored in one frame.
|
||||||
|
const MaxDataLength = 1<<24 - 1
|
||||||
|
|
||||||
|
// headerValueSepator separates multiple header values.
|
||||||
|
const headerValueSeparator = "\x00"
|
||||||
|
|
||||||
|
// Frame is a single SPDY frame in its unpacked in-memory representation. Use
|
||||||
|
// Framer to read and write it.
|
||||||
|
type Frame interface {
|
||||||
|
write(f *Framer) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ControlFrameHeader contains all the fields in a control frame header,
|
||||||
|
// in its unpacked in-memory representation.
|
||||||
|
type ControlFrameHeader struct {
|
||||||
|
// Note, high bit is the "Control" bit.
|
||||||
|
version uint16 // spdy version number
|
||||||
|
frameType ControlFrameType
|
||||||
|
Flags ControlFlags
|
||||||
|
length uint32 // length of data field
|
||||||
|
}
|
||||||
|
|
||||||
|
type controlFrame interface {
|
||||||
|
Frame
|
||||||
|
read(h ControlFrameHeader, f *Framer) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// StreamId represents a 31-bit value identifying the stream.
|
||||||
|
type StreamId uint32
|
||||||
|
|
||||||
|
// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
|
||||||
|
// frame.
|
||||||
|
type SynStreamFrame struct {
|
||||||
|
CFHeader ControlFrameHeader
|
||||||
|
StreamId StreamId
|
||||||
|
AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
|
||||||
|
Priority uint8 // priority of this frame (3-bit)
|
||||||
|
Slot uint8 // index in the server's credential vector of the client certificate
|
||||||
|
Headers http.Header
|
||||||
|
}
|
||||||
|
|
||||||
|
// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
|
||||||
|
type SynReplyFrame struct {
|
||||||
|
CFHeader ControlFrameHeader
|
||||||
|
StreamId StreamId
|
||||||
|
Headers http.Header
|
||||||
|
}
|
||||||
|
|
||||||
|
// RstStreamStatus represents the status that led to a RST_STREAM.
|
||||||
|
type RstStreamStatus uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
ProtocolError RstStreamStatus = iota + 1
|
||||||
|
InvalidStream
|
||||||
|
RefusedStream
|
||||||
|
UnsupportedVersion
|
||||||
|
Cancel
|
||||||
|
InternalError
|
||||||
|
FlowControlError
|
||||||
|
StreamInUse
|
||||||
|
StreamAlreadyClosed
|
||||||
|
InvalidCredentials
|
||||||
|
FrameTooLarge
|
||||||
|
)
|
||||||
|
|
||||||
|
// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
|
||||||
|
// frame.
|
||||||
|
type RstStreamFrame struct {
|
||||||
|
CFHeader ControlFrameHeader
|
||||||
|
StreamId StreamId
|
||||||
|
Status RstStreamStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// SettingsFlag represents a flag in a SETTINGS frame.
|
||||||
|
type SettingsFlag uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
FlagSettingsPersistValue SettingsFlag = 0x1
|
||||||
|
FlagSettingsPersisted = 0x2
|
||||||
|
)
|
||||||
|
|
||||||
|
// SettingsFlag represents the id of an id/value pair in a SETTINGS frame.
|
||||||
|
type SettingsId uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
SettingsUploadBandwidth SettingsId = iota + 1
|
||||||
|
SettingsDownloadBandwidth
|
||||||
|
SettingsRoundTripTime
|
||||||
|
SettingsMaxConcurrentStreams
|
||||||
|
SettingsCurrentCwnd
|
||||||
|
SettingsDownloadRetransRate
|
||||||
|
SettingsInitialWindowSize
|
||||||
|
SettingsClientCretificateVectorSize
|
||||||
|
)
|
||||||
|
|
||||||
|
// SettingsFlagIdValue is the unpacked, in-memory representation of the
|
||||||
|
// combined flag/id/value for a setting in a SETTINGS frame.
|
||||||
|
type SettingsFlagIdValue struct {
|
||||||
|
Flag SettingsFlag
|
||||||
|
Id SettingsId
|
||||||
|
Value uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// SettingsFrame is the unpacked, in-memory representation of a SPDY
|
||||||
|
// SETTINGS frame.
|
||||||
|
type SettingsFrame struct {
|
||||||
|
CFHeader ControlFrameHeader
|
||||||
|
FlagIdValues []SettingsFlagIdValue
|
||||||
|
}
|
||||||
|
|
||||||
|
// PingFrame is the unpacked, in-memory representation of a PING frame.
|
||||||
|
type PingFrame struct {
|
||||||
|
CFHeader ControlFrameHeader
|
||||||
|
Id uint32 // unique id for this ping, from server is even, from client is odd.
|
||||||
|
}
|
||||||
|
|
||||||
|
// GoAwayStatus represents the status in a GoAwayFrame.
|
||||||
|
type GoAwayStatus uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
GoAwayOK GoAwayStatus = iota
|
||||||
|
GoAwayProtocolError
|
||||||
|
GoAwayInternalError
|
||||||
|
)
|
||||||
|
|
||||||
|
// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
|
||||||
|
type GoAwayFrame struct {
|
||||||
|
CFHeader ControlFrameHeader
|
||||||
|
LastGoodStreamId StreamId // last stream id which was accepted by sender
|
||||||
|
Status GoAwayStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
|
||||||
|
type HeadersFrame struct {
|
||||||
|
CFHeader ControlFrameHeader
|
||||||
|
StreamId StreamId
|
||||||
|
Headers http.Header
|
||||||
|
}
|
||||||
|
|
||||||
|
// WindowUpdateFrame is the unpacked, in-memory representation of a
|
||||||
|
// WINDOW_UPDATE frame.
|
||||||
|
type WindowUpdateFrame struct {
|
||||||
|
CFHeader ControlFrameHeader
|
||||||
|
StreamId StreamId
|
||||||
|
DeltaWindowSize uint32 // additional number of bytes to existing window size
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Implement credential frame and related methods.
|
||||||
|
|
||||||
|
// DataFrame is the unpacked, in-memory representation of a DATA frame.
|
||||||
|
type DataFrame struct {
|
||||||
|
// Note, high bit is the "Control" bit. Should be 0 for data frames.
|
||||||
|
StreamId StreamId
|
||||||
|
Flags DataFlags
|
||||||
|
Data []byte // payload data of this frame
|
||||||
|
}
|
||||||
|
|
||||||
|
// A SPDY specific error.
|
||||||
|
type ErrorCode string
|
||||||
|
|
||||||
|
const (
|
||||||
|
UnlowercasedHeaderName ErrorCode = "header was not lowercased"
|
||||||
|
DuplicateHeaders = "multiple headers with same name"
|
||||||
|
WrongCompressedPayloadSize = "compressed payload size was incorrect"
|
||||||
|
UnknownFrameType = "unknown frame type"
|
||||||
|
InvalidControlFrame = "invalid control frame"
|
||||||
|
InvalidDataFrame = "invalid data frame"
|
||||||
|
InvalidHeaderPresent = "frame contained invalid header"
|
||||||
|
ZeroStreamId = "stream id zero is disallowed"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error contains both the type of error and additional values. StreamId is 0
|
||||||
|
// if Error is not associated with a stream.
|
||||||
|
type Error struct {
|
||||||
|
Err ErrorCode
|
||||||
|
StreamId StreamId
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Error) Error() string {
|
||||||
|
return string(e.Err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var invalidReqHeaders = map[string]bool{
|
||||||
|
"Connection": true,
|
||||||
|
"Host": true,
|
||||||
|
"Keep-Alive": true,
|
||||||
|
"Proxy-Connection": true,
|
||||||
|
"Transfer-Encoding": true,
|
||||||
|
}
|
||||||
|
|
||||||
|
var invalidRespHeaders = map[string]bool{
|
||||||
|
"Connection": true,
|
||||||
|
"Keep-Alive": true,
|
||||||
|
"Proxy-Connection": true,
|
||||||
|
"Transfer-Encoding": true,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Framer handles serializing/deserializing SPDY frames, including compressing/
|
||||||
|
// decompressing payloads.
|
||||||
|
type Framer struct {
|
||||||
|
headerCompressionDisabled bool
|
||||||
|
w io.Writer
|
||||||
|
headerBuf *bytes.Buffer
|
||||||
|
headerCompressor *zlib.Writer
|
||||||
|
r io.Reader
|
||||||
|
headerReader io.LimitedReader
|
||||||
|
headerDecompressor io.ReadCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFramer allocates a new Framer for a given SPDY connection, represented by
|
||||||
|
// a io.Writer and io.Reader. Note that Framer will read and write individual fields
|
||||||
|
// from/to the Reader and Writer, so the caller should pass in an appropriately
|
||||||
|
// buffered implementation to optimize performance.
|
||||||
|
func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
|
||||||
|
compressBuf := new(bytes.Buffer)
|
||||||
|
compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
framer := &Framer{
|
||||||
|
w: w,
|
||||||
|
headerBuf: compressBuf,
|
||||||
|
headerCompressor: compressor,
|
||||||
|
r: r,
|
||||||
|
}
|
||||||
|
return framer, nil
|
||||||
|
}
|
||||||
318
vendor/github.com/docker/spdystream/spdy/write.go
generated
vendored
Normal file
318
vendor/github.com/docker/spdystream/spdy/write.go
generated
vendored
Normal file
|
|
@ -0,0 +1,318 @@
|
||||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package spdy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (frame *SynStreamFrame) write(f *Framer) error {
|
||||||
|
return f.writeSynStreamFrame(frame)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *SynReplyFrame) write(f *Framer) error {
|
||||||
|
return f.writeSynReplyFrame(frame)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *RstStreamFrame) write(f *Framer) (err error) {
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
frame.CFHeader.version = Version
|
||||||
|
frame.CFHeader.frameType = TypeRstStream
|
||||||
|
frame.CFHeader.Flags = 0
|
||||||
|
frame.CFHeader.length = 8
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if frame.Status == 0 {
|
||||||
|
return &Error{InvalidControlFrame, frame.StreamId}
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *SettingsFrame) write(f *Framer) (err error) {
|
||||||
|
frame.CFHeader.version = Version
|
||||||
|
frame.CFHeader.frameType = TypeSettings
|
||||||
|
frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, flagIdValue := range frame.FlagIdValues {
|
||||||
|
flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *PingFrame) write(f *Framer) (err error) {
|
||||||
|
if frame.Id == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
frame.CFHeader.version = Version
|
||||||
|
frame.CFHeader.frameType = TypePing
|
||||||
|
frame.CFHeader.Flags = 0
|
||||||
|
frame.CFHeader.length = 4
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *GoAwayFrame) write(f *Framer) (err error) {
|
||||||
|
frame.CFHeader.version = Version
|
||||||
|
frame.CFHeader.frameType = TypeGoAway
|
||||||
|
frame.CFHeader.Flags = 0
|
||||||
|
frame.CFHeader.length = 8
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *HeadersFrame) write(f *Framer) error {
|
||||||
|
return f.writeHeadersFrame(frame)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
|
||||||
|
frame.CFHeader.version = Version
|
||||||
|
frame.CFHeader.frameType = TypeWindowUpdate
|
||||||
|
frame.CFHeader.Flags = 0
|
||||||
|
frame.CFHeader.length = 8
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (frame *DataFrame) write(f *Framer) error {
|
||||||
|
return f.writeDataFrame(frame)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteFrame writes a frame.
|
||||||
|
func (f *Framer) WriteFrame(frame Frame) error {
|
||||||
|
return frame.write(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
|
||||||
|
if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
flagsAndLength := uint32(h.Flags)<<24 | h.length
|
||||||
|
if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
|
||||||
|
n = 0
|
||||||
|
if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n += 2
|
||||||
|
for name, values := range h {
|
||||||
|
if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n += 2
|
||||||
|
name = strings.ToLower(name)
|
||||||
|
if _, err = io.WriteString(w, name); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n += len(name)
|
||||||
|
v := strings.Join(values, headerValueSeparator)
|
||||||
|
if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n += 2
|
||||||
|
if _, err = io.WriteString(w, v); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n += len(v)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
// Marshal the headers.
|
||||||
|
var writer io.Writer = f.headerBuf
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
writer = f.headerCompressor
|
||||||
|
}
|
||||||
|
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
f.headerCompressor.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set ControlFrameHeader.
|
||||||
|
frame.CFHeader.version = Version
|
||||||
|
frame.CFHeader.frameType = TypeSynStream
|
||||||
|
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.headerBuf.Reset()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
// Marshal the headers.
|
||||||
|
var writer io.Writer = f.headerBuf
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
writer = f.headerCompressor
|
||||||
|
}
|
||||||
|
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
f.headerCompressor.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set ControlFrameHeader.
|
||||||
|
frame.CFHeader.version = Version
|
||||||
|
frame.CFHeader.frameType = TypeSynReply
|
||||||
|
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f.headerBuf.Reset()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
// Marshal the headers.
|
||||||
|
var writer io.Writer = f.headerBuf
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
writer = f.headerCompressor
|
||||||
|
}
|
||||||
|
if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !f.headerCompressionDisabled {
|
||||||
|
f.headerCompressor.Flush()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set ControlFrameHeader.
|
||||||
|
frame.CFHeader.version = Version
|
||||||
|
frame.CFHeader.frameType = TypeHeaders
|
||||||
|
frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f.headerBuf.Reset()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
|
||||||
|
if frame.StreamId == 0 {
|
||||||
|
return &Error{ZeroStreamId, 0}
|
||||||
|
}
|
||||||
|
if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
|
||||||
|
return &Error{InvalidDataFrame, frame.StreamId}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serialize frame to Writer.
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
|
||||||
|
if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = f.w.Write(frame.Data); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
327
vendor/github.com/docker/spdystream/stream.go
generated
vendored
Normal file
327
vendor/github.com/docker/spdystream/stream.go
generated
vendored
Normal file
|
|
@ -0,0 +1,327 @@
|
||||||
|
package spdystream
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/spdystream/spdy"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrUnreadPartialData = errors.New("unread partial data")
|
||||||
|
)
|
||||||
|
|
||||||
|
type Stream struct {
|
||||||
|
streamId spdy.StreamId
|
||||||
|
parent *Stream
|
||||||
|
conn *Connection
|
||||||
|
startChan chan error
|
||||||
|
|
||||||
|
dataLock sync.RWMutex
|
||||||
|
dataChan chan []byte
|
||||||
|
unread []byte
|
||||||
|
|
||||||
|
priority uint8
|
||||||
|
headers http.Header
|
||||||
|
headerChan chan http.Header
|
||||||
|
finishLock sync.Mutex
|
||||||
|
finished bool
|
||||||
|
replyCond *sync.Cond
|
||||||
|
replied bool
|
||||||
|
closeLock sync.Mutex
|
||||||
|
closeChan chan bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteData writes data to stream, sending a dataframe per call
|
||||||
|
func (s *Stream) WriteData(data []byte, fin bool) error {
|
||||||
|
s.waitWriteReply()
|
||||||
|
var flags spdy.DataFlags
|
||||||
|
|
||||||
|
if fin {
|
||||||
|
flags = spdy.DataFlagFin
|
||||||
|
s.finishLock.Lock()
|
||||||
|
if s.finished {
|
||||||
|
s.finishLock.Unlock()
|
||||||
|
return ErrWriteClosedStream
|
||||||
|
}
|
||||||
|
s.finished = true
|
||||||
|
s.finishLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
dataFrame := &spdy.DataFrame{
|
||||||
|
StreamId: s.streamId,
|
||||||
|
Flags: flags,
|
||||||
|
Data: data,
|
||||||
|
}
|
||||||
|
|
||||||
|
debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
|
||||||
|
return s.conn.framer.WriteFrame(dataFrame)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes bytes to a stream, calling write data for each call.
|
||||||
|
func (s *Stream) Write(data []byte) (n int, err error) {
|
||||||
|
err = s.WriteData(data, false)
|
||||||
|
if err == nil {
|
||||||
|
n = len(data)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads bytes from a stream, a single read will never get more
|
||||||
|
// than what is sent on a single data frame, but a multiple calls to
|
||||||
|
// read may get data from the same data frame.
|
||||||
|
func (s *Stream) Read(p []byte) (n int, err error) {
|
||||||
|
if s.unread == nil {
|
||||||
|
select {
|
||||||
|
case <-s.closeChan:
|
||||||
|
return 0, io.EOF
|
||||||
|
case read, ok := <-s.dataChan:
|
||||||
|
if !ok {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
s.unread = read
|
||||||
|
}
|
||||||
|
}
|
||||||
|
n = copy(p, s.unread)
|
||||||
|
if n < len(s.unread) {
|
||||||
|
s.unread = s.unread[n:]
|
||||||
|
} else {
|
||||||
|
s.unread = nil
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadData reads an entire data frame and returns the byte array
|
||||||
|
// from the data frame. If there is unread data from the result
|
||||||
|
// of a Read call, this function will return an ErrUnreadPartialData.
|
||||||
|
func (s *Stream) ReadData() ([]byte, error) {
|
||||||
|
debugMessage("(%p) Reading data from %d", s, s.streamId)
|
||||||
|
if s.unread != nil {
|
||||||
|
return nil, ErrUnreadPartialData
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-s.closeChan:
|
||||||
|
return nil, io.EOF
|
||||||
|
case read, ok := <-s.dataChan:
|
||||||
|
if !ok {
|
||||||
|
return nil, io.EOF
|
||||||
|
}
|
||||||
|
return read, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Stream) waitWriteReply() {
|
||||||
|
if s.replyCond != nil {
|
||||||
|
s.replyCond.L.Lock()
|
||||||
|
for !s.replied {
|
||||||
|
s.replyCond.Wait()
|
||||||
|
}
|
||||||
|
s.replyCond.L.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait waits for the stream to receive a reply.
|
||||||
|
func (s *Stream) Wait() error {
|
||||||
|
return s.WaitTimeout(time.Duration(0))
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitTimeout waits for the stream to receive a reply or for timeout.
|
||||||
|
// When the timeout is reached, ErrTimeout will be returned.
|
||||||
|
func (s *Stream) WaitTimeout(timeout time.Duration) error {
|
||||||
|
var timeoutChan <-chan time.Time
|
||||||
|
if timeout > time.Duration(0) {
|
||||||
|
timeoutChan = time.After(timeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case err := <-s.startChan:
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
break
|
||||||
|
case <-timeoutChan:
|
||||||
|
return ErrTimeout
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the stream by sending an empty data frame with the
|
||||||
|
// finish flag set, indicating this side is finished with the stream.
|
||||||
|
func (s *Stream) Close() error {
|
||||||
|
select {
|
||||||
|
case <-s.closeChan:
|
||||||
|
// Stream is now fully closed
|
||||||
|
s.conn.removeStream(s)
|
||||||
|
default:
|
||||||
|
break
|
||||||
|
}
|
||||||
|
return s.WriteData([]byte{}, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset sends a reset frame, putting the stream into the fully closed state.
|
||||||
|
func (s *Stream) Reset() error {
|
||||||
|
s.conn.removeStream(s)
|
||||||
|
return s.resetStream()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Stream) resetStream() error {
|
||||||
|
// Always call closeRemoteChannels, even if s.finished is already true.
|
||||||
|
// This makes it so that stream.Close() followed by stream.Reset() allows
|
||||||
|
// stream.Read() to unblock.
|
||||||
|
s.closeRemoteChannels()
|
||||||
|
|
||||||
|
s.finishLock.Lock()
|
||||||
|
if s.finished {
|
||||||
|
s.finishLock.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
s.finished = true
|
||||||
|
s.finishLock.Unlock()
|
||||||
|
|
||||||
|
resetFrame := &spdy.RstStreamFrame{
|
||||||
|
StreamId: s.streamId,
|
||||||
|
Status: spdy.Cancel,
|
||||||
|
}
|
||||||
|
return s.conn.framer.WriteFrame(resetFrame)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateSubStream creates a stream using the current as the parent
|
||||||
|
func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
|
||||||
|
return s.conn.CreateStream(headers, s, fin)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the stream priority, does not affect the
|
||||||
|
// remote priority of this stream after Open has been called.
|
||||||
|
// Valid values are 0 through 7, 0 being the highest priority
|
||||||
|
// and 7 the lowest.
|
||||||
|
func (s *Stream) SetPriority(priority uint8) {
|
||||||
|
s.priority = priority
|
||||||
|
}
|
||||||
|
|
||||||
|
// SendHeader sends a header frame across the stream
|
||||||
|
func (s *Stream) SendHeader(headers http.Header, fin bool) error {
|
||||||
|
return s.conn.sendHeaders(headers, s, fin)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SendReply sends a reply on a stream, only valid to be called once
|
||||||
|
// when handling a new stream
|
||||||
|
func (s *Stream) SendReply(headers http.Header, fin bool) error {
|
||||||
|
if s.replyCond == nil {
|
||||||
|
return errors.New("cannot reply on initiated stream")
|
||||||
|
}
|
||||||
|
s.replyCond.L.Lock()
|
||||||
|
defer s.replyCond.L.Unlock()
|
||||||
|
if s.replied {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err := s.conn.sendReply(headers, s, fin)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
s.replied = true
|
||||||
|
s.replyCond.Broadcast()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refuse sends a reset frame with the status refuse, only
|
||||||
|
// valid to be called once when handling a new stream. This
|
||||||
|
// may be used to indicate that a stream is not allowed
|
||||||
|
// when http status codes are not being used.
|
||||||
|
func (s *Stream) Refuse() error {
|
||||||
|
if s.replied {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
s.replied = true
|
||||||
|
return s.conn.sendReset(spdy.RefusedStream, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cancel sends a reset frame with the status canceled. This
|
||||||
|
// can be used at any time by the creator of the Stream to
|
||||||
|
// indicate the stream is no longer needed.
|
||||||
|
func (s *Stream) Cancel() error {
|
||||||
|
return s.conn.sendReset(spdy.Cancel, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReceiveHeader receives a header sent on the other side
|
||||||
|
// of the stream. This function will block until a header
|
||||||
|
// is received or stream is closed.
|
||||||
|
func (s *Stream) ReceiveHeader() (http.Header, error) {
|
||||||
|
select {
|
||||||
|
case <-s.closeChan:
|
||||||
|
break
|
||||||
|
case header, ok := <-s.headerChan:
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("header chan closed")
|
||||||
|
}
|
||||||
|
return header, nil
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("stream closed")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent returns the parent stream
|
||||||
|
func (s *Stream) Parent() *Stream {
|
||||||
|
return s.parent
|
||||||
|
}
|
||||||
|
|
||||||
|
// Headers returns the headers used to create the stream
|
||||||
|
func (s *Stream) Headers() http.Header {
|
||||||
|
return s.headers
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the string version of stream using the
|
||||||
|
// streamId to uniquely identify the stream
|
||||||
|
func (s *Stream) String() string {
|
||||||
|
return fmt.Sprintf("stream:%d", s.streamId)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Identifier returns a 32 bit identifier for the stream
|
||||||
|
func (s *Stream) Identifier() uint32 {
|
||||||
|
return uint32(s.streamId)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsFinished returns whether the stream has finished
|
||||||
|
// sending data
|
||||||
|
func (s *Stream) IsFinished() bool {
|
||||||
|
return s.finished
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement net.Conn interface
|
||||||
|
|
||||||
|
func (s *Stream) LocalAddr() net.Addr {
|
||||||
|
return s.conn.conn.LocalAddr()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Stream) RemoteAddr() net.Addr {
|
||||||
|
return s.conn.conn.RemoteAddr()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO set per stream values instead of connection-wide
|
||||||
|
|
||||||
|
func (s *Stream) SetDeadline(t time.Time) error {
|
||||||
|
return s.conn.conn.SetDeadline(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Stream) SetReadDeadline(t time.Time) error {
|
||||||
|
return s.conn.conn.SetReadDeadline(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Stream) SetWriteDeadline(t time.Time) error {
|
||||||
|
return s.conn.conn.SetWriteDeadline(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Stream) closeRemoteChannels() {
|
||||||
|
s.closeLock.Lock()
|
||||||
|
defer s.closeLock.Unlock()
|
||||||
|
select {
|
||||||
|
case <-s.closeChan:
|
||||||
|
default:
|
||||||
|
close(s.closeChan)
|
||||||
|
}
|
||||||
|
}
|
||||||
16
vendor/github.com/docker/spdystream/utils.go
generated
vendored
Normal file
16
vendor/github.com/docker/spdystream/utils.go
generated
vendored
Normal file
|
|
@ -0,0 +1,16 @@
|
||||||
|
package spdystream
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
DEBUG = os.Getenv("DEBUG")
|
||||||
|
)
|
||||||
|
|
||||||
|
func debugMessage(fmt string, args ...interface{}) {
|
||||||
|
if DEBUG != "" {
|
||||||
|
log.Printf(fmt, args...)
|
||||||
|
}
|
||||||
|
}
|
||||||
20
vendor/github.com/ghodss/yaml/.gitignore
generated
vendored
Normal file
20
vendor/github.com/ghodss/yaml/.gitignore
generated
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
# OSX leaves these everywhere on SMB shares
|
||||||
|
._*
|
||||||
|
|
||||||
|
# Eclipse files
|
||||||
|
.classpath
|
||||||
|
.project
|
||||||
|
.settings/**
|
||||||
|
|
||||||
|
# Emacs save files
|
||||||
|
*~
|
||||||
|
|
||||||
|
# Vim-related files
|
||||||
|
[._]*.s[a-w][a-z]
|
||||||
|
[._]s[a-w][a-z]
|
||||||
|
*.un~
|
||||||
|
Session.vim
|
||||||
|
.netrwhist
|
||||||
|
|
||||||
|
# Go test binaries
|
||||||
|
*.test
|
||||||
7
vendor/github.com/ghodss/yaml/.travis.yml
generated
vendored
Normal file
7
vendor/github.com/ghodss/yaml/.travis.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.3
|
||||||
|
- 1.4
|
||||||
|
script:
|
||||||
|
- go test
|
||||||
|
- go build
|
||||||
50
vendor/github.com/ghodss/yaml/LICENSE
generated
vendored
Normal file
50
vendor/github.com/ghodss/yaml/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,50 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Sam Ghods
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
||||||
|
|
||||||
|
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
121
vendor/github.com/ghodss/yaml/README.md
generated
vendored
Normal file
121
vendor/github.com/ghodss/yaml/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,121 @@
|
||||||
|
# YAML marshaling and unmarshaling support for Go
|
||||||
|
|
||||||
|
[](https://travis-ci.org/ghodss/yaml)
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
|
||||||
|
|
||||||
|
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
|
||||||
|
|
||||||
|
## Compatibility
|
||||||
|
|
||||||
|
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
|
||||||
|
|
||||||
|
## Caveats
|
||||||
|
|
||||||
|
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
|
||||||
|
|
||||||
|
```
|
||||||
|
BAD:
|
||||||
|
exampleKey: !!binary gIGC
|
||||||
|
|
||||||
|
GOOD:
|
||||||
|
exampleKey: gIGC
|
||||||
|
... and decode the base64 data in your code.
|
||||||
|
```
|
||||||
|
|
||||||
|
**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
|
||||||
|
|
||||||
|
## Installation and usage
|
||||||
|
|
||||||
|
To install, run:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ go get github.com/ghodss/yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
And import using:
|
||||||
|
|
||||||
|
```
|
||||||
|
import "github.com/ghodss/yaml"
|
||||||
|
```
|
||||||
|
|
||||||
|
Usage is very similar to the JSON library:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ghodss/yaml"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Person struct {
|
||||||
|
Name string `json:"name"` // Affects YAML field names too.
|
||||||
|
Age int `json:"age"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Marshal a Person struct to YAML.
|
||||||
|
p := Person{"John", 30}
|
||||||
|
y, err := yaml.Marshal(p)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("err: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Println(string(y))
|
||||||
|
/* Output:
|
||||||
|
age: 30
|
||||||
|
name: John
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Unmarshal the YAML back into a Person struct.
|
||||||
|
var p2 Person
|
||||||
|
err = yaml.Unmarshal(y, &p2)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("err: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Println(p2)
|
||||||
|
/* Output:
|
||||||
|
{John 30}
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/ghodss/yaml"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
j := []byte(`{"name": "John", "age": 30}`)
|
||||||
|
y, err := yaml.JSONToYAML(j)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("err: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Println(string(y))
|
||||||
|
/* Output:
|
||||||
|
name: John
|
||||||
|
age: 30
|
||||||
|
*/
|
||||||
|
j2, err := yaml.YAMLToJSON(y)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("err: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fmt.Println(string(j2))
|
||||||
|
/* Output:
|
||||||
|
{"age":30,"name":"John"}
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
```
|
||||||
501
vendor/github.com/ghodss/yaml/fields.go
generated
vendored
Normal file
501
vendor/github.com/ghodss/yaml/fields.go
generated
vendored
Normal file
|
|
@ -0,0 +1,501 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding"
|
||||||
|
"encoding/json"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
)
|
||||||
|
|
||||||
|
// indirect walks down v allocating pointers as needed,
|
||||||
|
// until it gets to a non-pointer.
|
||||||
|
// if it encounters an Unmarshaler, indirect stops and returns that.
|
||||||
|
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
|
||||||
|
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
|
||||||
|
// If v is a named type and is addressable,
|
||||||
|
// start with its address, so that if the type has pointer methods,
|
||||||
|
// we find them.
|
||||||
|
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
|
||||||
|
v = v.Addr()
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
// Load value from interface, but only if the result will be
|
||||||
|
// usefully addressable.
|
||||||
|
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||||
|
e := v.Elem()
|
||||||
|
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
|
||||||
|
v = e
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.Kind() != reflect.Ptr {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if v.IsNil() {
|
||||||
|
if v.CanSet() {
|
||||||
|
v.Set(reflect.New(v.Type().Elem()))
|
||||||
|
} else {
|
||||||
|
v = reflect.New(v.Type().Elem())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v.Type().NumMethod() > 0 {
|
||||||
|
if u, ok := v.Interface().(json.Unmarshaler); ok {
|
||||||
|
return u, nil, reflect.Value{}
|
||||||
|
}
|
||||||
|
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
|
||||||
|
return nil, u, reflect.Value{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v = v.Elem()
|
||||||
|
}
|
||||||
|
return nil, nil, v
|
||||||
|
}
|
||||||
|
|
||||||
|
// A field represents a single field found in a struct.
|
||||||
|
type field struct {
|
||||||
|
name string
|
||||||
|
nameBytes []byte // []byte(name)
|
||||||
|
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
|
||||||
|
|
||||||
|
tag bool
|
||||||
|
index []int
|
||||||
|
typ reflect.Type
|
||||||
|
omitEmpty bool
|
||||||
|
quoted bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func fillField(f field) field {
|
||||||
|
f.nameBytes = []byte(f.name)
|
||||||
|
f.equalFold = foldFunc(f.nameBytes)
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// byName sorts field by name, breaking ties with depth,
|
||||||
|
// then breaking ties with "name came from json tag", then
|
||||||
|
// breaking ties with index sequence.
|
||||||
|
type byName []field
|
||||||
|
|
||||||
|
func (x byName) Len() int { return len(x) }
|
||||||
|
|
||||||
|
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
|
||||||
|
func (x byName) Less(i, j int) bool {
|
||||||
|
if x[i].name != x[j].name {
|
||||||
|
return x[i].name < x[j].name
|
||||||
|
}
|
||||||
|
if len(x[i].index) != len(x[j].index) {
|
||||||
|
return len(x[i].index) < len(x[j].index)
|
||||||
|
}
|
||||||
|
if x[i].tag != x[j].tag {
|
||||||
|
return x[i].tag
|
||||||
|
}
|
||||||
|
return byIndex(x).Less(i, j)
|
||||||
|
}
|
||||||
|
|
||||||
|
// byIndex sorts field by index sequence.
|
||||||
|
type byIndex []field
|
||||||
|
|
||||||
|
func (x byIndex) Len() int { return len(x) }
|
||||||
|
|
||||||
|
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
|
||||||
|
func (x byIndex) Less(i, j int) bool {
|
||||||
|
for k, xik := range x[i].index {
|
||||||
|
if k >= len(x[j].index) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if xik != x[j].index[k] {
|
||||||
|
return xik < x[j].index[k]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return len(x[i].index) < len(x[j].index)
|
||||||
|
}
|
||||||
|
|
||||||
|
// typeFields returns a list of fields that JSON should recognize for the given type.
|
||||||
|
// The algorithm is breadth-first search over the set of structs to include - the top struct
|
||||||
|
// and then any reachable anonymous structs.
|
||||||
|
func typeFields(t reflect.Type) []field {
|
||||||
|
// Anonymous fields to explore at the current level and the next.
|
||||||
|
current := []field{}
|
||||||
|
next := []field{{typ: t}}
|
||||||
|
|
||||||
|
// Count of queued names for current level and the next.
|
||||||
|
count := map[reflect.Type]int{}
|
||||||
|
nextCount := map[reflect.Type]int{}
|
||||||
|
|
||||||
|
// Types already visited at an earlier level.
|
||||||
|
visited := map[reflect.Type]bool{}
|
||||||
|
|
||||||
|
// Fields found.
|
||||||
|
var fields []field
|
||||||
|
|
||||||
|
for len(next) > 0 {
|
||||||
|
current, next = next, current[:0]
|
||||||
|
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||||
|
|
||||||
|
for _, f := range current {
|
||||||
|
if visited[f.typ] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
visited[f.typ] = true
|
||||||
|
|
||||||
|
// Scan f.typ for fields to include.
|
||||||
|
for i := 0; i < f.typ.NumField(); i++ {
|
||||||
|
sf := f.typ.Field(i)
|
||||||
|
if sf.PkgPath != "" { // unexported
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
tag := sf.Tag.Get("json")
|
||||||
|
if tag == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name, opts := parseTag(tag)
|
||||||
|
if !isValidTag(name) {
|
||||||
|
name = ""
|
||||||
|
}
|
||||||
|
index := make([]int, len(f.index)+1)
|
||||||
|
copy(index, f.index)
|
||||||
|
index[len(f.index)] = i
|
||||||
|
|
||||||
|
ft := sf.Type
|
||||||
|
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||||
|
// Follow pointer.
|
||||||
|
ft = ft.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record found field and index sequence.
|
||||||
|
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||||
|
tagged := name != ""
|
||||||
|
if name == "" {
|
||||||
|
name = sf.Name
|
||||||
|
}
|
||||||
|
fields = append(fields, fillField(field{
|
||||||
|
name: name,
|
||||||
|
tag: tagged,
|
||||||
|
index: index,
|
||||||
|
typ: ft,
|
||||||
|
omitEmpty: opts.Contains("omitempty"),
|
||||||
|
quoted: opts.Contains("string"),
|
||||||
|
}))
|
||||||
|
if count[f.typ] > 1 {
|
||||||
|
// If there were multiple instances, add a second,
|
||||||
|
// so that the annihilation code will see a duplicate.
|
||||||
|
// It only cares about the distinction between 1 or 2,
|
||||||
|
// so don't bother generating any more copies.
|
||||||
|
fields = append(fields, fields[len(fields)-1])
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Record new anonymous struct to explore in next round.
|
||||||
|
nextCount[ft]++
|
||||||
|
if nextCount[ft] == 1 {
|
||||||
|
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(byName(fields))
|
||||||
|
|
||||||
|
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||||
|
// except that fields with JSON tags are promoted.
|
||||||
|
|
||||||
|
// The fields are sorted in primary order of name, secondary order
|
||||||
|
// of field index length. Loop over names; for each name, delete
|
||||||
|
// hidden fields by choosing the one dominant field that survives.
|
||||||
|
out := fields[:0]
|
||||||
|
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||||
|
// One iteration per name.
|
||||||
|
// Find the sequence of fields with the name of this first field.
|
||||||
|
fi := fields[i]
|
||||||
|
name := fi.name
|
||||||
|
for advance = 1; i+advance < len(fields); advance++ {
|
||||||
|
fj := fields[i+advance]
|
||||||
|
if fj.name != name {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if advance == 1 { // Only one field with this name
|
||||||
|
out = append(out, fi)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
dominant, ok := dominantField(fields[i : i+advance])
|
||||||
|
if ok {
|
||||||
|
out = append(out, dominant)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fields = out
|
||||||
|
sort.Sort(byIndex(fields))
|
||||||
|
|
||||||
|
return fields
|
||||||
|
}
|
||||||
|
|
||||||
|
// dominantField looks through the fields, all of which are known to
|
||||||
|
// have the same name, to find the single field that dominates the
|
||||||
|
// others using Go's embedding rules, modified by the presence of
|
||||||
|
// JSON tags. If there are multiple top-level fields, the boolean
|
||||||
|
// will be false: This condition is an error in Go and we skip all
|
||||||
|
// the fields.
|
||||||
|
func dominantField(fields []field) (field, bool) {
|
||||||
|
// The fields are sorted in increasing index-length order. The winner
|
||||||
|
// must therefore be one with the shortest index length. Drop all
|
||||||
|
// longer entries, which is easy: just truncate the slice.
|
||||||
|
length := len(fields[0].index)
|
||||||
|
tagged := -1 // Index of first tagged field.
|
||||||
|
for i, f := range fields {
|
||||||
|
if len(f.index) > length {
|
||||||
|
fields = fields[:i]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if f.tag {
|
||||||
|
if tagged >= 0 {
|
||||||
|
// Multiple tagged fields at the same level: conflict.
|
||||||
|
// Return no field.
|
||||||
|
return field{}, false
|
||||||
|
}
|
||||||
|
tagged = i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if tagged >= 0 {
|
||||||
|
return fields[tagged], true
|
||||||
|
}
|
||||||
|
// All remaining fields have the same length. If there's more than one,
|
||||||
|
// we have a conflict (two fields named "X" at the same level) and we
|
||||||
|
// return no field.
|
||||||
|
if len(fields) > 1 {
|
||||||
|
return field{}, false
|
||||||
|
}
|
||||||
|
return fields[0], true
|
||||||
|
}
|
||||||
|
|
||||||
|
var fieldCache struct {
|
||||||
|
sync.RWMutex
|
||||||
|
m map[reflect.Type][]field
|
||||||
|
}
|
||||||
|
|
||||||
|
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||||
|
func cachedTypeFields(t reflect.Type) []field {
|
||||||
|
fieldCache.RLock()
|
||||||
|
f := fieldCache.m[t]
|
||||||
|
fieldCache.RUnlock()
|
||||||
|
if f != nil {
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compute fields without lock.
|
||||||
|
// Might duplicate effort but won't hold other computations back.
|
||||||
|
f = typeFields(t)
|
||||||
|
if f == nil {
|
||||||
|
f = []field{}
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldCache.Lock()
|
||||||
|
if fieldCache.m == nil {
|
||||||
|
fieldCache.m = map[reflect.Type][]field{}
|
||||||
|
}
|
||||||
|
fieldCache.m[t] = f
|
||||||
|
fieldCache.Unlock()
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
func isValidTag(s string) bool {
|
||||||
|
if s == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, c := range s {
|
||||||
|
switch {
|
||||||
|
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
|
||||||
|
// Backslash and quote chars are reserved, but
|
||||||
|
// otherwise any punctuation chars are allowed
|
||||||
|
// in a tag name.
|
||||||
|
default:
|
||||||
|
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
|
||||||
|
kelvin = '\u212a'
|
||||||
|
smallLongEss = '\u017f'
|
||||||
|
)
|
||||||
|
|
||||||
|
// foldFunc returns one of four different case folding equivalence
|
||||||
|
// functions, from most general (and slow) to fastest:
|
||||||
|
//
|
||||||
|
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
|
||||||
|
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
|
||||||
|
// 3) asciiEqualFold, no special, but includes non-letters (including _)
|
||||||
|
// 4) simpleLetterEqualFold, no specials, no non-letters.
|
||||||
|
//
|
||||||
|
// The letters S and K are special because they map to 3 runes, not just 2:
|
||||||
|
// * S maps to s and to U+017F 'ſ' Latin small letter long s
|
||||||
|
// * k maps to K and to U+212A 'K' Kelvin sign
|
||||||
|
// See http://play.golang.org/p/tTxjOc0OGo
|
||||||
|
//
|
||||||
|
// The returned function is specialized for matching against s and
|
||||||
|
// should only be given s. It's not curried for performance reasons.
|
||||||
|
func foldFunc(s []byte) func(s, t []byte) bool {
|
||||||
|
nonLetter := false
|
||||||
|
special := false // special letter
|
||||||
|
for _, b := range s {
|
||||||
|
if b >= utf8.RuneSelf {
|
||||||
|
return bytes.EqualFold
|
||||||
|
}
|
||||||
|
upper := b & caseMask
|
||||||
|
if upper < 'A' || upper > 'Z' {
|
||||||
|
nonLetter = true
|
||||||
|
} else if upper == 'K' || upper == 'S' {
|
||||||
|
// See above for why these letters are special.
|
||||||
|
special = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if special {
|
||||||
|
return equalFoldRight
|
||||||
|
}
|
||||||
|
if nonLetter {
|
||||||
|
return asciiEqualFold
|
||||||
|
}
|
||||||
|
return simpleLetterEqualFold
|
||||||
|
}
|
||||||
|
|
||||||
|
// equalFoldRight is a specialization of bytes.EqualFold when s is
|
||||||
|
// known to be all ASCII (including punctuation), but contains an 's',
|
||||||
|
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
|
||||||
|
// See comments on foldFunc.
|
||||||
|
func equalFoldRight(s, t []byte) bool {
|
||||||
|
for _, sb := range s {
|
||||||
|
if len(t) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
tb := t[0]
|
||||||
|
if tb < utf8.RuneSelf {
|
||||||
|
if sb != tb {
|
||||||
|
sbUpper := sb & caseMask
|
||||||
|
if 'A' <= sbUpper && sbUpper <= 'Z' {
|
||||||
|
if sbUpper != tb&caseMask {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
t = t[1:]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// sb is ASCII and t is not. t must be either kelvin
|
||||||
|
// sign or long s; sb must be s, S, k, or K.
|
||||||
|
tr, size := utf8.DecodeRune(t)
|
||||||
|
switch sb {
|
||||||
|
case 's', 'S':
|
||||||
|
if tr != smallLongEss {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case 'k', 'K':
|
||||||
|
if tr != kelvin {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
t = t[size:]
|
||||||
|
|
||||||
|
}
|
||||||
|
if len(t) > 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// asciiEqualFold is a specialization of bytes.EqualFold for use when
|
||||||
|
// s is all ASCII (but may contain non-letters) and contains no
|
||||||
|
// special-folding letters.
|
||||||
|
// See comments on foldFunc.
|
||||||
|
func asciiEqualFold(s, t []byte) bool {
|
||||||
|
if len(s) != len(t) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, sb := range s {
|
||||||
|
tb := t[i]
|
||||||
|
if sb == tb {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
|
||||||
|
if sb&caseMask != tb&caseMask {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
|
||||||
|
// use when s is all ASCII letters (no underscores, etc) and also
|
||||||
|
// doesn't contain 'k', 'K', 's', or 'S'.
|
||||||
|
// See comments on foldFunc.
|
||||||
|
func simpleLetterEqualFold(s, t []byte) bool {
|
||||||
|
if len(s) != len(t) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i, b := range s {
|
||||||
|
if b&caseMask != t[i]&caseMask {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// tagOptions is the string following a comma in a struct field's "json"
|
||||||
|
// tag, or the empty string. It does not include the leading comma.
|
||||||
|
type tagOptions string
|
||||||
|
|
||||||
|
// parseTag splits a struct field's json tag into its name and
|
||||||
|
// comma-separated options.
|
||||||
|
func parseTag(tag string) (string, tagOptions) {
|
||||||
|
if idx := strings.Index(tag, ","); idx != -1 {
|
||||||
|
return tag[:idx], tagOptions(tag[idx+1:])
|
||||||
|
}
|
||||||
|
return tag, tagOptions("")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains reports whether a comma-separated list of options
|
||||||
|
// contains a particular substr flag. substr must be surrounded by a
|
||||||
|
// string boundary or commas.
|
||||||
|
func (o tagOptions) Contains(optionName string) bool {
|
||||||
|
if len(o) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
s := string(o)
|
||||||
|
for s != "" {
|
||||||
|
var next string
|
||||||
|
i := strings.Index(s, ",")
|
||||||
|
if i >= 0 {
|
||||||
|
s, next = s[:i], s[i+1:]
|
||||||
|
}
|
||||||
|
if s == optionName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
s = next
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
277
vendor/github.com/ghodss/yaml/yaml.go
generated
vendored
Normal file
277
vendor/github.com/ghodss/yaml/yaml.go
generated
vendored
Normal file
|
|
@ -0,0 +1,277 @@
|
||||||
|
package yaml
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"gopkg.in/yaml.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Marshals the object into JSON then converts JSON to YAML and returns the
|
||||||
|
// YAML.
|
||||||
|
func Marshal(o interface{}) ([]byte, error) {
|
||||||
|
j, err := json.Marshal(o)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error marshaling into JSON: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
y, err := JSONToYAML(j)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return y, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Converts YAML to JSON then uses JSON to unmarshal into an object.
|
||||||
|
func Unmarshal(y []byte, o interface{}) error {
|
||||||
|
vo := reflect.ValueOf(o)
|
||||||
|
j, err := yamlToJSON(y, &vo)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error converting YAML to JSON: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = json.Unmarshal(j, o)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error unmarshaling JSON: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert JSON to YAML.
|
||||||
|
func JSONToYAML(j []byte) ([]byte, error) {
|
||||||
|
// Convert the JSON to an object.
|
||||||
|
var jsonObj interface{}
|
||||||
|
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
|
||||||
|
// Go JSON library doesn't try to pick the right number type (int, float,
|
||||||
|
// etc.) when unmarshalling to interface{}, it just picks float64
|
||||||
|
// universally. go-yaml does go through the effort of picking the right
|
||||||
|
// number type, so we can preserve number type throughout this process.
|
||||||
|
err := yaml.Unmarshal(j, &jsonObj)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal this object into YAML.
|
||||||
|
return yaml.Marshal(jsonObj)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
|
||||||
|
// this method should be a no-op.
|
||||||
|
//
|
||||||
|
// Things YAML can do that are not supported by JSON:
|
||||||
|
// * In YAML you can have binary and null keys in your maps. These are invalid
|
||||||
|
// in JSON. (int and float keys are converted to strings.)
|
||||||
|
// * Binary data in YAML with the !!binary tag is not supported. If you want to
|
||||||
|
// use binary data with this library, encode the data as base64 as usual but do
|
||||||
|
// not use the !!binary tag in your YAML. This will ensure the original base64
|
||||||
|
// encoded data makes it all the way through to the JSON.
|
||||||
|
func YAMLToJSON(y []byte) ([]byte, error) {
|
||||||
|
return yamlToJSON(y, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
|
||||||
|
// Convert the YAML to an object.
|
||||||
|
var yamlObj interface{}
|
||||||
|
err := yaml.Unmarshal(y, &yamlObj)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// YAML objects are not completely compatible with JSON objects (e.g. you
|
||||||
|
// can have non-string keys in YAML). So, convert the YAML-compatible object
|
||||||
|
// to a JSON-compatible object, failing with an error if irrecoverable
|
||||||
|
// incompatibilties happen along the way.
|
||||||
|
jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert this object to JSON and return the data.
|
||||||
|
return json.Marshal(jsonObj)
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
|
||||||
|
// interface). We pass decodingNull as false because we're not actually
|
||||||
|
// decoding into the value, we're just checking if the ultimate target is a
|
||||||
|
// string.
|
||||||
|
if jsonTarget != nil {
|
||||||
|
ju, tu, pv := indirect(*jsonTarget, false)
|
||||||
|
// We have a JSON or Text Umarshaler at this level, so we can't be trying
|
||||||
|
// to decode into a string.
|
||||||
|
if ju != nil || tu != nil {
|
||||||
|
jsonTarget = nil
|
||||||
|
} else {
|
||||||
|
jsonTarget = &pv
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If yamlObj is a number or a boolean, check if jsonTarget is a string -
|
||||||
|
// if so, coerce. Else return normal.
|
||||||
|
// If yamlObj is a map or array, find the field that each key is
|
||||||
|
// unmarshaling to, and when you recurse pass the reflect.Value for that
|
||||||
|
// field back into this function.
|
||||||
|
switch typedYAMLObj := yamlObj.(type) {
|
||||||
|
case map[interface{}]interface{}:
|
||||||
|
// JSON does not support arbitrary keys in a map, so we must convert
|
||||||
|
// these keys to strings.
|
||||||
|
//
|
||||||
|
// From my reading of go-yaml v2 (specifically the resolve function),
|
||||||
|
// keys can only have the types string, int, int64, float64, binary
|
||||||
|
// (unsupported), or null (unsupported).
|
||||||
|
strMap := make(map[string]interface{})
|
||||||
|
for k, v := range typedYAMLObj {
|
||||||
|
// Resolve the key to a string first.
|
||||||
|
var keyString string
|
||||||
|
switch typedKey := k.(type) {
|
||||||
|
case string:
|
||||||
|
keyString = typedKey
|
||||||
|
case int:
|
||||||
|
keyString = strconv.Itoa(typedKey)
|
||||||
|
case int64:
|
||||||
|
// go-yaml will only return an int64 as a key if the system
|
||||||
|
// architecture is 32-bit and the key's value is between 32-bit
|
||||||
|
// and 64-bit. Otherwise the key type will simply be int.
|
||||||
|
keyString = strconv.FormatInt(typedKey, 10)
|
||||||
|
case float64:
|
||||||
|
// Stolen from go-yaml to use the same conversion to string as
|
||||||
|
// the go-yaml library uses to convert float to string when
|
||||||
|
// Marshaling.
|
||||||
|
s := strconv.FormatFloat(typedKey, 'g', -1, 32)
|
||||||
|
switch s {
|
||||||
|
case "+Inf":
|
||||||
|
s = ".inf"
|
||||||
|
case "-Inf":
|
||||||
|
s = "-.inf"
|
||||||
|
case "NaN":
|
||||||
|
s = ".nan"
|
||||||
|
}
|
||||||
|
keyString = s
|
||||||
|
case bool:
|
||||||
|
if typedKey {
|
||||||
|
keyString = "true"
|
||||||
|
} else {
|
||||||
|
keyString = "false"
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
|
||||||
|
reflect.TypeOf(k), k, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// jsonTarget should be a struct or a map. If it's a struct, find
|
||||||
|
// the field it's going to map to and pass its reflect.Value. If
|
||||||
|
// it's a map, find the element type of the map and pass the
|
||||||
|
// reflect.Value created from that type. If it's neither, just pass
|
||||||
|
// nil - JSON conversion will error for us if it's a real issue.
|
||||||
|
if jsonTarget != nil {
|
||||||
|
t := *jsonTarget
|
||||||
|
if t.Kind() == reflect.Struct {
|
||||||
|
keyBytes := []byte(keyString)
|
||||||
|
// Find the field that the JSON library would use.
|
||||||
|
var f *field
|
||||||
|
fields := cachedTypeFields(t.Type())
|
||||||
|
for i := range fields {
|
||||||
|
ff := &fields[i]
|
||||||
|
if bytes.Equal(ff.nameBytes, keyBytes) {
|
||||||
|
f = ff
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Do case-insensitive comparison.
|
||||||
|
if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
|
||||||
|
f = ff
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if f != nil {
|
||||||
|
// Find the reflect.Value of the most preferential
|
||||||
|
// struct field.
|
||||||
|
jtf := t.Field(f.index[0])
|
||||||
|
strMap[keyString], err = convertToJSONableObject(v, &jtf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if t.Kind() == reflect.Map {
|
||||||
|
// Create a zero value of the map's element type to use as
|
||||||
|
// the JSON target.
|
||||||
|
jtv := reflect.Zero(t.Type().Elem())
|
||||||
|
strMap[keyString], err = convertToJSONableObject(v, &jtv)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
strMap[keyString], err = convertToJSONableObject(v, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strMap, nil
|
||||||
|
case []interface{}:
|
||||||
|
// We need to recurse into arrays in case there are any
|
||||||
|
// map[interface{}]interface{}'s inside and to convert any
|
||||||
|
// numbers to strings.
|
||||||
|
|
||||||
|
// If jsonTarget is a slice (which it really should be), find the
|
||||||
|
// thing it's going to map to. If it's not a slice, just pass nil
|
||||||
|
// - JSON conversion will error for us if it's a real issue.
|
||||||
|
var jsonSliceElemValue *reflect.Value
|
||||||
|
if jsonTarget != nil {
|
||||||
|
t := *jsonTarget
|
||||||
|
if t.Kind() == reflect.Slice {
|
||||||
|
// By default slices point to nil, but we need a reflect.Value
|
||||||
|
// pointing to a value of the slice type, so we create one here.
|
||||||
|
ev := reflect.Indirect(reflect.New(t.Type().Elem()))
|
||||||
|
jsonSliceElemValue = &ev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make and use a new array.
|
||||||
|
arr := make([]interface{}, len(typedYAMLObj))
|
||||||
|
for i, v := range typedYAMLObj {
|
||||||
|
arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return arr, nil
|
||||||
|
default:
|
||||||
|
// If the target type is a string and the YAML type is a number,
|
||||||
|
// convert the YAML type to a string.
|
||||||
|
if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
|
||||||
|
// Based on my reading of go-yaml, it may return int, int64,
|
||||||
|
// float64, or uint64.
|
||||||
|
var s string
|
||||||
|
switch typedVal := typedYAMLObj.(type) {
|
||||||
|
case int:
|
||||||
|
s = strconv.FormatInt(int64(typedVal), 10)
|
||||||
|
case int64:
|
||||||
|
s = strconv.FormatInt(typedVal, 10)
|
||||||
|
case float64:
|
||||||
|
s = strconv.FormatFloat(typedVal, 'g', -1, 32)
|
||||||
|
case uint64:
|
||||||
|
s = strconv.FormatUint(typedVal, 10)
|
||||||
|
case bool:
|
||||||
|
if typedVal {
|
||||||
|
s = "true"
|
||||||
|
} else {
|
||||||
|
s = "false"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(s) > 0 {
|
||||||
|
yamlObj = interface{}(s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return yamlObj, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
191
vendor/github.com/golang/glog/LICENSE
generated
vendored
191
vendor/github.com/golang/glog/LICENSE
generated
vendored
|
|
@ -1,191 +0,0 @@
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction, and
|
|
||||||
distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
|
||||||
owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
|
||||||
that control, are controlled by, or are under common control with that entity.
|
|
||||||
For the purposes of this definition, "control" means (i) the power, direct or
|
|
||||||
indirect, to cause the direction or management of such entity, whether by
|
|
||||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
|
||||||
permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications, including
|
|
||||||
but not limited to software source code, documentation source, and configuration
|
|
||||||
files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical transformation or
|
|
||||||
translation of a Source form, including but not limited to compiled object code,
|
|
||||||
generated documentation, and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
|
||||||
available under the License, as indicated by a copyright notice that is included
|
|
||||||
in or attached to the work (an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
|
||||||
is based on (or derived from) the Work and for which the editorial revisions,
|
|
||||||
annotations, elaborations, or other modifications represent, as a whole, an
|
|
||||||
original work of authorship. For the purposes of this License, Derivative Works
|
|
||||||
shall not include works that remain separable from, or merely link (or bind by
|
|
||||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including the original version
|
|
||||||
of the Work and any modifications or additions to that Work or Derivative Works
|
|
||||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
|
||||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
|
||||||
on behalf of the copyright owner. For the purposes of this definition,
|
|
||||||
"submitted" means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems, and
|
|
||||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
|
||||||
the purpose of discussing and improving the Work, but excluding communication
|
|
||||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
|
||||||
owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
|
||||||
of whom a Contribution has been received by Licensor and subsequently
|
|
||||||
incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License.
|
|
||||||
|
|
||||||
Subject to the terms and conditions of this License, each Contributor hereby
|
|
||||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
|
||||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
|
||||||
Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License.
|
|
||||||
|
|
||||||
Subject to the terms and conditions of this License, each Contributor hereby
|
|
||||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
|
||||||
irrevocable (except as stated in this section) patent license to make, have
|
|
||||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
|
||||||
such license applies only to those patent claims licensable by such Contributor
|
|
||||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
|
||||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
|
||||||
submitted. If You institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
|
||||||
Contribution incorporated within the Work constitutes direct or contributory
|
|
||||||
patent infringement, then any patent licenses granted to You under this License
|
|
||||||
for that Work shall terminate as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution.
|
|
||||||
|
|
||||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
|
||||||
in any medium, with or without modifications, and in Source or Object form,
|
|
||||||
provided that You meet the following conditions:
|
|
||||||
|
|
||||||
You must give any other recipients of the Work or Derivative Works a copy of
|
|
||||||
this License; and
|
|
||||||
You must cause any modified files to carry prominent notices stating that You
|
|
||||||
changed the files; and
|
|
||||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
|
||||||
all copyright, patent, trademark, and attribution notices from the Source form
|
|
||||||
of the Work, excluding those notices that do not pertain to any part of the
|
|
||||||
Derivative Works; and
|
|
||||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
|
||||||
Derivative Works that You distribute must include a readable copy of the
|
|
||||||
attribution notices contained within such NOTICE file, excluding those notices
|
|
||||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
|
||||||
following places: within a NOTICE text file distributed as part of the
|
|
||||||
Derivative Works; within the Source form or documentation, if provided along
|
|
||||||
with the Derivative Works; or, within a display generated by the Derivative
|
|
||||||
Works, if and wherever such third-party notices normally appear. The contents of
|
|
||||||
the NOTICE file are for informational purposes only and do not modify the
|
|
||||||
License. You may add Your own attribution notices within Derivative Works that
|
|
||||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
|
||||||
provided that such additional attribution notices cannot be construed as
|
|
||||||
modifying the License.
|
|
||||||
You may add Your own copyright statement to Your modifications and may provide
|
|
||||||
additional or different license terms and conditions for use, reproduction, or
|
|
||||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
|
||||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
|
||||||
with the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions.
|
|
||||||
|
|
||||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
|
||||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
|
||||||
conditions of this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
|
||||||
any separate license agreement you may have executed with Licensor regarding
|
|
||||||
such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks.
|
|
||||||
|
|
||||||
This License does not grant permission to use the trade names, trademarks,
|
|
||||||
service marks, or product names of the Licensor, except as required for
|
|
||||||
reasonable and customary use in describing the origin of the Work and
|
|
||||||
reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
|
||||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
|
||||||
including, without limitation, any warranties or conditions of TITLE,
|
|
||||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
|
||||||
solely responsible for determining the appropriateness of using or
|
|
||||||
redistributing the Work and assume any risks associated with Your exercise of
|
|
||||||
permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability.
|
|
||||||
|
|
||||||
In no event and under no legal theory, whether in tort (including negligence),
|
|
||||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
|
||||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special, incidental,
|
|
||||||
or consequential damages of any character arising as a result of this License or
|
|
||||||
out of the use or inability to use the Work (including but not limited to
|
|
||||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
|
||||||
any and all other commercial damages or losses), even if such Contributor has
|
|
||||||
been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability.
|
|
||||||
|
|
||||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
|
||||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
|
||||||
other liability obligations and/or rights consistent with this License. However,
|
|
||||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
|
||||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
|
||||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason of your
|
|
||||||
accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following boilerplate
|
|
||||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
|
||||||
identifying information. (Don't include the brackets!) The text should be
|
|
||||||
enclosed in the appropriate comment syntax for the file format. We also
|
|
||||||
recommend that a file or class name and description of purpose be included on
|
|
||||||
the same "printed page" as the copyright notice for easier identification within
|
|
||||||
third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
44
vendor/github.com/golang/glog/README
generated
vendored
44
vendor/github.com/golang/glog/README
generated
vendored
|
|
@ -1,44 +0,0 @@
|
||||||
glog
|
|
||||||
====
|
|
||||||
|
|
||||||
Leveled execution logs for Go.
|
|
||||||
|
|
||||||
This is an efficient pure Go implementation of leveled logs in the
|
|
||||||
manner of the open source C++ package
|
|
||||||
https://github.com/google/glog
|
|
||||||
|
|
||||||
By binding methods to booleans it is possible to use the log package
|
|
||||||
without paying the expense of evaluating the arguments to the log.
|
|
||||||
Through the -vmodule flag, the package also provides fine-grained
|
|
||||||
control over logging at the file level.
|
|
||||||
|
|
||||||
The comment from glog.go introduces the ideas:
|
|
||||||
|
|
||||||
Package glog implements logging analogous to the Google-internal
|
|
||||||
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
|
|
||||||
Error, Fatal, plus formatting variants such as Infof. It
|
|
||||||
also provides V-style logging controlled by the -v and
|
|
||||||
-vmodule=file=2 flags.
|
|
||||||
|
|
||||||
Basic examples:
|
|
||||||
|
|
||||||
glog.Info("Prepare to repel boarders")
|
|
||||||
|
|
||||||
glog.Fatalf("Initialization failed: %s", err)
|
|
||||||
|
|
||||||
See the documentation for the V function for an explanation
|
|
||||||
of these examples:
|
|
||||||
|
|
||||||
if glog.V(2) {
|
|
||||||
glog.Info("Starting transaction...")
|
|
||||||
}
|
|
||||||
|
|
||||||
glog.V(2).Infoln("Processed", nItems, "elements")
|
|
||||||
|
|
||||||
|
|
||||||
The repository contains an open source version of the log package
|
|
||||||
used inside Google. The master copy of the source lives inside
|
|
||||||
Google, not here. The code in this repo is for export only and is not itself
|
|
||||||
under development. Feature requests will be ignored.
|
|
||||||
|
|
||||||
Send bug reports to golang-nuts@googlegroups.com.
|
|
||||||
1180
vendor/github.com/golang/glog/glog.go
generated
vendored
1180
vendor/github.com/golang/glog/glog.go
generated
vendored
File diff suppressed because it is too large
Load diff
124
vendor/github.com/golang/glog/glog_file.go
generated
vendored
124
vendor/github.com/golang/glog/glog_file.go
generated
vendored
|
|
@ -1,124 +0,0 @@
|
||||||
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
|
|
||||||
//
|
|
||||||
// Copyright 2013 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// File I/O for logs.
|
|
||||||
|
|
||||||
package glog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/user"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MaxSize is the maximum size of a log file in bytes.
|
|
||||||
var MaxSize uint64 = 1024 * 1024 * 1800
|
|
||||||
|
|
||||||
// logDirs lists the candidate directories for new log files.
|
|
||||||
var logDirs []string
|
|
||||||
|
|
||||||
// If non-empty, overrides the choice of directory in which to write logs.
|
|
||||||
// See createLogDirs for the full list of possible destinations.
|
|
||||||
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
|
|
||||||
|
|
||||||
func createLogDirs() {
|
|
||||||
if *logDir != "" {
|
|
||||||
logDirs = append(logDirs, *logDir)
|
|
||||||
}
|
|
||||||
logDirs = append(logDirs, os.TempDir())
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
pid = os.Getpid()
|
|
||||||
program = filepath.Base(os.Args[0])
|
|
||||||
host = "unknownhost"
|
|
||||||
userName = "unknownuser"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
h, err := os.Hostname()
|
|
||||||
if err == nil {
|
|
||||||
host = shortHostname(h)
|
|
||||||
}
|
|
||||||
|
|
||||||
current, err := user.Current()
|
|
||||||
if err == nil {
|
|
||||||
userName = current.Username
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sanitize userName since it may contain filepath separators on Windows.
|
|
||||||
userName = strings.Replace(userName, `\`, "_", -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// shortHostname returns its argument, truncating at the first period.
|
|
||||||
// For instance, given "www.google.com" it returns "www".
|
|
||||||
func shortHostname(hostname string) string {
|
|
||||||
if i := strings.Index(hostname, "."); i >= 0 {
|
|
||||||
return hostname[:i]
|
|
||||||
}
|
|
||||||
return hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
// logName returns a new log file name containing tag, with start time t, and
|
|
||||||
// the name for the symlink for tag.
|
|
||||||
func logName(tag string, t time.Time) (name, link string) {
|
|
||||||
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
|
|
||||||
program,
|
|
||||||
host,
|
|
||||||
userName,
|
|
||||||
tag,
|
|
||||||
t.Year(),
|
|
||||||
t.Month(),
|
|
||||||
t.Day(),
|
|
||||||
t.Hour(),
|
|
||||||
t.Minute(),
|
|
||||||
t.Second(),
|
|
||||||
pid)
|
|
||||||
return name, program + "." + tag
|
|
||||||
}
|
|
||||||
|
|
||||||
var onceLogDirs sync.Once
|
|
||||||
|
|
||||||
// create creates a new log file and returns the file and its filename, which
|
|
||||||
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
|
|
||||||
// successfully, create also attempts to update the symlink for that tag, ignoring
|
|
||||||
// errors.
|
|
||||||
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
|
|
||||||
onceLogDirs.Do(createLogDirs)
|
|
||||||
if len(logDirs) == 0 {
|
|
||||||
return nil, "", errors.New("log: no log dirs")
|
|
||||||
}
|
|
||||||
name, link := logName(tag, t)
|
|
||||||
var lastErr error
|
|
||||||
for _, dir := range logDirs {
|
|
||||||
fname := filepath.Join(dir, name)
|
|
||||||
f, err := os.Create(fname)
|
|
||||||
if err == nil {
|
|
||||||
symlink := filepath.Join(dir, link)
|
|
||||||
os.Remove(symlink) // ignore err
|
|
||||||
os.Symlink(name, symlink) // ignore err
|
|
||||||
return f, fname, nil
|
|
||||||
}
|
|
||||||
lastErr = err
|
|
||||||
}
|
|
||||||
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
|
|
||||||
}
|
|
||||||
3
vendor/github.com/hpcloud/tail/.gitignore
generated
vendored
Normal file
3
vendor/github.com/hpcloud/tail/.gitignore
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
||||||
|
.test
|
||||||
|
.go
|
||||||
|
|
||||||
18
vendor/github.com/hpcloud/tail/.travis.yml
generated
vendored
Normal file
18
vendor/github.com/hpcloud/tail/.travis.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
language: go
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go test -race -v ./...
|
||||||
|
|
||||||
|
go:
|
||||||
|
- 1.4
|
||||||
|
- 1.5
|
||||||
|
- 1.6
|
||||||
|
- tip
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get gopkg.in/fsnotify.v1
|
||||||
|
- go get gopkg.in/tomb.v1
|
||||||
63
vendor/github.com/hpcloud/tail/CHANGES.md
generated
vendored
Normal file
63
vendor/github.com/hpcloud/tail/CHANGES.md
generated
vendored
Normal file
|
|
@ -0,0 +1,63 @@
|
||||||
|
# API v1 (gopkg.in/hpcloud/tail.v1)
|
||||||
|
|
||||||
|
## April, 2016
|
||||||
|
|
||||||
|
* Migrated to godep, as depman is not longer supported
|
||||||
|
* Introduced golang vendoring feature
|
||||||
|
* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file
|
||||||
|
|
||||||
|
## July, 2015
|
||||||
|
|
||||||
|
* Fix inotify watcher leak; remove `Cleanup` (#51)
|
||||||
|
|
||||||
|
# API v0 (gopkg.in/hpcloud/tail.v0)
|
||||||
|
|
||||||
|
## June, 2015
|
||||||
|
|
||||||
|
* Don't return partial lines (PR #40)
|
||||||
|
* Use stable version of fsnotify (#46)
|
||||||
|
|
||||||
|
## July, 2014
|
||||||
|
|
||||||
|
* Fix tail for Windows (PR #36)
|
||||||
|
|
||||||
|
## May, 2014
|
||||||
|
|
||||||
|
* Improved rate limiting using leaky bucket (PR #29)
|
||||||
|
* Fix odd line splitting (PR #30)
|
||||||
|
|
||||||
|
## Apr, 2014
|
||||||
|
|
||||||
|
* LimitRate now discards read buffer (PR #28)
|
||||||
|
* allow reading of longer lines if MaxLineSize is unset (PR #24)
|
||||||
|
* updated deps.json to latest fsnotify (441bbc86b1)
|
||||||
|
|
||||||
|
## Feb, 2014
|
||||||
|
|
||||||
|
* added `Config.Logger` to suppress library logging
|
||||||
|
|
||||||
|
## Nov, 2013
|
||||||
|
|
||||||
|
* add Cleanup to remove leaky inotify watches (PR #20)
|
||||||
|
|
||||||
|
## Aug, 2013
|
||||||
|
|
||||||
|
* redesigned Location field (PR #12)
|
||||||
|
* add tail.Tell (PR #14)
|
||||||
|
|
||||||
|
## July, 2013
|
||||||
|
|
||||||
|
* Rate limiting (PR #10)
|
||||||
|
|
||||||
|
## May, 2013
|
||||||
|
|
||||||
|
* Detect file deletions/renames in polling file watcher (PR #1)
|
||||||
|
* Detect file truncation
|
||||||
|
* Fix potential race condition when reopening the file (issue 5)
|
||||||
|
* Fix potential blocking of `tail.Stop` (issue 4)
|
||||||
|
* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
|
||||||
|
* Support Follow=false
|
||||||
|
|
||||||
|
## Feb, 2013
|
||||||
|
|
||||||
|
* Initial open source release
|
||||||
19
vendor/github.com/hpcloud/tail/Dockerfile
generated
vendored
Normal file
19
vendor/github.com/hpcloud/tail/Dockerfile
generated
vendored
Normal file
|
|
@ -0,0 +1,19 @@
|
||||||
|
FROM golang
|
||||||
|
|
||||||
|
RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
|
||||||
|
ADD . $GOPATH/src/github.com/hpcloud/tail/
|
||||||
|
|
||||||
|
# expecting to fetch dependencies successfully.
|
||||||
|
RUN go get -v github.com/hpcloud/tail
|
||||||
|
|
||||||
|
# expecting to run the test successfully.
|
||||||
|
RUN go test -v github.com/hpcloud/tail
|
||||||
|
|
||||||
|
# expecting to install successfully
|
||||||
|
RUN go install -v github.com/hpcloud/tail
|
||||||
|
RUN go install -v github.com/hpcloud/tail/cmd/gotail
|
||||||
|
|
||||||
|
RUN $GOPATH/bin/gotail -h || true
|
||||||
|
|
||||||
|
ENV PATH $GOPATH/bin:$PATH
|
||||||
|
CMD ["gotail"]
|
||||||
21
vendor/github.com/hpcloud/tail/LICENSE.txt
generated
vendored
Normal file
21
vendor/github.com/hpcloud/tail/LICENSE.txt
generated
vendored
Normal file
|
|
@ -0,0 +1,21 @@
|
||||||
|
# The MIT License (MIT)
|
||||||
|
|
||||||
|
# © Copyright 2015 Hewlett Packard Enterprise Development LP
|
||||||
|
Copyright (c) 2014 ActiveState
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
11
vendor/github.com/hpcloud/tail/Makefile
generated
vendored
Normal file
11
vendor/github.com/hpcloud/tail/Makefile
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
default: test
|
||||||
|
|
||||||
|
test: *.go
|
||||||
|
go test -v -race ./...
|
||||||
|
|
||||||
|
fmt:
|
||||||
|
gofmt -w .
|
||||||
|
|
||||||
|
# Run the test in an isolated environment.
|
||||||
|
fulltest:
|
||||||
|
docker build -t hpcloud/tail .
|
||||||
28
vendor/github.com/hpcloud/tail/README.md
generated
vendored
Normal file
28
vendor/github.com/hpcloud/tail/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,28 @@
|
||||||
|
[](https://travis-ci.org/hpcloud/tail)
|
||||||
|
[](https://ci.appveyor.com/project/HelionCloudFoundry/tail)
|
||||||
|
|
||||||
|
# Go package for tail-ing files
|
||||||
|
|
||||||
|
A Go package striving to emulate the features of the BSD `tail` program.
|
||||||
|
|
||||||
|
```Go
|
||||||
|
t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
|
||||||
|
for line := range t.Lines {
|
||||||
|
fmt.Println(line.Text)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
See [API documentation](http://godoc.org/github.com/hpcloud/tail).
|
||||||
|
|
||||||
|
## Log rotation
|
||||||
|
|
||||||
|
Tail comes with full support for truncation/move detection as it is
|
||||||
|
designed to work with log rotation tools.
|
||||||
|
|
||||||
|
## Installing
|
||||||
|
|
||||||
|
go get github.com/hpcloud/tail/...
|
||||||
|
|
||||||
|
## Windows support
|
||||||
|
|
||||||
|
This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.
|
||||||
11
vendor/github.com/hpcloud/tail/appveyor.yml
generated
vendored
Normal file
11
vendor/github.com/hpcloud/tail/appveyor.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
version: 0.{build}
|
||||||
|
skip_tags: true
|
||||||
|
cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
|
||||||
|
build_script:
|
||||||
|
- SET GOPATH=c:\workspace
|
||||||
|
- go test -v -race ./...
|
||||||
|
test: off
|
||||||
|
clone_folder: c:\workspace\src\github.com\hpcloud\tail
|
||||||
|
branches:
|
||||||
|
only:
|
||||||
|
- master
|
||||||
7
vendor/github.com/hpcloud/tail/ratelimiter/Licence
generated
vendored
Normal file
7
vendor/github.com/hpcloud/tail/ratelimiter/Licence
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
Copyright (C) 2013 99designs
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
97
vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
generated
vendored
Normal file
97
vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
generated
vendored
Normal file
|
|
@ -0,0 +1,97 @@
|
||||||
|
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
|
||||||
|
package ratelimiter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type LeakyBucket struct {
|
||||||
|
Size uint16
|
||||||
|
Fill float64
|
||||||
|
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
|
||||||
|
Lastupdate time.Time
|
||||||
|
Now func() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
|
||||||
|
bucket := LeakyBucket{
|
||||||
|
Size: size,
|
||||||
|
Fill: 0,
|
||||||
|
LeakInterval: leakInterval,
|
||||||
|
Now: time.Now,
|
||||||
|
Lastupdate: time.Now(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return &bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *LeakyBucket) updateFill() {
|
||||||
|
now := b.Now()
|
||||||
|
if b.Fill > 0 {
|
||||||
|
elapsed := now.Sub(b.Lastupdate)
|
||||||
|
|
||||||
|
b.Fill -= float64(elapsed) / float64(b.LeakInterval)
|
||||||
|
if b.Fill < 0 {
|
||||||
|
b.Fill = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Lastupdate = now
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *LeakyBucket) Pour(amount uint16) bool {
|
||||||
|
b.updateFill()
|
||||||
|
|
||||||
|
var newfill float64 = b.Fill + float64(amount)
|
||||||
|
|
||||||
|
if newfill > float64(b.Size) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
b.Fill = newfill
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// The time at which this bucket will be completely drained
|
||||||
|
func (b *LeakyBucket) DrainedAt() time.Time {
|
||||||
|
return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// The duration until this bucket is completely drained
|
||||||
|
func (b *LeakyBucket) TimeToDrain() time.Duration {
|
||||||
|
return b.DrainedAt().Sub(b.Now())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
|
||||||
|
return b.Now().Sub(b.Lastupdate)
|
||||||
|
}
|
||||||
|
|
||||||
|
type LeakyBucketSer struct {
|
||||||
|
Size uint16
|
||||||
|
Fill float64
|
||||||
|
LeakInterval time.Duration // time.Duration for 1 unit of size to leak
|
||||||
|
Lastupdate time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *LeakyBucket) Serialise() *LeakyBucketSer {
|
||||||
|
bucket := LeakyBucketSer{
|
||||||
|
Size: b.Size,
|
||||||
|
Fill: b.Fill,
|
||||||
|
LeakInterval: b.LeakInterval,
|
||||||
|
Lastupdate: b.Lastupdate,
|
||||||
|
}
|
||||||
|
|
||||||
|
return &bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
|
||||||
|
bucket := LeakyBucket{
|
||||||
|
Size: b.Size,
|
||||||
|
Fill: b.Fill,
|
||||||
|
LeakInterval: b.LeakInterval,
|
||||||
|
Lastupdate: b.Lastupdate,
|
||||||
|
Now: time.Now,
|
||||||
|
}
|
||||||
|
|
||||||
|
return &bucket
|
||||||
|
}
|
||||||
58
vendor/github.com/hpcloud/tail/ratelimiter/memory.go
generated
vendored
Normal file
58
vendor/github.com/hpcloud/tail/ratelimiter/memory.go
generated
vendored
Normal file
|
|
@ -0,0 +1,58 @@
|
||||||
|
package ratelimiter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const GC_SIZE int = 100
|
||||||
|
|
||||||
|
type Memory struct {
|
||||||
|
store map[string]LeakyBucket
|
||||||
|
lastGCCollected time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMemory() *Memory {
|
||||||
|
m := new(Memory)
|
||||||
|
m.store = make(map[string]LeakyBucket)
|
||||||
|
m.lastGCCollected = time.Now()
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
|
||||||
|
|
||||||
|
bucket, ok := m.store[key]
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("miss")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &bucket, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBucketFor stores bucket under key, first running GarbageCollect
// whenever the store has grown past GC_SIZE. It always returns nil; the
// error return exists to satisfy the Storage interface.
func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
	if len(m.store) > GC_SIZE {
		m.GarbageCollect()
	}

	m.store[key] = bucket

	return nil
}
|
||||||
|
|
||||||
|
func (m *Memory) GarbageCollect() {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
// rate limit GC to once per minute
|
||||||
|
if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() {
|
||||||
|
|
||||||
|
for key, bucket := range m.store {
|
||||||
|
// if the bucket is drained, then GC
|
||||||
|
if bucket.DrainedAt().Unix() > now.Unix() {
|
||||||
|
delete(m.store, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
m.lastGCCollected = now
|
||||||
|
}
|
||||||
|
}
|
||||||
6
vendor/github.com/hpcloud/tail/ratelimiter/storage.go
generated
vendored
Normal file
6
vendor/github.com/hpcloud/tail/ratelimiter/storage.go
generated
vendored
Normal file
|
|
@ -0,0 +1,6 @@
|
||||||
|
package ratelimiter
|
||||||
|
|
||||||
|
// Storage is the persistence interface for leaky buckets: anything that
// can fetch and store a LeakyBucket by string key (see Memory for an
// in-memory implementation).
type Storage interface {
	GetBucketFor(string) (*LeakyBucket, error)
	SetBucketFor(string, LeakyBucket) error
}
|
||||||
438
vendor/github.com/hpcloud/tail/tail.go
generated
vendored
Normal file
438
vendor/github.com/hpcloud/tail/tail.go
generated
vendored
Normal file
|
|
@ -0,0 +1,438 @@
|
||||||
|
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||||
|
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||||
|
|
||||||
|
package tail
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hpcloud/tail/ratelimiter"
|
||||||
|
"github.com/hpcloud/tail/util"
|
||||||
|
"github.com/hpcloud/tail/watch"
|
||||||
|
"gopkg.in/tomb.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// ErrStop is returned by watcher helpers (see waitForChanges) to
	// signal that tailing should now stop.
	ErrStop = fmt.Errorf("tail should now stop")
)

// Line is one line of tailed output as delivered on Tail.Lines.
type Line struct {
	Text string    // line content (trailing newline stripped by readLine)
	Time time.Time // when the line was read
	Err  error     // Error from tail
}
|
||||||
|
|
||||||
|
// NewLine returns a Line with present time.
|
||||||
|
func NewLine(text string) *Line {
|
||||||
|
return &Line{text, time.Now(), nil}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SeekInfo represents arguments to `os.Seek`.
type SeekInfo struct {
	Offset int64
	Whence int // os.SEEK_*
}

// logger is the subset of *log.Logger used by this package, declared as
// an interface so callers can plug in their own implementation via
// Config.Logger.
type logger interface {
	Fatal(v ...interface{})
	Fatalf(format string, v ...interface{})
	Fatalln(v ...interface{})
	Panic(v ...interface{})
	Panicf(format string, v ...interface{})
	Panicln(v ...interface{})
	Print(v ...interface{})
	Printf(format string, v ...interface{})
	Println(v ...interface{})
}
|
||||||
|
|
||||||
|
// Config is used to specify how a file must be tailed.
type Config struct {
	// File-specific
	Location    *SeekInfo // Seek to this location before tailing
	ReOpen      bool      // Reopen recreated files (tail -F)
	MustExist   bool      // Fail early if the file does not exist
	Poll        bool      // Poll for file changes instead of using inotify
	Pipe        bool      // Is a named pipe (mkfifo)
	RateLimiter *ratelimiter.LeakyBucket

	// Generic IO
	Follow      bool // Continue looking for new lines (tail -f)
	MaxLineSize int  // If non-zero, split longer lines into multiple lines

	// Logger, when nil, is set to tail.DefaultLogger
	// To disable logging: set field to tail.DiscardingLogger
	Logger logger
}

// Tail tails a single file, delivering lines on the Lines channel. Its
// lifecycle (Done/Kill/Dying/Wait) comes from the embedded tomb.
type Tail struct {
	Filename string     // path of the file being tailed
	Lines    chan *Line // output stream of tailed lines
	Config              // embedded tailing options

	file   *os.File      // currently open handle (nil when closed)
	reader *bufio.Reader // buffered reader over file

	watcher watch.FileWatcher  // change-detection backend (inotify or polling)
	changes *watch.FileChanges // active change subscription, nil until first wait

	tomb.Tomb // provides: Done, Kill, Dying

	lk sync.Mutex // guards reader (see readLine and Tell)
}
|
||||||
|
|
||||||
|
var (
	// DefaultLogger is used when Config.Logger == nil
	DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)

	// DiscardingLogger can be used to disable logging output
	DiscardingLogger = log.New(ioutil.Discard, "", 0)
)
|
||||||
|
|
||||||
|
// TailFile begins tailing the file. Output stream is made available
|
||||||
|
// via the `Tail.Lines` channel. To handle errors during tailing,
|
||||||
|
// invoke the `Wait` or `Err` method after finishing reading from the
|
||||||
|
// `Lines` channel.
|
||||||
|
func TailFile(filename string, config Config) (*Tail, error) {
|
||||||
|
if config.ReOpen && !config.Follow {
|
||||||
|
util.Fatal("cannot set ReOpen without Follow.")
|
||||||
|
}
|
||||||
|
|
||||||
|
t := &Tail{
|
||||||
|
Filename: filename,
|
||||||
|
Lines: make(chan *Line),
|
||||||
|
Config: config,
|
||||||
|
}
|
||||||
|
|
||||||
|
// when Logger was not specified in config, use default logger
|
||||||
|
if t.Logger == nil {
|
||||||
|
t.Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.Poll {
|
||||||
|
t.watcher = watch.NewPollingFileWatcher(filename)
|
||||||
|
} else {
|
||||||
|
t.watcher = watch.NewInotifyFileWatcher(filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.MustExist {
|
||||||
|
var err error
|
||||||
|
t.file, err = OpenFile(t.Filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
go t.tailFileSync()
|
||||||
|
|
||||||
|
return t, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tell returns the file's current read position, like stdio's ftell().
// But this value is not very accurate: the read loop may already have
// consumed one line into the Lines channel, so the reported offset can
// be one line ahead of what the caller has seen.
// When no file is open it returns the zero values.
func (tail *Tail) Tell() (offset int64, err error) {
	if tail.file == nil {
		return
	}
	// NOTE(review): os.SEEK_CUR is the pre-Go-1.7 spelling of
	// io.SeekCurrent; kept as-is.
	offset, err = tail.file.Seek(0, os.SEEK_CUR)
	if err != nil {
		return
	}

	tail.lk.Lock()
	defer tail.lk.Unlock()
	if tail.reader == nil {
		return
	}

	// back out whatever sits unread in the buffered reader so the
	// offset reflects what has actually been consumed
	offset -= int64(tail.reader.Buffered())
	return
}
|
||||||
|
|
||||||
|
// Stop stops the tailing activity.
func (tail *Tail) Stop() error {
	tail.Kill(nil)
	return tail.Wait()
}

// StopAtEOF stops tailing as soon as the end of the file is reached
// (the read loop keeps draining lines until it next observes EOF).
func (tail *Tail) StopAtEOF() error {
	tail.Kill(errStopAtEOF)
	return tail.Wait()
}

// errStopAtEOF is the sentinel passed to Kill by StopAtEOF; tailFileSync
// checks for it to keep reading until EOF instead of exiting at once.
var errStopAtEOF = errors.New("tail: stop at eof")
|
||||||
|
|
||||||
|
// close shuts down the output stream and releases the file handle; run
// from tailFileSync's defer when the read loop exits.
func (tail *Tail) close() {
	close(tail.Lines)
	tail.closeFile()
}

// closeFile closes the current file handle, if any, and marks it nil so
// a subsequent close or reopen is safe.
func (tail *Tail) closeFile() {
	if tail.file != nil {
		tail.file.Close()
		tail.file = nil
	}
}
|
||||||
|
|
||||||
|
// reopen closes the current handle and (re)opens tail.Filename,
// blocking on the watcher while the file does not exist yet. It returns
// tomb.ErrDying when the tail is shut down while waiting, and wraps any
// other open/creation-detection error.
func (tail *Tail) reopen() error {
	tail.closeFile()
	for {
		var err error
		tail.file, err = OpenFile(tail.Filename)
		if err != nil {
			if os.IsNotExist(err) {
				tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
				// block until the file is created (or we are dying)
				if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
					if err == tomb.ErrDying {
						return err
					}
					return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
				}
				// file should exist now; retry the open
				continue
			}
			// any other open error is fatal
			return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
		}
		break
	}
	return nil
}
|
||||||
|
|
||||||
|
func (tail *Tail) readLine() (string, error) {
|
||||||
|
tail.lk.Lock()
|
||||||
|
line, err := tail.reader.ReadString('\n')
|
||||||
|
tail.lk.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
// Note ReadString "returns the data read before the error" in
|
||||||
|
// case of an error, including EOF, so we return it as is. The
|
||||||
|
// caller is expected to process it if err is EOF.
|
||||||
|
return line, err
|
||||||
|
}
|
||||||
|
|
||||||
|
line = strings.TrimRight(line, "\n")
|
||||||
|
|
||||||
|
return line, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tail *Tail) tailFileSync() {
|
||||||
|
defer tail.Done()
|
||||||
|
defer tail.close()
|
||||||
|
|
||||||
|
if !tail.MustExist {
|
||||||
|
// deferred first open.
|
||||||
|
err := tail.reopen()
|
||||||
|
if err != nil {
|
||||||
|
if err != tomb.ErrDying {
|
||||||
|
tail.Kill(err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Seek to requested location on first open of the file.
|
||||||
|
if tail.Location != nil {
|
||||||
|
_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
|
||||||
|
tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
|
||||||
|
if err != nil {
|
||||||
|
tail.Killf("Seek error on %s: %s", tail.Filename, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tail.openReader()
|
||||||
|
|
||||||
|
var offset int64 = 0
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// Read line by line.
|
||||||
|
for {
|
||||||
|
// do not seek in named pipes
|
||||||
|
if !tail.Pipe {
|
||||||
|
// grab the position in case we need to back up in the event of a half-line
|
||||||
|
offset, err = tail.Tell()
|
||||||
|
if err != nil {
|
||||||
|
tail.Kill(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
line, err := tail.readLine()
|
||||||
|
|
||||||
|
// Process `line` even if err is EOF.
|
||||||
|
if err == nil {
|
||||||
|
cooloff := !tail.sendLine(line)
|
||||||
|
if cooloff {
|
||||||
|
// Wait a second before seeking till the end of
|
||||||
|
// file when rate limit is reached.
|
||||||
|
msg := fmt.Sprintf(
|
||||||
|
"Too much log activity; waiting a second " +
|
||||||
|
"before resuming tailing")
|
||||||
|
tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)}
|
||||||
|
select {
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
case <-tail.Dying():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := tail.seekEnd(); err != nil {
|
||||||
|
tail.Kill(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if err == io.EOF {
|
||||||
|
if !tail.Follow {
|
||||||
|
if line != "" {
|
||||||
|
tail.sendLine(line)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if tail.Follow && line != "" {
|
||||||
|
// this has the potential to never return the last line if
|
||||||
|
// it's not followed by a newline; seems a fair trade here
|
||||||
|
err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
|
||||||
|
if err != nil {
|
||||||
|
tail.Kill(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// When EOF is reached, wait for more data to become
|
||||||
|
// available. Wait strategy is based on the `tail.watcher`
|
||||||
|
// implementation (inotify or polling).
|
||||||
|
err := tail.waitForChanges()
|
||||||
|
if err != nil {
|
||||||
|
if err != ErrStop {
|
||||||
|
tail.Kill(err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// non-EOF error
|
||||||
|
tail.Killf("Error reading %s: %s", tail.Filename, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-tail.Dying():
|
||||||
|
if tail.Err() == errStopAtEOF {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// waitForChanges waits until the file has been appended, deleted,
// moved or truncated. When moved or deleted - the file will be
// reopened if ReOpen is true. Truncated files are always reopened.
// It returns ErrStop when tailing should end (file gone with ReOpen
// false, or the tomb is dying).
func (tail *Tail) waitForChanges() error {
	if tail.changes == nil {
		// first wait: subscribe to change events from the current offset
		pos, err := tail.file.Seek(0, os.SEEK_CUR)
		if err != nil {
			return err
		}
		tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
		if err != nil {
			return err
		}
	}

	select {
	case <-tail.changes.Modified:
		return nil
	case <-tail.changes.Deleted:
		// subscription is dead; drop it so the next call resubscribes
		tail.changes = nil
		if tail.ReOpen {
			// XXX: we must not log from a library.
			tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
			if err := tail.reopen(); err != nil {
				return err
			}
			tail.Logger.Printf("Successfully reopened %s", tail.Filename)
			tail.openReader()
			return nil
		} else {
			tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
			return ErrStop
		}
	case <-tail.changes.Truncated:
		// Always reopen truncated files (Follow is true)
		tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
		if err := tail.reopen(); err != nil {
			return err
		}
		tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
		tail.openReader()
		return nil
	case <-tail.Dying():
		return ErrStop
	}
	// dead code: every select case above ends in a return
	panic("unreachable")
}
|
||||||
|
|
||||||
|
func (tail *Tail) openReader() {
|
||||||
|
if tail.MaxLineSize > 0 {
|
||||||
|
// add 2 to account for newline characters
|
||||||
|
tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
|
||||||
|
} else {
|
||||||
|
tail.reader = bufio.NewReader(tail.file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tail *Tail) seekEnd() error {
|
||||||
|
return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tail *Tail) seekTo(pos SeekInfo) error {
|
||||||
|
_, err := tail.file.Seek(pos.Offset, pos.Whence)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
|
||||||
|
}
|
||||||
|
// Reset the read buffer whenever the file is re-seek'ed
|
||||||
|
tail.reader.Reset(tail.file)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendLine sends the line(s) to Lines channel, splitting longer lines
|
||||||
|
// if necessary. Return false if rate limit is reached.
|
||||||
|
func (tail *Tail) sendLine(line string) bool {
|
||||||
|
now := time.Now()
|
||||||
|
lines := []string{line}
|
||||||
|
|
||||||
|
// Split longer lines
|
||||||
|
if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
|
||||||
|
lines = util.PartitionString(line, tail.MaxLineSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
tail.Lines <- &Line{line, now, nil}
|
||||||
|
}
|
||||||
|
|
||||||
|
if tail.Config.RateLimiter != nil {
|
||||||
|
ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
|
||||||
|
if !ok {
|
||||||
|
tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
|
||||||
|
tail.Filename)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cleanup removes inotify watches added by the tail package. This function is
// meant to be invoked from a process's exit handler. Linux kernel may not
// automatically remove inotify watches after the process exits.
// It delegates to the watch package's shared tracker.
func (tail *Tail) Cleanup() {
	watch.Cleanup(tail.Filename)
}
|
||||||
11
vendor/github.com/hpcloud/tail/tail_posix.go
generated
vendored
Normal file
11
vendor/github.com/hpcloud/tail/tail_posix.go
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
// +build linux darwin freebsd netbsd openbsd
|
||||||
|
|
||||||
|
package tail
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpenFile opens the named file read-only; on POSIX systems this is
// plain os.Open.
func OpenFile(name string) (file *os.File, err error) {
	file, err = os.Open(name)
	return
}
|
||||||
12
vendor/github.com/hpcloud/tail/tail_windows.go
generated
vendored
Normal file
12
vendor/github.com/hpcloud/tail/tail_windows.go
generated
vendored
Normal file
|
|
@ -0,0 +1,12 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package tail
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/hpcloud/tail/winfile"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpenFile opens the named file read-only using the winfile helper
// instead of os.Open. NOTE(review): presumably winfile opens with share
// flags so the tailed file can still be renamed/deleted by other
// processes on Windows — confirm against the winfile package.
func OpenFile(name string) (file *os.File, err error) {
	return winfile.OpenFile(name, os.O_RDONLY, 0)
}
|
||||||
48
vendor/github.com/hpcloud/tail/util/util.go
generated
vendored
Normal file
48
vendor/github.com/hpcloud/tail/util/util.go
generated
vendored
Normal file
|
|
@ -0,0 +1,48 @@
|
||||||
|
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||||
|
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||||
|
|
||||||
|
package util
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"runtime/debug"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Logger wraps *log.Logger so the package can hang helpers off a shared
// logger value.
type Logger struct {
	*log.Logger
}

// LOGGER is the package-wide logger used by Fatal; it writes to stderr
// with standard flags.
var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
|
||||||
|
|
||||||
|
// Fatal is like panic except it displays only the current goroutine's stack.
// It logs the formatted message plus a stack trace via LOGGER, then
// terminates the process with exit status 1 (it never returns).
func Fatal(format string, v ...interface{}) {
	// https://github.com/hpcloud/log/blob/master/log.go#L45
	LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
	os.Exit(1)
}
|
||||||
|
|
||||||
|
// PartitionString partitions the string into chunks of given size,
// with the last chunk of variable size. An empty input yields a single
// empty chunk. chunkSize must be positive; it panics otherwise.
func PartitionString(s string, chunkSize int) []string {
	if chunkSize <= 0 {
		panic("invalid chunkSize")
	}
	length := len(s)
	parts := make([]string, 0, 1+length/chunkSize)
	start := 0
	for {
		end := start + chunkSize
		if end > length {
			end = length
		}
		parts = append(parts, s[start:end])
		// do-while shape so "" still produces one (empty) chunk
		if end == length {
			return parts
		}
		start = end
	}
}
|
||||||
36
vendor/github.com/hpcloud/tail/watch/filechanges.go
generated
vendored
Normal file
36
vendor/github.com/hpcloud/tail/watch/filechanges.go
generated
vendored
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
package watch
|
||||||
|
|
||||||
|
// FileChanges bundles the three kinds of change notifications a file
// watcher can emit. Signals on each channel are coalesced (see
// sendOnlyIfEmpty).
type FileChanges struct {
	Modified  chan bool // Channel to get notified of modifications
	Truncated chan bool // Channel to get notified of truncations
	Deleted   chan bool // Channel to get notified of deletions/renames
}
|
||||||
|
|
||||||
|
func NewFileChanges() *FileChanges {
|
||||||
|
return &FileChanges{
|
||||||
|
make(chan bool), make(chan bool), make(chan bool)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotifyModified signals (non-blocking) that the file was modified.
func (fc *FileChanges) NotifyModified() {
	sendOnlyIfEmpty(fc.Modified)
}

// NotifyTruncated signals (non-blocking) that the file was truncated.
func (fc *FileChanges) NotifyTruncated() {
	sendOnlyIfEmpty(fc.Truncated)
}

// NotifyDeleted signals (non-blocking) that the file was deleted or renamed.
func (fc *FileChanges) NotifyDeleted() {
	sendOnlyIfEmpty(fc.Deleted)
}
|
||||||
|
|
||||||
|
// sendOnlyIfEmpty sends on a bool channel only if the channel has no
// backlog to be read by other goroutines. This concurrency pattern
// can be used to notify other goroutines if and only if they are
// looking for it (i.e., subsequent notifications can be compressed
// into one).
func sendOnlyIfEmpty(ch chan bool) {
	select {
	case ch <- true:
		// delivered: a receiver was ready
	default:
		// nobody ready — drop; this signal coalesces with the next one
	}
}
|
||||||
128
vendor/github.com/hpcloud/tail/watch/inotify.go
generated
vendored
Normal file
128
vendor/github.com/hpcloud/tail/watch/inotify.go
generated
vendored
Normal file
|
|
@ -0,0 +1,128 @@
|
||||||
|
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||||
|
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||||
|
|
||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/hpcloud/tail/util"
|
||||||
|
|
||||||
|
"gopkg.in/fsnotify.v1"
|
||||||
|
"gopkg.in/tomb.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InotifyFileWatcher uses inotify to monitor file changes.
type InotifyFileWatcher struct {
	Filename string // cleaned path of the watched file
	Size     int64  // last observed size; a shrink is reported as truncation
}
|
||||||
|
|
||||||
|
func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
|
||||||
|
fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
|
||||||
|
return fw
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockUntilExists blocks until the watched file appears on disk or the
// tomb starts dying. It registers a creation watch, then double-checks
// with os.Stat to close the race where the file was created before the
// watch was in place.
func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
	err := WatchCreate(fw.Filename)
	if err != nil {
		return err
	}
	defer RemoveWatchCreate(fw.Filename)

	// Do a real check now as the file might have been created before
	// calling `WatchFlags` above.
	if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
		// file exists, or stat returned an error.
		return err
	}

	events := Events(fw.Filename)

	for {
		select {
		case evt, ok := <-events:
			if !ok {
				return fmt.Errorf("inotify watcher has been closed")
			}
			// compare absolute paths: the event may be for any file,
			// so only return when it names ours
			evtName, err := filepath.Abs(evt.Name)
			if err != nil {
				return err
			}
			fwFilename, err := filepath.Abs(fw.Filename)
			if err != nil {
				return err
			}
			if evtName == fwFilename {
				return nil
			}
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
	// dead code: the for/select above never falls through
	panic("unreachable")
}
|
||||||
|
|
||||||
|
// ChangeEvents subscribes to inotify events for the watched file and
// returns a FileChanges whose channels are fed by a background
// goroutine until the file is removed/renamed or the tomb dies.
// pos is the current read offset, seeded into fw.Size so a later
// smaller size is reported as truncation.
func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
	err := Watch(fw.Filename)
	if err != nil {
		return nil, err
	}

	changes := NewFileChanges()
	fw.Size = pos

	go func() {
		defer RemoveWatch(fw.Filename)

		events := Events(fw.Filename)

		for {
			prevSize := fw.Size

			var evt fsnotify.Event
			var ok bool

			select {
			case evt, ok = <-events:
				if !ok {
					// event channel closed: subscription torn down
					return
				}
			case <-t.Dying():
				return
			}

			switch {
			case evt.Op&fsnotify.Remove == fsnotify.Remove:
				fallthrough

			case evt.Op&fsnotify.Rename == fsnotify.Rename:
				changes.NotifyDeleted()
				return

			case evt.Op&fsnotify.Write == fsnotify.Write:
				fi, err := os.Stat(fw.Filename)
				if err != nil {
					if os.IsNotExist(err) {
						changes.NotifyDeleted()
						return
					}
					// XXX: report this error back to the user
					util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
				}
				fw.Size = fi.Size()

				// a shrinking size means the file was truncated
				if prevSize > 0 && prevSize > fw.Size {
					changes.NotifyTruncated()
				} else {
					changes.NotifyModified()
				}
				// NOTE(review): this assignment is redundant — prevSize
				// is re-read from fw.Size at the top of every iteration.
				prevSize = fw.Size
			}
		}
	}()

	return changes, nil
}
|
||||||
260
vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
generated
vendored
Normal file
260
vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
generated
vendored
Normal file
|
|
@ -0,0 +1,260 @@
|
||||||
|
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||||
|
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||||
|
|
||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/hpcloud/tail/util"
|
||||||
|
|
||||||
|
"gopkg.in/fsnotify.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// InotifyTracker multiplexes one shared fsnotify.Watcher across every
// watched file: per-file event channels, reference counts, and request
// channels serviced by the run goroutine.
type InotifyTracker struct {
	mux       sync.Mutex                     // guards the maps below
	watcher   *fsnotify.Watcher              // single underlying fsnotify watcher
	chans     map[string]chan fsnotify.Event // per-file event delivery channels
	done      map[string]chan bool           // closed on remove to abort pending sends
	watchNums map[string]int                 // reference count of watchers per path
	watch     chan *watchInfo                // add-watch requests for run()
	remove    chan *watchInfo                // remove-watch requests for run()
	error     chan error                     // replies to add-watch requests
}

// watchInfo is one add/remove request: the file name plus the op.
// op == fsnotify.Create marks a "wait for creation" watch, which is
// registered on the parent directory.
type watchInfo struct {
	op    fsnotify.Op
	fname string
}
|
||||||
|
|
||||||
|
func (this *watchInfo) isCreate() bool {
|
||||||
|
return this.op == fsnotify.Create
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
	shared *InotifyTracker

	// these are used to ensure the shared InotifyTracker is run exactly once
	once  = sync.Once{}
	goRun = func() {
		shared = &InotifyTracker{
			mux:       sync.Mutex{},
			chans:     make(map[string]chan fsnotify.Event),
			done:      make(map[string]chan bool),
			watchNums: make(map[string]int),
			watch:     make(chan *watchInfo),
			remove:    make(chan *watchInfo),
			error:     make(chan error),
		}
		go shared.run()
	}

	// logger used for reporting watcher-channel errors in run()
	logger = log.New(os.Stderr, "", log.LstdFlags)
)
|
||||||
|
|
||||||
|
// Watch signals the run goroutine to begin watching the input filename
func Watch(fname string) error {
	return watch(&watchInfo{
		fname: fname,
	})
}

// WatchCreate signals the run goroutine to begin watching for creation
// of the input filename (the watch goes on the parent directory).
// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate
func WatchCreate(fname string) error {
	return watch(&watchInfo{
		op:    fsnotify.Create,
		fname: fname,
	})
}
|
||||||
|
|
||||||
|
// watch hands winfo to the shared tracker's run goroutine and returns
// the tracker's reply, starting the shared tracker on first use.
func watch(winfo *watchInfo) error {
	// start running the shared InotifyTracker if not already running
	once.Do(goRun)

	winfo.fname = filepath.Clean(winfo.fname)
	shared.watch <- winfo
	return <-shared.error
}
|
||||||
|
|
||||||
|
// RemoveWatch signals the run goroutine to remove the watch for the input filename
func RemoveWatch(fname string) {
	remove(&watchInfo{
		fname: fname,
	})
}

// RemoveWatchCreate signals the run goroutine to remove the creation
// watch for the input filename (pairs with WatchCreate).
func RemoveWatchCreate(fname string) {
	remove(&watchInfo{
		op:    fsnotify.Create,
		fname: fname,
	})
}
|
||||||
|
|
||||||
|
// remove tears down the subscription described by winfo: it closes the
// done channel (unblocking any pending sendEvent), drops the per-path
// reference count, unsubscribes from inotify when this was the last
// watcher of the path, and finally notifies the run goroutine.
func remove(winfo *watchInfo) {
	// start running the shared InotifyTracker if not already running
	once.Do(goRun)

	winfo.fname = filepath.Clean(winfo.fname)
	shared.mux.Lock()
	done := shared.done[winfo.fname]
	if done != nil {
		delete(shared.done, winfo.fname)
		close(done)
	}

	fname := winfo.fname
	if winfo.isCreate() {
		// Watch for new files to be created in the parent directory.
		fname = filepath.Dir(fname)
	}
	shared.watchNums[fname]--
	watchNum := shared.watchNums[fname]
	if watchNum == 0 {
		delete(shared.watchNums, fname)
	}
	shared.mux.Unlock()

	// If we were the last ones to watch this file, unsubscribe from inotify.
	// This needs to happen after releasing the lock because fsnotify waits
	// synchronously for the kernel to acknowledge the removal of the watch
	// for this file, which causes us to deadlock if we still held the lock.
	if watchNum == 0 {
		shared.watcher.Remove(fname)
	}
	shared.remove <- winfo
}
|
||||||
|
|
||||||
|
// Events returns a channel to which FileEvents corresponding to the input filename
// will be sent. This channel will be closed when removeWatch is called on this
// filename.
// NOTE(review): unlike watch/remove, this does not run once.Do(goRun);
// it assumes Watch/WatchCreate was called first (shared is nil
// otherwise) — confirm callers always do so.
func Events(fname string) <-chan fsnotify.Event {
	shared.mux.Lock()
	defer shared.mux.Unlock()

	return shared.chans[fname]
}

// Cleanup removes the watch for the input filename if necessary.
func Cleanup(fname string) {
	RemoveWatch(fname)
}
|
||||||
|
|
||||||
|
// addWatch subscribes winfo's path with the fsnotify watcher (for
// creation watches, the parent directory), creating the per-file
// event/done channels on first use and reference-counting repeat
// subscriptions so the kernel watch is added only once per path.
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
	shared.mux.Lock()
	defer shared.mux.Unlock()

	if shared.chans[winfo.fname] == nil {
		shared.chans[winfo.fname] = make(chan fsnotify.Event)
		shared.done[winfo.fname] = make(chan bool)
	}

	fname := winfo.fname
	if winfo.isCreate() {
		// Watch for new files to be created in the parent directory.
		fname = filepath.Dir(fname)
	}

	// already in inotify watch: just bump the reference counts
	if shared.watchNums[fname] > 0 {
		shared.watchNums[fname]++
		if winfo.isCreate() {
			shared.watchNums[winfo.fname]++
		}
		return nil
	}

	err := shared.watcher.Add(fname)
	if err == nil {
		// only count the watch if the kernel accepted it
		shared.watchNums[fname]++
		if winfo.isCreate() {
			shared.watchNums[winfo.fname]++
		}
	}
	return err
}
|
||||||
|
|
||||||
|
// removeWatch closes the per-file events channel for winfo and, for
// creation watches, drops the file's own reference count. (The path's
// kernel-level watch and its count are handled by remove, which runs
// before this via the run goroutine.)
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
	shared.mux.Lock()
	defer shared.mux.Unlock()

	ch := shared.chans[winfo.fname]
	if ch == nil {
		// nothing subscribed under this name
		return
	}

	delete(shared.chans, winfo.fname)
	close(ch)

	if !winfo.isCreate() {
		return
	}

	shared.watchNums[winfo.fname]--
	if shared.watchNums[winfo.fname] == 0 {
		delete(shared.watchNums, winfo.fname)
	}
}
|
||||||
|
|
||||||
|
// sendEvent sends the input event to the appropriate Tail.
// The done channel aborts the send when the subscription is being torn
// down (see remove), so this cannot block forever on a gone receiver.
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
	name := filepath.Clean(event.Name)

	shared.mux.Lock()
	ch := shared.chans[name]
	done := shared.done[name]
	shared.mux.Unlock()

	if ch != nil && done != nil {
		select {
		case ch <- event:
		case <-done:
		}
	}
}
|
||||||
|
|
||||||
|
// run starts the goroutine in which the shared struct reads events from its
// Watcher's Event channel and sends the events to the appropriate Tail.
// It serialises add/remove requests and event dispatch in a single
// loop, and exits when the fsnotify channels are closed.
func (shared *InotifyTracker) run() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		util.Fatal("failed to create Watcher")
	}
	shared.watcher = watcher

	for {
		select {
		case winfo := <-shared.watch:
			// reply to the caller blocked in watch()
			shared.error <- shared.addWatch(winfo)

		case winfo := <-shared.remove:
			shared.removeWatch(winfo)

		case event, open := <-shared.watcher.Events:
			if !open {
				return
			}
			shared.sendEvent(event)

		case err, open := <-shared.watcher.Errors:
			if !open {
				return
			} else if err != nil {
				// suppress EINTR noise; log anything else
				sysErr, ok := err.(*os.SyscallError)
				if !ok || sysErr.Err != syscall.EINTR {
					logger.Printf("Error in Watcher Error channel: %s", err)
				}
			}
		}
	}
}
|
||||||
118
vendor/github.com/hpcloud/tail/watch/polling.go
generated
vendored
Normal file
118
vendor/github.com/hpcloud/tail/watch/polling.go
generated
vendored
Normal file
|
|
@ -0,0 +1,118 @@
|
||||||
|
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||||
|
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||||
|
|
||||||
|
package watch
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hpcloud/tail/util"
|
||||||
|
"gopkg.in/tomb.v1"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PollingFileWatcher polls the file for changes.
|
||||||
|
type PollingFileWatcher struct {
|
||||||
|
Filename string
|
||||||
|
Size int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPollingFileWatcher(filename string) *PollingFileWatcher {
|
||||||
|
fw := &PollingFileWatcher{filename, 0}
|
||||||
|
return fw
|
||||||
|
}
|
||||||
|
|
||||||
|
var POLL_DURATION time.Duration
|
||||||
|
|
||||||
|
func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
|
||||||
|
for {
|
||||||
|
if _, err := os.Stat(fw.Filename); err == nil {
|
||||||
|
return nil
|
||||||
|
} else if !os.IsNotExist(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
select {
|
||||||
|
case <-time.After(POLL_DURATION):
|
||||||
|
continue
|
||||||
|
case <-t.Dying():
|
||||||
|
return tomb.ErrDying
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
|
||||||
|
origFi, err := os.Stat(fw.Filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
changes := NewFileChanges()
|
||||||
|
var prevModTime time.Time
|
||||||
|
|
||||||
|
// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
|
||||||
|
// the fatal (below) with tomb's Kill.
|
||||||
|
|
||||||
|
fw.Size = pos
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
prevSize := fw.Size
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-t.Dying():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(POLL_DURATION)
|
||||||
|
fi, err := os.Stat(fw.Filename)
|
||||||
|
if err != nil {
|
||||||
|
// Windows cannot delete a file if a handle is still open (tail keeps one open)
|
||||||
|
// so it gives access denied to anything trying to read it until all handles are released.
|
||||||
|
if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
|
||||||
|
// File does not exist (has been deleted).
|
||||||
|
changes.NotifyDeleted()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX: report this error back to the user
|
||||||
|
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// File got moved/renamed?
|
||||||
|
if !os.SameFile(origFi, fi) {
|
||||||
|
changes.NotifyDeleted()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// File got truncated?
|
||||||
|
fw.Size = fi.Size()
|
||||||
|
if prevSize > 0 && prevSize > fw.Size {
|
||||||
|
changes.NotifyTruncated()
|
||||||
|
prevSize = fw.Size
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// File got bigger?
|
||||||
|
if prevSize > 0 && prevSize < fw.Size {
|
||||||
|
changes.NotifyModified()
|
||||||
|
prevSize = fw.Size
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
prevSize = fw.Size
|
||||||
|
|
||||||
|
// File was appended to (changed)?
|
||||||
|
modTime := fi.ModTime()
|
||||||
|
if modTime != prevModTime {
|
||||||
|
prevModTime = modTime
|
||||||
|
changes.NotifyModified()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return changes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
POLL_DURATION = 250 * time.Millisecond
|
||||||
|
}
|
||||||
20
vendor/github.com/hpcloud/tail/watch/watch.go
generated
vendored
Normal file
20
vendor/github.com/hpcloud/tail/watch/watch.go
generated
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||||
|
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||||
|
|
||||||
|
package watch
|
||||||
|
|
||||||
|
import "gopkg.in/tomb.v1"
|
||||||
|
|
||||||
|
// FileWatcher monitors file-level events.
|
||||||
|
type FileWatcher interface {
|
||||||
|
// BlockUntilExists blocks until the file comes into existence.
|
||||||
|
BlockUntilExists(*tomb.Tomb) error
|
||||||
|
|
||||||
|
// ChangeEvents reports on changes to a file, be it modification,
|
||||||
|
// deletion, renames or truncations. Returned FileChanges group of
|
||||||
|
// channels will be closed, thus become unusable, after a deletion
|
||||||
|
// or truncation event.
|
||||||
|
// In order to properly report truncations, ChangeEvents requires
|
||||||
|
// the caller to pass their current offset in the file.
|
||||||
|
ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
|
||||||
|
}
|
||||||
92
vendor/github.com/hpcloud/tail/winfile/winfile.go
generated
vendored
Normal file
92
vendor/github.com/hpcloud/tail/winfile/winfile.go
generated
vendored
Normal file
|
|
@ -0,0 +1,92 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winfile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// issue also described here
|
||||||
|
//https://codereview.appspot.com/8203043/
|
||||||
|
|
||||||
|
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
|
||||||
|
func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
|
||||||
|
if len(path) == 0 {
|
||||||
|
return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
|
||||||
|
}
|
||||||
|
pathp, err := syscall.UTF16PtrFromString(path)
|
||||||
|
if err != nil {
|
||||||
|
return syscall.InvalidHandle, err
|
||||||
|
}
|
||||||
|
var access uint32
|
||||||
|
switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
|
||||||
|
case syscall.O_RDONLY:
|
||||||
|
access = syscall.GENERIC_READ
|
||||||
|
case syscall.O_WRONLY:
|
||||||
|
access = syscall.GENERIC_WRITE
|
||||||
|
case syscall.O_RDWR:
|
||||||
|
access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
|
||||||
|
}
|
||||||
|
if mode&syscall.O_CREAT != 0 {
|
||||||
|
access |= syscall.GENERIC_WRITE
|
||||||
|
}
|
||||||
|
if mode&syscall.O_APPEND != 0 {
|
||||||
|
access &^= syscall.GENERIC_WRITE
|
||||||
|
access |= syscall.FILE_APPEND_DATA
|
||||||
|
}
|
||||||
|
sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
|
||||||
|
var sa *syscall.SecurityAttributes
|
||||||
|
if mode&syscall.O_CLOEXEC == 0 {
|
||||||
|
sa = makeInheritSa()
|
||||||
|
}
|
||||||
|
var createmode uint32
|
||||||
|
switch {
|
||||||
|
case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
|
||||||
|
createmode = syscall.CREATE_NEW
|
||||||
|
case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
|
||||||
|
createmode = syscall.CREATE_ALWAYS
|
||||||
|
case mode&syscall.O_CREAT == syscall.O_CREAT:
|
||||||
|
createmode = syscall.OPEN_ALWAYS
|
||||||
|
case mode&syscall.O_TRUNC == syscall.O_TRUNC:
|
||||||
|
createmode = syscall.TRUNCATE_EXISTING
|
||||||
|
default:
|
||||||
|
createmode = syscall.OPEN_EXISTING
|
||||||
|
}
|
||||||
|
h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
|
||||||
|
return h, e
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
|
||||||
|
func makeInheritSa() *syscall.SecurityAttributes {
|
||||||
|
var sa syscall.SecurityAttributes
|
||||||
|
sa.Length = uint32(unsafe.Sizeof(sa))
|
||||||
|
sa.InheritHandle = 1
|
||||||
|
return &sa
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
|
||||||
|
func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
|
||||||
|
r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
|
||||||
|
if e != nil {
|
||||||
|
return nil, e
|
||||||
|
}
|
||||||
|
return os.NewFile(uintptr(r), name), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
|
||||||
|
func syscallMode(i os.FileMode) (o uint32) {
|
||||||
|
o |= uint32(i.Perm())
|
||||||
|
if i&os.ModeSetuid != 0 {
|
||||||
|
o |= syscall.S_ISUID
|
||||||
|
}
|
||||||
|
if i&os.ModeSetgid != 0 {
|
||||||
|
o |= syscall.S_ISGID
|
||||||
|
}
|
||||||
|
if i&os.ModeSticky != 0 {
|
||||||
|
o |= syscall.S_ISVTX
|
||||||
|
}
|
||||||
|
// No mapping for Go's ModeTemporary (plan9 only).
|
||||||
|
return
|
||||||
|
}
|
||||||
7
vendor/github.com/onsi/ginkgo/.gitignore
generated
vendored
Normal file
7
vendor/github.com/onsi/ginkgo/.gitignore
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
||||||
|
.DS_Store
|
||||||
|
TODO
|
||||||
|
tmp/**/*
|
||||||
|
*.coverprofile
|
||||||
|
.vscode
|
||||||
|
.idea/
|
||||||
|
*.log
|
||||||
18
vendor/github.com/onsi/ginkgo/.travis.yml
generated
vendored
Normal file
18
vendor/github.com/onsi/ginkgo/.travis.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.10.x
|
||||||
|
- 1.11.x
|
||||||
|
- 1.12.x
|
||||||
|
- tip
|
||||||
|
|
||||||
|
# allow internal package imports, necessary for forked repositories
|
||||||
|
go_import_path: github.com/onsi/ginkgo
|
||||||
|
|
||||||
|
install:
|
||||||
|
- go get -v -t ./...
|
||||||
|
- go get golang.org/x/tools/cmd/cover
|
||||||
|
- go get github.com/onsi/gomega
|
||||||
|
- go install github.com/onsi/ginkgo/ginkgo
|
||||||
|
- export PATH=$PATH:$HOME/gopath/bin
|
||||||
|
|
||||||
|
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
|
||||||
258
vendor/github.com/onsi/ginkgo/CHANGELOG.md
generated
vendored
Normal file
258
vendor/github.com/onsi/ginkgo/CHANGELOG.md
generated
vendored
Normal file
|
|
@ -0,0 +1,258 @@
|
||||||
|
## 1.10.3
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
|
||||||
|
- Add integration test [d90e0dc]
|
||||||
|
- Fix coverage files combining [e5dde8c]
|
||||||
|
- A new CLI option: -ginkgo.reportFile <file path> (#601) [034fd25]
|
||||||
|
|
||||||
|
## 1.10.2
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- speed up table entry generateIt() (#609) [5049dc5]
|
||||||
|
- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
|
||||||
|
|
||||||
|
## 1.10.1
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- stack backtrace: fix skipping (#600) [2a4c0bd]
|
||||||
|
|
||||||
|
## 1.10.0
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- stack backtrace: fix alignment and skipping [66915d6]
|
||||||
|
- fix typo in documentation [8f97b93]
|
||||||
|
|
||||||
|
## 1.9.0
|
||||||
|
|
||||||
|
### Features
|
||||||
|
- Option to print output into report, when tests have passed [0545415]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- Fixed typos in comments [0ecbc58]
|
||||||
|
- gofmt code [a7f8bfb]
|
||||||
|
- Simplify code [7454d00]
|
||||||
|
- Simplify concatenation, incrementation and function assignment [4825557]
|
||||||
|
- Avoid unnecessary conversions [9d9403c]
|
||||||
|
- JUnit: include more detailed information about panic [19cca4b]
|
||||||
|
- Print help to stdout when the user asks for help [4cb7441]
|
||||||
|
|
||||||
|
|
||||||
|
## 1.8.0
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
- allow config of the vet flag for `go test` (#562) [3cd45fa]
|
||||||
|
- Support projects using go modules [d56ee76]
|
||||||
|
|
||||||
|
### Fixes and Minor Improvements
|
||||||
|
- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
|
||||||
|
- Optimize focus to avoid allocations [f493786]
|
||||||
|
- Ensure generated test file names are underscored [505cc35]
|
||||||
|
|
||||||
|
## 1.7.0
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
- Add JustAfterEach (#484) [0d4f080]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- Correctly round suite time in junit reporter [2445fc1]
|
||||||
|
- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
|
||||||
|
|
||||||
|
## 1.6.0
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
- add --debug flag to emit node output to files (#499) [39febac]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- fix: for `go vet` to pass [69338ec]
|
||||||
|
- docs: fix for contributing instructions [7004cb1]
|
||||||
|
- consolidate and streamline contribution docs (#494) [d848015]
|
||||||
|
- Make generated Junit file compatable with "Maven Surefire" (#488) [e51bee6]
|
||||||
|
- all: gofmt [000d317]
|
||||||
|
- Increase eventually timeout to 30s [c73579c]
|
||||||
|
- Clarify asynchronous test behaviour [294d8f4]
|
||||||
|
- Travis badge should only show master [26d2143]
|
||||||
|
|
||||||
|
## 1.5.0 5/10/2018
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
|
||||||
|
- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
|
||||||
|
- Re-add noisySkippings flag [652e15c]
|
||||||
|
- Allow coverage to be displayed for focused specs (#367) [11459a8]
|
||||||
|
- Handle -outputdir flag (#364) [228e3a8]
|
||||||
|
- Handle -coverprofile flag (#355) [43392d5]
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. (#365) [8382b23]
|
||||||
|
- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
|
||||||
|
- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
|
||||||
|
- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
|
||||||
|
- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c]
|
||||||
|
- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
|
||||||
|
- Add an extra new line after reporting spec run completion for test2json [874520d]
|
||||||
|
- added name name field to junit reported testsuite [ae61c63]
|
||||||
|
- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
|
||||||
|
- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
|
||||||
|
- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
|
||||||
|
- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
|
||||||
|
- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
|
||||||
|
- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
|
||||||
|
- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
|
||||||
|
- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
|
||||||
|
- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
|
||||||
|
- Replace GOPATH in Environment [4b883f0]
|
||||||
|
|
||||||
|
|
||||||
|
## 1.4.0 7/16/2017
|
||||||
|
|
||||||
|
- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
|
||||||
|
- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
|
||||||
|
- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
|
||||||
|
- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
|
||||||
|
- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
|
||||||
|
- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
|
||||||
|
|
||||||
|
## 1.3.0 3/28/2017
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
|
||||||
|
- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
|
||||||
|
- `Skip(message)` can be used to skip the current test.
|
||||||
|
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
|
||||||
|
- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
|
||||||
|
- Support for retrying flaky tests with `--flakeAttempts`
|
||||||
|
- `ginkgo ./...` now recurses as you'd expect
|
||||||
|
- Added `Specify` a synonym for `It`
|
||||||
|
- Support colorise on Windows
|
||||||
|
- Broader support for various go compilation flags in the `ginkgo` CLI
|
||||||
|
|
||||||
|
Bug Fixes:
|
||||||
|
|
||||||
|
- Ginkgo tests now fail when you `panic(nil)` (#167)
|
||||||
|
|
||||||
|
## 1.2.0 5/31/2015
|
||||||
|
|
||||||
|
Improvements
|
||||||
|
|
||||||
|
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
|
||||||
|
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
|
||||||
|
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
|
||||||
|
|
||||||
|
## 1.2.0-beta
|
||||||
|
|
||||||
|
Ginkgo now requires Go 1.4+
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
|
||||||
|
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
|
||||||
|
- Improved focus behavior. Now, this:
|
||||||
|
|
||||||
|
```golang
|
||||||
|
FDescribe("Some describe", func() {
|
||||||
|
It("A", func() {})
|
||||||
|
|
||||||
|
FIt("B", func() {})
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
|
||||||
|
- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
|
||||||
|
- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
|
||||||
|
- Improved output when an error occurs in a setup or teardown block.
|
||||||
|
- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
|
||||||
|
- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
|
||||||
|
- Add support for precompiled tests:
|
||||||
|
- `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
|
||||||
|
- The compiled `package.test` file can be run directly. This runs the tests in series.
|
||||||
|
- To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
|
||||||
|
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
|
||||||
|
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
|
||||||
|
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
|
||||||
|
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
|
||||||
|
- `ginkgo -notify` now works on Linux
|
||||||
|
|
||||||
|
Bug Fixes:
|
||||||
|
|
||||||
|
- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
|
||||||
|
- Fix tempfile leak when running in parallel
|
||||||
|
- Fix incorrect failure message when a panic occurs during a parallel test run
|
||||||
|
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
|
||||||
|
- Be more consistent about handling SIGTERM as well as SIGINT
|
||||||
|
- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
|
||||||
|
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
|
||||||
|
|
||||||
|
## 1.1.0 (8/2/2014)
|
||||||
|
|
||||||
|
No changes, just dropping the beta.
|
||||||
|
|
||||||
|
## 1.1.0-beta (7/22/2014)
|
||||||
|
New Features:
|
||||||
|
|
||||||
|
- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
|
||||||
|
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
|
||||||
|
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
|
||||||
|
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
|
||||||
|
- `ginkgo --failFast` aborts the test suite after the first failure.
|
||||||
|
- `ginkgo generate file_1 file_2` can take multiple file arguments.
|
||||||
|
- Ginkgo now summarizes any spec failures that occured at the end of the test run.
|
||||||
|
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
|
||||||
|
- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
|
||||||
|
- `ginkgo --untilItFails` no longer recompiles between attempts.
|
||||||
|
- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
|
||||||
|
|
||||||
|
Bug Fixes:
|
||||||
|
|
||||||
|
- `ginkgo boostrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
|
||||||
|
- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
|
||||||
|
|
||||||
|
## 1.0.0 (5/24/2014)
|
||||||
|
New Features:
|
||||||
|
|
||||||
|
- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
|
||||||
|
|
||||||
|
Improvements:
|
||||||
|
|
||||||
|
- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor.
|
||||||
|
- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
|
||||||
|
|
||||||
|
Bug Fixes:
|
||||||
|
|
||||||
|
- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s.
|
||||||
|
- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
|
||||||
|
- Fix all remaining race conditions in Ginkgo's test suite.
|
||||||
|
|
||||||
|
## 1.0.0-beta (4/14/2014)
|
||||||
|
Breaking changes:
|
||||||
|
|
||||||
|
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
|
||||||
|
- Modified the Reporter interface
|
||||||
|
- `watch` is now a subcommand, not a flag.
|
||||||
|
|
||||||
|
DSL changes:
|
||||||
|
|
||||||
|
- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
|
||||||
|
- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
|
||||||
|
- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
|
||||||
|
|
||||||
|
CLI changes:
|
||||||
|
|
||||||
|
- `watch` is now a subcommand, not a flag
|
||||||
|
- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
|
||||||
|
- Additional arguments can be passed to specs. Pass them after the `--` separator
|
||||||
|
- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
|
||||||
|
- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
|
||||||
|
|
||||||
|
Misc:
|
||||||
|
|
||||||
|
- Start using semantic versioning
|
||||||
|
- Start maintaining changelog
|
||||||
|
|
||||||
|
Major refactor:
|
||||||
|
|
||||||
|
- Pull out Ginkgo's internal to `internal`
|
||||||
|
- Rename `example` everywhere to `spec`
|
||||||
|
- Much more!
|
||||||
33
vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
generated
vendored
Normal file
33
vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
generated
vendored
Normal file
|
|
@ -0,0 +1,33 @@
|
||||||
|
# Contributing to Ginkgo
|
||||||
|
|
||||||
|
Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
|
||||||
|
|
||||||
|
- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
|
||||||
|
- Ensure adequate test coverage:
|
||||||
|
- When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
|
||||||
|
- When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
|
||||||
|
- Update the documentation. Ginko uses `godoc` comments and documentation on the `gh-pages` branch.
|
||||||
|
If relevant, please submit a docs PR to that branch alongside your code PR.
|
||||||
|
|
||||||
|
Thanks for supporting Ginkgo!
|
||||||
|
|
||||||
|
## Setup
|
||||||
|
|
||||||
|
Fork the repo, then:
|
||||||
|
|
||||||
|
```
|
||||||
|
go get github.com/onsi/ginkgo
|
||||||
|
go get github.com/onsi/gomega/...
|
||||||
|
cd $GOPATH/src/github.com/onsi/ginkgo
|
||||||
|
git remote add fork git@github.com:<NAME>/ginkgo.git
|
||||||
|
|
||||||
|
ginkgo -r -p # ensure tests are green
|
||||||
|
go vet ./... # ensure linter is happy
|
||||||
|
```
|
||||||
|
|
||||||
|
## Making the PR
|
||||||
|
- go to a new branch `git checkout -b my-feature`
|
||||||
|
- make your changes
|
||||||
|
- run tests and linter again (see above)
|
||||||
|
- `git push fork`
|
||||||
|
- open PR 🎉
|
||||||
20
vendor/github.com/onsi/ginkgo/LICENSE
generated
vendored
Normal file
20
vendor/github.com/onsi/ginkgo/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
Copyright (c) 2013-2014 Onsi Fakhouri
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
121
vendor/github.com/onsi/ginkgo/README.md
generated
vendored
Normal file
121
vendor/github.com/onsi/ginkgo/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,121 @@
|
||||||
|

|
||||||
|
|
||||||
|
[](https://travis-ci.org/onsi/ginkgo)
|
||||||
|
|
||||||
|
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
|
||||||
|
|
||||||
|
If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
|
||||||
|
|
||||||
|
## Feature List
|
||||||
|
|
||||||
|
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
|
||||||
|
|
||||||
|
- Structure your BDD-style tests expressively:
|
||||||
|
- Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
|
||||||
|
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
|
||||||
|
- [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
|
||||||
|
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
|
||||||
|
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
|
||||||
|
|
||||||
|
- A comprehensive test runner that lets you:
|
||||||
|
- Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
|
||||||
|
- [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
|
||||||
|
- Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
|
||||||
|
- Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
|
||||||
|
|
||||||
|
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
|
||||||
|
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
|
||||||
|
- `ginkgo -cover` runs your tests using Go's code coverage tool
|
||||||
|
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
|
||||||
|
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
|
||||||
|
- `ginkgo -r` runs all tests suites under the current directory
|
||||||
|
- `ginkgo -v` prints out identifying information for each tests just before it runs
|
||||||
|
|
||||||
|
And much more: run `ginkgo help` for details!
|
||||||
|
|
||||||
|
The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
|
||||||
|
|
||||||
|
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
|
||||||
|
|
||||||
|
- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
|
||||||
|
|
||||||
|
- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
|
||||||
|
|
||||||
|
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
|
||||||
|
|
||||||
|
- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
|
||||||
|
|
||||||
|
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
|
||||||
|
|
||||||
|
- A modular architecture that lets you easily:
|
||||||
|
- Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
|
||||||
|
- [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
|
||||||
|
|
||||||
|
## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
|
||||||
|
|
||||||
|
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
|
||||||
|
|
||||||
|
## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework
|
||||||
|
|
||||||
|
Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
|
||||||
|
|
||||||
|
## Set Me Up!
|
||||||
|
|
||||||
|
You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
|
||||||
|
go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
|
||||||
|
go get -u github.com/onsi/gomega/... # fetches the matcher library
|
||||||
|
|
||||||
|
cd path/to/package/you/want/to/test
|
||||||
|
|
||||||
|
ginkgo bootstrap # set up a new ginkgo suite
|
||||||
|
ginkgo generate # will create a sample test file. edit this file and add your tests then...
|
||||||
|
|
||||||
|
go test # to run your tests
|
||||||
|
|
||||||
|
ginkgo # also runs your tests
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
## I'm new to Go: What are my testing options?
|
||||||
|
|
||||||
|
Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
|
||||||
|
|
||||||
|
With that said, it's great to know what your options are :)
|
||||||
|
|
||||||
|
### What Go gives you out of the box
|
||||||
|
|
||||||
|
Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
|
||||||
|
|
||||||
|
### Matcher libraries for Go's XUnit style tests
|
||||||
|
|
||||||
|
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
|
||||||
|
|
||||||
|
- [testify](https://github.com/stretchr/testify)
|
||||||
|
- [gocheck](http://labix.org/gocheck)
|
||||||
|
|
||||||
|
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
|
||||||
|
|
||||||
|
### BDD style testing frameworks
|
||||||
|
|
||||||
|
There are a handful of BDD-style testing frameworks written for Go. Here are a few:
|
||||||
|
|
||||||
|
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
|
||||||
|
- [GoConvey](https://github.com/smartystreets/goconvey)
|
||||||
|
- [Goblin](https://github.com/franela/goblin)
|
||||||
|
- [Mao](https://github.com/azer/mao)
|
||||||
|
- [Zen](https://github.com/pranavraja/zen)
|
||||||
|
|
||||||
|
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
|
||||||
|
|
||||||
|
Go explore!
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Ginkgo is MIT-Licensed
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
See [CONTRIBUTING.md](CONTRIBUTING.md)
|
||||||
14
vendor/github.com/onsi/ginkgo/RELEASING.md
generated
vendored
Normal file
14
vendor/github.com/onsi/ginkgo/RELEASING.md
generated
vendored
Normal file
|
|
@ -0,0 +1,14 @@
|
||||||
|
A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
|
||||||
|
|
||||||
|
1. Ensure CHANGELOG.md is up to date.
|
||||||
|
- Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
|
||||||
|
- Categorize the changes into
|
||||||
|
- Breaking Changes (requires a major version)
|
||||||
|
- New Features (minor version)
|
||||||
|
- Fixes (fix version)
|
||||||
|
- Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
|
||||||
|
1. Update `VERSION` in `config/config.go`
|
||||||
|
1. Create a commit with the version number as the commit message (e.g. `v1.3.0`)
|
||||||
|
1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`)
|
||||||
|
1. Push the commit and tag to GitHub
|
||||||
|
1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
|
||||||
213
vendor/github.com/onsi/ginkgo/config/config.go
generated
vendored
Normal file
213
vendor/github.com/onsi/ginkgo/config/config.go
generated
vendored
Normal file
|
|
@ -0,0 +1,213 @@
|
||||||
|
/*
|
||||||
|
Ginkgo accepts a number of configuration options.
|
||||||
|
|
||||||
|
These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
|
||||||
|
|
||||||
|
You can also learn more via
|
||||||
|
|
||||||
|
ginkgo help
|
||||||
|
|
||||||
|
or (I kid you not):
|
||||||
|
|
||||||
|
go test -asdf
|
||||||
|
*/
|
||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
const VERSION = "1.10.3"
|
||||||
|
|
||||||
|
type GinkgoConfigType struct {
|
||||||
|
RandomSeed int64
|
||||||
|
RandomizeAllSpecs bool
|
||||||
|
RegexScansFilePath bool
|
||||||
|
FocusString string
|
||||||
|
SkipString string
|
||||||
|
SkipMeasurements bool
|
||||||
|
FailOnPending bool
|
||||||
|
FailFast bool
|
||||||
|
FlakeAttempts int
|
||||||
|
EmitSpecProgress bool
|
||||||
|
DryRun bool
|
||||||
|
DebugParallel bool
|
||||||
|
|
||||||
|
ParallelNode int
|
||||||
|
ParallelTotal int
|
||||||
|
SyncHost string
|
||||||
|
StreamHost string
|
||||||
|
}
|
||||||
|
|
||||||
|
var GinkgoConfig = GinkgoConfigType{}
|
||||||
|
|
||||||
|
type DefaultReporterConfigType struct {
|
||||||
|
NoColor bool
|
||||||
|
SlowSpecThreshold float64
|
||||||
|
NoisyPendings bool
|
||||||
|
NoisySkippings bool
|
||||||
|
Succinct bool
|
||||||
|
Verbose bool
|
||||||
|
FullTrace bool
|
||||||
|
ReportPassed bool
|
||||||
|
ReportFile string
|
||||||
|
}
|
||||||
|
|
||||||
|
var DefaultReporterConfig = DefaultReporterConfigType{}
|
||||||
|
|
||||||
|
func processPrefix(prefix string) string {
|
||||||
|
if prefix != "" {
|
||||||
|
prefix += "."
|
||||||
|
}
|
||||||
|
return prefix
|
||||||
|
}
|
||||||
|
|
||||||
|
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
|
||||||
|
prefix = processPrefix(prefix)
|
||||||
|
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
|
||||||
|
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
|
||||||
|
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
|
||||||
|
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
|
||||||
|
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
|
||||||
|
|
||||||
|
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
|
||||||
|
|
||||||
|
flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
|
||||||
|
flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
|
||||||
|
|
||||||
|
flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")
|
||||||
|
|
||||||
|
flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")
|
||||||
|
|
||||||
|
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
|
||||||
|
|
||||||
|
flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")
|
||||||
|
|
||||||
|
if includeParallelFlags {
|
||||||
|
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
|
||||||
|
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
|
||||||
|
flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
|
||||||
|
flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
|
||||||
|
}
|
||||||
|
|
||||||
|
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
|
||||||
|
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
|
||||||
|
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
|
||||||
|
flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
|
||||||
|
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
|
||||||
|
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
|
||||||
|
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
|
||||||
|
flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
|
||||||
|
flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
|
||||||
|
prefix = processPrefix(prefix)
|
||||||
|
result := make([]string, 0)
|
||||||
|
|
||||||
|
if ginkgo.RandomSeed > 0 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.RandomizeAllSpecs {
|
||||||
|
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.SkipMeasurements {
|
||||||
|
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.FailOnPending {
|
||||||
|
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.FailFast {
|
||||||
|
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.DryRun {
|
||||||
|
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.FocusString != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.SkipString != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.FlakeAttempts > 1 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.EmitSpecProgress {
|
||||||
|
result = append(result, fmt.Sprintf("--%sprogress", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.DebugParallel {
|
||||||
|
result = append(result, fmt.Sprintf("--%sdebug", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.ParallelNode != 0 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.ParallelTotal != 0 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.StreamHost != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.SyncHost != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
|
||||||
|
}
|
||||||
|
|
||||||
|
if ginkgo.RegexScansFilePath {
|
||||||
|
result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.NoColor {
|
||||||
|
result = append(result, fmt.Sprintf("--%snoColor", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.SlowSpecThreshold > 0 {
|
||||||
|
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reporter.NoisyPendings {
|
||||||
|
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !reporter.NoisySkippings {
|
||||||
|
result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.Verbose {
|
||||||
|
result = append(result, fmt.Sprintf("--%sv", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.Succinct {
|
||||||
|
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.FullTrace {
|
||||||
|
result = append(result, fmt.Sprintf("--%strace", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.ReportPassed {
|
||||||
|
result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
|
||||||
|
}
|
||||||
|
|
||||||
|
if reporter.ReportFile != "" {
|
||||||
|
result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
624
vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
generated
vendored
Normal file
624
vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
generated
vendored
Normal file
|
|
@ -0,0 +1,624 @@
|
||||||
|
/*
|
||||||
|
Ginkgo is a BDD-style testing framework for Golang
|
||||||
|
|
||||||
|
The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
|
||||||
|
|
||||||
|
Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
|
||||||
|
|
||||||
|
Ginkgo on Github: http://github.com/onsi/ginkgo
|
||||||
|
|
||||||
|
Ginkgo is MIT-Licensed
|
||||||
|
*/
|
||||||
|
package ginkgo
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/internal/codelocation"
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/internal/remote"
|
||||||
|
"github.com/onsi/ginkgo/internal/suite"
|
||||||
|
"github.com/onsi/ginkgo/internal/testingtproxy"
|
||||||
|
"github.com/onsi/ginkgo/internal/writer"
|
||||||
|
"github.com/onsi/ginkgo/reporters"
|
||||||
|
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||||
|
colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const GINKGO_VERSION = config.VERSION
|
||||||
|
const GINKGO_PANIC = `
|
||||||
|
Your test failed.
|
||||||
|
Ginkgo panics to prevent subsequent assertions from running.
|
||||||
|
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||||
|
|
||||||
|
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||||
|
To circumvent this, you should call
|
||||||
|
|
||||||
|
defer GinkgoRecover()
|
||||||
|
|
||||||
|
at the top of the goroutine that caused this panic.
|
||||||
|
`
|
||||||
|
const defaultTimeout = 1
|
||||||
|
|
||||||
|
var globalSuite *suite.Suite
|
||||||
|
var globalFailer *failer.Failer
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
config.Flags(flag.CommandLine, "ginkgo", true)
|
||||||
|
GinkgoWriter = writer.New(os.Stdout)
|
||||||
|
globalFailer = failer.New()
|
||||||
|
globalSuite = suite.New(globalFailer)
|
||||||
|
}
|
||||||
|
|
||||||
|
//GinkgoWriter implements an io.Writer
|
||||||
|
//When running in verbose mode any writes to GinkgoWriter will be immediately printed
|
||||||
|
//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
|
||||||
|
//only if the current test fails.
|
||||||
|
var GinkgoWriter io.Writer
|
||||||
|
|
||||||
|
//The interface by which Ginkgo receives *testing.T
|
||||||
|
type GinkgoTestingT interface {
|
||||||
|
Fail()
|
||||||
|
}
|
||||||
|
|
||||||
|
//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
|
||||||
|
//useful for seeding your own pseudorandom number generators (PRNGs) to ensure
|
||||||
|
//consistent executions from run to run, where your tests contain variability (for
|
||||||
|
//example, when selecting random test data).
|
||||||
|
func GinkgoRandomSeed() int64 {
|
||||||
|
return config.GinkgoConfig.RandomSeed
|
||||||
|
}
|
||||||
|
|
||||||
|
//GinkgoParallelNode returns the parallel node number for the current ginkgo process
|
||||||
|
//The node number is 1-indexed
|
||||||
|
func GinkgoParallelNode() int {
|
||||||
|
return config.GinkgoConfig.ParallelNode
|
||||||
|
}
|
||||||
|
|
||||||
|
//Some matcher libraries or legacy codebases require a *testing.T
|
||||||
|
//GinkgoT implements an interface analogous to *testing.T and can be used if
|
||||||
|
//the library in question accepts *testing.T through an interface
|
||||||
|
//
|
||||||
|
// For example, with testify:
|
||||||
|
// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
|
||||||
|
//
|
||||||
|
// Or with gomock:
|
||||||
|
// gomock.NewController(GinkgoT())
|
||||||
|
//
|
||||||
|
// GinkgoT() takes an optional offset argument that can be used to get the
|
||||||
|
// correct line number associated with the failure.
|
||||||
|
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
|
||||||
|
offset := 3
|
||||||
|
if len(optionalOffset) > 0 {
|
||||||
|
offset = optionalOffset[0]
|
||||||
|
}
|
||||||
|
return testingtproxy.New(GinkgoWriter, Fail, offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
//The interface returned by GinkgoT(). This covers most of the methods
|
||||||
|
//in the testing package's T.
|
||||||
|
type GinkgoTInterface interface {
|
||||||
|
Fail()
|
||||||
|
Error(args ...interface{})
|
||||||
|
Errorf(format string, args ...interface{})
|
||||||
|
FailNow()
|
||||||
|
Fatal(args ...interface{})
|
||||||
|
Fatalf(format string, args ...interface{})
|
||||||
|
Log(args ...interface{})
|
||||||
|
Logf(format string, args ...interface{})
|
||||||
|
Failed() bool
|
||||||
|
Parallel()
|
||||||
|
Skip(args ...interface{})
|
||||||
|
Skipf(format string, args ...interface{})
|
||||||
|
SkipNow()
|
||||||
|
Skipped() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
//Custom Ginkgo test reporters must implement the Reporter interface.
|
||||||
|
//
|
||||||
|
//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
|
||||||
|
//and a SpecSummary just before a spec begins and just after a spec ends
|
||||||
|
type Reporter reporters.Reporter
|
||||||
|
|
||||||
|
//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
|
||||||
|
//to tell Ginkgo that your async test is done.
|
||||||
|
type Done chan<- interface{}
|
||||||
|
|
||||||
|
//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
|
||||||
|
// FullTestText: a concatenation of ComponentTexts and the TestText
|
||||||
|
// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
|
||||||
|
// TestText: the text in the actual It or Measure node
|
||||||
|
// IsMeasurement: true if the current test is a measurement
|
||||||
|
// FileName: the name of the file containing the current test
|
||||||
|
// LineNumber: the line number for the current test
|
||||||
|
// Failed: if the current test has failed, this will be true (useful in an AfterEach)
|
||||||
|
type GinkgoTestDescription struct {
|
||||||
|
FullTestText string
|
||||||
|
ComponentTexts []string
|
||||||
|
TestText string
|
||||||
|
|
||||||
|
IsMeasurement bool
|
||||||
|
|
||||||
|
FileName string
|
||||||
|
LineNumber int
|
||||||
|
|
||||||
|
Failed bool
|
||||||
|
Duration time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
//CurrentGinkgoTestDescripton returns information about the current running test.
|
||||||
|
func CurrentGinkgoTestDescription() GinkgoTestDescription {
|
||||||
|
summary, ok := globalSuite.CurrentRunningSpecSummary()
|
||||||
|
if !ok {
|
||||||
|
return GinkgoTestDescription{}
|
||||||
|
}
|
||||||
|
|
||||||
|
subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
|
||||||
|
|
||||||
|
return GinkgoTestDescription{
|
||||||
|
ComponentTexts: summary.ComponentTexts[1:],
|
||||||
|
FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
|
||||||
|
TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
|
||||||
|
IsMeasurement: summary.IsMeasurement,
|
||||||
|
FileName: subjectCodeLocation.FileName,
|
||||||
|
LineNumber: subjectCodeLocation.LineNumber,
|
||||||
|
Failed: summary.HasFailureState(),
|
||||||
|
Duration: summary.RunTime,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Measurement tests receive a Benchmarker.
|
||||||
|
//
|
||||||
|
//You use the Time() function to time how long the passed in body function takes to run
|
||||||
|
//You use the RecordValue() function to track arbitrary numerical measurements.
|
||||||
|
//The RecordValueWithPrecision() function can be used alternatively to provide the unit
|
||||||
|
//and resolution of the numeric measurement.
|
||||||
|
//The optional info argument is passed to the test reporter and can be used to
|
||||||
|
// provide the measurement data to a custom reporter with context.
|
||||||
|
//
|
||||||
|
//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
|
||||||
|
type Benchmarker interface {
|
||||||
|
Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
|
||||||
|
RecordValue(name string, value float64, info ...interface{})
|
||||||
|
RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
//RunSpecs is the entry point for the Ginkgo test runner.
|
||||||
|
//You must call this within a Golang testing TestX(t *testing.T) function.
|
||||||
|
//
|
||||||
|
//To bootstrap a test suite you can use the Ginkgo CLI:
|
||||||
|
//
|
||||||
|
// ginkgo bootstrap
|
||||||
|
func RunSpecs(t GinkgoTestingT, description string) bool {
|
||||||
|
specReporters := []Reporter{buildDefaultReporter()}
|
||||||
|
if config.DefaultReporterConfig.ReportFile != "" {
|
||||||
|
reportFile := config.DefaultReporterConfig.ReportFile
|
||||||
|
specReporters[0] = reporters.NewJUnitReporter(reportFile)
|
||||||
|
return RunSpecsWithDefaultAndCustomReporters(t, description, specReporters)
|
||||||
|
}
|
||||||
|
return RunSpecsWithCustomReporters(t, description, specReporters)
|
||||||
|
}
|
||||||
|
|
||||||
|
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
|
||||||
|
//RunSpecs() with this method.
|
||||||
|
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||||
|
specReporters = append(specReporters, buildDefaultReporter())
|
||||||
|
return RunSpecsWithCustomReporters(t, description, specReporters)
|
||||||
|
}
|
||||||
|
|
||||||
|
//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
|
||||||
|
//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
|
||||||
|
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||||
|
writer := GinkgoWriter.(*writer.Writer)
|
||||||
|
writer.SetStream(config.DefaultReporterConfig.Verbose)
|
||||||
|
reporters := make([]reporters.Reporter, len(specReporters))
|
||||||
|
for i, reporter := range specReporters {
|
||||||
|
reporters[i] = reporter
|
||||||
|
}
|
||||||
|
passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
|
||||||
|
if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
|
||||||
|
fmt.Println("PASS | FOCUSED")
|
||||||
|
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||||
|
}
|
||||||
|
return passed
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildDefaultReporter() Reporter {
|
||||||
|
remoteReportingServer := config.GinkgoConfig.StreamHost
|
||||||
|
if remoteReportingServer == "" {
|
||||||
|
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
|
||||||
|
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
|
||||||
|
} else {
|
||||||
|
debugFile := ""
|
||||||
|
if config.GinkgoConfig.DebugParallel {
|
||||||
|
debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
|
||||||
|
}
|
||||||
|
return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Skip notifies Ginkgo that the current spec was skipped.
|
||||||
|
func Skip(message string, callerSkip ...int) {
|
||||||
|
skip := 0
|
||||||
|
if len(callerSkip) > 0 {
|
||||||
|
skip = callerSkip[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
globalFailer.Skip(message, codelocation.New(skip+1))
|
||||||
|
panic(GINKGO_PANIC)
|
||||||
|
}
|
||||||
|
|
||||||
|
//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
|
||||||
|
func Fail(message string, callerSkip ...int) {
|
||||||
|
skip := 0
|
||||||
|
if len(callerSkip) > 0 {
|
||||||
|
skip = callerSkip[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
globalFailer.Fail(message, codelocation.New(skip+1))
|
||||||
|
panic(GINKGO_PANIC)
|
||||||
|
}
|
||||||
|
|
||||||
|
//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
||||||
|
//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
|
||||||
|
//calls out to Gomega
|
||||||
|
//
|
||||||
|
//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
|
||||||
|
//further assertions from running. This panic must be recovered. Ginkgo does this for you
|
||||||
|
//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
|
||||||
|
//
|
||||||
|
//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
|
||||||
|
//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
|
||||||
|
func GinkgoRecover() {
|
||||||
|
e := recover()
|
||||||
|
if e != nil {
|
||||||
|
globalFailer.Panic(codelocation.New(1), e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
|
||||||
|
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||||
|
//
|
||||||
|
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
|
||||||
|
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||||
|
//or method and, within that Describe, outline a number of Contexts and Whens.
|
||||||
|
func Describe(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can focus the tests within a describe block using FDescribe
|
||||||
|
func FDescribe(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark the tests within a describe block as pending using PDescribe
|
||||||
|
func PDescribe(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark the tests within a describe block as pending using XDescribe
|
||||||
|
func XDescribe(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//Context blocks allow you to organize your specs. A Context block can contain any number of
|
||||||
|
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||||
|
//
|
||||||
|
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
|
||||||
|
//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
|
||||||
|
//or method and, within that Describe, outline a number of Contexts and Whens.
|
||||||
|
func Context(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can focus the tests within a describe block using FContext
|
||||||
|
func FContext(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark the tests within a describe block as pending using PContext
|
||||||
|
func PContext(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark the tests within a describe block as pending using XContext
|
||||||
|
func XContext(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//When blocks allow you to organize your specs. A When block can contain any number of
|
||||||
|
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||||
|
//
|
||||||
|
//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
|
||||||
|
//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
|
||||||
|
//or method and, within that Describe, outline a number of Contexts and Whens.
|
||||||
|
func When(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can focus the tests within a describe block using FWhen
|
||||||
|
func FWhen(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark the tests within a describe block as pending using PWhen
|
||||||
|
func PWhen(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark the tests within a describe block as pending using XWhen
|
||||||
|
func XWhen(text string, body func()) bool {
|
||||||
|
globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
|
||||||
|
//within an It block.
|
||||||
|
//
|
||||||
|
//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
|
||||||
|
//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
|
||||||
|
func It(text string, body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can focus individual Its using FIt
|
||||||
|
func FIt(text string, body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark Its as pending using PIt
|
||||||
|
func PIt(text string, _ ...interface{}) bool {
|
||||||
|
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark Its as pending using XIt
|
||||||
|
func XIt(text string, _ ...interface{}) bool {
|
||||||
|
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//Specify blocks are aliases for It blocks and allow for more natural wording in situations
|
||||||
|
//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
|
||||||
|
//which apply to It blocks.
|
||||||
|
func Specify(text string, body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can focus individual Specifys using FSpecify
|
||||||
|
func FSpecify(text string, body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark Specifys as pending using PSpecify
|
||||||
|
func PSpecify(text string, is ...interface{}) bool {
|
||||||
|
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark Specifys as pending using XSpecify
|
||||||
|
func XSpecify(text string, is ...interface{}) bool {
|
||||||
|
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//By allows you to better document large Its.
|
||||||
|
//
|
||||||
|
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
|
||||||
|
//especially in the context of integration tests that capture a particular workflow.
|
||||||
|
//
|
||||||
|
//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
|
||||||
|
//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
|
||||||
|
func By(text string, callbacks ...func()) {
|
||||||
|
preamble := "\x1b[1mSTEP\x1b[0m"
|
||||||
|
if config.DefaultReporterConfig.NoColor {
|
||||||
|
preamble = "STEP"
|
||||||
|
}
|
||||||
|
fmt.Fprintln(GinkgoWriter, preamble+": "+text)
|
||||||
|
if len(callbacks) == 1 {
|
||||||
|
callbacks[0]()
|
||||||
|
}
|
||||||
|
if len(callbacks) > 1 {
|
||||||
|
panic("just one callback per By, please")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
|
||||||
|
//and accumulate metrics provided to the Benchmarker by the body function.
|
||||||
|
//
|
||||||
|
//The body function must have the signature:
|
||||||
|
// func(b Benchmarker)
|
||||||
|
func Measure(text string, body interface{}, samples int) bool {
|
||||||
|
globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can focus individual Measures using FMeasure
|
||||||
|
func FMeasure(text string, body interface{}, samples int) bool {
|
||||||
|
globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark Measurements as pending using PMeasure
|
||||||
|
func PMeasure(text string, _ ...interface{}) bool {
|
||||||
|
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//You can mark Measurements as pending using XMeasure
|
||||||
|
func XMeasure(text string, _ ...interface{}) bool {
|
||||||
|
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
|
||||||
|
//parallel node process will call BeforeSuite.
|
||||||
|
//
|
||||||
|
//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||||
|
//
|
||||||
|
//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||||
|
func BeforeSuite(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
|
||||||
|
//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
|
||||||
|
//
|
||||||
|
//When running in parallel, each parallel node process will call AfterSuite.
|
||||||
|
//
|
||||||
|
//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||||
|
//
|
||||||
|
//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||||
|
func AfterSuite(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
|
||||||
|
//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
|
||||||
|
//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
|
||||||
|
//until that node is done before running.
|
||||||
|
//
|
||||||
|
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
|
||||||
|
//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
|
||||||
|
//to the second function (on all the other nodes).
|
||||||
|
//
|
||||||
|
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
|
||||||
|
//
|
||||||
|
// func() []byte
|
||||||
|
//
|
||||||
|
//or, to run asynchronously:
|
||||||
|
//
|
||||||
|
// func(done Done) []byte
|
||||||
|
//
|
||||||
|
//The byte array returned by the first function is then passed to the second function, which has the signature:
|
||||||
|
//
|
||||||
|
// func(data []byte)
|
||||||
|
//
|
||||||
|
//or, to run asynchronously:
|
||||||
|
//
|
||||||
|
// func(data []byte, done Done)
|
||||||
|
//
|
||||||
|
//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
|
||||||
|
//
|
||||||
|
// var dbClient db.Client
|
||||||
|
// var dbRunner db.Runner
|
||||||
|
//
|
||||||
|
// var _ = SynchronizedBeforeSuite(func() []byte {
|
||||||
|
// dbRunner = db.NewRunner()
|
||||||
|
// err := dbRunner.Start()
|
||||||
|
// Ω(err).ShouldNot(HaveOccurred())
|
||||||
|
// return []byte(dbRunner.URL)
|
||||||
|
// }, func(data []byte) {
|
||||||
|
// dbClient = db.NewClient()
|
||||||
|
// err := dbClient.Connect(string(data))
|
||||||
|
// Ω(err).ShouldNot(HaveOccurred())
|
||||||
|
// })
|
||||||
|
func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.SetSynchronizedBeforeSuiteNode(
|
||||||
|
node1Body,
|
||||||
|
allNodesBody,
|
||||||
|
codelocation.New(1),
|
||||||
|
parseTimeout(timeout...),
|
||||||
|
)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
|
||||||
|
//external singleton resources shared across nodes when running tests in parallel.
|
||||||
|
//
|
||||||
|
//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
|
||||||
|
//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
|
||||||
|
//all other nodes are finished.
|
||||||
|
//
|
||||||
|
//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
|
||||||
|
//
|
||||||
|
//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
|
||||||
|
//only after all nodes have finished:
|
||||||
|
//
|
||||||
|
// var _ = SynchronizedAfterSuite(func() {
|
||||||
|
// dbClient.Cleanup()
|
||||||
|
// }, func() {
|
||||||
|
// dbRunner.Stop()
|
||||||
|
// })
|
||||||
|
func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.SetSynchronizedAfterSuiteNode(
|
||||||
|
allNodesBody,
|
||||||
|
node1Body,
|
||||||
|
codelocation.New(1),
|
||||||
|
parseTimeout(timeout...),
|
||||||
|
)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
|
||||||
|
//Describe and Context blocks the outermost BeforeEach blocks are run first.
|
||||||
|
//
|
||||||
|
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||||
|
//a Done channel
|
||||||
|
func BeforeEach(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
|
||||||
|
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
|
||||||
|
//
|
||||||
|
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||||
|
//a Done channel
|
||||||
|
func JustBeforeEach(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details,
|
||||||
|
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
|
||||||
|
//
|
||||||
|
//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts
|
||||||
|
//a Done channel
|
||||||
|
func JustAfterEach(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
|
||||||
|
//Describe and Context blocks the innermost AfterEach blocks are run first.
|
||||||
|
//
|
||||||
|
//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
|
||||||
|
//a Done channel
|
||||||
|
func AfterEach(body interface{}, timeout ...float64) bool {
|
||||||
|
globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseTimeout(timeout ...float64) time.Duration {
|
||||||
|
if len(timeout) == 0 {
|
||||||
|
return time.Duration(defaultTimeout * int64(time.Second))
|
||||||
|
} else {
|
||||||
|
return time.Duration(timeout[0] * float64(time.Second))
|
||||||
|
}
|
||||||
|
}
|
||||||
48
vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
generated
vendored
Normal file
48
vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
generated
vendored
Normal file
|
|
@ -0,0 +1,48 @@
|
||||||
|
package codelocation
|
||||||
|
|
||||||
|
import (
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"runtime/debug"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func New(skip int) types.CodeLocation {
|
||||||
|
_, file, line, _ := runtime.Caller(skip + 1)
|
||||||
|
stackTrace := PruneStack(string(debug.Stack()), skip+1)
|
||||||
|
return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
|
||||||
|
}
|
||||||
|
|
||||||
|
// stackTracePruneRE matches the source-location line of frames that belong to
// Ginkgo itself or to the Go testing/runtime machinery. Compiled once at
// package init rather than on every PruneStack call.
var stackTracePruneRE = regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)

// PruneStack removes references to functions that are internal to Ginkgo
// and the Go runtime from a stack string and a certain number of stack entries
// at the beginning of the stack. The stack string has the format
// as returned by runtime/debug.Stack. The leading goroutine information is
// optional and always removed if present. Beware that runtime/debug.Stack
// adds itself as first entry, so typically skip must be >= 1 to remove that
// entry.
func PruneStack(fullStackTrace string, skip int) string {
	stack := strings.Split(fullStackTrace, "\n")
	// Drop the optional "goroutine N [running]:" header so that even entries
	// are function names and odd entries are the source-code locations.
	if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
		stack = stack[1:]
	}
	// Skip the requested number of leading frames; the "+1" accounts for the
	// initial runtime/debug.Stack() entry itself.
	if len(stack) > 2*(skip+1) {
		stack = stack[2*(skip+1):]
	}
	prunedStack := make([]string, 0, len(stack))
	for i := 0; i+1 < len(stack); i += 2 {
		// Filter each frame pair based on its source-code file name.
		if !stackTracePruneRE.MatchString(stack[i+1]) {
			prunedStack = append(prunedStack, stack[i], stack[i+1])
		}
	}
	return strings.Join(prunedStack, "\n")
}
|
||||||
151
vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
generated
vendored
Normal file
151
vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
generated
vendored
Normal file
|
|
@ -0,0 +1,151 @@
|
||||||
|
package containernode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type subjectOrContainerNode struct {
|
||||||
|
containerNode *ContainerNode
|
||||||
|
subjectNode leafnodes.SubjectNode
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n subjectOrContainerNode) text() string {
|
||||||
|
if n.containerNode != nil {
|
||||||
|
return n.containerNode.Text()
|
||||||
|
} else {
|
||||||
|
return n.subjectNode.Text()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type CollatedNodes struct {
|
||||||
|
Containers []*ContainerNode
|
||||||
|
Subject leafnodes.SubjectNode
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerNode struct {
|
||||||
|
text string
|
||||||
|
flag types.FlagType
|
||||||
|
codeLocation types.CodeLocation
|
||||||
|
|
||||||
|
setupNodes []leafnodes.BasicNode
|
||||||
|
subjectAndContainerNodes []subjectOrContainerNode
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
|
||||||
|
return &ContainerNode{
|
||||||
|
text: text,
|
||||||
|
flag: flag,
|
||||||
|
codeLocation: codeLocation,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (container *ContainerNode) Shuffle(r *rand.Rand) {
|
||||||
|
sort.Sort(container)
|
||||||
|
permutation := r.Perm(len(container.subjectAndContainerNodes))
|
||||||
|
shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
|
||||||
|
for i, j := range permutation {
|
||||||
|
shuffledNodes[i] = container.subjectAndContainerNodes[j]
|
||||||
|
}
|
||||||
|
container.subjectAndContainerNodes = shuffledNodes
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
|
||||||
|
if node.flag == types.FlagTypePending {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
shouldUnfocus := false
|
||||||
|
for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
|
||||||
|
if subjectOrContainerNode.containerNode != nil {
|
||||||
|
shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
|
||||||
|
} else {
|
||||||
|
shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if shouldUnfocus {
|
||||||
|
if node.flag == types.FlagTypeFocused {
|
||||||
|
node.flag = types.FlagTypeNone
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return node.flag == types.FlagTypeFocused
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) Collate() []CollatedNodes {
|
||||||
|
return node.collate([]*ContainerNode{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
|
||||||
|
collated := make([]CollatedNodes, 0)
|
||||||
|
|
||||||
|
containers := make([]*ContainerNode, len(enclosingContainers))
|
||||||
|
copy(containers, enclosingContainers)
|
||||||
|
containers = append(containers, node)
|
||||||
|
|
||||||
|
for _, subjectOrContainer := range node.subjectAndContainerNodes {
|
||||||
|
if subjectOrContainer.containerNode != nil {
|
||||||
|
collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
|
||||||
|
} else {
|
||||||
|
collated = append(collated, CollatedNodes{
|
||||||
|
Containers: containers,
|
||||||
|
Subject: subjectOrContainer.subjectNode,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return collated
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
|
||||||
|
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
|
||||||
|
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
|
||||||
|
node.setupNodes = append(node.setupNodes, setupNode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
|
||||||
|
nodes := []leafnodes.BasicNode{}
|
||||||
|
for _, setupNode := range node.setupNodes {
|
||||||
|
if setupNode.Type() == nodeType {
|
||||||
|
nodes = append(nodes, setupNode)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) Text() string {
|
||||||
|
return node.text
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) CodeLocation() types.CodeLocation {
|
||||||
|
return node.codeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) Flag() types.FlagType {
|
||||||
|
return node.flag
|
||||||
|
}
|
||||||
|
|
||||||
|
//sort.Interface
|
||||||
|
|
||||||
|
func (node *ContainerNode) Len() int {
|
||||||
|
return len(node.subjectAndContainerNodes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) Less(i, j int) bool {
|
||||||
|
return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ContainerNode) Swap(i, j int) {
|
||||||
|
node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
|
||||||
|
}
|
||||||
92
vendor/github.com/onsi/ginkgo/internal/failer/failer.go
generated
vendored
Normal file
92
vendor/github.com/onsi/ginkgo/internal/failer/failer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,92 @@
|
||||||
|
package failer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Failer struct {
|
||||||
|
lock *sync.Mutex
|
||||||
|
failure types.SpecFailure
|
||||||
|
state types.SpecState
|
||||||
|
}
|
||||||
|
|
||||||
|
func New() *Failer {
|
||||||
|
return &Failer{
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
state: types.SpecStatePassed,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if f.state == types.SpecStatePassed {
|
||||||
|
f.state = types.SpecStatePanicked
|
||||||
|
f.failure = types.SpecFailure{
|
||||||
|
Message: "Test Panicked",
|
||||||
|
Location: location,
|
||||||
|
ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Failer) Timeout(location types.CodeLocation) {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if f.state == types.SpecStatePassed {
|
||||||
|
f.state = types.SpecStateTimedOut
|
||||||
|
f.failure = types.SpecFailure{
|
||||||
|
Message: "Timed out",
|
||||||
|
Location: location,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Failer) Fail(message string, location types.CodeLocation) {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if f.state == types.SpecStatePassed {
|
||||||
|
f.state = types.SpecStateFailed
|
||||||
|
f.failure = types.SpecFailure{
|
||||||
|
Message: message,
|
||||||
|
Location: location,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
failure := f.failure
|
||||||
|
outcome := f.state
|
||||||
|
if outcome != types.SpecStatePassed {
|
||||||
|
failure.ComponentType = componentType
|
||||||
|
failure.ComponentIndex = componentIndex
|
||||||
|
failure.ComponentCodeLocation = componentCodeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
f.state = types.SpecStatePassed
|
||||||
|
f.failure = types.SpecFailure{}
|
||||||
|
|
||||||
|
return failure, outcome
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Failer) Skip(message string, location types.CodeLocation) {
|
||||||
|
f.lock.Lock()
|
||||||
|
defer f.lock.Unlock()
|
||||||
|
|
||||||
|
if f.state == types.SpecStatePassed {
|
||||||
|
f.state = types.SpecStateSkipped
|
||||||
|
f.failure = types.SpecFailure{
|
||||||
|
Message: message,
|
||||||
|
Location: location,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
103
vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
generated
vendored
Normal file
103
vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
generated
vendored
Normal file
|
|
@ -0,0 +1,103 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type benchmarker struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
measurements map[string]*types.SpecMeasurement
|
||||||
|
orderCounter int
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBenchmarker() *benchmarker {
|
||||||
|
return &benchmarker{
|
||||||
|
measurements: make(map[string]*types.SpecMeasurement),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
|
||||||
|
t := time.Now()
|
||||||
|
body()
|
||||||
|
elapsedTime = time.Since(t)
|
||||||
|
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
|
||||||
|
measurement.Results = append(measurement.Results, elapsedTime.Seconds())
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
|
||||||
|
b.mu.Lock()
|
||||||
|
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
measurement.Results = append(measurement.Results, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
|
||||||
|
b.mu.Lock()
|
||||||
|
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
measurement.Results = append(measurement.Results, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
|
||||||
|
measurement, ok := b.measurements[name]
|
||||||
|
if !ok {
|
||||||
|
var computedInfo interface{}
|
||||||
|
computedInfo = nil
|
||||||
|
if len(info) > 0 {
|
||||||
|
computedInfo = info[0]
|
||||||
|
}
|
||||||
|
measurement = &types.SpecMeasurement{
|
||||||
|
Name: name,
|
||||||
|
Info: computedInfo,
|
||||||
|
Order: b.orderCounter,
|
||||||
|
SmallestLabel: smallestLabel,
|
||||||
|
LargestLabel: largestLabel,
|
||||||
|
AverageLabel: averageLabel,
|
||||||
|
Units: units,
|
||||||
|
Precision: precision,
|
||||||
|
Results: make([]float64, 0),
|
||||||
|
}
|
||||||
|
b.measurements[name] = measurement
|
||||||
|
b.orderCounter++
|
||||||
|
}
|
||||||
|
|
||||||
|
return measurement
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
|
||||||
|
b.mu.Lock()
|
||||||
|
defer b.mu.Unlock()
|
||||||
|
for _, measurement := range b.measurements {
|
||||||
|
measurement.Smallest = math.MaxFloat64
|
||||||
|
measurement.Largest = -math.MaxFloat64
|
||||||
|
sum := float64(0)
|
||||||
|
sumOfSquares := float64(0)
|
||||||
|
|
||||||
|
for _, result := range measurement.Results {
|
||||||
|
if result > measurement.Largest {
|
||||||
|
measurement.Largest = result
|
||||||
|
}
|
||||||
|
if result < measurement.Smallest {
|
||||||
|
measurement.Smallest = result
|
||||||
|
}
|
||||||
|
sum += result
|
||||||
|
sumOfSquares += result * result
|
||||||
|
}
|
||||||
|
|
||||||
|
n := float64(len(measurement.Results))
|
||||||
|
measurement.Average = sum / n
|
||||||
|
measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.measurements
|
||||||
|
}
|
||||||
19
vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
generated
vendored
Normal file
19
vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
generated
vendored
Normal file
|
|
@ -0,0 +1,19 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type BasicNode interface {
|
||||||
|
Type() types.SpecComponentType
|
||||||
|
Run() (types.SpecState, types.SpecFailure)
|
||||||
|
CodeLocation() types.CodeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
type SubjectNode interface {
|
||||||
|
BasicNode
|
||||||
|
|
||||||
|
Text() string
|
||||||
|
Flag() types.FlagType
|
||||||
|
Samples() int
|
||||||
|
}
|
||||||
47
vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
generated
vendored
Normal file
47
vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
generated
vendored
Normal file
|
|
@ -0,0 +1,47 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ItNode struct {
|
||||||
|
runner *runner
|
||||||
|
|
||||||
|
flag types.FlagType
|
||||||
|
text string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
|
||||||
|
return &ItNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
|
||||||
|
flag: flag,
|
||||||
|
text: text,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
return node.runner.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Type() types.SpecComponentType {
|
||||||
|
return types.SpecComponentTypeIt
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Text() string {
|
||||||
|
return node.text
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Flag() types.FlagType {
|
||||||
|
return node.flag
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) CodeLocation() types.CodeLocation {
|
||||||
|
return node.runner.codeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *ItNode) Samples() int {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
62
vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
generated
vendored
Normal file
62
vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
generated
vendored
Normal file
|
|
@ -0,0 +1,62 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type MeasureNode struct {
|
||||||
|
runner *runner
|
||||||
|
|
||||||
|
text string
|
||||||
|
flag types.FlagType
|
||||||
|
samples int
|
||||||
|
benchmarker *benchmarker
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
|
||||||
|
benchmarker := newBenchmarker()
|
||||||
|
|
||||||
|
wrappedBody := func() {
|
||||||
|
reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
|
||||||
|
}
|
||||||
|
|
||||||
|
return &MeasureNode{
|
||||||
|
runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
|
||||||
|
|
||||||
|
text: text,
|
||||||
|
flag: flag,
|
||||||
|
samples: samples,
|
||||||
|
benchmarker: benchmarker,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
return node.runner.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
|
||||||
|
return node.benchmarker.measurementsReport()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Type() types.SpecComponentType {
|
||||||
|
return types.SpecComponentTypeMeasure
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Text() string {
|
||||||
|
return node.text
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Flag() types.FlagType {
|
||||||
|
return node.flag
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) CodeLocation() types.CodeLocation {
|
||||||
|
return node.runner.codeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *MeasureNode) Samples() int {
|
||||||
|
return node.samples
|
||||||
|
}
|
||||||
117
vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
generated
vendored
Normal file
117
vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
generated
vendored
Normal file
|
|
@ -0,0 +1,117 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/codelocation"
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type runner struct {
|
||||||
|
isAsync bool
|
||||||
|
asyncFunc func(chan<- interface{})
|
||||||
|
syncFunc func()
|
||||||
|
codeLocation types.CodeLocation
|
||||||
|
timeoutThreshold time.Duration
|
||||||
|
nodeType types.SpecComponentType
|
||||||
|
componentIndex int
|
||||||
|
failer *failer.Failer
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
|
||||||
|
bodyType := reflect.TypeOf(body)
|
||||||
|
if bodyType.Kind() != reflect.Func {
|
||||||
|
panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
|
||||||
|
}
|
||||||
|
|
||||||
|
runner := &runner{
|
||||||
|
codeLocation: codeLocation,
|
||||||
|
timeoutThreshold: timeout,
|
||||||
|
failer: failer,
|
||||||
|
nodeType: nodeType,
|
||||||
|
componentIndex: componentIndex,
|
||||||
|
}
|
||||||
|
|
||||||
|
switch bodyType.NumIn() {
|
||||||
|
case 0:
|
||||||
|
runner.syncFunc = body.(func())
|
||||||
|
return runner
|
||||||
|
case 1:
|
||||||
|
if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
|
||||||
|
panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
|
||||||
|
}
|
||||||
|
|
||||||
|
wrappedBody := func(done chan<- interface{}) {
|
||||||
|
bodyValue := reflect.ValueOf(body)
|
||||||
|
bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
|
||||||
|
}
|
||||||
|
|
||||||
|
runner.isAsync = true
|
||||||
|
runner.asyncFunc = wrappedBody
|
||||||
|
return runner
|
||||||
|
}
|
||||||
|
|
||||||
|
panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
if r.isAsync {
|
||||||
|
return r.runAsync()
|
||||||
|
} else {
|
||||||
|
return r.runSync()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
done := make(chan interface{}, 1)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
finished := false
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if e := recover(); e != nil || !finished {
|
||||||
|
r.failer.Panic(codelocation.New(2), e)
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
close(done)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
r.asyncFunc(done)
|
||||||
|
finished = true
|
||||||
|
}()
|
||||||
|
|
||||||
|
// If this goroutine gets no CPU time before the select block,
|
||||||
|
// the <-done case may complete even if the test took longer than the timeoutThreshold.
|
||||||
|
// This can cause flaky behaviour, but we haven't seen it in the wild.
|
||||||
|
select {
|
||||||
|
case <-done:
|
||||||
|
case <-time.After(r.timeoutThreshold):
|
||||||
|
r.failer.Timeout(r.codeLocation)
|
||||||
|
}
|
||||||
|
|
||||||
|
failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
finished := false
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if e := recover(); e != nil || !finished {
|
||||||
|
r.failer.Panic(codelocation.New(2), e)
|
||||||
|
}
|
||||||
|
|
||||||
|
failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
|
||||||
|
}()
|
||||||
|
|
||||||
|
r.syncFunc()
|
||||||
|
finished = true
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
48
vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
generated
vendored
Normal file
48
vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
generated
vendored
Normal file
|
|
@ -0,0 +1,48 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SetupNode struct {
|
||||||
|
runner *runner
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||||
|
return node.runner.run()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *SetupNode) Type() types.SpecComponentType {
|
||||||
|
return node.runner.nodeType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *SetupNode) CodeLocation() types.CodeLocation {
|
||||||
|
return node.runner.codeLocation
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||||
|
return &SetupNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||||
|
return &SetupNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||||
|
return &SetupNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||||
|
return &SetupNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
|
||||||
|
}
|
||||||
|
}
|
||||||
55
vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
generated
vendored
Normal file
55
vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
generated
vendored
Normal file
|
|
@ -0,0 +1,55 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type SuiteNode interface {
|
||||||
|
Run(parallelNode int, parallelTotal int, syncHost string) bool
|
||||||
|
Passed() bool
|
||||||
|
Summary() *types.SetupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
type simpleSuiteNode struct {
|
||||||
|
runner *runner
|
||||||
|
outcome types.SpecState
|
||||||
|
failure types.SpecFailure
|
||||||
|
runTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||||
|
t := time.Now()
|
||||||
|
node.outcome, node.failure = node.runner.run()
|
||||||
|
node.runTime = time.Since(t)
|
||||||
|
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *simpleSuiteNode) Passed() bool {
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *simpleSuiteNode) Summary() *types.SetupSummary {
|
||||||
|
return &types.SetupSummary{
|
||||||
|
ComponentType: node.runner.nodeType,
|
||||||
|
CodeLocation: node.runner.codeLocation,
|
||||||
|
State: node.outcome,
|
||||||
|
RunTime: node.runTime,
|
||||||
|
Failure: node.failure,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||||
|
return &simpleSuiteNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||||
|
return &simpleSuiteNode{
|
||||||
|
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
90
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
Normal file
90
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
Normal file
|
|
@ -0,0 +1,90 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type synchronizedAfterSuiteNode struct {
|
||||||
|
runnerA *runner
|
||||||
|
runnerB *runner
|
||||||
|
|
||||||
|
outcome types.SpecState
|
||||||
|
failure types.SpecFailure
|
||||||
|
runTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||||
|
return &synchronizedAfterSuiteNode{
|
||||||
|
runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||||
|
runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||||
|
node.outcome, node.failure = node.runnerA.run()
|
||||||
|
|
||||||
|
if parallelNode == 1 {
|
||||||
|
if parallelTotal > 1 {
|
||||||
|
node.waitUntilOtherNodesAreDone(syncHost)
|
||||||
|
}
|
||||||
|
|
||||||
|
outcome, failure := node.runnerB.run()
|
||||||
|
|
||||||
|
if node.outcome == types.SpecStatePassed {
|
||||||
|
node.outcome, node.failure = outcome, failure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) Passed() bool {
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
|
||||||
|
return &types.SetupSummary{
|
||||||
|
ComponentType: node.runnerA.nodeType,
|
||||||
|
CodeLocation: node.runnerA.codeLocation,
|
||||||
|
State: node.outcome,
|
||||||
|
RunTime: node.runTime,
|
||||||
|
Failure: node.failure,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
|
||||||
|
for {
|
||||||
|
if node.canRun(syncHost) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
|
||||||
|
resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
|
||||||
|
if err != nil || resp.StatusCode != http.StatusOK {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
afterSuiteData := types.RemoteAfterSuiteData{}
|
||||||
|
err = json.Unmarshal(body, &afterSuiteData)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return afterSuiteData.CanRun
|
||||||
|
}
|
||||||
181
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
Normal file
181
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
Normal file
|
|
@ -0,0 +1,181 @@
|
||||||
|
package leafnodes
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type synchronizedBeforeSuiteNode struct {
|
||||||
|
runnerA *runner
|
||||||
|
runnerB *runner
|
||||||
|
|
||||||
|
data []byte
|
||||||
|
|
||||||
|
outcome types.SpecState
|
||||||
|
failure types.SpecFailure
|
||||||
|
runTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||||
|
node := &synchronizedBeforeSuiteNode{}
|
||||||
|
|
||||||
|
node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||||
|
node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||||
|
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||||
|
t := time.Now()
|
||||||
|
defer func() {
|
||||||
|
node.runTime = time.Since(t)
|
||||||
|
}()
|
||||||
|
|
||||||
|
if parallelNode == 1 {
|
||||||
|
node.outcome, node.failure = node.runA(parallelTotal, syncHost)
|
||||||
|
} else {
|
||||||
|
node.outcome, node.failure = node.waitForA(syncHost)
|
||||||
|
}
|
||||||
|
|
||||||
|
if node.outcome != types.SpecStatePassed {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
node.outcome, node.failure = node.runnerB.run()
|
||||||
|
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
|
||||||
|
outcome, failure := node.runnerA.run()
|
||||||
|
|
||||||
|
if parallelTotal > 1 {
|
||||||
|
state := types.RemoteBeforeSuiteStatePassed
|
||||||
|
if outcome != types.SpecStatePassed {
|
||||||
|
state = types.RemoteBeforeSuiteStateFailed
|
||||||
|
}
|
||||||
|
json := (types.RemoteBeforeSuiteData{
|
||||||
|
Data: node.data,
|
||||||
|
State: state,
|
||||||
|
}).ToJSON()
|
||||||
|
http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
|
||||||
|
}
|
||||||
|
|
||||||
|
return outcome, failure
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
|
||||||
|
failure := func(message string) types.SpecFailure {
|
||||||
|
return types.SpecFailure{
|
||||||
|
Message: message,
|
||||||
|
Location: node.runnerA.codeLocation,
|
||||||
|
ComponentType: node.runnerA.nodeType,
|
||||||
|
ComponentIndex: node.runnerA.componentIndex,
|
||||||
|
ComponentCodeLocation: node.runnerA.codeLocation,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
resp, err := http.Get(syncHost + "/BeforeSuiteState")
|
||||||
|
if err != nil || resp.StatusCode != http.StatusOK {
|
||||||
|
return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
|
||||||
|
}
|
||||||
|
resp.Body.Close()
|
||||||
|
|
||||||
|
beforeSuiteData := types.RemoteBeforeSuiteData{}
|
||||||
|
err = json.Unmarshal(body, &beforeSuiteData)
|
||||||
|
if err != nil {
|
||||||
|
return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch beforeSuiteData.State {
|
||||||
|
case types.RemoteBeforeSuiteStatePassed:
|
||||||
|
node.data = beforeSuiteData.Data
|
||||||
|
return types.SpecStatePassed, types.SpecFailure{}
|
||||||
|
case types.RemoteBeforeSuiteStateFailed:
|
||||||
|
return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
|
||||||
|
case types.RemoteBeforeSuiteStateDisappeared:
|
||||||
|
return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) Passed() bool {
|
||||||
|
return node.outcome == types.SpecStatePassed
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
|
||||||
|
return &types.SetupSummary{
|
||||||
|
ComponentType: node.runnerA.nodeType,
|
||||||
|
CodeLocation: node.runnerA.codeLocation,
|
||||||
|
State: node.outcome,
|
||||||
|
RunTime: node.runTime,
|
||||||
|
Failure: node.failure,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
|
||||||
|
typeA := reflect.TypeOf(bodyA)
|
||||||
|
if typeA.Kind() != reflect.Func {
|
||||||
|
panic("SynchronizedBeforeSuite expects a function as its first argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
takesNothing := typeA.NumIn() == 0
|
||||||
|
takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
|
||||||
|
returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
|
||||||
|
|
||||||
|
if !((takesNothing || takesADoneChannel) && returnsBytes) {
|
||||||
|
panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
|
||||||
|
}
|
||||||
|
|
||||||
|
if takesADoneChannel {
|
||||||
|
return func(done chan<- interface{}) {
|
||||||
|
out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
|
||||||
|
node.data = out[0].Interface().([]byte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func() {
|
||||||
|
out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
|
||||||
|
node.data = out[0].Interface().([]byte)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
|
||||||
|
typeB := reflect.TypeOf(bodyB)
|
||||||
|
if typeB.Kind() != reflect.Func {
|
||||||
|
panic("SynchronizedBeforeSuite expects a function as its second argument")
|
||||||
|
}
|
||||||
|
|
||||||
|
returnsNothing := typeB.NumOut() == 0
|
||||||
|
takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
|
||||||
|
takesBytesAndDone := typeB.NumIn() == 2 &&
|
||||||
|
typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
|
||||||
|
typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
|
||||||
|
|
||||||
|
if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
|
||||||
|
panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
|
||||||
|
}
|
||||||
|
|
||||||
|
if takesBytesAndDone {
|
||||||
|
return func(done chan<- interface{}) {
|
||||||
|
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return func() {
|
||||||
|
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
|
||||||
|
}
|
||||||
|
}
|
||||||
249
vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
generated
vendored
Normal file
249
vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,249 @@
|
||||||
|
/*
|
||||||
|
|
||||||
|
Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
|
||||||
|
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
|
||||||
|
|
||||||
|
ginkgo -nodes=N
|
||||||
|
|
||||||
|
where N is the number of nodes you desire.
|
||||||
|
*/
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type configAndSuite struct {
|
||||||
|
config config.GinkgoConfigType
|
||||||
|
summary *types.SuiteSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
type Aggregator struct {
|
||||||
|
nodeCount int
|
||||||
|
config config.DefaultReporterConfigType
|
||||||
|
stenographer stenographer.Stenographer
|
||||||
|
result chan bool
|
||||||
|
|
||||||
|
suiteBeginnings chan configAndSuite
|
||||||
|
aggregatedSuiteBeginnings []configAndSuite
|
||||||
|
|
||||||
|
beforeSuites chan *types.SetupSummary
|
||||||
|
aggregatedBeforeSuites []*types.SetupSummary
|
||||||
|
|
||||||
|
afterSuites chan *types.SetupSummary
|
||||||
|
aggregatedAfterSuites []*types.SetupSummary
|
||||||
|
|
||||||
|
specCompletions chan *types.SpecSummary
|
||||||
|
completedSpecs []*types.SpecSummary
|
||||||
|
|
||||||
|
suiteEndings chan *types.SuiteSummary
|
||||||
|
aggregatedSuiteEndings []*types.SuiteSummary
|
||||||
|
specs []*types.SpecSummary
|
||||||
|
|
||||||
|
startTime time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
|
||||||
|
aggregator := &Aggregator{
|
||||||
|
nodeCount: nodeCount,
|
||||||
|
result: result,
|
||||||
|
config: config,
|
||||||
|
stenographer: stenographer,
|
||||||
|
|
||||||
|
suiteBeginnings: make(chan configAndSuite),
|
||||||
|
beforeSuites: make(chan *types.SetupSummary),
|
||||||
|
afterSuites: make(chan *types.SetupSummary),
|
||||||
|
specCompletions: make(chan *types.SpecSummary),
|
||||||
|
suiteEndings: make(chan *types.SuiteSummary),
|
||||||
|
}
|
||||||
|
|
||||||
|
go aggregator.mux()
|
||||||
|
|
||||||
|
return aggregator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
aggregator.suiteBeginnings <- configAndSuite{config, summary}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.beforeSuites <- setupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.afterSuites <- setupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
|
||||||
|
//noop
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||||
|
aggregator.specCompletions <- specSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
aggregator.suiteEndings <- summary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) mux() {
|
||||||
|
loop:
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case configAndSuite := <-aggregator.suiteBeginnings:
|
||||||
|
aggregator.registerSuiteBeginning(configAndSuite)
|
||||||
|
case setupSummary := <-aggregator.beforeSuites:
|
||||||
|
aggregator.registerBeforeSuite(setupSummary)
|
||||||
|
case setupSummary := <-aggregator.afterSuites:
|
||||||
|
aggregator.registerAfterSuite(setupSummary)
|
||||||
|
case specSummary := <-aggregator.specCompletions:
|
||||||
|
aggregator.registerSpecCompletion(specSummary)
|
||||||
|
case suite := <-aggregator.suiteEndings:
|
||||||
|
finished, passed := aggregator.registerSuiteEnding(suite)
|
||||||
|
if finished {
|
||||||
|
aggregator.result <- passed
|
||||||
|
break loop
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
|
||||||
|
aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
|
||||||
|
|
||||||
|
if len(aggregator.aggregatedSuiteBeginnings) == 1 {
|
||||||
|
aggregator.startTime = time.Now()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
|
||||||
|
|
||||||
|
totalNumberOfSpecs := 0
|
||||||
|
if len(aggregator.aggregatedSuiteBeginnings) > 0 {
|
||||||
|
totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
|
||||||
|
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
|
||||||
|
aggregator.flushCompletedSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
|
||||||
|
aggregator.flushCompletedSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
|
||||||
|
aggregator.flushCompletedSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
|
||||||
|
aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
|
||||||
|
aggregator.specs = append(aggregator.specs, specSummary)
|
||||||
|
aggregator.flushCompletedSpecs()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) flushCompletedSpecs() {
|
||||||
|
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, setupSummary := range aggregator.aggregatedBeforeSuites {
|
||||||
|
aggregator.announceBeforeSuite(setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, specSummary := range aggregator.completedSpecs {
|
||||||
|
aggregator.announceSpec(specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, setupSummary := range aggregator.aggregatedAfterSuites {
|
||||||
|
aggregator.announceAfterSuite(setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
|
||||||
|
aggregator.completedSpecs = []*types.SpecSummary{}
|
||||||
|
aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||||
|
if setupSummary.State != types.SpecStatePassed {
|
||||||
|
aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
|
||||||
|
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||||
|
if setupSummary.State != types.SpecStatePassed {
|
||||||
|
aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
|
||||||
|
if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||||
|
aggregator.stenographer.AnnounceSpecWillRun(specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
|
||||||
|
|
||||||
|
switch specSummary.State {
|
||||||
|
case types.SpecStatePassed:
|
||||||
|
if specSummary.IsMeasurement {
|
||||||
|
aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
|
||||||
|
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
|
||||||
|
aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
|
||||||
|
} else {
|
||||||
|
aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
case types.SpecStatePending:
|
||||||
|
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
|
||||||
|
case types.SpecStateSkipped:
|
||||||
|
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
|
||||||
|
case types.SpecStateTimedOut:
|
||||||
|
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
case types.SpecStateFailed:
|
||||||
|
aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
|
||||||
|
aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
|
||||||
|
if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregatedSuiteSummary := &types.SuiteSummary{}
|
||||||
|
aggregatedSuiteSummary.SuiteSucceeded = true
|
||||||
|
|
||||||
|
for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
|
||||||
|
if !suiteSummary.SuiteSucceeded {
|
||||||
|
aggregatedSuiteSummary.SuiteSucceeded = false
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
|
||||||
|
aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
|
||||||
|
aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
|
||||||
|
}
|
||||||
|
|
||||||
|
aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
|
||||||
|
|
||||||
|
aggregator.stenographer.SummarizeFailures(aggregator.specs)
|
||||||
|
aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
|
||||||
|
|
||||||
|
return true, aggregatedSuiteSummary.SuiteSucceeded
|
||||||
|
}
|
||||||
147
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
Normal file
147
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
Normal file
|
|
@ -0,0 +1,147 @@
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/writer"
|
||||||
|
"github.com/onsi/ginkgo/reporters"
|
||||||
|
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
//An interface to net/http's client to allow the injection of fakes under test
|
||||||
|
type Poster interface {
|
||||||
|
Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
The ForwardingReporter is a Ginkgo reporter that forwards information to
|
||||||
|
a Ginkgo remote server.
|
||||||
|
|
||||||
|
When streaming parallel test output, this repoter is automatically installed by Ginkgo.
|
||||||
|
|
||||||
|
This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner
|
||||||
|
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
|
||||||
|
in place of Ginkgo's DefaultReporter.
|
||||||
|
*/
|
||||||
|
|
||||||
|
type ForwardingReporter struct {
|
||||||
|
serverHost string
|
||||||
|
poster Poster
|
||||||
|
outputInterceptor OutputInterceptor
|
||||||
|
debugMode bool
|
||||||
|
debugFile *os.File
|
||||||
|
nestedReporter *reporters.DefaultReporter
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
|
||||||
|
reporter := &ForwardingReporter{
|
||||||
|
serverHost: serverHost,
|
||||||
|
poster: poster,
|
||||||
|
outputInterceptor: outputInterceptor,
|
||||||
|
}
|
||||||
|
|
||||||
|
if debugFile != "" {
|
||||||
|
var err error
|
||||||
|
reporter.debugMode = true
|
||||||
|
reporter.debugFile, err = os.Create(debugFile)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err.Error())
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !config.Verbose {
|
||||||
|
//if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
|
||||||
|
ginkgoWriter.AndRedirectTo(reporter.debugFile)
|
||||||
|
}
|
||||||
|
outputInterceptor.StreamTo(reporter.debugFile) //This is not working
|
||||||
|
|
||||||
|
stenographer := stenographer.New(false, true, reporter.debugFile)
|
||||||
|
config.Succinct = false
|
||||||
|
config.Verbose = true
|
||||||
|
config.FullTrace = true
|
||||||
|
reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
|
||||||
|
}
|
||||||
|
|
||||||
|
return reporter
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) post(path string, data interface{}) {
|
||||||
|
encoded, _ := json.Marshal(data)
|
||||||
|
buffer := bytes.NewBuffer(encoded)
|
||||||
|
reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
data := struct {
|
||||||
|
Config config.GinkgoConfigType `json:"config"`
|
||||||
|
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||||
|
}{
|
||||||
|
conf,
|
||||||
|
summary,
|
||||||
|
}
|
||||||
|
|
||||||
|
reporter.outputInterceptor.StartInterceptingOutput()
|
||||||
|
if reporter.debugMode {
|
||||||
|
reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
|
||||||
|
reporter.debugFile.Sync()
|
||||||
|
}
|
||||||
|
reporter.post("/SpecSuiteWillBegin", data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
reporter.outputInterceptor.StartInterceptingOutput()
|
||||||
|
setupSummary.CapturedOutput = output
|
||||||
|
if reporter.debugMode {
|
||||||
|
reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
|
||||||
|
reporter.debugFile.Sync()
|
||||||
|
}
|
||||||
|
reporter.post("/BeforeSuiteDidRun", setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||||
|
if reporter.debugMode {
|
||||||
|
reporter.nestedReporter.SpecWillRun(specSummary)
|
||||||
|
reporter.debugFile.Sync()
|
||||||
|
}
|
||||||
|
reporter.post("/SpecWillRun", specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||||
|
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
reporter.outputInterceptor.StartInterceptingOutput()
|
||||||
|
specSummary.CapturedOutput = output
|
||||||
|
if reporter.debugMode {
|
||||||
|
reporter.nestedReporter.SpecDidComplete(specSummary)
|
||||||
|
reporter.debugFile.Sync()
|
||||||
|
}
|
||||||
|
reporter.post("/SpecDidComplete", specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
reporter.outputInterceptor.StartInterceptingOutput()
|
||||||
|
setupSummary.CapturedOutput = output
|
||||||
|
if reporter.debugMode {
|
||||||
|
reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
|
||||||
|
reporter.debugFile.Sync()
|
||||||
|
}
|
||||||
|
reporter.post("/AfterSuiteDidRun", setupSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||||
|
if reporter.debugMode {
|
||||||
|
reporter.nestedReporter.SpecSuiteDidEnd(summary)
|
||||||
|
reporter.debugFile.Sync()
|
||||||
|
}
|
||||||
|
reporter.post("/SpecSuiteDidEnd", summary)
|
||||||
|
}
|
||||||
13
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
Normal file
13
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
Normal file
|
|
@ -0,0 +1,13 @@
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
/*
|
||||||
|
The OutputInterceptor is used by the ForwardingReporter to
|
||||||
|
intercept and capture all stdin and stderr output during a test run.
|
||||||
|
*/
|
||||||
|
type OutputInterceptor interface {
|
||||||
|
StartInterceptingOutput() error
|
||||||
|
StopInterceptingAndReturnOutput() (string, error)
|
||||||
|
StreamTo(*os.File)
|
||||||
|
}
|
||||||
83
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
Normal file
83
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
Normal file
|
|
@ -0,0 +1,83 @@
|
||||||
|
// +build freebsd openbsd netbsd dragonfly darwin linux solaris
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/hpcloud/tail"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewOutputInterceptor() OutputInterceptor {
|
||||||
|
return &outputInterceptor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type outputInterceptor struct {
|
||||||
|
redirectFile *os.File
|
||||||
|
streamTarget *os.File
|
||||||
|
intercepting bool
|
||||||
|
tailer *tail.Tail
|
||||||
|
doneTailing chan bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||||
|
if interceptor.intercepting {
|
||||||
|
return errors.New("Already intercepting output!")
|
||||||
|
}
|
||||||
|
interceptor.intercepting = true
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call a function in ./syscall_dup_*.go
|
||||||
|
// If building for everything other than linux_arm64,
|
||||||
|
// use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
|
||||||
|
// call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
|
||||||
|
syscallDup(int(interceptor.redirectFile.Fd()), 1)
|
||||||
|
syscallDup(int(interceptor.redirectFile.Fd()), 2)
|
||||||
|
|
||||||
|
if interceptor.streamTarget != nil {
|
||||||
|
interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
|
||||||
|
interceptor.doneTailing = make(chan bool)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for line := range interceptor.tailer.Lines {
|
||||||
|
interceptor.streamTarget.Write([]byte(line.Text + "\n"))
|
||||||
|
}
|
||||||
|
close(interceptor.doneTailing)
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||||
|
if !interceptor.intercepting {
|
||||||
|
return "", errors.New("Not intercepting output!")
|
||||||
|
}
|
||||||
|
|
||||||
|
interceptor.redirectFile.Close()
|
||||||
|
output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
|
||||||
|
os.Remove(interceptor.redirectFile.Name())
|
||||||
|
|
||||||
|
interceptor.intercepting = false
|
||||||
|
|
||||||
|
if interceptor.streamTarget != nil {
|
||||||
|
interceptor.tailer.Stop()
|
||||||
|
interceptor.tailer.Cleanup()
|
||||||
|
<-interceptor.doneTailing
|
||||||
|
interceptor.streamTarget.Sync()
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(output), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StreamTo(out *os.File) {
|
||||||
|
interceptor.streamTarget = out
|
||||||
|
}
|
||||||
36
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
Normal file
36
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewOutputInterceptor() OutputInterceptor {
|
||||||
|
return &outputInterceptor{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type outputInterceptor struct {
|
||||||
|
intercepting bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||||
|
if interceptor.intercepting {
|
||||||
|
return errors.New("Already intercepting output!")
|
||||||
|
}
|
||||||
|
interceptor.intercepting = true
|
||||||
|
|
||||||
|
// not working on windows...
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||||
|
// not working on windows...
|
||||||
|
interceptor.intercepting = false
|
||||||
|
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (interceptor *outputInterceptor) StreamTo(*os.File) {}
|
||||||
224
vendor/github.com/onsi/ginkgo/internal/remote/server.go
generated
vendored
Normal file
224
vendor/github.com/onsi/ginkgo/internal/remote/server.go
generated
vendored
Normal file
|
|
@ -0,0 +1,224 @@
|
||||||
|
/*
|
||||||
|
|
||||||
|
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
|
||||||
|
This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser).
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/spec_iterator"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/reporters"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
|
||||||
|
It then forwards that communication to attached reporters.
|
||||||
|
*/
|
||||||
|
type Server struct {
|
||||||
|
listener net.Listener
|
||||||
|
reporters []reporters.Reporter
|
||||||
|
alives []func() bool
|
||||||
|
lock *sync.Mutex
|
||||||
|
beforeSuiteData types.RemoteBeforeSuiteData
|
||||||
|
parallelTotal int
|
||||||
|
counter int
|
||||||
|
}
|
||||||
|
|
||||||
|
//Create a new server, automatically selecting a port
|
||||||
|
func NewServer(parallelTotal int) (*Server, error) {
|
||||||
|
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Server{
|
||||||
|
listener: listener,
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
alives: make([]func() bool, parallelTotal),
|
||||||
|
beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
|
||||||
|
parallelTotal: parallelTotal,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||||
|
func (server *Server) Start() {
|
||||||
|
httpServer := &http.Server{}
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
httpServer.Handler = mux
|
||||||
|
|
||||||
|
//streaming endpoints
|
||||||
|
mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
|
||||||
|
mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
|
||||||
|
mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
|
||||||
|
mux.HandleFunc("/SpecWillRun", server.specWillRun)
|
||||||
|
mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
|
||||||
|
mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
|
||||||
|
|
||||||
|
//synchronization endpoints
|
||||||
|
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
|
||||||
|
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
|
||||||
|
mux.HandleFunc("/counter", server.handleCounter)
|
||||||
|
mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
|
||||||
|
|
||||||
|
go httpServer.Serve(server.listener)
|
||||||
|
}
|
||||||
|
|
||||||
|
//Stop the server
|
||||||
|
func (server *Server) Close() {
|
||||||
|
server.listener.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
//The address the server can be reached it. Pass this into the `ForwardingReporter`.
|
||||||
|
func (server *Server) Address() string {
|
||||||
|
return "http://" + server.listener.Addr().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// Streaming Endpoints
|
||||||
|
//
|
||||||
|
|
||||||
|
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||||
|
func (server *Server) readAll(request *http.Request) []byte {
|
||||||
|
defer request.Body.Close()
|
||||||
|
body, _ := ioutil.ReadAll(request.Body)
|
||||||
|
return body
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
|
||||||
|
server.reporters = reporters
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
|
||||||
|
var data struct {
|
||||||
|
Config config.GinkgoConfigType `json:"config"`
|
||||||
|
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||||
|
}
|
||||||
|
|
||||||
|
json.Unmarshal(body, &data)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.SpecSuiteWillBegin(data.Config, data.Summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var setupSummary *types.SetupSummary
|
||||||
|
json.Unmarshal(body, &setupSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.BeforeSuiteDidRun(setupSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var setupSummary *types.SetupSummary
|
||||||
|
json.Unmarshal(body, &setupSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.AfterSuiteDidRun(setupSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var specSummary *types.SpecSummary
|
||||||
|
json.Unmarshal(body, &specSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.SpecWillRun(specSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var specSummary *types.SpecSummary
|
||||||
|
json.Unmarshal(body, &specSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.SpecDidComplete(specSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
body := server.readAll(request)
|
||||||
|
var suiteSummary *types.SuiteSummary
|
||||||
|
json.Unmarshal(body, &suiteSummary)
|
||||||
|
|
||||||
|
for _, reporter := range server.reporters {
|
||||||
|
reporter.SpecSuiteDidEnd(suiteSummary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//
|
||||||
|
// Synchronization Endpoints
|
||||||
|
//
|
||||||
|
|
||||||
|
func (server *Server) RegisterAlive(node int, alive func() bool) {
|
||||||
|
server.lock.Lock()
|
||||||
|
defer server.lock.Unlock()
|
||||||
|
server.alives[node-1] = alive
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) nodeIsAlive(node int) bool {
|
||||||
|
server.lock.Lock()
|
||||||
|
defer server.lock.Unlock()
|
||||||
|
alive := server.alives[node-1]
|
||||||
|
if alive == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return alive()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
if request.Method == "POST" {
|
||||||
|
dec := json.NewDecoder(request.Body)
|
||||||
|
dec.Decode(&(server.beforeSuiteData))
|
||||||
|
} else {
|
||||||
|
beforeSuiteData := server.beforeSuiteData
|
||||||
|
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
|
||||||
|
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
|
||||||
|
}
|
||||||
|
enc := json.NewEncoder(writer)
|
||||||
|
enc.Encode(beforeSuiteData)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
afterSuiteData := types.RemoteAfterSuiteData{
|
||||||
|
CanRun: true,
|
||||||
|
}
|
||||||
|
for i := 2; i <= server.parallelTotal; i++ {
|
||||||
|
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
enc := json.NewEncoder(writer)
|
||||||
|
enc.Encode(afterSuiteData)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
c := spec_iterator.Counter{}
|
||||||
|
server.lock.Lock()
|
||||||
|
c.Index = server.counter
|
||||||
|
server.counter++
|
||||||
|
server.lock.Unlock()
|
||||||
|
|
||||||
|
json.NewEncoder(writer).Encode(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
writer.Write([]byte(""))
|
||||||
|
}
|
||||||
11
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
generated
vendored
Normal file
11
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
// +build linux,arm64
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
|
||||||
|
// use the nearly identical syscall.Dup3 instead
|
||||||
|
func syscallDup(oldfd int, newfd int) (err error) {
|
||||||
|
return syscall.Dup3(oldfd, newfd, 0)
|
||||||
|
}
|
||||||
9
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
generated
vendored
Normal file
9
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
generated
vendored
Normal file
|
|
@ -0,0 +1,9 @@
|
||||||
|
// +build solaris
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import "golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
func syscallDup(oldfd int, newfd int) (err error) {
|
||||||
|
return unix.Dup2(oldfd, newfd)
|
||||||
|
}
|
||||||
11
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
generated
vendored
Normal file
11
vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
// +build !linux !arm64
|
||||||
|
// +build !windows
|
||||||
|
// +build !solaris
|
||||||
|
|
||||||
|
package remote
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
func syscallDup(oldfd int, newfd int) (err error) {
|
||||||
|
return syscall.Dup2(oldfd, newfd)
|
||||||
|
}
|
||||||
247
vendor/github.com/onsi/ginkgo/internal/spec/spec.go
generated
vendored
Normal file
247
vendor/github.com/onsi/ginkgo/internal/spec/spec.go
generated
vendored
Normal file
|
|
@ -0,0 +1,247 @@
|
||||||
|
package spec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/containernode"
|
||||||
|
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Spec struct {
|
||||||
|
subject leafnodes.SubjectNode
|
||||||
|
focused bool
|
||||||
|
announceProgress bool
|
||||||
|
|
||||||
|
containers []*containernode.ContainerNode
|
||||||
|
|
||||||
|
state types.SpecState
|
||||||
|
runTime time.Duration
|
||||||
|
startTime time.Time
|
||||||
|
failure types.SpecFailure
|
||||||
|
previousFailures bool
|
||||||
|
|
||||||
|
stateMutex *sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
|
||||||
|
spec := &Spec{
|
||||||
|
subject: subject,
|
||||||
|
containers: containers,
|
||||||
|
focused: subject.Flag() == types.FlagTypeFocused,
|
||||||
|
announceProgress: announceProgress,
|
||||||
|
stateMutex: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
|
||||||
|
spec.processFlag(subject.Flag())
|
||||||
|
for i := len(containers) - 1; i >= 0; i-- {
|
||||||
|
spec.processFlag(containers[i].Flag())
|
||||||
|
}
|
||||||
|
|
||||||
|
return spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (spec *Spec) processFlag(flag types.FlagType) {
|
||||||
|
if flag == types.FlagTypeFocused {
|
||||||
|
spec.focused = true
|
||||||
|
} else if flag == types.FlagTypePending {
|
||||||
|
spec.setState(types.SpecStatePending)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip marks the spec as skipped so it will not be run.
func (spec *Spec) Skip() {
	spec.setState(types.SpecStateSkipped)
}

// Failed reports whether the spec ended in any failing state
// (failed, panicked, or timed out).
func (spec *Spec) Failed() bool {
	return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
}

// Passed reports whether the spec ran and passed.
func (spec *Spec) Passed() bool {
	return spec.getState() == types.SpecStatePassed
}

// Flaked reports whether the spec eventually passed after at least one
// earlier failed attempt (previousFailures is recorded by Run).
func (spec *Spec) Flaked() bool {
	return spec.getState() == types.SpecStatePassed && spec.previousFailures
}

// Pending reports whether the spec is marked pending.
func (spec *Spec) Pending() bool {
	return spec.getState() == types.SpecStatePending
}

// Skipped reports whether the spec was skipped.
func (spec *Spec) Skipped() bool {
	return spec.getState() == types.SpecStateSkipped
}

// Focused reports whether the spec is programmatically focused.
func (spec *Spec) Focused() bool {
	return spec.focused
}

// IsMeasurement reports whether the spec's subject is a Measure node.
func (spec *Spec) IsMeasurement() bool {
	return spec.subject.Type() == types.SpecComponentTypeMeasure
}
|
||||||
|
|
||||||
|
// Summary assembles a SpecSummary snapshot of this spec for reporters:
// the description texts and code locations of every container plus the
// subject, the current state, run time, failure info, and measurements.
func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
	componentTexts := make([]string, len(spec.containers)+1)
	componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)

	for i, container := range spec.containers {
		componentTexts[i] = container.Text()
		componentCodeLocations[i] = container.CodeLocation()
	}

	// The subject occupies the final slot, after all enclosing containers.
	componentTexts[len(spec.containers)] = spec.subject.Text()
	componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()

	// If the spec is still running (runTime not yet recorded by Run's defer
	// but startTime set), report the elapsed time so far.
	runTime := spec.runTime
	if runTime == 0 && !spec.startTime.IsZero() {
		runTime = time.Since(spec.startTime)
	}

	return &types.SpecSummary{
		IsMeasurement:          spec.IsMeasurement(),
		NumberOfSamples:        spec.subject.Samples(),
		ComponentTexts:         componentTexts,
		ComponentCodeLocations: componentCodeLocations,
		State:                  spec.getState(),
		RunTime:                runTime,
		Failure:                spec.failure,
		Measurements:           spec.measurementsReport(),
		SuiteID:                suiteID,
	}
}
|
||||||
|
|
||||||
|
func (spec *Spec) ConcatenatedString() string {
|
||||||
|
s := ""
|
||||||
|
for _, container := range spec.containers {
|
||||||
|
s += container.Text() + " "
|
||||||
|
}
|
||||||
|
|
||||||
|
return s + spec.subject.Text()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes the spec, running up to Samples() samples and stopping at the
// first sample that does not pass. If the spec is currently in a failed state
// (i.e. this is a retry of a previously failed run), that is remembered so
// Flaked() can report it.
func (spec *Spec) Run(writer io.Writer) {
	if spec.getState() == types.SpecStateFailed {
		spec.previousFailures = true
	}

	spec.startTime = time.Now()
	// Record the total wall-clock duration no matter how the run exits.
	defer func() {
		spec.runTime = time.Since(spec.startTime)
	}()

	for sample := 0; sample < spec.subject.Samples(); sample++ {
		spec.runSample(sample, writer)

		// Abort remaining samples on the first non-passing result.
		if spec.getState() != types.SpecStatePassed {
			return
		}
	}
}
|
||||||
|
|
||||||
|
// getState returns the spec's current state under the state mutex.
func (spec *Spec) getState() types.SpecState {
	spec.stateMutex.Lock()
	defer spec.stateMutex.Unlock()
	return spec.state
}

// setState updates the spec's state under the state mutex.
func (spec *Spec) setState(state types.SpecState) {
	spec.stateMutex.Lock()
	defer spec.stateMutex.Unlock()
	spec.state = state
}
|
||||||
|
|
||||||
|
func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||||
|
spec.setState(types.SpecStatePassed)
|
||||||
|
spec.failure = types.SpecFailure{}
|
||||||
|
innerMostContainerIndexToUnwind := -1
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||||
|
container := spec.containers[i]
|
||||||
|
for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
|
||||||
|
spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
|
||||||
|
justAfterEachState, justAfterEachFailure := justAfterEach.Run()
|
||||||
|
if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
|
||||||
|
spec.state = justAfterEachState
|
||||||
|
spec.failure = justAfterEachFailure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||||
|
container := spec.containers[i]
|
||||||
|
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
|
||||||
|
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
|
||||||
|
afterEachState, afterEachFailure := afterEach.Run()
|
||||||
|
if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
|
||||||
|
spec.setState(afterEachState)
|
||||||
|
spec.failure = afterEachFailure
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for i, container := range spec.containers {
|
||||||
|
innerMostContainerIndexToUnwind = i
|
||||||
|
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
|
||||||
|
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
|
||||||
|
s, f := beforeEach.Run()
|
||||||
|
spec.failure = f
|
||||||
|
spec.setState(s)
|
||||||
|
if spec.getState() != types.SpecStatePassed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, container := range spec.containers {
|
||||||
|
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
|
||||||
|
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
|
||||||
|
s, f := justBeforeEach.Run()
|
||||||
|
spec.failure = f
|
||||||
|
spec.setState(s)
|
||||||
|
if spec.getState() != types.SpecStatePassed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
spec.announceSubject(writer, spec.subject)
|
||||||
|
s, f := spec.subject.Run()
|
||||||
|
spec.failure = f
|
||||||
|
spec.setState(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// announceSetupNode writes a progress line for a setup node (BeforeEach,
// JustBeforeEach, AfterEach, ...) when progress announcement is enabled.
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
	if spec.announceProgress {
		s := fmt.Sprintf("[%s] %s\n  %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
		writer.Write([]byte(s))
	}
}
|
||||||
|
|
||||||
|
func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
|
||||||
|
if spec.announceProgress {
|
||||||
|
nodeType := ""
|
||||||
|
switch subject.Type() {
|
||||||
|
case types.SpecComponentTypeIt:
|
||||||
|
nodeType = "It"
|
||||||
|
case types.SpecComponentTypeMeasure:
|
||||||
|
nodeType = "Measure"
|
||||||
|
}
|
||||||
|
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
|
||||||
|
writer.Write([]byte(s))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// measurementsReport returns the subject's measurement results, or an empty
// map when the spec is not a measurement or failed.
func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
	if !spec.IsMeasurement() || spec.Failed() {
		return map[string]*types.SpecMeasurement{}
	}

	// Safe by construction: IsMeasurement() established the subject is a
	// Measure node, so the type assertion cannot fail.
	return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
}
|
||||||
144
vendor/github.com/onsi/ginkgo/internal/spec/specs.go
generated
vendored
Normal file
144
vendor/github.com/onsi/ginkgo/internal/spec/specs.go
generated
vendored
Normal file
|
|
@ -0,0 +1,144 @@
|
||||||
|
package spec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Specs is an ordered collection of specs together with their precomputed
// concatenated description strings (kept in a parallel slice).
type Specs struct {
	specs []*Spec
	// names[i] is specs[i].ConcatenatedString(), used for sorting and
	// regex-based focus/skip matching.
	names []string

	// hasProgrammaticFocus is set by applyProgrammaticFocus when at least
	// one non-pending spec is focused.
	hasProgrammaticFocus bool
	// RegexScansFilePath makes the focus/skip regexes also match against
	// the subject's file path.
	RegexScansFilePath bool
}
|
||||||
|
|
||||||
|
func NewSpecs(specs []*Spec) *Specs {
|
||||||
|
names := make([]string, len(specs))
|
||||||
|
for i, spec := range specs {
|
||||||
|
names[i] = spec.ConcatenatedString()
|
||||||
|
}
|
||||||
|
return &Specs{
|
||||||
|
specs: specs,
|
||||||
|
names: names,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Specs returns the underlying spec slice.
func (e *Specs) Specs() []*Spec {
	return e.specs
}

// HasProgrammaticFocus reports whether applyProgrammaticFocus found a
// focused, non-pending spec.
func (e *Specs) HasProgrammaticFocus() bool {
	return e.hasProgrammaticFocus
}
|
||||||
|
|
||||||
|
// Shuffle randomly permutes the specs (and the parallel names slice) using r.
// The collection is sorted first so the permutation is deterministic for a
// given seed, independent of the original insertion order.
func (e *Specs) Shuffle(r *rand.Rand) {
	sort.Sort(e)
	permutation := r.Perm(len(e.specs))
	shuffledSpecs := make([]*Spec, len(e.specs))
	names := make([]string, len(e.specs))
	for i, j := range permutation {
		shuffledSpecs[i] = e.specs[j]
		names[i] = e.names[j]
	}
	e.specs = shuffledSpecs
	e.names = names
}
|
||||||
|
|
||||||
|
func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
|
||||||
|
if focusString == "" && skipString == "" {
|
||||||
|
e.applyProgrammaticFocus()
|
||||||
|
} else {
|
||||||
|
e.applyRegExpFocusAndSkip(description, focusString, skipString)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Specs) applyProgrammaticFocus() {
|
||||||
|
e.hasProgrammaticFocus = false
|
||||||
|
for _, spec := range e.specs {
|
||||||
|
if spec.Focused() && !spec.Pending() {
|
||||||
|
e.hasProgrammaticFocus = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.hasProgrammaticFocus {
|
||||||
|
for _, spec := range e.specs {
|
||||||
|
if !spec.Focused() {
|
||||||
|
spec.Skip()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function,
|
||||||
|
// this is the place which we append to.
|
||||||
|
func (e *Specs) toMatch(description string, i int) []byte {
|
||||||
|
if i > len(e.names) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if e.RegexScansFilePath {
|
||||||
|
return []byte(
|
||||||
|
description + " " +
|
||||||
|
e.names[i] + " " +
|
||||||
|
e.specs[i].subject.CodeLocation().FileName)
|
||||||
|
} else {
|
||||||
|
return []byte(
|
||||||
|
description + " " +
|
||||||
|
e.names[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyRegExpFocusAndSkip skips every spec that does not match the focus
// regex (when given) or does match the skip regex (when given). The match
// text is built by toMatch from the suite description and spec name.
// NOTE: regexp.MustCompile panics on an invalid pattern supplied by the user.
func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) {
	var focusFilter *regexp.Regexp
	if focusString != "" {
		focusFilter = regexp.MustCompile(focusString)
	}
	var skipFilter *regexp.Regexp
	if skipString != "" {
		skipFilter = regexp.MustCompile(skipString)
	}

	for i, spec := range e.specs {
		// With no focus filter every spec matches focus; with no skip
		// filter nothing matches skip.
		matchesFocus := true
		matchesSkip := false

		toMatch := e.toMatch(description, i)

		if focusFilter != nil {
			matchesFocus = focusFilter.Match(toMatch)
		}

		if skipFilter != nil {
			matchesSkip = skipFilter.Match(toMatch)
		}

		if !matchesFocus || matchesSkip {
			spec.Skip()
		}
	}
}
|
||||||
|
|
||||||
|
// SkipMeasurements marks every measurement spec as skipped.
func (e *Specs) SkipMeasurements() {
	for _, spec := range e.specs {
		if spec.IsMeasurement() {
			spec.Skip()
		}
	}
}
|
||||||
|
|
||||||
|
//sort.Interface

// Len returns the number of specs in the collection.
func (e *Specs) Len() int {
	return len(e.specs)
}

// Less orders specs lexicographically by their concatenated description.
func (e *Specs) Less(i, j int) bool {
	return e.names[i] < e.names[j]
}

// Swap exchanges specs i and j, keeping the parallel names slice in sync.
func (e *Specs) Swap(i, j int) {
	e.names[i], e.names[j] = e.names[j], e.names[i]
	e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
}
|
||||||
55
vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
generated
vendored
Normal file
55
vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,55 @@
|
||||||
|
package spec_iterator
|
||||||
|
|
||||||
|
// ParallelizedIndexRange computes the half-open slice of specs assigned to a
// given parallel node (1-based). Specs are distributed as evenly as possible:
// the first (length % parallelTotal) nodes each run one extra spec.
func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
	if length == 0 {
		return 0, 0
	}

	// More nodes than specs: each of the first `length` nodes runs exactly
	// one spec; the remaining nodes run nothing.
	if parallelTotal >= length {
		if parallelNode > length {
			return 0, 0
		}
		return parallelNode - 1, 1
	}

	base := length / parallelTotal      // every node runs at least this many
	remainder := length % parallelTotal // this many nodes run one extra

	// Nodes 1..remainder run base+1 specs; the rest run base specs.
	if parallelNode <= remainder {
		return (parallelNode - 1) * (base + 1), base + 1
	}
	return remainder*(base+1) + (parallelNode-remainder-1)*base, base
}
|
||||||
59
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
generated
vendored
Normal file
59
vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,59 @@
|
||||||
|
package spec_iterator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/spec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParallelIterator hands out specs by asking a remote coordination server for
// the next global spec index, so multiple test processes share one stream.
type ParallelIterator struct {
	specs []*Spec
	// host is the base URL of the coordination server ("/counter" is
	// appended per request).
	host   string
	client *http.Client
}

// NewParallelIterator builds a ParallelIterator for the given specs and
// coordination-server host.
func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
	return &ParallelIterator{
		specs:  specs,
		host:   host,
		client: &http.Client{},
	}
}
|
||||||
|
|
||||||
|
// Next fetches the next global spec index from the coordination server and
// returns the corresponding spec. It returns ErrClosed once the counter runs
// past the end of the spec list, and propagates HTTP/decoding errors.
// NOTE(review): the client has no timeout configured, so a hung server blocks
// this call indefinitely — confirm whether that is intended.
func (s *ParallelIterator) Next() (*spec.Spec, error) {
	resp, err := s.client.Get(s.host + "/counter")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}

	var counter Counter
	err = json.NewDecoder(resp.Body).Decode(&counter)
	if err != nil {
		return nil, err
	}

	if counter.Index >= len(s.specs) {
		return nil, ErrClosed
	}

	return s.specs[counter.Index], nil
}
|
||||||
|
|
||||||
|
// NumberOfSpecsPriorToIteration returns the total number of specs known
// locally before iteration begins.
func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

// NumberOfSpecsToProcessIfKnown reports unknown: the share this node will
// process is decided dynamically by the remote counter.
func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return -1, false
}

// NumberOfSpecsThatWillBeRunIfKnown reports unknown for the same reason.
func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	return -1, false
}
|
||||||
45
vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
generated
vendored
Normal file
45
vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,45 @@
|
||||||
|
package spec_iterator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/internal/spec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SerialIterator walks a fixed spec slice in order, in a single process.
type SerialIterator struct {
	specs []*spec.Spec
	// index is the position of the next spec to hand out.
	index int
}

// NewSerialIterator builds a SerialIterator positioned at the first spec.
func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
	return &SerialIterator{
		specs: specs,
		index: 0,
	}
}
|
||||||
|
|
||||||
|
func (s *SerialIterator) Next() (*spec.Spec, error) {
|
||||||
|
if s.index >= len(s.specs) {
|
||||||
|
return nil, ErrClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
spec := s.specs[s.index]
|
||||||
|
s.index += 1
|
||||||
|
return spec, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NumberOfSpecsPriorToIteration returns the total number of specs.
func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

// NumberOfSpecsToProcessIfKnown returns the total spec count; a serial
// iterator always processes every spec.
func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return len(s.specs), true
}

// NumberOfSpecsThatWillBeRunIfKnown counts the specs that are neither
// skipped nor pending at the time of the call.
func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	count := 0
	for _, s := range s.specs {
		if !s.Skipped() && !s.Pending() {
			count += 1
		}
	}
	return count, true
}
|
||||||
47
vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
generated
vendored
Normal file
47
vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,47 @@
|
||||||
|
package spec_iterator
|
||||||
|
|
||||||
|
import "github.com/onsi/ginkgo/internal/spec"
|
||||||
|
|
||||||
|
// ShardedParallelIterator walks only this node's statically-assigned shard of
// the spec slice, computed up front by ParallelizedIndexRange.
type ShardedParallelIterator struct {
	specs []*spec.Spec
	// index is the next spec to hand out; maxIndex is one past the last
	// spec in this node's shard.
	index    int
	maxIndex int
}

// NewShardedParallelIterator builds an iterator over the shard of specs
// assigned to the given node (1-based) out of total nodes.
func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
	startIndex, count := ParallelizedIndexRange(len(specs), total, node)

	return &ShardedParallelIterator{
		specs:    specs,
		index:    startIndex,
		maxIndex: startIndex + count,
	}
}
|
||||||
|
|
||||||
|
// Next returns the next spec in this node's shard, or ErrClosed when the
// shard is exhausted.
func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
	if s.index >= s.maxIndex {
		return nil, ErrClosed
	}

	spec := s.specs[s.index]
	s.index += 1
	return spec, nil
}
|
||||||
|
|
||||||
|
// NumberOfSpecsPriorToIteration returns the total number of specs across all
// shards, not just this node's share.
func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

// NumberOfSpecsToProcessIfKnown returns how many specs remain in this
// node's shard.
func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return s.maxIndex - s.index, true
}

// NumberOfSpecsThatWillBeRunIfKnown counts the remaining shard specs that
// are neither skipped nor pending.
func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	count := 0
	for i := s.index; i < s.maxIndex; i += 1 {
		if !s.specs[i].Skipped() && !s.specs[i].Pending() {
			count += 1
		}
	}
	return count, true
}
|
||||||
20
vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
generated
vendored
Normal file
20
vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
generated
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
package spec_iterator
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/spec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ErrClosed is returned by Next when an iterator has no more specs to hand out.
var ErrClosed = errors.New("no more specs to run")

// SpecIterator yields specs one at a time and reports what is known about
// the size of the iteration up front.
type SpecIterator interface {
	// Next returns the next spec, or ErrClosed when exhausted.
	Next() (*spec.Spec, error)
	// NumberOfSpecsPriorToIteration is the total spec count before any
	// filtering or sharding takes effect during iteration.
	NumberOfSpecsPriorToIteration() int
	// NumberOfSpecsToProcessIfKnown returns (count, true) when the number
	// of specs this iterator will yield is known ahead of time.
	NumberOfSpecsToProcessIfKnown() (int, bool)
	// NumberOfSpecsThatWillBeRunIfKnown returns (count, true) when the
	// number of non-skipped, non-pending specs is known ahead of time.
	NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
}

// Counter is the JSON payload served by the parallel coordination server's
// /counter endpoint.
type Counter struct {
	Index int `json:"index"`
}
|
||||||
15
vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
generated
vendored
Normal file
15
vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
generated
vendored
Normal file
|
|
@ -0,0 +1,15 @@
|
||||||
|
package specrunner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// randomID returns a random identifier of the form "xxxx-xxxx-xxxx-xxxx"
// (four hex-encoded 16-bit groups), or the empty string if the system's
// secure random source fails.
func randomID() string {
	var raw [8]byte
	if _, err := rand.Read(raw[:]); err != nil {
		return ""
	}
	return fmt.Sprintf("%x-%x-%x-%x", raw[0:2], raw[2:4], raw[4:6], raw[6:8])
}
|
||||||
411
vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
generated
vendored
Normal file
411
vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
generated
vendored
Normal file
|
|
@ -0,0 +1,411 @@
|
||||||
|
package specrunner
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/spec_iterator"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||||
|
"github.com/onsi/ginkgo/internal/spec"
|
||||||
|
Writer "github.com/onsi/ginkgo/internal/writer"
|
||||||
|
"github.com/onsi/ginkgo/reporters"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SpecRunner drives a whole suite: BeforeSuite, every spec yielded by the
// iterator, AfterSuite, and reporter notifications along the way.
type SpecRunner struct {
	description     string
	beforeSuiteNode leafnodes.SuiteNode
	iterator        spec_iterator.SpecIterator
	afterSuiteNode  leafnodes.SuiteNode
	reporters       []reporters.Reporter
	startTime       time.Time
	suiteID         string
	// runningSpec is the spec currently executing, nil between specs.
	runningSpec *spec.Spec
	writer      Writer.WriterInterface
	config      config.GinkgoConfigType
	// interrupted is guarded by lock; set once a ^C/SIGTERM is received.
	interrupted bool
	// processedSpecs accumulates every spec the iterator yielded, for the
	// end-of-suite summary counts.
	processedSpecs []*spec.Spec
	lock           *sync.Mutex
}
|
||||||
|
|
||||||
|
// New constructs a SpecRunner wired to the given suite nodes, spec iterator,
// reporters, writer, and configuration, assigning a fresh random suite ID.
func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
	return &SpecRunner{
		description:     description,
		beforeSuiteNode: beforeSuiteNode,
		iterator:        iterator,
		afterSuiteNode:  afterSuiteNode,
		reporters:       reporters,
		writer:          writer,
		config:          config,
		suiteID:         randomID(),
		lock:            &sync.Mutex{},
	}
}
|
||||||
|
|
||||||
|
// Run executes the suite and reports whether it passed. In dry-run mode it
// only walks the specs and reports them as passed. Otherwise it registers an
// interrupt handler, runs BeforeSuite, the specs (only if BeforeSuite
// passed), then AfterSuite regardless of spec outcome.
func (runner *SpecRunner) Run() bool {
	if runner.config.DryRun {
		runner.performDryRun()
		return true
	}

	runner.reportSuiteWillBegin()
	// Block until the signal handler goroutine has actually registered, so
	// an early interrupt cannot be missed.
	signalRegistered := make(chan struct{})
	go runner.registerForInterrupts(signalRegistered)
	<-signalRegistered

	suitePassed := runner.runBeforeSuite()

	if suitePassed {
		suitePassed = runner.runSpecs()
	}

	// If an interrupt arrived, park here forever; the interrupt handler
	// goroutine owns shutdown (it runs AfterSuite and exits the process).
	runner.blockForeverIfInterrupted()

	// AfterSuite runs even when specs failed; its result folds into the
	// overall verdict.
	suitePassed = runner.runAfterSuite() && suitePassed

	runner.reportSuiteDidEnd(suitePassed)

	return suitePassed
}
|
||||||
|
|
||||||
|
// performDryRun walks the whole suite without executing anything, reporting
// BeforeSuite, every spec, and AfterSuite as passed so reporters can render
// the full tree.
func (runner *SpecRunner) performDryRun() {
	runner.reportSuiteWillBegin()

	if runner.beforeSuiteNode != nil {
		summary := runner.beforeSuiteNode.Summary()
		summary.State = types.SpecStatePassed
		runner.reportBeforeSuite(summary)
	}

	for {
		spec, err := runner.iterator.Next()
		if err == spec_iterator.ErrClosed {
			break
		}
		if err != nil {
			fmt.Println("failed to iterate over tests:\n" + err.Error())
			break
		}

		runner.processedSpecs = append(runner.processedSpecs, spec)

		summary := spec.Summary(runner.suiteID)
		runner.reportSpecWillRun(summary)
		// Specs that never ran carry the zero "invalid" state; present
		// them as passed for dry-run output.
		if summary.State == types.SpecStateInvalid {
			summary.State = types.SpecStatePassed
		}
		runner.reportSpecDidComplete(summary, false)
	}

	if runner.afterSuiteNode != nil {
		summary := runner.afterSuiteNode.Summary()
		summary.State = types.SpecStatePassed
		runner.reportAfterSuite(summary)
	}

	runner.reportSuiteDidEnd(true)
}
|
||||||
|
|
||||||
|
// runBeforeSuite runs the BeforeSuite node (if any) and reports its summary.
// It is a no-op success when there is no node or the suite was already
// interrupted. On failure the captured GinkgoWriter output is dumped.
func (runner *SpecRunner) runBeforeSuite() bool {
	if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
		return true
	}

	runner.writer.Truncate()
	conf := runner.config
	passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
	if !passed {
		runner.writer.DumpOut()
	}
	runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
	return passed
}

// runAfterSuite runs the AfterSuite node (if any) and reports its summary.
// Unlike runBeforeSuite it runs even after an interrupt (it is invoked from
// the interrupt handler for cleanup). On failure the captured GinkgoWriter
// output is dumped.
func (runner *SpecRunner) runAfterSuite() bool {
	if runner.afterSuiteNode == nil {
		return true
	}

	runner.writer.Truncate()
	conf := runner.config
	passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
	if !passed {
		runner.writer.DumpOut()
	}
	runner.reportAfterSuite(runner.afterSuiteNode.Summary())
	return passed
}
|
||||||
|
|
||||||
|
// runSpecs drains the iterator, running each runnable spec and reporting
// skipped/pending ones. It honors interrupts (stop iterating) and fail-fast
// (skip everything after the first failure). Returns true when no spec
// failed and iteration itself did not error.
func (runner *SpecRunner) runSpecs() bool {
	suiteFailed := false
	skipRemainingSpecs := false
	for {
		spec, err := runner.iterator.Next()
		if err == spec_iterator.ErrClosed {
			break
		}
		if err != nil {
			fmt.Println("failed to iterate over tests:\n" + err.Error())
			suiteFailed = true
			break
		}

		runner.processedSpecs = append(runner.processedSpecs, spec)

		if runner.wasInterrupted() {
			break
		}
		if skipRemainingSpecs {
			spec.Skip()
		}

		if !spec.Skipped() && !spec.Pending() {
			if passed := runner.runSpec(spec); !passed {
				suiteFailed = true
			}
		} else if spec.Pending() && runner.config.FailOnPending {
			// Pending specs count as failures under --failOnPending,
			// but are only reported, never executed.
			runner.reportSpecWillRun(spec.Summary(runner.suiteID))
			suiteFailed = true
			runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
		} else {
			// Skipped or (tolerated) pending: report without running.
			runner.reportSpecWillRun(spec.Summary(runner.suiteID))
			runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
		}

		if spec.Failed() && runner.config.FailFast {
			skipRemainingSpecs = true
		}
	}

	return !suiteFailed
}
|
||||||
|
|
||||||
|
// runSpec runs one spec up to FlakeAttempts times, reporting each attempt,
// and returns true as soon as an attempt passes. runningSpec is exposed while
// the spec executes so CurrentSpecSummary can describe it.
func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
	maxAttempts := 1
	if runner.config.FlakeAttempts > 0 {
		// uninitialized configs count as 1
		maxAttempts = runner.config.FlakeAttempts
	}

	for i := 0; i < maxAttempts; i++ {
		runner.reportSpecWillRun(spec.Summary(runner.suiteID))
		runner.runningSpec = spec
		spec.Run(runner.writer)
		runner.runningSpec = nil
		runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
		if !spec.Failed() {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// CurrentSpecSummary returns a summary of the spec currently executing, or
// (nil, false) when no spec is running.
// NOTE(review): runningSpec is read here without runner.lock while runSpec
// writes it from the running goroutine — confirm callers only invoke this
// from contexts where that race is acceptable.
func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
	if runner.runningSpec == nil {
		return nil, false
	}

	return runner.runningSpec.Summary(runner.suiteID), true
}
|
||||||
|
|
||||||
|
// registerForInterrupts blocks on SIGINT/SIGTERM and owns graceful shutdown:
// on the first signal it marks the runner interrupted, arms a second-signal
// handler for immediate termination, dumps captured output, runs AfterSuite
// (if any), reports the suite as failed, and exits with status 1.
// signalRegistered is closed once signal.Notify is in place so the caller
// can safely proceed.
func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	close(signalRegistered)

	<-c
	signal.Stop(c)
	runner.markInterrupted()
	// From here a second ^C is handled by registerForHardInterrupts and
	// kills the process without running AfterSuite.
	go runner.registerForHardInterrupts()
	runner.writer.DumpOutWithHeader(`
Received interrupt. Emitting contents of GinkgoWriter...
---------------------------------------------------------
`)
	if runner.afterSuiteNode != nil {
		fmt.Fprint(os.Stderr, `
---------------------------------------------------------
Received interrupt. Running AfterSuite...
^C again to terminate immediately
`)
		runner.runAfterSuite()
	}
	runner.reportSuiteDidEnd(false)
	os.Exit(1)
}
|
||||||
|
|
||||||
|
// registerForHardInterrupts waits for a second SIGINT/SIGTERM and terminates
// the process immediately, bypassing AfterSuite cleanup.
func (runner *SpecRunner) registerForHardInterrupts() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)

	<-c
	fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
	os.Exit(1)
}
|
||||||
|
|
||||||
|
// blockForeverIfInterrupted parks the calling goroutine forever when an
// interrupt has been received; shutdown is then driven entirely by the
// interrupt-handler goroutine, which exits the process.
func (runner *SpecRunner) blockForeverIfInterrupted() {
	runner.lock.Lock()
	interrupted := runner.interrupted
	runner.lock.Unlock()

	if interrupted {
		// select with no cases blocks forever.
		select {}
	}
}

// markInterrupted records, under the lock, that an interrupt was received.
func (runner *SpecRunner) markInterrupted() {
	runner.lock.Lock()
	defer runner.lock.Unlock()
	runner.interrupted = true
}

// wasInterrupted reports, under the lock, whether an interrupt was received.
func (runner *SpecRunner) wasInterrupted() bool {
	runner.lock.Lock()
	defer runner.lock.Unlock()
	return runner.interrupted
}
|
||||||
|
|
||||||
|
// reportSuiteWillBegin stamps the suite start time and notifies every
// reporter that the suite is starting.
func (runner *SpecRunner) reportSuiteWillBegin() {
	runner.startTime = time.Now()
	summary := runner.suiteWillBeginSummary()
	for _, reporter := range runner.reporters {
		reporter.SpecSuiteWillBegin(runner.config, summary)
	}
}

// reportBeforeSuite notifies every reporter of the BeforeSuite result.
func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
	for _, reporter := range runner.reporters {
		reporter.BeforeSuiteDidRun(summary)
	}
}

// reportAfterSuite notifies every reporter of the AfterSuite result.
func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
	for _, reporter := range runner.reporters {
		reporter.AfterSuiteDidRun(summary)
	}
}

// reportSpecWillRun clears the captured-output buffer for the new spec and
// notifies every reporter the spec is about to run.
func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
	runner.writer.Truncate()

	for _, reporter := range runner.reporters {
		reporter.SpecWillRun(summary)
	}
}
|
||||||
|
|
||||||
|
// reportSpecDidComplete fills in captured output (if the summary has none)
// and notifies reporters that the spec finished. Reporters 1..n-1 are
// notified in reverse order first; reporter 0 is notified last, after any
// failure output has been dumped — presumably so the first (console)
// reporter's completion output follows the dumped GinkgoWriter contents
// (NOTE(review): confirm this ordering intent).
func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
	if len(summary.CapturedOutput) == 0 {
		summary.CapturedOutput = string(runner.writer.Bytes())
	}
	for i := len(runner.reporters) - 1; i >= 1; i-- {
		runner.reporters[i].SpecDidComplete(summary)
	}

	if failed {
		runner.writer.DumpOut()
	}

	runner.reporters[0].SpecDidComplete(summary)
}
|
||||||
|
|
||||||
|
// reportSuiteDidEnd builds the end-of-suite summary (with total run time)
// and notifies every reporter.
func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
	summary := runner.suiteDidEndSummary(success)
	summary.RunTime = time.Since(runner.startTime)
	for _, reporter := range runner.reporters {
		reporter.SpecSuiteDidEnd(summary)
	}
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
|
||||||
|
count = 0
|
||||||
|
|
||||||
|
for _, spec := range runner.processedSpecs {
|
||||||
|
if filter(spec) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// suiteDidEndSummary assembles the end-of-suite SuiteSummary by counting
// processed specs in each terminal state. If the BeforeSuite node failed
// (and this was not a dry run), no specs actually executed, so every spec
// that would have run is reported as failed instead.
func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
	// "Will be run" here means: neither skipped nor pending.
	numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return !ex.Skipped() && !ex.Pending()
	})

	numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Pending()
	})

	numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Skipped()
	})

	numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Passed()
	})

	numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Flaked()
	})

	numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
		return ex.Failed()
	})

	// BeforeSuite failed: the processed-spec counts above are meaningless,
	// so derive the would-have-run count from the iterator instead and mark
	// all of those specs failed.
	if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
		var known bool
		numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
		if !known {
			numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
		}
		numberOfFailedSpecs = numberOfSpecsThatWillBeRun
	}

	return &types.SuiteSummary{
		SuiteDescription: runner.description,
		SuiteSucceeded:   success,
		SuiteID:          runner.suiteID,

		NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
		NumberOfTotalSpecs:                 len(runner.processedSpecs),
		NumberOfSpecsThatWillBeRun:         numberOfSpecsThatWillBeRun,
		NumberOfPendingSpecs:               numberOfPendingSpecs,
		NumberOfSkippedSpecs:               numberOfSkippedSpecs,
		NumberOfPassedSpecs:                numberOfPassedSpecs,
		NumberOfFailedSpecs:                numberOfFailedSpecs,
		NumberOfFlakedSpecs:                numberOfFlakedSpecs,
	}
}
|
||||||
|
|
||||||
|
func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
|
||||||
|
numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
|
||||||
|
if !known {
|
||||||
|
numTotal = -1
|
||||||
|
}
|
||||||
|
|
||||||
|
numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
|
||||||
|
if !known {
|
||||||
|
numToRun = -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return &types.SuiteSummary{
|
||||||
|
SuiteDescription: runner.description,
|
||||||
|
SuiteID: runner.suiteID,
|
||||||
|
|
||||||
|
NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
|
||||||
|
NumberOfTotalSpecs: numTotal,
|
||||||
|
NumberOfSpecsThatWillBeRun: numToRun,
|
||||||
|
NumberOfPendingSpecs: -1,
|
||||||
|
NumberOfSkippedSpecs: -1,
|
||||||
|
NumberOfPassedSpecs: -1,
|
||||||
|
NumberOfFailedSpecs: -1,
|
||||||
|
NumberOfFlakedSpecs: -1,
|
||||||
|
}
|
||||||
|
}
|
||||||
190
vendor/github.com/onsi/ginkgo/internal/suite/suite.go
generated
vendored
Normal file
190
vendor/github.com/onsi/ginkgo/internal/suite/suite.go
generated
vendored
Normal file
|
|
@ -0,0 +1,190 @@
|
||||||
|
package suite
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/internal/spec_iterator"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/internal/containernode"
|
||||||
|
"github.com/onsi/ginkgo/internal/failer"
|
||||||
|
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||||
|
"github.com/onsi/ginkgo/internal/spec"
|
||||||
|
"github.com/onsi/ginkgo/internal/specrunner"
|
||||||
|
"github.com/onsi/ginkgo/internal/writer"
|
||||||
|
"github.com/onsi/ginkgo/reporters"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ginkgoTestingT is the minimal slice of *testing.T that the suite needs:
// just the ability to mark the enclosing test run as failed.
type ginkgoTestingT interface {
	Fail()
}

// Suite holds the container tree built by the ginkgo DSL and the machinery
// used to run it.
type Suite struct {
	topLevelContainer *containernode.ContainerNode // implicit "[Top Level]" root container
	currentContainer  *containernode.ContainerNode // container that Push* calls currently target
	containerIndex    int                          // nesting depth of currentContainer (top level is 1)
	beforeSuiteNode   leafnodes.SuiteNode          // optional; set at most once
	afterSuiteNode    leafnodes.SuiteNode          // optional; set at most once
	runner            *specrunner.SpecRunner       // created in Run
	failer            *failer.Failer
	running           bool // true once Run starts; guards Push* misuse mid-run
}
|
||||||
|
|
||||||
|
func New(failer *failer.Failer) *Suite {
|
||||||
|
topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
|
||||||
|
|
||||||
|
return &Suite{
|
||||||
|
topLevelContainer: topLevelContainer,
|
||||||
|
currentContainer: topLevelContainer,
|
||||||
|
failer: failer,
|
||||||
|
containerIndex: 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run shuffles the top-level containers, builds the spec iterator, and
// executes the suite through a SpecRunner. It returns whether the suite
// succeeded and whether any spec carries programmatic focus. On failure it
// also flags the enclosing test via t.Fail().
func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
	if config.ParallelTotal < 1 {
		panic("ginkgo.parallel.total must be >= 1")
	}

	if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
		panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
	}

	// Shuffle order derives from the configured seed, so runs are
	// reproducible.
	r := rand.New(rand.NewSource(config.RandomSeed))
	suite.topLevelContainer.Shuffle(r)
	iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
	suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)

	// running guards the Push* methods against mutating the tree mid-run.
	suite.running = true
	success := suite.runner.Run()
	if !success {
		t.Fail()
	}
	return success, hasProgrammaticFocus
}
|
||||||
|
|
||||||
|
func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
|
||||||
|
specsSlice := []*spec.Spec{}
|
||||||
|
suite.topLevelContainer.BackPropagateProgrammaticFocus()
|
||||||
|
for _, collatedNodes := range suite.topLevelContainer.Collate() {
|
||||||
|
specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
|
||||||
|
}
|
||||||
|
|
||||||
|
specs := spec.NewSpecs(specsSlice)
|
||||||
|
specs.RegexScansFilePath = config.RegexScansFilePath
|
||||||
|
|
||||||
|
if config.RandomizeAllSpecs {
|
||||||
|
specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
|
||||||
|
}
|
||||||
|
|
||||||
|
specs.ApplyFocus(description, config.FocusString, config.SkipString)
|
||||||
|
|
||||||
|
if config.SkipMeasurements {
|
||||||
|
specs.SkipMeasurements()
|
||||||
|
}
|
||||||
|
|
||||||
|
var iterator spec_iterator.SpecIterator
|
||||||
|
|
||||||
|
if config.ParallelTotal > 1 {
|
||||||
|
iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
|
||||||
|
resp, err := http.Get(config.SyncHost + "/has-counter")
|
||||||
|
if err != nil || resp.StatusCode != http.StatusOK {
|
||||||
|
iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
iterator = spec_iterator.NewSerialIterator(specs.Specs())
|
||||||
|
}
|
||||||
|
|
||||||
|
return iterator, specs.HasProgrammaticFocus()
|
||||||
|
}
|
||||||
|
|
||||||
|
// CurrentRunningSpecSummary exposes the runner's in-flight spec summary;
// the bool mirrors the runner's report of whether such a summary exists.
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
	return suite.runner.CurrentSpecSummary()
}
|
||||||
|
|
||||||
|
// SetBeforeSuiteNode installs the suite's single BeforeSuite node.
// A second call is a user error and panics.
func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.beforeSuiteNode != nil {
		panic("You may only call BeforeSuite once!")
	}
	suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
}

// SetAfterSuiteNode installs the suite's single AfterSuite node.
// A second call is a user error and panics.
func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.afterSuiteNode != nil {
		panic("You may only call AfterSuite once!")
	}
	suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
}

// SetSynchronizedBeforeSuiteNode installs the two-body, parallel-aware
// BeforeSuite variant. It shares the one-BeforeSuite-only slot with
// SetBeforeSuiteNode, so mixing the two also panics.
func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.beforeSuiteNode != nil {
		panic("You may only call BeforeSuite once!")
	}
	suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}

// SetSynchronizedAfterSuiteNode installs the two-body, parallel-aware
// AfterSuite variant. It shares the one-AfterSuite-only slot with
// SetAfterSuiteNode, so mixing the two also panics.
func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.afterSuiteNode != nil {
		panic("You may only call AfterSuite once!")
	}
	suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
}
|
||||||
|
|
||||||
|
// PushContainerNode adds a container (Describe/Context/When) under the
// current container, then invokes body() to collect the container's
// children, restoring the previous container and nesting depth afterwards.
func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
	container := containernode.New(text, flag, codeLocation)
	suite.currentContainer.PushContainerNode(container)

	previousContainer := suite.currentContainer
	suite.currentContainer = container
	suite.containerIndex++

	// body() executes the user's nested DSL calls, which land on the
	// freshly pushed container via the Push* methods below.
	body()

	suite.containerIndex--
	suite.currentContainer = previousContainer
}

// PushItNode adds an It subject node to the current container. Calling It
// while the suite is already running is reported as a failure.
func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
}

// PushMeasureNode adds a Measure subject node (run `samples` times) to the
// current container; misuse at runtime is reported as a failure.
func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
	if suite.running {
		suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
}

// PushBeforeEachNode adds a BeforeEach setup node to the current container.
func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

// PushJustBeforeEachNode adds a JustBeforeEach setup node to the current
// container.
func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

// PushJustAfterEachNode adds a JustAfterEach setup node to the current
// container.
func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}

// PushAfterEachNode adds an AfterEach setup node to the current container.
func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
	if suite.running {
		suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
	}
	suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
}
|
||||||
76
vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
generated
vendored
Normal file
76
vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
generated
vendored
Normal file
|
|
@ -0,0 +1,76 @@
|
||||||
|
package testingtproxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// failFunc matches ginkgo's Fail signature: a message plus an optional
// caller-skip depth.
type failFunc func(message string, callerSkip ...int)

// New wraps a fail handler and a writer in a proxy exposing the subset of
// the testing.T surface that third-party helpers expect. offset is the
// caller-skip depth forwarded on every failure.
func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
	proxy := &ginkgoTestingTProxy{}
	proxy.fail = fail
	proxy.offset = offset
	proxy.writer = writer
	return proxy
}

// ginkgoTestingTProxy adapts ginkgo-style failure reporting to testing.T's
// method set.
type ginkgoTestingTProxy struct {
	fail   failFunc
	offset int
	writer io.Writer
}

// Error fails with the space-separated, newline-terminated rendering of args.
func (proxy *ginkgoTestingTProxy) Error(args ...interface{}) {
	proxy.fail(fmt.Sprintln(args...), proxy.offset)
}

// Errorf fails with the formatted message.
func (proxy *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
	proxy.fail(fmt.Sprintf(format, args...), proxy.offset)
}

// Fail reports a generic "failed" failure.
func (proxy *ginkgoTestingTProxy) Fail() {
	proxy.fail("failed", proxy.offset)
}

// FailNow behaves identically to Fail in this proxy.
func (proxy *ginkgoTestingTProxy) FailNow() {
	proxy.fail("failed", proxy.offset)
}

// Fatal fails with the space-separated, newline-terminated rendering of args.
func (proxy *ginkgoTestingTProxy) Fatal(args ...interface{}) {
	proxy.fail(fmt.Sprintln(args...), proxy.offset)
}

// Fatalf fails with the formatted message.
func (proxy *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
	proxy.fail(fmt.Sprintf(format, args...), proxy.offset)
}

// Log writes a line to the proxy's writer.
func (proxy *ginkgoTestingTProxy) Log(args ...interface{}) {
	fmt.Fprintln(proxy.writer, args...)
}

// Logf formats and then logs via Log.
func (proxy *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
	proxy.Log(fmt.Sprintf(format, args...))
}

// Failed always reports false: the proxy does not track failure state.
func (proxy *ginkgoTestingTProxy) Failed() bool {
	return false
}

// Parallel is a no-op; ginkgo controls parallelism itself.
func (proxy *ginkgoTestingTProxy) Parallel() {
}

// Skip prints args to stdout; it does not actually skip anything.
func (proxy *ginkgoTestingTProxy) Skip(args ...interface{}) {
	fmt.Println(args...)
}

// Skipf formats and then delegates to Skip.
func (proxy *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
	proxy.Skip(fmt.Sprintf(format, args...))
}

// SkipNow is a no-op.
func (proxy *ginkgoTestingTProxy) SkipNow() {
}

// Skipped always reports false: the proxy does not track skip state.
func (proxy *ginkgoTestingTProxy) Skipped() bool {
	return false
}
|
||||||
36
vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
generated
vendored
Normal file
36
vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,36 @@
|
||||||
|
package writer
|
||||||
|
|
||||||
|
// FakeGinkgoWriter records every writer operation as a readable event in
// EventStream so tests can assert on the sequence of calls.
type FakeGinkgoWriter struct {
	EventStream []string
}

// NewFake returns a FakeGinkgoWriter with an empty (non-nil) event stream.
func NewFake() *FakeGinkgoWriter {
	return &FakeGinkgoWriter{EventStream: []string{}}
}

// AddEvent appends an arbitrary event marker.
func (w *FakeGinkgoWriter) AddEvent(event string) {
	w.EventStream = append(w.EventStream, event)
}

// Truncate records a "TRUNCATE" event.
func (w *FakeGinkgoWriter) Truncate() {
	w.AddEvent("TRUNCATE")
}

// DumpOut records a "DUMP" event.
func (w *FakeGinkgoWriter) DumpOut() {
	w.AddEvent("DUMP")
}

// DumpOutWithHeader records a "DUMP_WITH_HEADER: <header>" event.
func (w *FakeGinkgoWriter) DumpOutWithHeader(header string) {
	w.AddEvent("DUMP_WITH_HEADER: " + header)
}

// Bytes records a "BYTES" event and always returns nil.
func (w *FakeGinkgoWriter) Bytes() []byte {
	w.AddEvent("BYTES")
	return nil
}

// Write discards the data and reports a zero-length successful write.
func (w *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
	return 0, nil
}
|
||||||
89
vendor/github.com/onsi/ginkgo/internal/writer/writer.go
generated
vendored
Normal file
89
vendor/github.com/onsi/ginkgo/internal/writer/writer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,89 @@
|
||||||
|
package writer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
type WriterInterface interface {
|
||||||
|
io.Writer
|
||||||
|
|
||||||
|
Truncate()
|
||||||
|
DumpOut()
|
||||||
|
DumpOutWithHeader(header string)
|
||||||
|
Bytes() []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type Writer struct {
|
||||||
|
buffer *bytes.Buffer
|
||||||
|
outWriter io.Writer
|
||||||
|
lock *sync.Mutex
|
||||||
|
stream bool
|
||||||
|
redirector io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(outWriter io.Writer) *Writer {
|
||||||
|
return &Writer{
|
||||||
|
buffer: &bytes.Buffer{},
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
outWriter: outWriter,
|
||||||
|
stream: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) AndRedirectTo(writer io.Writer) {
|
||||||
|
w.redirector = writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) SetStream(stream bool) {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
w.stream = stream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) Write(b []byte) (n int, err error) {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
|
||||||
|
n, err = w.buffer.Write(b)
|
||||||
|
if w.redirector != nil {
|
||||||
|
w.redirector.Write(b)
|
||||||
|
}
|
||||||
|
if w.stream {
|
||||||
|
return w.outWriter.Write(b)
|
||||||
|
}
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) Truncate() {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
w.buffer.Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) DumpOut() {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
if !w.stream {
|
||||||
|
w.buffer.WriteTo(w.outWriter)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) Bytes() []byte {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
b := w.buffer.Bytes()
|
||||||
|
copied := make([]byte, len(b))
|
||||||
|
copy(copied, b)
|
||||||
|
return copied
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *Writer) DumpOutWithHeader(header string) {
|
||||||
|
w.lock.Lock()
|
||||||
|
defer w.lock.Unlock()
|
||||||
|
if !w.stream && w.buffer.Len() > 0 {
|
||||||
|
w.outWriter.Write([]byte(header))
|
||||||
|
w.buffer.WriteTo(w.outWriter)
|
||||||
|
}
|
||||||
|
}
|
||||||
87
vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
generated
vendored
Normal file
87
vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
generated
vendored
Normal file
|
|
@ -0,0 +1,87 @@
|
||||||
|
/*
|
||||||
|
Ginkgo's Default Reporter
|
||||||
|
|
||||||
|
A number of command line flags are available to tweak Ginkgo's default output.
|
||||||
|
|
||||||
|
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
|
||||||
|
*/
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DefaultReporter drives ginkgo's console output, delegating all actual
// printing to a stenographer.
type DefaultReporter struct {
	config        config.DefaultReporterConfigType
	stenographer  stenographer.Stenographer
	specSummaries []*types.SpecSummary // completed specs, kept for the end-of-suite failure recap
}

// NewDefaultReporter wires a reporter configuration to a stenographer.
func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
	return &DefaultReporter{
		config:       config,
		stenographer: stenographer,
	}
}

// SpecSuiteWillBegin announces the suite header plus either the
// parallel-run banner or the serial spec count.
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
	if config.ParallelTotal > 1 {
		reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
	} else {
		reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
	}
}

// BeforeSuiteDidRun reports only failures; a passing BeforeSuite is silent.
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
	}
}

// AfterSuiteDidRun reports only failures; a passing AfterSuite is silent.
func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
	}
}

// SpecWillRun pre-announces a runnable spec, but only in verbose,
// non-succinct mode; pending and skipped specs are never pre-announced.
func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
	if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
		reporter.stenographer.AnnounceSpecWillRun(specSummary)
	}
}

// SpecDidComplete announces a finished spec according to its terminal
// state and remembers the summary for the end-of-suite failure recap.
// ("Succesful" (sic) is the stenographer API's own spelling.)
func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	switch specSummary.State {
	case types.SpecStatePassed:
		// Passed specs split three ways: measurements, slow specs (at or
		// above the configured threshold, in seconds), and ordinary specs.
		if specSummary.IsMeasurement {
			reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
		} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
			reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
		} else {
			reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
			if reporter.config.ReportPassed {
				reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
			}
		}
	case types.SpecStatePending:
		reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
	case types.SpecStateSkipped:
		reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
	case types.SpecStateTimedOut:
		reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
	case types.SpecStatePanicked:
		reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
	case types.SpecStateFailed:
		reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
	}

	reporter.specSummaries = append(reporter.specSummaries, specSummary)
}

// SpecSuiteDidEnd prints the failure recap followed by the run totals.
func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	reporter.stenographer.SummarizeFailures(reporter.specSummaries)
	reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
}
|
||||||
59
vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
generated
vendored
Normal file
59
vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
generated
vendored
Normal file
|
|
@ -0,0 +1,59 @@
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
//FakeReporter is useful for testing purposes
|
||||||
|
type FakeReporter struct {
|
||||||
|
Config config.GinkgoConfigType
|
||||||
|
|
||||||
|
BeginSummary *types.SuiteSummary
|
||||||
|
BeforeSuiteSummary *types.SetupSummary
|
||||||
|
SpecWillRunSummaries []*types.SpecSummary
|
||||||
|
SpecSummaries []*types.SpecSummary
|
||||||
|
AfterSuiteSummary *types.SetupSummary
|
||||||
|
EndSummary *types.SuiteSummary
|
||||||
|
|
||||||
|
SpecWillRunStub func(specSummary *types.SpecSummary)
|
||||||
|
SpecDidCompleteStub func(specSummary *types.SpecSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFakeReporter() *FakeReporter {
|
||||||
|
return &FakeReporter{
|
||||||
|
SpecWillRunSummaries: make([]*types.SpecSummary, 0),
|
||||||
|
SpecSummaries: make([]*types.SpecSummary, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
fakeR.Config = config
|
||||||
|
fakeR.BeginSummary = summary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
fakeR.BeforeSuiteSummary = setupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||||
|
if fakeR.SpecWillRunStub != nil {
|
||||||
|
fakeR.SpecWillRunStub(specSummary)
|
||||||
|
}
|
||||||
|
fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||||
|
if fakeR.SpecDidCompleteStub != nil {
|
||||||
|
fakeR.SpecDidCompleteStub(specSummary)
|
||||||
|
}
|
||||||
|
fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||||
|
fakeR.AfterSuiteSummary = setupSummary
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
fakeR.EndSummary = summary
|
||||||
|
}
|
||||||
182
vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
generated
vendored
Normal file
182
vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
generated
vendored
Normal file
|
|
@ -0,0 +1,182 @@
|
||||||
|
/*
|
||||||
|
|
||||||
|
JUnit XML Reporter for Ginkgo
|
||||||
|
|
||||||
|
For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
|
||||||
|
|
||||||
|
*/
|
||||||
|
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/xml"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JUnitTestSuite is the root <testsuite> element of the generated report.
type JUnitTestSuite struct {
	XMLName   xml.Name        `xml:"testsuite"`
	TestCases []JUnitTestCase `xml:"testcase"`
	Name      string          `xml:"name,attr"`
	Tests     int             `xml:"tests,attr"`
	Failures  int             `xml:"failures,attr"`
	Errors    int             `xml:"errors,attr"`
	Time      float64         `xml:"time,attr"` // suite run time, seconds
}

// JUnitTestCase is a single <testcase>. At most one of PassedMessage,
// FailureMessage and Skipped is populated, depending on the spec outcome.
type JUnitTestCase struct {
	Name           string               `xml:"name,attr"`
	ClassName      string               `xml:"classname,attr"`
	PassedMessage  *JUnitPassedMessage  `xml:"passed,omitempty"`
	FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
	Skipped        *JUnitSkipped        `xml:"skipped,omitempty"`
	Time           float64              `xml:"time,attr"` // spec run time, seconds
	SystemOut      string               `xml:"system-out,omitempty"`
}

// JUnitPassedMessage carries captured output for a passing spec.
type JUnitPassedMessage struct {
	Message string `xml:",chardata"`
}

// JUnitFailureMessage describes why a spec failed.
type JUnitFailureMessage struct {
	Type    string `xml:"type,attr"` // "Failure", "Timeout" or "Panic" (see failureTypeForState)
	Message string `xml:",chardata"`
}

// JUnitSkipped marks a skipped or pending spec.
type JUnitSkipped struct {
	XMLName xml.Name `xml:"skipped"`
}

// JUnitReporter accumulates spec results and serializes them as JUnit XML
// when the suite ends.
type JUnitReporter struct {
	suite          JUnitTestSuite
	filename       string // output path; overridden by ReporterConfig.ReportFile when set
	testSuiteName  string
	ReporterConfig config.DefaultReporterConfigType
}
|
||||||
|
|
||||||
|
//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
|
||||||
|
func NewJUnitReporter(filename string) *JUnitReporter {
|
||||||
|
return &JUnitReporter{
|
||||||
|
filename: filename,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||||
|
reporter.suite = JUnitTestSuite{
|
||||||
|
Name: summary.SuiteDescription,
|
||||||
|
TestCases: []JUnitTestCase{},
|
||||||
|
}
|
||||||
|
reporter.testSuiteName = summary.SuiteDescription
|
||||||
|
reporter.ReporterConfig = config.DefaultReporterConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// SpecWillRun is a no-op: the JUnit reporter only records completed specs.
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
}

// BeforeSuiteDidRun records a failed BeforeSuite as a synthetic test case
// named "BeforeSuite".
func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("BeforeSuite", setupSummary)
}

// AfterSuiteDidRun records a failed AfterSuite as a synthetic test case
// named "AfterSuite".
func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("AfterSuite", setupSummary)
}
|
||||||
|
|
||||||
|
func failureMessage(failure types.SpecFailure) string {
|
||||||
|
return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleSetupSummary converts a non-passing BeforeSuite/AfterSuite into a
// synthetic JUnit test case named after the node; passing setup nodes are
// not recorded at all.
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		testCase := JUnitTestCase{
			Name:      name,
			ClassName: reporter.testSuiteName,
		}

		testCase.FailureMessage = &JUnitFailureMessage{
			Type:    reporter.failureTypeForState(setupSummary.State),
			Message: failureMessage(setupSummary.Failure),
		}
		testCase.SystemOut = setupSummary.CapturedOutput
		testCase.Time = setupSummary.RunTime.Seconds()
		reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
	}
}
|
||||||
|
|
||||||
|
// SpecDidComplete appends one JUnit test case per finished spec, filling
// in the passed/failure/skipped sub-elements according to the terminal
// state.
func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	testCase := JUnitTestCase{
		// ComponentTexts[0] is skipped — presumably the implicit top-level
		// container; confirm against the spec construction code.
		Name:      strings.Join(specSummary.ComponentTexts[1:], " "),
		ClassName: reporter.testSuiteName,
	}
	// Captured output of passing specs is only embedded when explicitly
	// requested via ReportPassed.
	if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
		testCase.PassedMessage = &JUnitPassedMessage{
			Message: specSummary.CapturedOutput,
		}
	}
	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
		testCase.FailureMessage = &JUnitFailureMessage{
			Type:    reporter.failureTypeForState(specSummary.State),
			Message: failureMessage(specSummary.Failure),
		}
		// Panics additionally carry the forwarded panic value and stack.
		if specSummary.State == types.SpecStatePanicked {
			testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s",
				specSummary.Failure.ForwardedPanic,
				specSummary.Failure.Location.FullStackTrace)
		}
		testCase.SystemOut = specSummary.CapturedOutput
	}
	if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
		testCase.Skipped = &JUnitSkipped{}
	}
	testCase.Time = specSummary.RunTime.Seconds()
	reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||||
|
reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
|
||||||
|
reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
|
||||||
|
reporter.suite.Failures = summary.NumberOfFailedSpecs
|
||||||
|
reporter.suite.Errors = 0
|
||||||
|
if reporter.ReporterConfig.ReportFile != "" {
|
||||||
|
reporter.filename = reporter.ReporterConfig.ReportFile
|
||||||
|
fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename)
|
||||||
|
}
|
||||||
|
filePath, _ := filepath.Abs(reporter.filename)
|
||||||
|
dirPath := filepath.Dir(filePath)
|
||||||
|
err := os.MkdirAll(dirPath, os.ModePerm)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error())
|
||||||
|
}
|
||||||
|
file, err := os.Create(filePath)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error())
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
file.WriteString(xml.Header)
|
||||||
|
encoder := xml.NewEncoder(file)
|
||||||
|
encoder.Indent(" ", " ")
|
||||||
|
err = encoder.Encode(reporter.suite)
|
||||||
|
if err == nil {
|
||||||
|
fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath)
|
||||||
|
} else {
|
||||||
|
fmt.Fprintf(os.Stderr,"\nFailed to generate JUnit report data:\n\t%s", err.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
|
||||||
|
switch state {
|
||||||
|
case types.SpecStateFailed:
|
||||||
|
return "Failure"
|
||||||
|
case types.SpecStateTimedOut:
|
||||||
|
return "Timeout"
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
return "Panic"
|
||||||
|
default:
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
}
|
||||||
15
vendor/github.com/onsi/ginkgo/reporters/reporter.go
generated
vendored
Normal file
15
vendor/github.com/onsi/ginkgo/reporters/reporter.go
generated
vendored
Normal file
|
|
@ -0,0 +1,15 @@
|
||||||
|
package reporters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/onsi/ginkgo/config"
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Reporter interface {
|
||||||
|
SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
|
||||||
|
BeforeSuiteDidRun(setupSummary *types.SetupSummary)
|
||||||
|
SpecWillRun(specSummary *types.SpecSummary)
|
||||||
|
SpecDidComplete(specSummary *types.SpecSummary)
|
||||||
|
AfterSuiteDidRun(setupSummary *types.SetupSummary)
|
||||||
|
SpecSuiteDidEnd(summary *types.SuiteSummary)
|
||||||
|
}
|
||||||
64
vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
generated
vendored
Normal file
64
vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
generated
vendored
Normal file
|
|
@ -0,0 +1,64 @@
|
||||||
|
package stenographer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
|
||||||
|
var out string
|
||||||
|
|
||||||
|
if len(args) > 0 {
|
||||||
|
out = fmt.Sprintf(format, args...)
|
||||||
|
} else {
|
||||||
|
out = format
|
||||||
|
}
|
||||||
|
|
||||||
|
if s.color {
|
||||||
|
return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
|
||||||
|
} else {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
|
||||||
|
fmt.Fprintln(s.w, text)
|
||||||
|
fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printNewLine() {
|
||||||
|
fmt.Fprintln(s.w, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printDelimiter() {
|
||||||
|
fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
|
||||||
|
fmt.Fprint(s.w, s.indent(indentation, format, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
|
||||||
|
fmt.Fprintln(s.w, s.indent(indentation, format, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
|
||||||
|
var text string
|
||||||
|
|
||||||
|
if len(args) > 0 {
|
||||||
|
text = fmt.Sprintf(format, args...)
|
||||||
|
} else {
|
||||||
|
text = format
|
||||||
|
}
|
||||||
|
|
||||||
|
stringArray := strings.Split(text, "\n")
|
||||||
|
padding := ""
|
||||||
|
if indentation >= 0 {
|
||||||
|
padding = strings.Repeat(" ", indentation)
|
||||||
|
}
|
||||||
|
for i, s := range stringArray {
|
||||||
|
stringArray[i] = fmt.Sprintf("%s%s", padding, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Join(stringArray, "\n")
|
||||||
|
}
|
||||||
142
vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
generated
vendored
Normal file
142
vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,142 @@
|
||||||
|
package stenographer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
|
||||||
|
return FakeStenographerCall{
|
||||||
|
Method: method,
|
||||||
|
Args: args,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type FakeStenographer struct {
|
||||||
|
calls []FakeStenographerCall
|
||||||
|
lock *sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
type FakeStenographerCall struct {
|
||||||
|
Method string
|
||||||
|
Args []interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewFakeStenographer() *FakeStenographer {
|
||||||
|
stenographer := &FakeStenographer{
|
||||||
|
lock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
stenographer.Reset()
|
||||||
|
return stenographer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
|
||||||
|
stenographer.lock.Lock()
|
||||||
|
defer stenographer.lock.Unlock()
|
||||||
|
|
||||||
|
return stenographer.calls
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) Reset() {
|
||||||
|
stenographer.lock.Lock()
|
||||||
|
defer stenographer.lock.Unlock()
|
||||||
|
|
||||||
|
stenographer.calls = make([]FakeStenographerCall, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
|
||||||
|
stenographer.lock.Lock()
|
||||||
|
defer stenographer.lock.Unlock()
|
||||||
|
|
||||||
|
results := make([]FakeStenographerCall, 0)
|
||||||
|
for _, call := range stenographer.calls {
|
||||||
|
if call.Method == method {
|
||||||
|
results = append(results, call)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return results
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
|
||||||
|
stenographer.lock.Lock()
|
||||||
|
defer stenographer.lock.Unlock()
|
||||||
|
|
||||||
|
stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||||
|
stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||||
|
stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
|
||||||
|
stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||||
|
stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
|
||||||
|
stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||||
|
stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||||
|
stenographer.registerCall("AnnounceSpecWillRun", spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||||
|
stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||||
|
stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
|
||||||
|
stenographer.registerCall("AnnounceCapturedOutput", output)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
|
||||||
|
stenographer.registerCall("AnnounceSuccesfulSpec", spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||||
|
stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||||
|
stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||||
|
stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||||
|
stenographer.registerCall("SummarizeFailures", summaries)
|
||||||
|
}
|
||||||
572
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
Normal file
572
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,572 @@
|
||||||
|
/*
|
||||||
|
The stenographer is used by Ginkgo's reporters to generate output.
|
||||||
|
|
||||||
|
Move along, nothing to see here.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package stenographer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/onsi/ginkgo/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const defaultStyle = "\x1b[0m"
|
||||||
|
const boldStyle = "\x1b[1m"
|
||||||
|
const redColor = "\x1b[91m"
|
||||||
|
const greenColor = "\x1b[32m"
|
||||||
|
const yellowColor = "\x1b[33m"
|
||||||
|
const cyanColor = "\x1b[36m"
|
||||||
|
const grayColor = "\x1b[90m"
|
||||||
|
const lightGrayColor = "\x1b[37m"
|
||||||
|
|
||||||
|
type cursorStateType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
cursorStateTop cursorStateType = iota
|
||||||
|
cursorStateStreaming
|
||||||
|
cursorStateMidBlock
|
||||||
|
cursorStateEndBlock
|
||||||
|
)
|
||||||
|
|
||||||
|
type Stenographer interface {
|
||||||
|
AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
|
||||||
|
AnnounceAggregatedParallelRun(nodes int, succinct bool)
|
||||||
|
AnnounceParallelRun(node int, nodes int, succinct bool)
|
||||||
|
AnnounceTotalNumberOfSpecs(total int, succinct bool)
|
||||||
|
AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
|
||||||
|
AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
|
||||||
|
|
||||||
|
AnnounceSpecWillRun(spec *types.SpecSummary)
|
||||||
|
AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||||
|
AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||||
|
|
||||||
|
AnnounceCapturedOutput(output string)
|
||||||
|
|
||||||
|
AnnounceSuccesfulSpec(spec *types.SpecSummary)
|
||||||
|
AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
|
||||||
|
AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
|
||||||
|
|
||||||
|
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
|
||||||
|
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||||
|
|
||||||
|
AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||||
|
AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||||
|
AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||||
|
|
||||||
|
SummarizeFailures(summaries []*types.SpecSummary)
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
|
||||||
|
denoter := "•"
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
denoter = "+"
|
||||||
|
}
|
||||||
|
return &consoleStenographer{
|
||||||
|
color: color,
|
||||||
|
denoter: denoter,
|
||||||
|
cursorState: cursorStateTop,
|
||||||
|
enableFlakes: enableFlakes,
|
||||||
|
w: writer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type consoleStenographer struct {
|
||||||
|
color bool
|
||||||
|
denoter string
|
||||||
|
cursorState cursorStateType
|
||||||
|
enableFlakes bool
|
||||||
|
w io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
var alternatingColors = []string{defaultStyle, grayColor}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||||
|
if succinct {
|
||||||
|
s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
|
||||||
|
s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
|
||||||
|
if randomizingAll {
|
||||||
|
s.print(0, " - Will randomize all specs")
|
||||||
|
}
|
||||||
|
s.printNewLine()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
|
||||||
|
if succinct {
|
||||||
|
s.print(0, "- node #%d ", node)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.println(0,
|
||||||
|
"Parallel test node %s/%s.",
|
||||||
|
s.colorize(boldStyle, "%d", node),
|
||||||
|
s.colorize(boldStyle, "%d", nodes),
|
||||||
|
)
|
||||||
|
s.printNewLine()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||||
|
if succinct {
|
||||||
|
s.print(0, "- %d nodes ", nodes)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.println(0,
|
||||||
|
"Running in parallel across %s nodes",
|
||||||
|
s.colorize(boldStyle, "%d", nodes),
|
||||||
|
)
|
||||||
|
s.printNewLine()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||||
|
if succinct {
|
||||||
|
s.print(0, "- %d/%d specs ", specsToRun, total)
|
||||||
|
s.stream()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.println(0,
|
||||||
|
"Will run %s of %s specs",
|
||||||
|
s.colorize(boldStyle, "%d", specsToRun),
|
||||||
|
s.colorize(boldStyle, "%d", total),
|
||||||
|
)
|
||||||
|
|
||||||
|
s.printNewLine()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
|
||||||
|
if succinct {
|
||||||
|
s.print(0, "- %d specs ", total)
|
||||||
|
s.stream()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.println(0,
|
||||||
|
"Will run %s specs",
|
||||||
|
s.colorize(boldStyle, "%d", total),
|
||||||
|
)
|
||||||
|
|
||||||
|
s.printNewLine()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||||
|
if succinct && summary.SuiteSucceeded {
|
||||||
|
s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.printNewLine()
|
||||||
|
color := greenColor
|
||||||
|
if !summary.SuiteSucceeded {
|
||||||
|
color = redColor
|
||||||
|
}
|
||||||
|
s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
|
||||||
|
|
||||||
|
status := ""
|
||||||
|
if summary.SuiteSucceeded {
|
||||||
|
status = s.colorize(boldStyle+greenColor, "SUCCESS!")
|
||||||
|
} else {
|
||||||
|
status = s.colorize(boldStyle+redColor, "FAIL!")
|
||||||
|
}
|
||||||
|
|
||||||
|
flakes := ""
|
||||||
|
if s.enableFlakes {
|
||||||
|
flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.print(0,
|
||||||
|
"%s -- %s | %s | %s | %s\n",
|
||||||
|
status,
|
||||||
|
s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
|
||||||
|
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
|
||||||
|
s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
|
||||||
|
s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||||
|
s.startBlock()
|
||||||
|
for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
|
||||||
|
s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
|
||||||
|
}
|
||||||
|
|
||||||
|
indentation := 0
|
||||||
|
if len(spec.ComponentTexts) > 2 {
|
||||||
|
indentation = 1
|
||||||
|
s.printNewLine()
|
||||||
|
}
|
||||||
|
index := len(spec.ComponentTexts) - 1
|
||||||
|
s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
|
||||||
|
s.printNewLine()
|
||||||
|
s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
|
||||||
|
s.printNewLine()
|
||||||
|
s.midBlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||||
|
s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||||
|
s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||||
|
s.startBlock()
|
||||||
|
var message string
|
||||||
|
switch summary.State {
|
||||||
|
case types.SpecStateFailed:
|
||||||
|
message = "Failure"
|
||||||
|
case types.SpecStatePanicked:
|
||||||
|
message = "Panic"
|
||||||
|
case types.SpecStateTimedOut:
|
||||||
|
message = "Timeout"
|
||||||
|
}
|
||||||
|
|
||||||
|
s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
|
||||||
|
|
||||||
|
indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
|
||||||
|
|
||||||
|
s.printNewLine()
|
||||||
|
s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
|
||||||
|
|
||||||
|
s.endBlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
|
||||||
|
if output == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.startBlock()
|
||||||
|
s.println(0, output)
|
||||||
|
s.midBlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
|
||||||
|
s.print(0, s.colorize(greenColor, s.denoter))
|
||||||
|
s.stream()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||||
|
s.printBlockWithMessage(
|
||||||
|
s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
|
||||||
|
"",
|
||||||
|
spec,
|
||||||
|
succinct,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||||
|
s.printBlockWithMessage(
|
||||||
|
s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
|
||||||
|
s.measurementReport(spec, succinct),
|
||||||
|
spec,
|
||||||
|
succinct,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||||
|
if noisy {
|
||||||
|
s.printBlockWithMessage(
|
||||||
|
s.colorize(yellowColor, "P [PENDING]"),
|
||||||
|
"",
|
||||||
|
spec,
|
||||||
|
false,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
s.print(0, s.colorize(yellowColor, "P"))
|
||||||
|
s.stream()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
|
||||||
|
if succinct || spec.Failure == (types.SpecFailure{}) {
|
||||||
|
s.print(0, s.colorize(cyanColor, "S"))
|
||||||
|
s.stream()
|
||||||
|
} else {
|
||||||
|
s.startBlock()
|
||||||
|
s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||||
|
|
||||||
|
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||||
|
|
||||||
|
s.printNewLine()
|
||||||
|
s.printSkip(indentation, spec.Failure)
|
||||||
|
s.endBlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||||
|
failingSpecs := []*types.SpecSummary{}
|
||||||
|
|
||||||
|
for _, summary := range summaries {
|
||||||
|
if summary.HasFailureState() {
|
||||||
|
failingSpecs = append(failingSpecs, summary)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(failingSpecs) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
s.printNewLine()
|
||||||
|
s.printNewLine()
|
||||||
|
plural := "s"
|
||||||
|
if len(failingSpecs) == 1 {
|
||||||
|
plural = ""
|
||||||
|
}
|
||||||
|
s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
|
||||||
|
for _, summary := range failingSpecs {
|
||||||
|
s.printNewLine()
|
||||||
|
if summary.HasFailureState() {
|
||||||
|
if summary.TimedOut() {
|
||||||
|
s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
|
||||||
|
} else if summary.Panicked() {
|
||||||
|
s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
|
||||||
|
} else if summary.Failed() {
|
||||||
|
s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
|
||||||
|
}
|
||||||
|
s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
|
||||||
|
s.printNewLine()
|
||||||
|
s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) startBlock() {
|
||||||
|
if s.cursorState == cursorStateStreaming {
|
||||||
|
s.printNewLine()
|
||||||
|
s.printDelimiter()
|
||||||
|
} else if s.cursorState == cursorStateMidBlock {
|
||||||
|
s.printNewLine()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) midBlock() {
|
||||||
|
s.cursorState = cursorStateMidBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) endBlock() {
|
||||||
|
s.printDelimiter()
|
||||||
|
s.cursorState = cursorStateEndBlock
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) stream() {
|
||||||
|
s.cursorState = cursorStateStreaming
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
|
||||||
|
s.startBlock()
|
||||||
|
s.println(0, header)
|
||||||
|
|
||||||
|
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
|
||||||
|
|
||||||
|
if message != "" {
|
||||||
|
s.printNewLine()
|
||||||
|
s.println(indentation, message)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.endBlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||||
|
s.startBlock()
|
||||||
|
s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||||
|
|
||||||
|
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||||
|
|
||||||
|
s.printNewLine()
|
||||||
|
s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
|
||||||
|
s.endBlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
|
||||||
|
switch failedComponentType {
|
||||||
|
case types.SpecComponentTypeBeforeSuite:
|
||||||
|
return " in Suite Setup (BeforeSuite)"
|
||||||
|
case types.SpecComponentTypeAfterSuite:
|
||||||
|
return " in Suite Teardown (AfterSuite)"
|
||||||
|
case types.SpecComponentTypeBeforeEach:
|
||||||
|
return " in Spec Setup (BeforeEach)"
|
||||||
|
case types.SpecComponentTypeJustBeforeEach:
|
||||||
|
return " in Spec Setup (JustBeforeEach)"
|
||||||
|
case types.SpecComponentTypeAfterEach:
|
||||||
|
return " in Spec Teardown (AfterEach)"
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
|
||||||
|
s.println(indentation, s.colorize(cyanColor, spec.Message))
|
||||||
|
s.printNewLine()
|
||||||
|
s.println(indentation, spec.Location.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
|
||||||
|
if state == types.SpecStatePanicked {
|
||||||
|
s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
|
||||||
|
s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
|
||||||
|
s.println(indentation, failure.Location.String())
|
||||||
|
s.printNewLine()
|
||||||
|
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||||
|
s.println(indentation, failure.Location.FullStackTrace)
|
||||||
|
} else {
|
||||||
|
s.println(indentation, s.colorize(redColor, failure.Message))
|
||||||
|
s.printNewLine()
|
||||||
|
s.println(indentation, failure.Location.String())
|
||||||
|
if fullTrace {
|
||||||
|
s.printNewLine()
|
||||||
|
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||||
|
s.println(indentation, failure.Location.FullStackTrace)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||||
|
startIndex := 1
|
||||||
|
indentation := 0
|
||||||
|
|
||||||
|
if len(componentTexts) == 1 {
|
||||||
|
startIndex = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := startIndex; i < len(componentTexts); i++ {
|
||||||
|
if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
|
||||||
|
color := redColor
|
||||||
|
if state == types.SpecStateSkipped {
|
||||||
|
color = cyanColor
|
||||||
|
}
|
||||||
|
blockType := ""
|
||||||
|
switch failedComponentType {
|
||||||
|
case types.SpecComponentTypeBeforeSuite:
|
||||||
|
blockType = "BeforeSuite"
|
||||||
|
case types.SpecComponentTypeAfterSuite:
|
||||||
|
blockType = "AfterSuite"
|
||||||
|
case types.SpecComponentTypeBeforeEach:
|
||||||
|
blockType = "BeforeEach"
|
||||||
|
case types.SpecComponentTypeJustBeforeEach:
|
||||||
|
blockType = "JustBeforeEach"
|
||||||
|
case types.SpecComponentTypeAfterEach:
|
||||||
|
blockType = "AfterEach"
|
||||||
|
case types.SpecComponentTypeIt:
|
||||||
|
blockType = "It"
|
||||||
|
case types.SpecComponentTypeMeasure:
|
||||||
|
blockType = "Measurement"
|
||||||
|
}
|
||||||
|
if succinct {
|
||||||
|
s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
|
||||||
|
} else {
|
||||||
|
s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
|
||||||
|
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if succinct {
|
||||||
|
s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
|
||||||
|
} else {
|
||||||
|
s.println(indentation, componentTexts[i])
|
||||||
|
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
indentation++
|
||||||
|
}
|
||||||
|
|
||||||
|
return indentation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||||
|
indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
|
||||||
|
|
||||||
|
if succinct {
|
||||||
|
if len(componentTexts) > 0 {
|
||||||
|
s.printNewLine()
|
||||||
|
s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
|
||||||
|
}
|
||||||
|
s.printNewLine()
|
||||||
|
indentation = 1
|
||||||
|
} else {
|
||||||
|
indentation--
|
||||||
|
}
|
||||||
|
|
||||||
|
return indentation
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
|
||||||
|
orderedKeys := make([]string, len(measurements))
|
||||||
|
for key, measurement := range measurements {
|
||||||
|
orderedKeys[measurement.Order] = key
|
||||||
|
}
|
||||||
|
return orderedKeys
|
||||||
|
}
|
||||||
|
|
||||||
|
// measurementReport builds a human-readable, colorized summary of a Measure
// spec's recorded measurements. In succinct mode each measurement occupies a
// single compact line; otherwise each gets a multi-line breakdown of its
// smallest/largest/average ± standard deviation values.
func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
	if len(spec.Measurements) == 0 {
		return "Found no measurements"
	}

	message := []string{}
	// Render measurements in their declared Order, not map-iteration order.
	orderedKeys := s.orderedMeasurementKeys(spec.Measurements)

	if succinct {
		message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
		for _, key := range orderedKeys {
			measurement := spec.Measurements[key]
			// One line per measurement: name, smallest (green),
			// average ± stddev (cyan), largest (red).
			message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
				s.colorize(boldStyle, "%s", measurement.Name),
				measurement.SmallestLabel,
				s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
				measurement.Units,
				measurement.AverageLabel,
				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
				measurement.Units,
				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
				measurement.Units,
				measurement.LargestLabel,
				s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
				measurement.Units,
			))
		}
	} else {
		message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
		for _, key := range orderedKeys {
			measurement := spec.Measurements[key]
			// NOTE(review): info is never reassigned, so the second "%s"
			// slot in the format below always renders empty; when
			// measurement.Info is non-nil it is appended as its own line
			// instead. This matches upstream ginkgo — confirm before
			// "fixing".
			info := ""
			if measurement.Info != nil {
				message = append(message, fmt.Sprintf("%v", measurement.Info))
			}

			// Multi-line breakdown: smallest (green), largest (red),
			// average ± stddev (cyan).
			message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
				s.colorize(boldStyle, "%s", measurement.Name),
				info,
				measurement.SmallestLabel,
				s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
				measurement.Units,
				measurement.LargestLabel,
				s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
				measurement.Units,
				measurement.AverageLabel,
				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
				measurement.Units,
				s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
				measurement.Units,
			))
		}
	}

	return strings.Join(message, "\n")
}
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue