Mirror of https://github.com/k8snetworkplumbingwg/multus-cni.git (synced 2025-04-27 19:25:28 +00:00)

Initial commit
Commit: 64e8857e21
Godeps/Godeps.json (new file, generated, 200 lines)
@@ -0,0 +1,200 @@
{
	"ImportPath": "github.com/Intel-Corp/multus-cni",
	"GoVersion": "go1.6",
	"GodepVersion": "v74",
	"Packages": [
		"./..."
	],
	"Deps": [
		{
			"ImportPath": "github.com/containernetworking/cni/pkg/invoke",
			"Comment": "v0.3.0-19-gfbe1a87",
			"Rev": "fbe1a87250aed33da9a57abbd76b08eb85ec85c1"
		},
		{
			"ImportPath": "github.com/containernetworking/cni/pkg/ip",
			"Comment": "v0.3.0-19-gfbe1a87",
			"Rev": "fbe1a87250aed33da9a57abbd76b08eb85ec85c1"
		},
		{
			"ImportPath": "github.com/containernetworking/cni/pkg/ipam",
			"Comment": "v0.3.0-19-gfbe1a87",
			"Rev": "fbe1a87250aed33da9a57abbd76b08eb85ec85c1"
		},
		{
			"ImportPath": "github.com/containernetworking/cni/pkg/ns",
			"Comment": "v0.3.0-19-gfbe1a87",
			"Rev": "fbe1a87250aed33da9a57abbd76b08eb85ec85c1"
		},
		{
			"ImportPath": "github.com/containernetworking/cni/pkg/skel",
			"Comment": "v0.3.0-19-gfbe1a87",
			"Rev": "fbe1a87250aed33da9a57abbd76b08eb85ec85c1"
		},
		{
			"ImportPath": "github.com/containernetworking/cni/pkg/testutils",
			"Comment": "v0.3.0-19-gfbe1a87",
			"Rev": "fbe1a87250aed33da9a57abbd76b08eb85ec85c1"
		},
		{
			"ImportPath": "github.com/containernetworking/cni/pkg/types",
			"Comment": "v0.3.0-19-gfbe1a87",
			"Rev": "fbe1a87250aed33da9a57abbd76b08eb85ec85c1"
		},
		{
			"ImportPath": "github.com/coreos/go-iptables/iptables",
			"Rev": "90456be57fcb8185b264b77ce42a9539df42df25"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/config",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/codelocation",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/containernode",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/failer",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/leafnodes",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/remote",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/spec",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/specrunner",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/suite",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/testingtproxy",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/internal/writer",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/reporters",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/reporters/stenographer",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/ginkgo/types",
			"Comment": "v1.2.0-29-g7f8ab55",
			"Rev": "7f8ab55aaf3b86885aa55b762e803744d1674700"
		},
		{
			"ImportPath": "github.com/onsi/gomega",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/format",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/internal/assertion",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/internal/asyncassertion",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/internal/oraclematcher",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/internal/testingtsupport",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/edge",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/node",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/matchers/support/goraph/util",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/onsi/gomega/types",
			"Comment": "v1.0-71-g2152b45",
			"Rev": "2152b45fa28a361beba9aab0885972323a444e28"
		},
		{
			"ImportPath": "github.com/vishvananda/netlink",
			"Rev": "77483a0e697ebcc2584461e9624b611767e7d203"
		},
		{
			"ImportPath": "github.com/vishvananda/netlink/nl",
			"Rev": "77483a0e697ebcc2584461e9624b611767e7d203"
		},
		{
			"ImportPath": "github.com/vishvananda/netns",
			"Rev": "8ba1072b58e0c2a240eb5f6120165c7776c3e7b8"
		},
		{
			"ImportPath": "golang.org/x/sys/unix",
			"Rev": "e11762ca30adc5b39fdbfd8c4250dabeb8e456d3"
		}
	]
}
Godeps/Readme (new file, generated, 5 lines)
@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
LICENSE (new file, 202 lines)
@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
README.md (new file, 82 lines)
@@ -0,0 +1,82 @@
# MULTUS CNI plugin

Please read [CNI](https://github.com/containernetworking/cni) for more information on CNI.

Multus is the Latin word for “Multi”.

As the name suggests, it acts as a multi-plugin in Kubernetes and provides multiple network interface support in a pod.

It is generic enough to run with other plugins such as ptp, local-host and flannel, with different IPAM configurations and networks.

Multus sits between the container runtime and the other plugins. It has no network configuration of its own and calls other plugins, such as flannel or Calico, to do the real network configuration job. Multus reuses the concept of invoking delegates from flannel: it groups the plugins into delegates and invokes each of them in sequential order, according to the JSON scheme in the CNI configuration.

## Build

This plugin requires Go 1.5+ to build.

Go 1.5 users will need to set `GO15VENDOREXPERIMENT=1` to get vendored dependencies. This flag is set by default in Go 1.6.

```
# ./build
```

## Network configuration reference

* `name` (string, required): the name of the network
* `type` (string, required): "multus"
* `delegates` ([]map, required): the list of delegate network configurations handled by Multus
* `masterplugin` (bool, set in exactly one delegate): marks the delegate whose result is reported back to the container runtime (see the sketch below)

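A minimal, self-contained sketch of how a delegate-based configuration like the one documented above can be parsed in Go. The struct and field names here are illustrative assumptions; the plugin's actual type is `NetConf` in `multus/multus.go` further down in this commit.

```
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// exampleConf is an illustrative mirror of the fields documented above.
type exampleConf struct {
	Name      string                   `json:"name"`
	Type      string                   `json:"type"`
	Delegates []map[string]interface{} `json:"delegates"`
}

func main() {
	raw := []byte(`{
		"name": "demo-network",
		"type": "multus",
		"delegates": [
			{"type": "flannel", "masterplugin": true},
			{"type": "sriov", "if0": "enp12s0f0"}
		]
	}`)

	conf := exampleConf{}
	if err := json.Unmarshal(raw, &conf); err != nil {
		log.Fatalf("failed to parse netconf: %v", err)
	}

	// Each delegate stays an opaque map: Multus only inspects "type" and
	// "masterplugin" and hands the rest to that delegate plugin verbatim.
	for i, d := range conf.Delegates {
		fmt.Printf("delegate %d: type=%v masterplugin=%v\n", i, d["type"], d["masterplugin"])
	}
}
```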
## Usage

Given the following network configuration:

```
# tee /etc/cni/net.d/multus-cni.conf <<-'EOF'
{
    "name": "minion1-multus-demo-network",
    "type": "multus",
    "delegates": [
        {
            "type": "sriov",
            "if0": "enp12s0f0",
            "if0name": "north0",
            "createmac": true,
            "ipam": {
                "type": "host-local",
                "subnet": "10.56.217.0/24",
                "rangeStart": "10.56.217.131",
                "rangeEnd": "10.56.217.190",
                "routes": [
                    { "dst": "0.0.0.0/0" }
                ],
                "gateway": "10.56.217.1"
            }
        },
        {
            "type": "sriov",
            "if0": "enp12s0f1",
            "if0name": "south0",
            "ipam": {
                "type": "host-local",
                "subnet": "10.56.217.0/24",
                "rangeStart": "10.56.217.100",
                "rangeEnd": "10.56.217.130",
                "routes": [
                    { "dst": "0.0.0.0/0" }
                ],
                "gateway": "10.56.217.1"
            }
        },
        {
            "type": "flannel",
            "masterplugin": true,
            "delegate": {
                "isDefaultGateway": true
            }
        }
    ]
}
EOF
```

To test Multus CNI with Docker, make sure that the multus, sriov and flannel binaries are in the /opt/cni/bin directory, then follow the steps described in the [CNI](https://github.com/containernetworking/cni) repository.
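As an alternative to a full container-runtime setup, the sketch below shows how the vendored `invoke` package (included later in this commit) could drive a CNI plugin binary directly. The binary path, network namespace path and netconf are assumptions for illustration; actually running it requires root privileges and an existing network namespace.

```
package main

import (
	"fmt"
	"log"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// Assumed locations; adjust for your host.
	const pluginPath = "/opt/cni/bin/multus"
	const netnsPath = "/var/run/netns/testns"

	netconf := []byte(`{
		"name": "minion1-multus-demo-network",
		"type": "multus",
		"delegates": [
			{ "type": "flannel", "masterplugin": true, "delegate": { "isDefaultGateway": true } }
		]
	}`)

	args := &invoke.Args{
		Command:     "ADD",
		ContainerID: "example-container",
		NetNS:       netnsPath,
		IfName:      "eth0",
		Path:        "/opt/cni/bin",
	}

	// ExecPluginWithResult runs the binary with the CNI environment built
	// by Args.AsEnv() and passes the netconf on stdin.
	result, err := invoke.ExecPluginWithResult(pluginPath, netconf, args)
	if err != nil {
		log.Fatalf("ADD failed: %v", err)
	}
	fmt.Printf("ADD result: %+v\n", result)
}
```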
build (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -e

ORG_PATH="github.com/Intel-Corp"
REPO_PATH="${ORG_PATH}/multus-cni"

if [ ! -h gopath/src/${REPO_PATH} ]; then
	mkdir -p gopath/src/${ORG_PATH}
	ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255
fi

export GO15VENDOREXPERIMENT=1
export GOBIN=${PWD}/bin
export GOPATH=${PWD}/gopath

echo "Building plugins"
go install "$@" ${REPO_PATH}/multus
multus/multus.go (new file, 285 lines)
@@ -0,0 +1,285 @@
// Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This is a "Multi-plugin". It is a fork of the flannel CNI plugin.
// It reads other plugins' netconf and then invokes them, e.g.
// the flannel or sriov plugin.

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/containernetworking/cni/pkg/invoke"
	"github.com/containernetworking/cni/pkg/skel"
	"github.com/containernetworking/cni/pkg/types"
)

const defaultCNIDir = "/var/lib/cni/multus"

var masterpluginEnabled bool

type NetConf struct {
	types.NetConf
	CNIDir    string                   `json:"cniDir"`
	Delegates []map[string]interface{} `json:"delegates"`
}

// taken from cni/plugins/meta/flannel/flannel.go
func isString(i interface{}) bool {
	_, ok := i.(string)
	return ok
}

func isBool(i interface{}) bool {
	_, ok := i.(bool)
	return ok
}

func loadNetConf(bytes []byte) (*NetConf, error) {
	netconf := &NetConf{}
	if err := json.Unmarshal(bytes, netconf); err != nil {
		return nil, fmt.Errorf("failed to load netconf: %v", err)
	}

	if netconf.Delegates == nil {
		return nil, fmt.Errorf(`"delegates" is must, refer README.md`)
	}

	if netconf.CNIDir == "" {
		netconf.CNIDir = defaultCNIDir
	}

	return netconf, nil
}

func saveScratchNetConf(containerID, dataDir string, netconf []byte) error {
	if err := os.MkdirAll(dataDir, 0700); err != nil {
		return fmt.Errorf("failed to create the multus data directory(%q): %v", dataDir, err)
	}

	path := filepath.Join(dataDir, containerID)

	err := ioutil.WriteFile(path, netconf, 0600)
	if err != nil {
		return fmt.Errorf("failed to write container data in the path(%q): %v", path, err)
	}

	return err
}

func consumeScratchNetConf(containerID, dataDir string) ([]byte, error) {
	path := filepath.Join(dataDir, containerID)
	defer os.Remove(path)

	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read container data in the path(%q): %v", path, err)
	}

	return data, err
}

func getifname() (f func() string) {
	var interfaceIndex int
	f = func() string {
		ifname := fmt.Sprintf("net%d", interfaceIndex)
		interfaceIndex++
		return ifname
	}

	return
}

func saveDelegates(containerID, dataDir string, delegates []map[string]interface{}) error {
	delegatesBytes, err := json.Marshal(delegates)
	if err != nil {
		return fmt.Errorf("error serializing delegate netconf: %v", err)
	}

	if err = saveScratchNetConf(containerID, dataDir, delegatesBytes); err != nil {
		return fmt.Errorf("error in saving the delegates : %v", err)
	}

	return err
}

func checkDelegate(netconf map[string]interface{}) error {
	if netconf["type"] == nil {
		return fmt.Errorf("delegate must have the field 'type'")
	}

	if !isString(netconf["type"]) {
		return fmt.Errorf("delegate field 'type' must be a string")
	}

	if netconf["masterplugin"] != nil {
		if !isBool(netconf["masterplugin"]) {
			return fmt.Errorf("delegate field 'masterplugin' must be a bool")
		}
	}

	if netconf["masterplugin"] != nil {
		if netconf["masterplugin"].(bool) != false && masterpluginEnabled != true {
			masterpluginEnabled = true
		} else if netconf["masterplugin"].(bool) != false && masterpluginEnabled == true {
			return fmt.Errorf("only one delegate can have 'masterplugin'")
		}
	}
	return nil
}

func isMasterplugin(netconf map[string]interface{}) bool {
	if netconf["masterplugin"] == nil {
		return false
	}

	if netconf["masterplugin"].(bool) == true {
		return true
	}

	return false
}

func delegateAdd(podif func() string, argif string, netconf map[string]interface{}, onlyMaster bool) (bool, error) {
	netconfBytes, err := json.Marshal(netconf)
	if err != nil {
		return true, fmt.Errorf("Multus: error serializing multus delegate netconf: %v", err)
	}

	if isMasterplugin(netconf) != onlyMaster {
		return true, nil
	}

	if !isMasterplugin(netconf) {
		if os.Setenv("CNI_IFNAME", podif()) != nil {
			return true, fmt.Errorf("Multus: error in setting CNI_IFNAME")
		}
	} else {
		if os.Setenv("CNI_IFNAME", argif) != nil {
			return true, fmt.Errorf("Multus: error in setting CNI_IFNAME")
		}
	}

	result, err := invoke.DelegateAdd(netconf["type"].(string), netconfBytes)
	if err != nil {
		return true, fmt.Errorf("Multus: error in invoke Delegate add - %q: %v", netconf["type"].(string), err)
	}

	if !isMasterplugin(netconf) {
		return true, nil
	}

	return false, result.Print()
}

func delegateDel(podif func() string, argif string, netconf map[string]interface{}) error {
	netconfBytes, err := json.Marshal(netconf)
	if err != nil {
		return fmt.Errorf("Multus: error serializing multus delegate netconf: %v", err)
	}

	if !isMasterplugin(netconf) {
		if os.Setenv("CNI_IFNAME", podif()) != nil {
			return fmt.Errorf("Multus: error in setting CNI_IFNAME")
		}
	} else {
		if os.Setenv("CNI_IFNAME", argif) != nil {
			return fmt.Errorf("Multus: error in setting CNI_IFNAME")
		}
	}

	err = invoke.DelegateDel(netconf["type"].(string), netconfBytes)
	if err != nil {
		return fmt.Errorf("Multus: error in invoke Delegate del - %q: %v", netconf["type"].(string), err)
	}

	return err
}

func cmdAdd(args *skel.CmdArgs) error {
	var result error
	n, err := loadNetConf(args.StdinData)
	if err != nil {
		return err
	}

	for _, delegate := range n.Delegates {
		if err := checkDelegate(delegate); err != nil {
			return fmt.Errorf("Multus: Err in delegate conf: %v", err)
		}
	}

	if err := saveDelegates(args.ContainerID, n.CNIDir, n.Delegates); err != nil {
		return fmt.Errorf("Multus: Err in saving the delegates: %v", err)
	}

	podifName := getifname()
	for _, delegate := range n.Delegates {
		err, r := delegateAdd(podifName, args.IfName, delegate, true)
		if err != true {
			result = r
		} else if (err != false) && r != nil {
			return r
		}
	}

	for _, delegate := range n.Delegates {
		err, r := delegateAdd(podifName, args.IfName, delegate, false)
		if err != true {
			result = r
		} else if (err != false) && r != nil {
			return r
		}
	}

	return result
}

func cmdDel(args *skel.CmdArgs) error {
	var result error
	in, err := loadNetConf(args.StdinData)
	if err != nil {
		return err
	}

	netconfBytes, err := consumeScratchNetConf(args.ContainerID, in.CNIDir)
	if err != nil {
		return fmt.Errorf("Multus: Err in reading the delegates: %v", err)
	}

	var Delegates []map[string]interface{}
	if err := json.Unmarshal(netconfBytes, &Delegates); err != nil {
		return fmt.Errorf("Multus: failed to load netconf: %v", err)
	}

	podifName := getifname()
	for _, delegate := range Delegates {
		r := delegateDel(podifName, args.IfName, delegate)
		if r != nil {
			return r
		}
		result = r
	}

	return result
}

func main() {
	skel.PluginMain(cmdAdd, cmdDel)
}
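cmdAdd above makes two passes over the delegates: the masterplugin delegate runs first and keeps the interface name supplied by the runtime (its result is what gets printed back), while every other delegate receives a synthetic name (net0, net1, ...) from the getifname closure. The following standalone sketch, purely illustrative and without any CNI plumbing, shows that ordering:

```
package main

import "fmt"

// getifname mirrors the closure in multus.go: each call returns the next
// synthetic interface name used for a non-master delegate.
func getifname() func() string {
	idx := 0
	return func() string {
		name := fmt.Sprintf("net%d", idx)
		idx++
		return name
	}
}

func main() {
	delegates := []map[string]interface{}{
		{"type": "sriov"},
		{"type": "flannel", "masterplugin": true},
		{"type": "sriov"},
	}

	isMaster := func(d map[string]interface{}) bool {
		v, ok := d["masterplugin"].(bool)
		return ok && v
	}

	nextIf := getifname()
	runtimeIfName := "eth0" // the name handed in by the container runtime

	// Pass 1: only the masterplugin delegate, keeping the runtime's
	// interface name, like cmdAdd's first loop.
	for _, d := range delegates {
		if isMaster(d) {
			fmt.Printf("ADD %-8v via CNI_IFNAME=%s (result reported back)\n", d["type"], runtimeIfName)
		}
	}

	// Pass 2: the remaining delegates get net0, net1, ...
	for _, d := range delegates {
		if !isMaster(d) {
			fmt.Printf("ADD %-8v via CNI_IFNAME=%s\n", d["type"], nextIf())
		}
	}
}
```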
vendor/github.com/containernetworking/cni/LICENSE (new file, generated, vendored, 202 lines)
@@ -0,0 +1,202 @@
(Identical Apache License, Version 2.0 text as in the top-level LICENSE file above.)
vendor/github.com/containernetworking/cni/pkg/invoke/args.go (new file, generated, vendored, 76 lines)
@@ -0,0 +1,76 @@
// Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package invoke

import (
	"os"
	"strings"
)

type CNIArgs interface {
	// For use with os/exec; i.e., return nil to inherit the
	// environment from this process
	AsEnv() []string
}

type inherited struct{}

var inheritArgsFromEnv inherited

func (_ *inherited) AsEnv() []string {
	return nil
}

func ArgsFromEnv() CNIArgs {
	return &inheritArgsFromEnv
}

type Args struct {
	Command       string
	ContainerID   string
	NetNS         string
	PluginArgs    [][2]string
	PluginArgsStr string
	IfName        string
	Path          string
}

func (args *Args) AsEnv() []string {
	env := os.Environ()
	pluginArgsStr := args.PluginArgsStr
	if pluginArgsStr == "" {
		pluginArgsStr = stringify(args.PluginArgs)
	}

	env = append(env,
		"CNI_COMMAND="+args.Command,
		"CNI_CONTAINERID="+args.ContainerID,
		"CNI_NETNS="+args.NetNS,
		"CNI_ARGS="+pluginArgsStr,
		"CNI_IFNAME="+args.IfName,
		"CNI_PATH="+args.Path)
	return env
}

// taken from rkt/networking/net_plugin.go
func stringify(pluginArgs [][2]string) string {
	entries := make([]string, len(pluginArgs))

	for i, kv := range pluginArgs {
		entries[i] = strings.Join(kv[:], "=")
	}

	return strings.Join(entries, ";")
}
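To see the environment contract that Args encodes, the snippet below (illustrative, not part of the vendored package; the values are assumptions) builds an Args value and prints only the CNI_* variables produced by AsEnv:

```
package main

import (
	"fmt"
	"strings"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	args := &invoke.Args{
		Command:     "ADD",
		ContainerID: "example-container",
		NetNS:       "/var/run/netns/testns",
		PluginArgs:  [][2]string{{"K8S_POD_NAME", "demo"}},
		IfName:      "eth0",
		Path:        "/opt/cni/bin",
	}

	// AsEnv returns the parent environment plus the CNI_* variables;
	// print just the CNI_* ones (PluginArgs is stringified into CNI_ARGS).
	for _, kv := range args.AsEnv() {
		if strings.HasPrefix(kv, "CNI_") {
			fmt.Println(kv)
		}
	}
}
```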
vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go (new file, generated, vendored, 53 lines)
@@ -0,0 +1,53 @@
// Copyright 2016 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package invoke

import (
	"fmt"
	"os"
	"strings"

	"github.com/containernetworking/cni/pkg/types"
)

func DelegateAdd(delegatePlugin string, netconf []byte) (*types.Result, error) {
	if os.Getenv("CNI_COMMAND") != "ADD" {
		return nil, fmt.Errorf("CNI_COMMAND is not ADD")
	}

	paths := strings.Split(os.Getenv("CNI_PATH"), ":")

	pluginPath, err := FindInPath(delegatePlugin, paths)
	if err != nil {
		return nil, err
	}

	return ExecPluginWithResult(pluginPath, netconf, ArgsFromEnv())
}

func DelegateDel(delegatePlugin string, netconf []byte) error {
	if os.Getenv("CNI_COMMAND") != "DEL" {
		return fmt.Errorf("CNI_COMMAND is not DEL")
	}

	paths := strings.Split(os.Getenv("CNI_PATH"), ":")

	pluginPath, err := FindInPath(delegatePlugin, paths)
	if err != nil {
		return err
	}

	return ExecPluginWithoutResult(pluginPath, netconf, ArgsFromEnv())
}
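DelegateAdd and DelegateDel read CNI_COMMAND and CNI_PATH from the calling plugin's own environment, which is why multus only has to override CNI_IFNAME before delegating. A hedged sketch of that contract follows; the plugin name, paths and namespace are assumptions, and the call needs root plus the standard CNI plugins installed to actually succeed:

```
package main

import (
	"fmt"
	"os"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// In a real flow the container runtime sets these; they are set here
	// only to make the environment contract explicit.
	os.Setenv("CNI_COMMAND", "ADD")
	os.Setenv("CNI_PATH", "/opt/cni/bin")
	os.Setenv("CNI_IFNAME", "net0")
	os.Setenv("CNI_NETNS", "/var/run/netns/testns")
	os.Setenv("CNI_CONTAINERID", "example-container")

	netconf := []byte(`{"name": "demo", "type": "loopback"}`)

	// DelegateAdd refuses to run unless CNI_COMMAND is ADD, then looks up
	// the "loopback" binary in CNI_PATH and executes it with this netconf.
	result, err := invoke.DelegateAdd("loopback", netconf)
	if err != nil {
		fmt.Println("delegate add failed:", err)
		return
	}
	fmt.Printf("delegate result: %+v\n", result)
}
```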
vendor/github.com/containernetworking/cni/pkg/invoke/exec.go (new file, generated, vendored, 75 lines)
@@ -0,0 +1,75 @@
// Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package invoke

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"

	"github.com/containernetworking/cni/pkg/types"
)

func pluginErr(err error, output []byte) error {
	if _, ok := err.(*exec.ExitError); ok {
		emsg := types.Error{}
		if perr := json.Unmarshal(output, &emsg); perr != nil {
			return fmt.Errorf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr)
		}
		details := ""
		if emsg.Details != "" {
			details = fmt.Sprintf("; %v", emsg.Details)
		}
		return fmt.Errorf("%v%v", emsg.Msg, details)
	}

	return err
}

func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs) (*types.Result, error) {
	stdoutBytes, err := execPlugin(pluginPath, netconf, args)
	if err != nil {
		return nil, err
	}

	res := &types.Result{}
	err = json.Unmarshal(stdoutBytes, res)
	return res, err
}

func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs) error {
	_, err := execPlugin(pluginPath, netconf, args)
	return err
}

func execPlugin(pluginPath string, netconf []byte, args CNIArgs) ([]byte, error) {
	stdout := &bytes.Buffer{}

	c := exec.Cmd{
		Env:    args.AsEnv(),
		Path:   pluginPath,
		Args:   []string{pluginPath},
		Stdin:  bytes.NewBuffer(netconf),
		Stdout: stdout,
		Stderr: os.Stderr,
	}
	if err := c.Run(); err != nil {
		return nil, pluginErr(err, stdout.Bytes())
	}

	return stdout.Bytes(), nil
}
vendor/github.com/containernetworking/cni/pkg/invoke/find.go (new file, generated, vendored, 47 lines)
@@ -0,0 +1,47 @@
// Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package invoke

import (
	"fmt"
	"os"
	"path/filepath"
)

// FindInPath returns the full path of the plugin by searching in the provided path
func FindInPath(plugin string, paths []string) (string, error) {
	if plugin == "" {
		return "", fmt.Errorf("no plugin name provided")
	}

	if len(paths) == 0 {
		return "", fmt.Errorf("no paths provided")
	}

	var fullpath string
	for _, path := range paths {
		full := filepath.Join(path, plugin)
		if fi, err := os.Stat(full); err == nil && fi.Mode().IsRegular() {
			fullpath = full
			break
		}
	}

	if fullpath == "" {
		return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths)
	}

	return fullpath, nil
}
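A short, illustrative use of FindInPath, resolving a plugin name against a colon-separated CNI_PATH-style list (the directories here are assumptions):

```
package main

import (
	"fmt"
	"strings"

	"github.com/containernetworking/cni/pkg/invoke"
)

func main() {
	// CNI_PATH is a colon-separated list, e.g. "/opt/cni/bin:/usr/libexec/cni".
	paths := strings.Split("/opt/cni/bin:/usr/libexec/cni", ":")

	// Returns the first regular file named "flannel" found in those
	// directories, or an error if none exists.
	full, err := invoke.FindInPath("flannel", paths)
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("found plugin at", full)
}
```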
vendor/github.com/containernetworking/cni/pkg/ip/cidr.go (new file, generated, vendored, 51 lines)
@@ -0,0 +1,51 @@
// Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ip

import (
	"math/big"
	"net"
)

// NextIP returns IP incremented by 1
func NextIP(ip net.IP) net.IP {
	i := ipToInt(ip)
	return intToIP(i.Add(i, big.NewInt(1)))
}

// PrevIP returns IP decremented by 1
func PrevIP(ip net.IP) net.IP {
	i := ipToInt(ip)
	return intToIP(i.Sub(i, big.NewInt(1)))
}

func ipToInt(ip net.IP) *big.Int {
	if v := ip.To4(); v != nil {
		return big.NewInt(0).SetBytes(v)
	}
	return big.NewInt(0).SetBytes(ip.To16())
}

func intToIP(i *big.Int) net.IP {
	return net.IP(i.Bytes())
}

// Network masks off the host portion of the IP
func Network(ipn *net.IPNet) *net.IPNet {
	return &net.IPNet{
		IP:   ipn.IP.Mask(ipn.Mask),
		Mask: ipn.Mask,
	}
}
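A quick illustration of the address arithmetic above, using an address from the README example subnet:

```
package main

import (
	"fmt"
	"net"

	"github.com/containernetworking/cni/pkg/ip"
)

func main() {
	addr, ipn, _ := net.ParseCIDR("10.56.217.131/24")

	fmt.Println(ip.NextIP(addr)) // 10.56.217.132
	fmt.Println(ip.PrevIP(addr)) // 10.56.217.130

	// Network masks off the host bits, yielding 10.56.217.0/24.
	fmt.Println(ip.Network(&net.IPNet{IP: addr, Mask: ipn.Mask}))
}
```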
vendor/github.com/containernetworking/cni/pkg/ip/ipforward.go (new file, generated, vendored, 31 lines)
@@ -0,0 +1,31 @@
// Copyright 2015 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ip

import (
	"io/ioutil"
)

func EnableIP4Forward() error {
	return echo1("/proc/sys/net/ipv4/ip_forward")
}

func EnableIP6Forward() error {
	return echo1("/proc/sys/net/ipv6/conf/all/forwarding")
}

func echo1(f string) error {
	return ioutil.WriteFile(f, []byte("1"), 0644)
}
vendor/github.com/containernetworking/cni/pkg/ip/ipmasq.go (new file, generated, vendored, 66 lines)
@@ -0,0 +1,66 @@
|
||||
// Copyright 2015 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ip
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/coreos/go-iptables/iptables"
|
||||
)
|
||||
|
||||
// SetupIPMasq installs iptables rules to masquerade traffic
|
||||
// coming from ipn and going outside of it
|
||||
func SetupIPMasq(ipn *net.IPNet, chain string, comment string) error {
|
||||
ipt, err := iptables.New()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate iptables: %v", err)
|
||||
}
|
||||
|
||||
if err = ipt.NewChain("nat", chain); err != nil {
|
||||
if err.(*iptables.Error).ExitStatus() != 1 {
|
||||
// TODO(eyakubovich): assumes exit status 1 implies chain exists
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = ipt.AppendUnique("nat", chain, "-d", ipn.String(), "-j", "ACCEPT", "-m", "comment", "--comment", comment); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = ipt.AppendUnique("nat", chain, "!", "-d", "224.0.0.0/4", "-j", "MASQUERADE", "-m", "comment", "--comment", comment); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ipt.AppendUnique("nat", "POSTROUTING", "-s", ipn.String(), "-j", chain, "-m", "comment", "--comment", comment)
|
||||
}
|
||||
|
||||
// TeardownIPMasq undoes the effects of SetupIPMasq
|
||||
func TeardownIPMasq(ipn *net.IPNet, chain string, comment string) error {
|
||||
ipt, err := iptables.New()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to locate iptables: %v", err)
|
||||
}
|
||||
|
||||
if err = ipt.Delete("nat", "POSTROUTING", "-s", ipn.String(), "-j", chain, "-m", "comment", "--comment", comment); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = ipt.ClearChain("nat", chain); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ipt.DeleteChain("nat", chain)
|
||||
}
|
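Usage sketch (not part of this commit): a plugin with an IP-masquerade option would typically pair these two calls in its ADD and DEL paths. The chain/comment naming scheme and helper names below are assumptions, not a convention defined by this package.

```go
package example

import (
	"fmt"
	"net"

	"github.com/containernetworking/cni/pkg/ip"
)

// setupMasq is a hypothetical ADD-time helper: it masquerades traffic
// leaving the container subnet. The chain/comment format is an assumption.
func setupMasq(containerID string, ipn *net.IPNet) error {
	chain := fmt.Sprintf("CNI-%.20s", containerID)
	comment := fmt.Sprintf("id: %q", containerID)
	return ip.SetupIPMasq(ipn, chain, comment)
}

// teardownMasq is the matching DEL-time helper.
func teardownMasq(containerID string, ipn *net.IPNet) error {
	chain := fmt.Sprintf("CNI-%.20s", containerID)
	comment := fmt.Sprintf("id: %q", containerID)
	return ip.TeardownIPMasq(ipn, chain, comment)
}
```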
153
vendor/github.com/containernetworking/cni/pkg/ip/link.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
|
||||
// Copyright 2015 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ip
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/ns"
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
|
||||
func makeVethPair(name, peer string, mtu int) (netlink.Link, error) {
|
||||
veth := &netlink.Veth{
|
||||
LinkAttrs: netlink.LinkAttrs{
|
||||
Name: name,
|
||||
Flags: net.FlagUp,
|
||||
MTU: mtu,
|
||||
},
|
||||
PeerName: peer,
|
||||
}
|
||||
if err := netlink.LinkAdd(veth); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return veth, nil
|
||||
}
|
||||
|
||||
func makeVeth(name string, mtu int) (peerName string, veth netlink.Link, err error) {
|
||||
for i := 0; i < 10; i++ {
|
||||
peerName, err = RandomVethName()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
veth, err = makeVethPair(name, peerName, mtu)
|
||||
switch {
|
||||
case err == nil:
|
||||
return
|
||||
|
||||
case os.IsExist(err):
|
||||
continue
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("failed to make veth pair: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// should really never be hit
|
||||
err = fmt.Errorf("failed to find a unique veth name")
|
||||
return
|
||||
}
|
||||
|
||||
// RandomVethName returns a name of the form "veth" plus a random hex suffix (derived from entropy)
|
||||
func RandomVethName() (string, error) {
|
||||
entropy := make([]byte, 4)
|
||||
_, err := rand.Reader.Read(entropy)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate random veth name: %v", err)
|
||||
}
|
||||
|
||||
// NetworkManager (recent versions) will ignore veth devices that start with "veth"
|
||||
return fmt.Sprintf("veth%x", entropy), nil
|
||||
}
|
||||
|
||||
// SetupVeth sets up a virtual ethernet link.
|
||||
// Should be in container netns, and will switch back to hostNS to set the host
|
||||
// veth end up.
|
||||
func SetupVeth(contVethName string, mtu int, hostNS ns.NetNS) (hostVeth, contVeth netlink.Link, err error) {
|
||||
var hostVethName string
|
||||
hostVethName, contVeth, err = makeVeth(contVethName, mtu)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = netlink.LinkSetUp(contVeth); err != nil {
|
||||
err = fmt.Errorf("failed to set %q up: %v", contVethName, err)
|
||||
return
|
||||
}
|
||||
|
||||
hostVeth, err = netlink.LinkByName(hostVethName)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to lookup %q: %v", hostVethName, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = netlink.LinkSetNsFd(hostVeth, int(hostNS.Fd())); err != nil {
|
||||
err = fmt.Errorf("failed to move veth to host netns: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
err = hostNS.Do(func(_ ns.NetNS) error {
|
||||
hostVeth, err := netlink.LinkByName(hostVethName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to lookup %q in %q: %v", hostVethName, hostNS.Path(), err)
|
||||
}
|
||||
|
||||
if err = netlink.LinkSetUp(hostVeth); err != nil {
|
||||
return fmt.Errorf("failed to set %q up: %v", hostVethName, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// DelLinkByName removes an interface link.
|
||||
func DelLinkByName(ifName string) error {
|
||||
iface, err := netlink.LinkByName(ifName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to lookup %q: %v", ifName, err)
|
||||
}
|
||||
|
||||
if err = netlink.LinkDel(iface); err != nil {
|
||||
return fmt.Errorf("failed to delete %q: %v", ifName, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DelLinkByNameAddr removes an interface and returns its IP address
|
||||
// of the specified family
|
||||
func DelLinkByNameAddr(ifName string, family int) (*net.IPNet, error) {
|
||||
iface, err := netlink.LinkByName(ifName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to lookup %q: %v", ifName, err)
|
||||
}
|
||||
|
||||
addrs, err := netlink.AddrList(iface, family)
|
||||
if err != nil || len(addrs) == 0 {
|
||||
return nil, fmt.Errorf("failed to get IP addresses for %q: %v", ifName, err)
|
||||
}
|
||||
|
||||
if err = netlink.LinkDel(iface); err != nil {
|
||||
return nil, fmt.Errorf("failed to delete %q: %v", ifName, err)
|
||||
}
|
||||
|
||||
return addrs[0].IPNet, nil
|
||||
}
|
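Usage sketch (not part of this commit): SetupVeth has to run inside the container namespace while holding a handle to the host namespace, which is usually arranged like the hypothetical helper below (names are placeholders).

```go
package example

import (
	"github.com/containernetworking/cni/pkg/ip"
	"github.com/containernetworking/cni/pkg/ns"
	"github.com/vishvananda/netlink"
)

// createVethPair is a hypothetical helper: it enters the container netns,
// creates the veth pair there, and lets SetupVeth move the host end back
// into hostNS (the namespace this function is called from).
func createVethPair(netnsPath, ifName string, mtu int) (hostVeth netlink.Link, err error) {
	hostNS, err := ns.GetCurrentNS()
	if err != nil {
		return nil, err
	}
	defer hostNS.Close()

	err = ns.WithNetNSPath(netnsPath, func(_ ns.NetNS) error {
		hv, _, err := ip.SetupVeth(ifName, mtu, hostNS)
		if err != nil {
			return err
		}
		hostVeth = hv
		return nil
	})
	return hostVeth, err
}
```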
47
vendor/github.com/containernetworking/cni/pkg/ip/route.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
// Copyright 2015 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ip
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
|
||||
// AddDefaultRoute sets the default route on the given gateway.
|
||||
func AddDefaultRoute(gw net.IP, dev netlink.Link) error {
|
||||
_, defNet, _ := net.ParseCIDR("0.0.0.0/0")
|
||||
return AddRoute(defNet, gw, dev)
|
||||
}
|
||||
|
||||
// AddRoute adds a universally-scoped route to a device.
|
||||
func AddRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error {
|
||||
return netlink.RouteAdd(&netlink.Route{
|
||||
LinkIndex: dev.Attrs().Index,
|
||||
Scope: netlink.SCOPE_UNIVERSE,
|
||||
Dst: ipn,
|
||||
Gw: gw,
|
||||
})
|
||||
}
|
||||
|
||||
// AddHostRoute adds a host-scoped route to a device.
|
||||
func AddHostRoute(ipn *net.IPNet, gw net.IP, dev netlink.Link) error {
|
||||
return netlink.RouteAdd(&netlink.Route{
|
||||
LinkIndex: dev.Attrs().Index,
|
||||
Scope: netlink.SCOPE_HOST,
|
||||
Dst: ipn,
|
||||
Gw: gw,
|
||||
})
|
||||
}
|
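Usage sketch (not part of this commit): routes added with these helpers land in whichever namespace the caller is currently in, so a plugin normally calls them from inside the container netns. The helper name and addresses below are placeholders.

```go
package example

import (
	"net"

	"github.com/containernetworking/cni/pkg/ip"
	"github.com/vishvananda/netlink"
)

// addContainerRoutes is a hypothetical helper run inside the container
// netns: it points a subnet route and the default route at the gateway.
func addContainerRoutes(link netlink.Link, gw net.IP) error {
	// Route a specific prefix via the gateway (placeholder prefix).
	_, prefix, err := net.ParseCIDR("10.10.0.0/16")
	if err != nil {
		return err
	}
	if err := ip.AddRoute(prefix, gw, link); err != nil {
		return err
	}
	// Then install 0.0.0.0/0 via the same gateway.
	return ip.AddDefaultRoute(gw, link)
}
```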
68
vendor/github.com/containernetworking/cni/pkg/ipam/ipam.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
|
||||
// Copyright 2015 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ipam
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/invoke"
|
||||
"github.com/containernetworking/cni/pkg/ip"
|
||||
"github.com/containernetworking/cni/pkg/types"
|
||||
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
|
||||
func ExecAdd(plugin string, netconf []byte) (*types.Result, error) {
|
||||
return invoke.DelegateAdd(plugin, netconf)
|
||||
}
|
||||
|
||||
func ExecDel(plugin string, netconf []byte) error {
|
||||
return invoke.DelegateDel(plugin, netconf)
|
||||
}
|
||||
|
||||
// ConfigureIface takes the result of IPAM plugin and
|
||||
// applies to the ifName interface
|
||||
func ConfigureIface(ifName string, res *types.Result) error {
|
||||
link, err := netlink.LinkByName(ifName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to lookup %q: %v", ifName, err)
|
||||
}
|
||||
|
||||
if err := netlink.LinkSetUp(link); err != nil {
|
||||
return fmt.Errorf("failed to set %q UP: %v", ifName, err)
|
||||
}
|
||||
|
||||
// TODO(eyakubovich): IPv6
|
||||
addr := &netlink.Addr{IPNet: &res.IP4.IP, Label: ""}
|
||||
if err = netlink.AddrAdd(link, addr); err != nil {
|
||||
return fmt.Errorf("failed to add IP addr to %q: %v", ifName, err)
|
||||
}
|
||||
|
||||
for _, r := range res.IP4.Routes {
|
||||
gw := r.GW
|
||||
if gw == nil {
|
||||
gw = res.IP4.Gateway
|
||||
}
|
||||
if err = ip.AddRoute(&r.Dst, gw, link); err != nil {
|
||||
// we skip over duplicate routes as we assume the first one wins
|
||||
if !os.IsExist(err) {
|
||||
return fmt.Errorf("failed to add route '%v via %v dev %v': %v", r.Dst, gw, ifName, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
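Usage sketch (not part of this commit): a typical plugin cmdAdd delegates allocation to the configured IPAM plugin and then applies the result inside the container namespace. The sketch below omits interface creation and cleanup-on-error for brevity.

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/containernetworking/cni/pkg/ipam"
	"github.com/containernetworking/cni/pkg/ns"
	"github.com/containernetworking/cni/pkg/skel"
	"github.com/containernetworking/cni/pkg/types"
)

// cmdAdd sketches the common IPAM delegation pattern: parse the netconf,
// let the named IPAM plugin allocate an address, then apply the result to
// the container-side interface.
func cmdAdd(args *skel.CmdArgs) error {
	conf := types.NetConf{}
	if err := json.Unmarshal(args.StdinData, &conf); err != nil {
		return fmt.Errorf("failed to load netconf: %v", err)
	}

	// Delegate allocation to the configured IPAM plugin (e.g. host-local).
	result, err := ipam.ExecAdd(conf.IPAM.Type, args.StdinData)
	if err != nil {
		return err
	}

	// Apply the allocated address and routes inside the container netns.
	err = ns.WithNetNSPath(args.Netns, func(_ ns.NetNS) error {
		return ipam.ConfigureIface(args.IfName, result)
	})
	if err != nil {
		return err
	}

	// Print the result to stdout for the runtime.
	return result.Print()
}
```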
31
vendor/github.com/containernetworking/cni/pkg/ns/README.md
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
### Namespaces, Threads, and Go
|
||||
On Linux each OS thread can have a different network namespace. Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines. This can result in a goroutine switching network namespaces without notice and lead to errors in your code.
|
||||
|
||||
### Namespace Switching
|
||||
Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads.
|
||||
|
||||
Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits. Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in.
|
||||
|
||||
For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things. First, the goroutine calling `Set()` must have previously called `LockOSThread()`. Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between. You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine. Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly.
|
||||
|
||||
### Do() The Recommended Thing
|
||||
The `ns.Do()` method provides control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code. For example:
|
||||
|
||||
```go
|
||||
targetNs, err := ns.NewNS()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = targetNs.Do(func(hostNs ns.NetNS) error {
|
||||
dummy := &netlink.Dummy{
|
||||
LinkAttrs: netlink.LinkAttrs{
|
||||
Name: "dummy0",
|
||||
},
|
||||
}
|
||||
return netlink.LinkAdd(dummy)
|
||||
})
|
||||
```
|
||||
|
||||
### Further Reading
|
||||
- https://github.com/golang/go/wiki/LockOSThread
|
||||
- http://morsmachine.dk/go-scheduler
|
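For illustration only: the discipline described above (lock the OS thread, remember the original namespace, switch, work, switch back) can be sketched as plain Go. This is not how `ns.Do()` is implemented — the real `Do()` runs the closure in a dedicated locked goroutine so a failed restore cannot leak into the caller's thread — but it makes the ordering explicit.

```go
package example

import (
	"runtime"

	"github.com/containernetworking/cni/pkg/ns"
)

// inNamespace is a rough, inline equivalent of what ns.Do() arranges for
// you: pin the goroutine to its OS thread, remember the original
// namespace, switch, run the work, and switch back.
func inNamespace(target ns.NetNS, work func() error) error {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread() // ns.Do() never unlocks; see lead-in

	origNS, err := ns.GetCurrentNS()
	if err != nil {
		return err
	}
	defer origNS.Close()

	if err := target.Set(); err != nil {
		return err
	}
	defer origNS.Set() // deferred last, so it runs before the unlock

	return work()
}
```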
315
vendor/github.com/containernetworking/cni/pkg/ns/ns.go
generated
vendored
Normal file
@ -0,0 +1,315 @@
|
||||
// Copyright 2015 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ns
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type NetNS interface {
|
||||
// Executes the passed closure in this object's network namespace,
|
||||
// attempting to restore the original namespace before returning.
|
||||
// However, since each OS thread can have a different network namespace,
|
||||
// and Go's thread scheduling is highly variable, callers cannot
|
||||
// guarantee any specific namespace is set unless operations that
|
||||
// require that namespace are wrapped with Do(). Also, no code called
|
||||
// from Do() should call runtime.UnlockOSThread(), or the risk
|
||||
// of executing code in an incorrect namespace will be greater. See
|
||||
// https://github.com/golang/go/wiki/LockOSThread for further details.
|
||||
Do(toRun func(NetNS) error) error
|
||||
|
||||
// Sets the current network namespace to this object's network namespace.
|
||||
// Note that since Go's thread scheduling is highly variable, callers
|
||||
// cannot guarantee the requested namespace will be the current namespace
|
||||
// after this function is called; to ensure this wrap operations that
|
||||
// require the namespace with Do() instead.
|
||||
Set() error
|
||||
|
||||
// Returns the filesystem path representing this object's network namespace
|
||||
Path() string
|
||||
|
||||
// Returns a file descriptor representing this object's network namespace
|
||||
Fd() uintptr
|
||||
|
||||
// Cleans up this instance of the network namespace; if this instance
|
||||
// is the last user the namespace will be destroyed
|
||||
Close() error
|
||||
}
|
||||
|
||||
type netNS struct {
|
||||
file *os.File
|
||||
mounted bool
|
||||
closed bool
|
||||
}
|
||||
|
||||
func getCurrentThreadNetNSPath() string {
|
||||
// /proc/self/ns/net returns the namespace of the main thread, not
|
||||
// of whatever thread this goroutine is running on. Make sure we
|
||||
// use the thread's net namespace since the thread is switching around
|
||||
return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
|
||||
}
|
||||
|
||||
// Returns an object representing the current OS thread's network namespace
|
||||
func GetCurrentNS() (NetNS, error) {
|
||||
return GetNS(getCurrentThreadNetNSPath())
|
||||
}
|
||||
|
||||
const (
|
||||
// https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h
|
||||
NSFS_MAGIC = 0x6e736673
|
||||
PROCFS_MAGIC = 0x9fa0
|
||||
)
|
||||
|
||||
type NSPathNotExistErr struct{ msg string }
|
||||
|
||||
func (e NSPathNotExistErr) Error() string { return e.msg }
|
||||
|
||||
type NSPathNotNSErr struct{ msg string }
|
||||
|
||||
func (e NSPathNotNSErr) Error() string { return e.msg }
|
||||
|
||||
func IsNSorErr(nspath string) error {
|
||||
stat := syscall.Statfs_t{}
|
||||
if err := syscall.Statfs(nspath, &stat); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)}
|
||||
} else {
|
||||
err = fmt.Errorf("failed to Statfs %q: %v", nspath, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
switch stat.Type {
|
||||
case PROCFS_MAGIC:
|
||||
// Kernel < 3.19
|
||||
|
||||
validPathContent := "ns/"
|
||||
validName := strings.Contains(nspath, validPathContent)
|
||||
if !validName {
|
||||
return NSPathNotNSErr{msg: fmt.Sprintf("path %q doesn't contain %q", nspath, validPathContent)}
|
||||
}
|
||||
|
||||
return nil
|
||||
case NSFS_MAGIC:
|
||||
// Kernel >= 3.19
|
||||
|
||||
return nil
|
||||
default:
|
||||
return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)}
|
||||
}
|
||||
}
|
||||
|
||||
// Returns an object representing the namespace referred to by @path
|
||||
func GetNS(nspath string) (NetNS, error) {
|
||||
err := IsNSorErr(nspath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fd, err := os.Open(nspath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &netNS{file: fd}, nil
|
||||
}
|
||||
|
||||
// Creates a new persistent network namespace and returns an object
|
||||
// representing that namespace, without switching to it
|
||||
func NewNS() (NetNS, error) {
|
||||
const nsRunDir = "/var/run/netns"
|
||||
|
||||
b := make([]byte, 16)
|
||||
_, err := rand.Reader.Read(b)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate random netns name: %v", err)
|
||||
}
|
||||
|
||||
err = os.MkdirAll(nsRunDir, 0755)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create an empty file at the mount point
|
||||
nsName := fmt.Sprintf("cni-%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
|
||||
nsPath := path.Join(nsRunDir, nsName)
|
||||
mountPointFd, err := os.Create(nsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mountPointFd.Close()
|
||||
|
||||
// Ensure the mount point is cleaned up on errors; if the namespace
|
||||
// was successfully mounted this will have no effect because the file
|
||||
// is in-use
|
||||
defer os.RemoveAll(nsPath)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
// do namespace work in a dedicated goroutine, so that we can safely
|
||||
// Lock/Unlock OSThread without upsetting the lock/unlock state of
|
||||
// the caller of this function
|
||||
var fd *os.File
|
||||
go (func() {
|
||||
defer wg.Done()
|
||||
runtime.LockOSThread()
|
||||
|
||||
var origNS NetNS
|
||||
origNS, err = GetNS(getCurrentThreadNetNSPath())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer origNS.Close()
|
||||
|
||||
// create a new netns on the current thread
|
||||
err = unix.Unshare(unix.CLONE_NEWNET)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer origNS.Set()
|
||||
|
||||
// bind mount the new netns from the current thread onto the mount point
|
||||
err = unix.Mount(getCurrentThreadNetNSPath(), nsPath, "none", unix.MS_BIND, "")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fd, err = os.Open(nsPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
})()
|
||||
wg.Wait()
|
||||
|
||||
if err != nil {
|
||||
unix.Unmount(nsPath, unix.MNT_DETACH)
|
||||
return nil, fmt.Errorf("failed to create namespace: %v", err)
|
||||
}
|
||||
|
||||
return &netNS{file: fd, mounted: true}, nil
|
||||
}
|
||||
|
||||
func (ns *netNS) Path() string {
|
||||
return ns.file.Name()
|
||||
}
|
||||
|
||||
func (ns *netNS) Fd() uintptr {
|
||||
return ns.file.Fd()
|
||||
}
|
||||
|
||||
func (ns *netNS) errorIfClosed() error {
|
||||
if ns.closed {
|
||||
return fmt.Errorf("%q has already been closed", ns.file.Name())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ns *netNS) Close() error {
|
||||
if err := ns.errorIfClosed(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ns.file.Close(); err != nil {
|
||||
return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err)
|
||||
}
|
||||
ns.closed = true
|
||||
|
||||
if ns.mounted {
|
||||
if err := unix.Unmount(ns.file.Name(), unix.MNT_DETACH); err != nil {
|
||||
return fmt.Errorf("Failed to unmount namespace %s: %v", ns.file.Name(), err)
|
||||
}
|
||||
if err := os.RemoveAll(ns.file.Name()); err != nil {
|
||||
return fmt.Errorf("Failed to clean up namespace %s: %v", ns.file.Name(), err)
|
||||
}
|
||||
ns.mounted = false
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ns *netNS) Do(toRun func(NetNS) error) error {
|
||||
if err := ns.errorIfClosed(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
containedCall := func(hostNS NetNS) error {
|
||||
threadNS, err := GetNS(getCurrentThreadNetNSPath())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open current netns: %v", err)
|
||||
}
|
||||
defer threadNS.Close()
|
||||
|
||||
// switch to target namespace
|
||||
if err = ns.Set(); err != nil {
|
||||
return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
|
||||
}
|
||||
defer threadNS.Set() // switch back
|
||||
|
||||
return toRun(hostNS)
|
||||
}
|
||||
|
||||
// save a handle to current network namespace
|
||||
hostNS, err := GetNS(getCurrentThreadNetNSPath())
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to open current namespace: %v", err)
|
||||
}
|
||||
defer hostNS.Close()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
var innerError error
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
runtime.LockOSThread()
|
||||
innerError = containedCall(hostNS)
|
||||
}()
|
||||
wg.Wait()
|
||||
|
||||
return innerError
|
||||
}
|
||||
|
||||
func (ns *netNS) Set() error {
|
||||
if err := ns.errorIfClosed(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, _, err := unix.Syscall(unix.SYS_SETNS, ns.Fd(), uintptr(unix.CLONE_NEWNET), 0); err != 0 {
|
||||
return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithNetNSPath executes the passed closure under the given network
|
||||
// namespace, restoring the original namespace afterwards.
|
||||
func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
|
||||
ns, err := GetNS(nspath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ns.Close()
|
||||
return ns.Do(toRun)
|
||||
}
|
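Usage sketch (not part of this commit): most plugins only need WithNetNSPath, which wraps GetNS, Do and Close around a closure. The helper below is hypothetical.

```go
package example

import (
	"github.com/containernetworking/cni/pkg/ns"
	"github.com/vishvananda/netlink"
)

// bringUpLoopback is a hypothetical helper: it runs a small piece of
// netlink work inside the network namespace identified by nsPath
// (typically CNI_NETNS), letting WithNetNSPath handle GetNS/Do/Close.
func bringUpLoopback(nsPath string) error {
	return ns.WithNetNSPath(nsPath, func(_ ns.NetNS) error {
		lo, err := netlink.LinkByName("lo")
		if err != nil {
			return err
		}
		return netlink.LinkSetUp(lo)
	})
}
```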
161
vendor/github.com/containernetworking/cni/pkg/skel/skel.go
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
// Copyright 2014 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package skel provides skeleton code for a CNI plugin.
|
||||
// In particular, it implements argument parsing and validation.
|
||||
package skel
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/types"
|
||||
)
|
||||
|
||||
// CmdArgs captures all the arguments passed in to the plugin
|
||||
// via both env vars and stdin
|
||||
type CmdArgs struct {
|
||||
ContainerID string
|
||||
Netns string
|
||||
IfName string
|
||||
Args string
|
||||
Path string
|
||||
StdinData []byte
|
||||
}
|
||||
|
||||
type reqForCmdEntry map[string]bool
|
||||
|
||||
// PluginMain is the "main" for a plugin. It accepts
|
||||
// two callback functions for add and del commands.
|
||||
func PluginMain(cmdAdd, cmdDel func(_ *CmdArgs) error) {
|
||||
var cmd, contID, netns, ifName, args, path string
|
||||
|
||||
vars := []struct {
|
||||
name string
|
||||
val *string
|
||||
reqForCmd reqForCmdEntry
|
||||
}{
|
||||
{
|
||||
"CNI_COMMAND",
|
||||
&cmd,
|
||||
reqForCmdEntry{
|
||||
"ADD": true,
|
||||
"DEL": true,
|
||||
},
|
||||
},
|
||||
{
|
||||
"CNI_CONTAINERID",
|
||||
&contID,
|
||||
reqForCmdEntry{
|
||||
"ADD": false,
|
||||
"DEL": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
"CNI_NETNS",
|
||||
&netns,
|
||||
reqForCmdEntry{
|
||||
"ADD": true,
|
||||
"DEL": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
"CNI_IFNAME",
|
||||
&ifName,
|
||||
reqForCmdEntry{
|
||||
"ADD": true,
|
||||
"DEL": true,
|
||||
},
|
||||
},
|
||||
{
|
||||
"CNI_ARGS",
|
||||
&args,
|
||||
reqForCmdEntry{
|
||||
"ADD": false,
|
||||
"DEL": false,
|
||||
},
|
||||
},
|
||||
{
|
||||
"CNI_PATH",
|
||||
&path,
|
||||
reqForCmdEntry{
|
||||
"ADD": true,
|
||||
"DEL": true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
argsMissing := false
|
||||
for _, v := range vars {
|
||||
*v.val = os.Getenv(v.name)
|
||||
if v.reqForCmd[cmd] && *v.val == "" {
|
||||
log.Printf("%v env variable missing", v.name)
|
||||
argsMissing = true
|
||||
}
|
||||
}
|
||||
|
||||
if argsMissing {
|
||||
dieMsg("required env variables missing")
|
||||
}
|
||||
|
||||
stdinData, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
dieMsg("error reading from stdin: %v", err)
|
||||
}
|
||||
|
||||
cmdArgs := &CmdArgs{
|
||||
ContainerID: contID,
|
||||
Netns: netns,
|
||||
IfName: ifName,
|
||||
Args: args,
|
||||
Path: path,
|
||||
StdinData: stdinData,
|
||||
}
|
||||
|
||||
switch cmd {
|
||||
case "ADD":
|
||||
err = cmdAdd(cmdArgs)
|
||||
|
||||
case "DEL":
|
||||
err = cmdDel(cmdArgs)
|
||||
|
||||
default:
|
||||
dieMsg("unknown CNI_COMMAND: %v", cmd)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if e, ok := err.(*types.Error); ok {
|
||||
// don't wrap Error in Error
|
||||
dieErr(e)
|
||||
}
|
||||
dieMsg(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func dieMsg(f string, args ...interface{}) {
|
||||
e := &types.Error{
|
||||
Code: 100,
|
||||
Msg: fmt.Sprintf(f, args...),
|
||||
}
|
||||
dieErr(e)
|
||||
}
|
||||
|
||||
func dieErr(e *types.Error) {
|
||||
if err := e.Print(); err != nil {
|
||||
log.Print("Error writing error JSON to stdout: ", err)
|
||||
}
|
||||
os.Exit(1)
|
||||
}
|
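Usage sketch (not part of this commit): a minimal plugin wires its ADD/DEL callbacks into PluginMain like this; the callback bodies are placeholders.

```go
package main

import (
	"github.com/containernetworking/cni/pkg/skel"
	"github.com/containernetworking/cni/pkg/types"
)

// cmdAdd would normally create the interface, run IPAM and fill in a
// types.Result; here it just prints an empty result as a placeholder.
func cmdAdd(args *skel.CmdArgs) error {
	return (&types.Result{}).Print()
}

// cmdDel would normally release the IPAM lease and delete the interface.
func cmdDel(args *skel.CmdArgs) error {
	return nil
}

func main() {
	// skel parses CNI_COMMAND, CNI_NETNS, etc. and dispatches to the
	// callbacks above.
	skel.PluginMain(cmdAdd, cmdDel)
}
```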
77
vendor/github.com/containernetworking/cni/pkg/testutils/cmd.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
// Copyright 2016 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package testutils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/types"
|
||||
)
|
||||
|
||||
func envCleanup() {
|
||||
os.Unsetenv("CNI_COMMAND")
|
||||
os.Unsetenv("CNI_PATH")
|
||||
os.Unsetenv("CNI_NETNS")
|
||||
os.Unsetenv("CNI_IFNAME")
|
||||
}
|
||||
|
||||
func CmdAddWithResult(cniNetns, cniIfname string, f func() error) (*types.Result, error) {
|
||||
os.Setenv("CNI_COMMAND", "ADD")
|
||||
os.Setenv("CNI_PATH", os.Getenv("PATH"))
|
||||
os.Setenv("CNI_NETNS", cniNetns)
|
||||
os.Setenv("CNI_IFNAME", cniIfname)
|
||||
defer envCleanup()
|
||||
|
||||
// Redirect stdout to capture plugin result
|
||||
oldStdout := os.Stdout
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
os.Stdout = w
|
||||
err = f()
|
||||
w.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// parse the result
|
||||
out, err := ioutil.ReadAll(r)
|
||||
os.Stdout = oldStdout
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := types.Result{}
|
||||
err = json.Unmarshal(out, &result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func CmdDelWithResult(cniNetns, cniIfname string, f func() error) error {
|
||||
os.Setenv("CNI_COMMAND", "DEL")
|
||||
os.Setenv("CNI_PATH", os.Getenv("PATH"))
|
||||
os.Setenv("CNI_NETNS", cniNetns)
|
||||
os.Setenv("CNI_IFNAME", cniIfname)
|
||||
defer envCleanup()
|
||||
|
||||
return f()
|
||||
}
|
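Usage sketch (not part of this commit): a plugin's tests typically combine NewNS with CmdAddWithResult to run cmdAdd under fake CNI environment variables and capture the printed result. The helper below is hypothetical and skips assertions.

```go
package example

import (
	"github.com/containernetworking/cni/pkg/ns"
	"github.com/containernetworking/cni/pkg/skel"
	"github.com/containernetworking/cni/pkg/testutils"
	"github.com/containernetworking/cni/pkg/types"
)

// testAdd drives a plugin's cmdAdd the way its tests typically do: create
// a scratch namespace, build a fake CmdArgs, and let CmdAddWithResult set
// the CNI env vars and capture the printed types.Result.
func testAdd(cmdAdd func(*skel.CmdArgs) error, conf []byte) (*types.Result, error) {
	targetNs, err := ns.NewNS()
	if err != nil {
		return nil, err
	}
	defer targetNs.Close()

	args := &skel.CmdArgs{
		ContainerID: "dummy",
		Netns:       targetNs.Path(),
		IfName:      "eth0",
		StdinData:   conf,
	}

	return testutils.CmdAddWithResult(targetNs.Path(), args.IfName, func() error {
		return cmdAdd(args)
	})
}
```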
101
vendor/github.com/containernetworking/cni/pkg/types/args.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
||||
// Copyright 2015 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// UnmarshallableBool typedef for builtin bool
|
||||
// because builtin type's methods can't be declared
|
||||
type UnmarshallableBool bool
|
||||
|
||||
// UnmarshalText implements the encoding.TextUnmarshaler interface.
|
||||
// Returns boolean true if the string is "1" or "[Tt]rue"
|
||||
// Returns boolean false if the string is "0" or "[Ff]alse"
|
||||
func (b *UnmarshallableBool) UnmarshalText(data []byte) error {
|
||||
s := strings.ToLower(string(data))
|
||||
switch s {
|
||||
case "1", "true":
|
||||
*b = true
|
||||
case "0", "false":
|
||||
*b = false
|
||||
default:
|
||||
return fmt.Errorf("Boolean unmarshal error: invalid input %s", s)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshallableString typedef for builtin string
|
||||
type UnmarshallableString string
|
||||
|
||||
// UnmarshalText implements the encoding.TextUnmarshaler interface.
|
||||
// Returns the string
|
||||
func (s *UnmarshallableString) UnmarshalText(data []byte) error {
|
||||
*s = UnmarshallableString(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommonArgs contains the IgnoreUnknown argument
|
||||
// and must be embedded by all Arg structs
|
||||
type CommonArgs struct {
|
||||
IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"`
|
||||
}
|
||||
|
||||
// GetKeyField is a helper function that retrieves the named field from
|
||||
// a reflect.Value that represents a pointer to a struct
|
||||
func GetKeyField(keyString string, v reflect.Value) reflect.Value {
|
||||
return v.Elem().FieldByName(keyString)
|
||||
}
|
||||
|
||||
// LoadArgs parses args from a string in the form "K=V;K2=V2;..."
|
||||
func LoadArgs(args string, container interface{}) error {
|
||||
if args == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
containerValue := reflect.ValueOf(container)
|
||||
|
||||
pairs := strings.Split(args, ";")
|
||||
unknownArgs := []string{}
|
||||
for _, pair := range pairs {
|
||||
kv := strings.Split(pair, "=")
|
||||
if len(kv) != 2 {
|
||||
return fmt.Errorf("ARGS: invalid pair %q", pair)
|
||||
}
|
||||
keyString := kv[0]
|
||||
valueString := kv[1]
|
||||
keyField := GetKeyField(keyString, containerValue)
|
||||
if !keyField.IsValid() {
|
||||
unknownArgs = append(unknownArgs, pair)
|
||||
continue
|
||||
}
|
||||
|
||||
u := keyField.Addr().Interface().(encoding.TextUnmarshaler)
|
||||
err := u.UnmarshalText([]byte(valueString))
|
||||
if err != nil {
|
||||
return fmt.Errorf("ARGS: error parsing value of pair %q: %v)", pair, err)
|
||||
}
|
||||
}
|
||||
|
||||
isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool()
|
||||
if len(unknownArgs) > 0 && !isIgnoreUnknown {
|
||||
return fmt.Errorf("ARGS: unknown args %q", unknownArgs)
|
||||
}
|
||||
return nil
|
||||
}
|
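Usage sketch (not part of this commit): a plugin defines its own CNI_ARGS struct by embedding CommonArgs and using the Unmarshallable* typedefs; the struct and key names below are hypothetical.

```go
package example

import (
	"github.com/containernetworking/cni/pkg/types"
)

// EnvArgs is a hypothetical per-plugin CNI_ARGS struct. Field names must
// match the keys in the "K=V;K2=V2" string, and each field type has to
// implement encoding.TextUnmarshaler, hence the Unmarshallable* typedefs.
type EnvArgs struct {
	types.CommonArgs
	MAC types.UnmarshallableString
}

// parseArgs parses something like "IgnoreUnknown=1;MAC=66:77:88:99:aa:bb".
func parseArgs(cniArgs string) (*EnvArgs, error) {
	e := &EnvArgs{}
	if err := types.LoadArgs(cniArgs, e); err != nil {
		return nil, err
	}
	return e, nil
}
```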
191
vendor/github.com/containernetworking/cni/pkg/types/types.go
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
// Copyright 2015 CNI authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
)
|
||||
|
||||
// like net.IPNet but adds JSON marshalling and unmarshalling
|
||||
type IPNet net.IPNet
|
||||
|
||||
// ParseCIDR takes a string like "10.2.3.1/24" and
|
||||
// returns a *net.IPNet with IP "10.2.3.1" and a /24 mask
|
||||
func ParseCIDR(s string) (*net.IPNet, error) {
|
||||
ip, ipn, err := net.ParseCIDR(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ipn.IP = ip
|
||||
return ipn, nil
|
||||
}
|
||||
|
||||
func (n IPNet) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal((*net.IPNet)(&n).String())
|
||||
}
|
||||
|
||||
func (n *IPNet) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tmp, err := ParseCIDR(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*n = IPNet(*tmp)
|
||||
return nil
|
||||
}
|
||||
|
||||
// NetConf describes a network.
|
||||
type NetConf struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Type string `json:"type,omitempty"`
|
||||
IPAM struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
} `json:"ipam,omitempty"`
|
||||
DNS DNS `json:"dns"`
|
||||
}
|
||||
|
||||
// Result is what gets returned from the plugin (via stdout) to the caller
|
||||
type Result struct {
|
||||
IP4 *IPConfig `json:"ip4,omitempty"`
|
||||
IP6 *IPConfig `json:"ip6,omitempty"`
|
||||
DNS DNS `json:"dns,omitempty"`
|
||||
}
|
||||
|
||||
func (r *Result) Print() error {
|
||||
return prettyPrint(r)
|
||||
}
|
||||
|
||||
// String returns a formatted string in the form of "[IP4: $1,][ IP6: $2,] DNS: $3" where
|
||||
// $1 represents the receiver's IPv4, $2 represents the receiver's IPv6 and $3 the
|
||||
// receiver's DNS. If $1 or $2 are nil, they won't be present in the returned string.
|
||||
func (r *Result) String() string {
|
||||
var str string
|
||||
if r.IP4 != nil {
|
||||
str = fmt.Sprintf("IP4:%+v, ", *r.IP4)
|
||||
}
|
||||
if r.IP6 != nil {
|
||||
str += fmt.Sprintf("IP6:%+v, ", *r.IP6)
|
||||
}
|
||||
return fmt.Sprintf("%sDNS:%+v", str, r.DNS)
|
||||
}
|
||||
|
||||
// IPConfig contains values necessary to configure an interface
|
||||
type IPConfig struct {
|
||||
IP net.IPNet
|
||||
Gateway net.IP
|
||||
Routes []Route
|
||||
}
|
||||
|
||||
// DNS contains values interesting for DNS resolvers
|
||||
type DNS struct {
|
||||
Nameservers []string `json:"nameservers,omitempty"`
|
||||
Domain string `json:"domain,omitempty"`
|
||||
Search []string `json:"search,omitempty"`
|
||||
Options []string `json:"options,omitempty"`
|
||||
}
|
||||
|
||||
type Route struct {
|
||||
Dst net.IPNet
|
||||
GW net.IP
|
||||
}
|
||||
|
||||
type Error struct {
|
||||
Code uint `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
Details string `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return e.Msg
|
||||
}
|
||||
|
||||
func (e *Error) Print() error {
|
||||
return prettyPrint(e)
|
||||
}
|
||||
|
||||
// net.IPNet is not JSON (un)marshallable so this duality is needed
|
||||
// for our custom IPNet type
|
||||
|
||||
// JSON (un)marshallable types
|
||||
type ipConfig struct {
|
||||
IP IPNet `json:"ip"`
|
||||
Gateway net.IP `json:"gateway,omitempty"`
|
||||
Routes []Route `json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
type route struct {
|
||||
Dst IPNet `json:"dst"`
|
||||
GW net.IP `json:"gw,omitempty"`
|
||||
}
|
||||
|
||||
func (c *IPConfig) MarshalJSON() ([]byte, error) {
|
||||
ipc := ipConfig{
|
||||
IP: IPNet(c.IP),
|
||||
Gateway: c.Gateway,
|
||||
Routes: c.Routes,
|
||||
}
|
||||
|
||||
return json.Marshal(ipc)
|
||||
}
|
||||
|
||||
func (c *IPConfig) UnmarshalJSON(data []byte) error {
|
||||
ipc := ipConfig{}
|
||||
if err := json.Unmarshal(data, &ipc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.IP = net.IPNet(ipc.IP)
|
||||
c.Gateway = ipc.Gateway
|
||||
c.Routes = ipc.Routes
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Route) UnmarshalJSON(data []byte) error {
|
||||
rt := route{}
|
||||
if err := json.Unmarshal(data, &rt); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.Dst = net.IPNet(rt.Dst)
|
||||
r.GW = rt.GW
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Route) MarshalJSON() ([]byte, error) {
|
||||
rt := route{
|
||||
Dst: IPNet(r.Dst),
|
||||
GW: r.GW,
|
||||
}
|
||||
|
||||
return json.Marshal(rt)
|
||||
}
|
||||
|
||||
func prettyPrint(obj interface{}) error {
|
||||
data, err := json.MarshalIndent(obj, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = os.Stdout.Write(data)
|
||||
return err
|
||||
}
|
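Usage sketch (not part of this commit): a plugin's cmdAdd ends by printing a Result like the one assembled below (addresses are placeholders); calling Print() writes the indented JSON to stdout for the runtime.

```go
package example

import (
	"net"

	"github.com/containernetworking/cni/pkg/types"
)

// buildResult assembles the structure a plugin prints to stdout on ADD.
// The addresses here are placeholders.
func buildResult() (*types.Result, error) {
	ipn, err := types.ParseCIDR("10.1.2.3/24")
	if err != nil {
		return nil, err
	}

	return &types.Result{
		IP4: &types.IPConfig{
			IP:      *ipn,
			Gateway: net.ParseIP("10.1.2.1"),
			Routes: []types.Route{
				// 0.0.0.0/0 via the gateway above (GW nil falls back to Gateway).
				{Dst: net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}},
			},
		},
		DNS: types.DNS{Nameservers: []string{"8.8.8.8"}},
	}, nil
}
```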
191
vendor/github.com/coreos/go-iptables/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
300
vendor/github.com/coreos/go-iptables/iptables/iptables.go
generated
vendored
Normal file
@ -0,0 +1,300 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package iptables
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Adds the output of stderr to exec.ExitError
|
||||
type Error struct {
|
||||
exec.ExitError
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *Error) ExitStatus() int {
|
||||
return e.Sys().(syscall.WaitStatus).ExitStatus()
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("exit status %v: %v", e.ExitStatus(), e.msg)
|
||||
}
|
||||
|
||||
type IPTables struct {
|
||||
path string
|
||||
hasCheck bool
|
||||
hasWait bool
|
||||
|
||||
fmu *fileLock
|
||||
}
|
||||
|
||||
func New() (*IPTables, error) {
|
||||
path, err := exec.LookPath("iptables")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
checkPresent, waitPresent, err := getIptablesCommandSupport()
|
||||
if err != nil {
|
||||
log.Printf("Error checking iptables version, assuming version at least 1.4.20: %v", err)
|
||||
checkPresent = true
|
||||
waitPresent = true
|
||||
}
|
||||
ipt := IPTables{
|
||||
path: path,
|
||||
hasCheck: checkPresent,
|
||||
hasWait: waitPresent,
|
||||
}
|
||||
if !waitPresent {
|
||||
ipt.fmu, err = newXtablesFileLock()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &ipt, nil
|
||||
}
|
||||
|
||||
// Exists checks if given rulespec in specified table/chain exists
|
||||
func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) {
|
||||
if !ipt.hasCheck {
|
||||
return ipt.existsForOldIptables(table, chain, rulespec)
|
||||
|
||||
}
|
||||
cmd := append([]string{"-t", table, "-C", chain}, rulespec...)
|
||||
err := ipt.run(cmd...)
|
||||
switch {
|
||||
case err == nil:
|
||||
return true, nil
|
||||
case err.(*Error).ExitStatus() == 1:
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
// Insert inserts rulespec to specified table/chain (in specified pos)
|
||||
func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error {
|
||||
cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...)
|
||||
return ipt.run(cmd...)
|
||||
}
|
||||
|
||||
// Append appends rulespec to specified table/chain
|
||||
func (ipt *IPTables) Append(table, chain string, rulespec ...string) error {
|
||||
cmd := append([]string{"-t", table, "-A", chain}, rulespec...)
|
||||
return ipt.run(cmd...)
|
||||
}
|
||||
|
||||
// AppendUnique acts like Append except that it won't add a duplicate
|
||||
func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error {
|
||||
exists, err := ipt.Exists(table, chain, rulespec...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return ipt.Append(table, chain, rulespec...)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes rulespec in specified table/chain
|
||||
func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error {
|
||||
cmd := append([]string{"-t", table, "-D", chain}, rulespec...)
|
||||
return ipt.run(cmd...)
|
||||
}
|
||||
|
||||
// List rules in specified table/chain
|
||||
func (ipt *IPTables) List(table, chain string) ([]string, error) {
|
||||
args := []string{"-t", table, "-S", chain}
|
||||
var stdout bytes.Buffer
|
||||
if err := ipt.runWithOutput(args, &stdout); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rules := strings.Split(stdout.String(), "\n")
|
||||
if len(rules) > 0 && rules[len(rules)-1] == "" {
|
||||
rules = rules[:len(rules)-1]
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (ipt *IPTables) NewChain(table, chain string) error {
|
||||
return ipt.run("-t", table, "-N", chain)
|
||||
}
|
||||
|
||||
// ClearChain flushes (deletes all rules in) the specified table/chain.
|
||||
// If the chain does not exist, a new one will be created
|
||||
func (ipt *IPTables) ClearChain(table, chain string) error {
|
||||
err := ipt.NewChain(table, chain)
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return nil
|
||||
case err.(*Error).ExitStatus() == 1:
|
||||
// chain already exists. Flush (clear) it.
|
||||
return ipt.run("-t", table, "-F", chain)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// RenameChain renames the old chain to the new one.
|
||||
func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error {
|
||||
return ipt.run("-t", table, "-E", oldChain, newChain)
|
||||
}
|
||||
|
||||
// DeleteChain deletes the chain in the specified table.
|
||||
// The chain must be empty
|
||||
func (ipt *IPTables) DeleteChain(table, chain string) error {
|
||||
return ipt.run("-t", table, "-X", chain)
|
||||
}
|
||||
|
||||
// run runs an iptables command with the given arguments, ignoring
|
||||
// any stdout output
|
||||
func (ipt *IPTables) run(args ...string) error {
|
||||
return ipt.runWithOutput(args, nil)
|
||||
}
|
||||
|
||||
// runWithOutput runs an iptables command with the given arguments,
|
||||
// writing any stdout output to the given writer
|
||||
func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error {
|
||||
args = append([]string{ipt.path}, args...)
|
||||
if ipt.hasWait {
|
||||
args = append(args, "--wait")
|
||||
} else {
|
||||
ul, err := ipt.fmu.tryLock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ul.Unlock()
|
||||
}
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd := exec.Cmd{
|
||||
Path: ipt.path,
|
||||
Args: args,
|
||||
Stdout: stdout,
|
||||
Stderr: &stderr,
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return &Error{*(err.(*exec.ExitError)), stderr.String()}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Checks if iptables has the "-C" and "--wait" flags
|
||||
func getIptablesCommandSupport() (bool, bool, error) {
|
||||
vstring, err := getIptablesVersionString()
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
v1, v2, v3, err := extractIptablesVersion(vstring)
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), nil
|
||||
}
|
||||
|
||||
// extractIptablesVersion returns the first three components of the iptables version.
|
||||
// e.g. "iptables v1.3.66" would return (1, 3, 66, nil)
|
||||
func extractIptablesVersion(str string) (int, int, int, error) {
|
||||
versionMatcher := regexp.MustCompile("v([0-9]+)\\.([0-9]+)\\.([0-9]+)")
|
||||
result := versionMatcher.FindStringSubmatch(str)
|
||||
if result == nil {
|
||||
return 0, 0, 0, fmt.Errorf("no iptables version found in string: %s", str)
|
||||
}
|
||||
|
||||
v1, err := strconv.Atoi(result[1])
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
v2, err := strconv.Atoi(result[2])
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
v3, err := strconv.Atoi(result[3])
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
return v1, v2, v3, nil
|
||||
}
|
||||
|
||||
// Runs "iptables --version" to get the version string
|
||||
func getIptablesVersionString() (string, error) {
|
||||
cmd := exec.Command("iptables", "--version")
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return out.String(), nil
|
||||
}
|
||||
|
||||
// Checks if an iptables version is after 1.4.11, when --check was added
|
||||
func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool {
|
||||
if v1 > 1 {
|
||||
return true
|
||||
}
|
||||
if v1 == 1 && v2 > 4 {
|
||||
return true
|
||||
}
|
||||
if v1 == 1 && v2 == 4 && v3 >= 11 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks if an iptables version is after 1.4.20, when --wait was added
|
||||
func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool {
|
||||
if v1 > 1 {
|
||||
return true
|
||||
}
|
||||
if v1 == 1 && v2 > 4 {
|
||||
return true
|
||||
}
|
||||
if v1 == 1 && v2 == 4 && v3 >= 20 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks if a rule specification exists for a table
|
||||
func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) {
|
||||
rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ")
|
||||
args := []string{"-t", table, "-S"}
|
||||
var stdout bytes.Buffer
|
||||
err := ipt.runWithOutput(args, &stdout)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return strings.Contains(stdout.String(), rs), nil
|
||||
}
|
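A minimal usage sketch of the wrapper above. It assumes the package's `New()` constructor (defined earlier in this file, not shown in this hunk), a Linux host with `iptables` installed, and sufficient privileges; the chain name and rule are purely illustrative:

```golang
package main

import (
	"fmt"
	"log"

	"github.com/coreos/go-iptables/iptables"
)

func main() {
	// New() probes the iptables binary for -C / --wait support (see
	// getIptablesCommandSupport above); assumed from earlier in this file.
	ipt, err := iptables.New()
	if err != nil {
		log.Fatalf("initializing iptables wrapper: %v", err)
	}

	// ClearChain creates the chain if it is missing, or flushes it if it exists.
	if err := ipt.ClearChain("filter", "EXAMPLE-CHAIN"); err != nil {
		log.Fatalf("ClearChain: %v", err)
	}

	// AppendUnique only appends when the rule is not already present,
	// so repeated runs do not pile up duplicates.
	rule := []string{"-p", "tcp", "--dport", "8080", "-j", "ACCEPT"}
	if err := ipt.AppendUnique("filter", "EXAMPLE-CHAIN", rule...); err != nil {
		log.Fatalf("AppendUnique: %v", err)
	}

	// Exists uses -C on modern iptables and falls back to parsing -S output
	// (existsForOldIptables) on binaries older than 1.4.11.
	ok, err := ipt.Exists("filter", "EXAMPLE-CHAIN", rule...)
	if err != nil {
		log.Fatalf("Exists: %v", err)
	}
	fmt.Println("rule present:", ok)

	// List returns the chain's rules in `iptables -S` format.
	rules, err := ipt.List("filter", "EXAMPLE-CHAIN")
	if err != nil {
		log.Fatalf("List: %v", err)
	}
	for _, r := range rules {
		fmt.Println(r)
	}
}
```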
84
vendor/github.com/coreos/go-iptables/iptables/lock.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
// Copyright 2015 CoreOS, Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package iptables
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// In earlier versions of iptables, the xtables lock was implemented
|
||||
// via a Unix socket, but now flock is used via this lockfile:
|
||||
// http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707
|
||||
// Note the LSB-conforming "/run" directory does not exist on old
|
||||
// distributions, so assume "/var" is symlinked
|
||||
xtablesLockFilePath = "/var/run/xtables.lock"
|
||||
|
||||
defaultFilePerm = 0600
|
||||
)
|
||||
|
||||
type Unlocker interface {
|
||||
Unlock() error
|
||||
}
|
||||
|
||||
type nopUnlocker struct{}
|
||||
|
||||
func (_ nopUnlocker) Unlock() error { return nil }
|
||||
|
||||
type fileLock struct {
|
||||
// mu is used to protect against concurrent invocations from within this process
|
||||
mu sync.Mutex
|
||||
fd int
|
||||
}
|
||||
|
||||
// tryLock takes an exclusive lock on the xtables lock file without blocking.
|
||||
// This is best-effort only: if the exclusive lock would block (i.e. because
|
||||
// another process already holds it), no error is returned. Otherwise, any
|
||||
// error encountered during the locking operation is returned.
|
||||
// The returned Unlocker should be used to release the lock when the caller is
|
||||
// done invoking iptables commands.
|
||||
func (l *fileLock) tryLock() (Unlocker, error) {
|
||||
l.mu.Lock()
|
||||
err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
|
||||
switch err {
|
||||
case syscall.EWOULDBLOCK:
|
||||
l.mu.Unlock()
|
||||
return nopUnlocker{}, nil
|
||||
case nil:
|
||||
return l, nil
|
||||
default:
|
||||
l.mu.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock closes the underlying file, which implicitly unlocks it as well. It
|
||||
// also unlocks the associated mutex.
|
||||
func (l *fileLock) Unlock() error {
|
||||
defer l.mu.Unlock()
|
||||
return syscall.Close(l.fd)
|
||||
}
|
||||
|
||||
// newXtablesFileLock opens a new lock on the xtables lockfile without
|
||||
// acquiring the lock
|
||||
func newXtablesFileLock() (*fileLock, error) {
|
||||
fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fileLock{fd: fd}, nil
|
||||
}
|
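For illustration, a standalone sketch of the flock-based exclusion that `runWithOutput` falls back to when the installed `iptables` lacks `--wait`. The path and permissions mirror the constants in this file; this is Linux-only, and creating the lock file typically requires root:

```golang
package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
)

func main() {
	// Same lock file and mode as xtablesLockFilePath / defaultFilePerm above.
	fd, err := syscall.Open("/var/run/xtables.lock", os.O_CREATE, 0600)
	if err != nil {
		log.Fatalf("opening xtables lock file: %v", err)
	}
	// Closing the descriptor implicitly releases the flock, as in Unlock().
	defer syscall.Close(fd)

	// LOCK_NB makes the attempt non-blocking: EWOULDBLOCK means another
	// process holds the lock, which tryLock() treats as "proceed anyway".
	if err := syscall.Flock(fd, syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
		if err == syscall.EWOULDBLOCK {
			fmt.Println("xtables lock held by another process; proceeding best-effort")
			return
		}
		log.Fatalf("flock: %v", err)
	}
	fmt.Println("acquired xtables lock; safe to invoke iptables without --wait")
}
```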
4
vendor/github.com/onsi/ginkgo/.gitignore
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
.DS_Store
|
||||
TODO
|
||||
tmp/**/*
|
||||
*.coverprofile
|
15
vendor/github.com/onsi/ginkgo/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
- 1.5
|
||||
- tip
|
||||
|
||||
install:
|
||||
- go get -v -t ./...
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/onsi/gomega
|
||||
- go install github.com/onsi/ginkgo/ginkgo
|
||||
- export PATH=$PATH:$HOME/gopath/bin
|
||||
|
||||
script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace
|
136
vendor/github.com/onsi/ginkgo/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,136 @@
|
||||
## HEAD
|
||||
|
||||
Improvements:
|
||||
|
||||
- `Skip(message)` can be used to skip the current test.
|
||||
- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- Ginkgo tests now fail when you `panic(nil)` (#167)
|
||||
|
||||
## 1.2.0 5/31/2015
|
||||
|
||||
Improvements
|
||||
|
||||
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
|
||||
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
|
||||
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
|
||||
|
||||
## 1.2.0-beta
|
||||
|
||||
Ginkgo now requires Go 1.4+
|
||||
|
||||
Improvements:
|
||||
|
||||
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
|
||||
- Improved focus behavior. Now, this:
|
||||
|
||||
```golang
|
||||
FDescribe("Some describe", func() {
|
||||
It("A", func() {})
|
||||
|
||||
FIt("B", func() {})
|
||||
})
|
||||
```
|
||||
|
||||
will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
|
||||
- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
|
||||
- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
|
||||
- Improved output when an error occurs in a setup or teardown block.
|
||||
- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
|
||||
- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
|
||||
- Add support for precompiled tests:
|
||||
- `ginkgo build <path-to-package>` will now compile the package, producing a file named `package.test`
|
||||
- The compiled `package.test` file can be run directly. This runs the tests in series.
|
||||
- To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
|
||||
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
|
||||
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
|
||||
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
|
||||
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
|
||||
- `ginkgo -notify` now works on Linux
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
|
||||
- Fix tempfile leak when running in parallel
|
||||
- Fix incorrect failure message when a panic occurs during a parallel test run
|
||||
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
|
||||
- Be more consistent about handling SIGTERM as well as SIGINT
|
||||
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
|
||||
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
|
||||
|
||||
## 1.1.0 (8/2/2014)
|
||||
|
||||
No changes, just dropping the beta.
|
||||
|
||||
## 1.1.0-beta (7/22/2014)
|
||||
New Features:
|
||||
|
||||
- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
|
||||
- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with a non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
|
||||
- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
|
||||
- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
|
||||
- `ginkgo --failFast` aborts the test suite after the first failure.
|
||||
- `ginkgo generate file_1 file_2` can take multiple file arguments.
|
||||
- Ginkgo now summarizes, at the end of the test run, any spec failures that occurred.
|
||||
- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
|
||||
|
||||
Improvements:
|
||||
|
||||
- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
|
||||
- `ginkgo --untilItFails` no longer recompiles between attempts.
|
||||
- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
|
||||
- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
|
||||
|
||||
## 1.0.0 (5/24/2014)
|
||||
New Features:
|
||||
|
||||
- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
|
||||
|
||||
Improvements:
|
||||
|
||||
- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
|
||||
- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
|
||||
|
||||
Bug Fixes:
|
||||
|
||||
- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
|
||||
- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
|
||||
- Fix all remaining race conditions in Ginkgo's test suite.
|
||||
|
||||
## 1.0.0-beta (4/14/2014)
|
||||
Breaking changes:
|
||||
|
||||
- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
|
||||
- Modified the Reporter interface
|
||||
- `watch` is now a subcommand, not a flag.
|
||||
|
||||
DSL changes:
|
||||
|
||||
- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
|
||||
- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
|
||||
- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
|
||||
|
||||
CLI changes:
|
||||
|
||||
- `watch` is now a subcommand, not a flag
|
||||
- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
|
||||
- Additional arguments can be passed to specs. Pass them after the `--` separator
|
||||
- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
|
||||
- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
|
||||
|
||||
Misc:
|
||||
|
||||
- Start using semantic versioning
|
||||
- Start maintaining changelog
|
||||
|
||||
Major refactor:
|
||||
|
||||
- Pull out Ginkgo's internal to `internal`
|
||||
- Rename `example` everywhere to `spec`
|
||||
- Much more!
|
20
vendor/github.com/onsi/ginkgo/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
Copyright (c) 2013-2014 Onsi Fakhouri
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
115
vendor/github.com/onsi/ginkgo/README.md
generated
vendored
Normal file
@ -0,0 +1,115 @@
|
||||

|
||||
|
||||
[](https://travis-ci.org/onsi/ginkgo)
|
||||
|
||||
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
|
||||
|
||||
To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).
|
||||
|
||||
## Feature List
|
||||
|
||||
- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
|
||||
|
||||
- Structure your BDD-style tests expressively:
|
||||
- Nestable [`Describe` and `Context` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
|
||||
- [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
|
||||
- [`It` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
|
||||
- [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
|
||||
- [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
|
||||
|
||||
- A comprehensive test runner that lets you:
|
||||
- Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
|
||||
- [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
|
||||
- Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
|
||||
- Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
|
||||
|
||||
- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
|
||||
- `ginkgo -nodes=N` runs your tests in `N` parallel processes and prints out coherent output in real time
|
||||
- `ginkgo -cover` runs your tests using Golang's code coverage tool
|
||||
- `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
|
||||
- `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
|
||||
- `ginkgo -r` runs all tests suites under the current directory
|
||||
- `ginkgo -v` prints out identifying information for each test just before it runs
|
||||
|
||||
And much more: run `ginkgo help` for details!
|
||||
|
||||
The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
|
||||
|
||||
- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
|
||||
|
||||
- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
|
||||
|
||||
- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
|
||||
|
||||
- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
|
||||
|
||||
- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
|
||||
|
||||
- A modular architecture that lets you easily:
|
||||
- Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
|
||||
- [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
|
||||
|
||||
## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
|
||||
|
||||
Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
|
||||
|
||||
## [Agouti](http://github.com/sclevine/agouti): A Golang Acceptance Testing Framework
|
||||
|
||||
Agouti allows you to run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
|
||||
|
||||
## Set Me Up!
|
||||
|
||||
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
|
||||
|
||||
```bash
|
||||
|
||||
go get github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
|
||||
go get github.com/onsi/gomega # fetches the matcher library
|
||||
|
||||
cd path/to/package/you/want/to/test
|
||||
|
||||
ginkgo bootstrap # set up a new ginkgo suite
|
||||
ginkgo generate # will create a sample test file. edit this file and add your tests then...
|
||||
|
||||
go test # to run your tests
|
||||
|
||||
ginkgo # also runs your tests
|
||||
|
||||
```
|
||||
|
||||
## I'm new to Go: What are my testing options?
|
||||
|
||||
Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
|
||||
|
||||
With that said, it's great to know what your options are :)
|
||||
|
||||
### What Golang gives you out of the box
|
||||
|
||||
Testing is a first-class citizen in Golang; however, Go's built-in testing primitives are somewhat limited: the [testing](http://golang.org/pkg/testing) package provides basic XUnit-style tests and no assertion library.
|
||||
|
||||
### Matcher libraries for Golang's XUnit style tests
|
||||
|
||||
A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
|
||||
|
||||
- [testify](https://github.com/stretchr/testify)
|
||||
- [gocheck](http://labix.org/gocheck)
|
||||
|
||||
You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
|
||||
|
||||
### BDD style testing frameworks
|
||||
|
||||
There are a handful of BDD-style testing frameworks written for Golang. Here are a few:
|
||||
|
||||
- [Ginkgo](https://github.com/onsi/ginkgo) ;)
|
||||
- [GoConvey](https://github.com/smartystreets/goconvey)
|
||||
- [Goblin](https://github.com/franela/goblin)
|
||||
- [Mao](https://github.com/azer/mao)
|
||||
- [Zen](https://github.com/pranavraja/zen)
|
||||
|
||||
Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of golang testing libraries.
|
||||
|
||||
Go explore!
|
||||
|
||||
## License
|
||||
|
||||
Ginkgo is MIT-Licensed
|
170
vendor/github.com/onsi/ginkgo/config/config.go
generated
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
/*
|
||||
Ginkgo accepts a number of configuration options.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
|
||||
|
||||
You can also learn more via
|
||||
|
||||
ginkgo help
|
||||
|
||||
or (I kid you not):
|
||||
|
||||
go test -asdf
|
||||
*/
|
||||
package config
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const VERSION = "1.2.0"
|
||||
|
||||
type GinkgoConfigType struct {
|
||||
RandomSeed int64
|
||||
RandomizeAllSpecs bool
|
||||
FocusString string
|
||||
SkipString string
|
||||
SkipMeasurements bool
|
||||
FailOnPending bool
|
||||
FailFast bool
|
||||
EmitSpecProgress bool
|
||||
DryRun bool
|
||||
|
||||
ParallelNode int
|
||||
ParallelTotal int
|
||||
SyncHost string
|
||||
StreamHost string
|
||||
}
|
||||
|
||||
var GinkgoConfig = GinkgoConfigType{}
|
||||
|
||||
type DefaultReporterConfigType struct {
|
||||
NoColor bool
|
||||
SlowSpecThreshold float64
|
||||
NoisyPendings bool
|
||||
Succinct bool
|
||||
Verbose bool
|
||||
FullTrace bool
|
||||
}
|
||||
|
||||
var DefaultReporterConfig = DefaultReporterConfigType{}
|
||||
|
||||
func processPrefix(prefix string) string {
|
||||
if prefix != "" {
|
||||
prefix = prefix + "."
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
|
||||
prefix = processPrefix(prefix)
|
||||
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe/Context groups.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
|
||||
flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
|
||||
flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
|
||||
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
|
||||
|
||||
if includeParallelFlags {
|
||||
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
|
||||
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
|
||||
flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
|
||||
flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
|
||||
}
|
||||
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
|
||||
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
|
||||
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
|
||||
}
|
||||
|
||||
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
|
||||
prefix = processPrefix(prefix)
|
||||
result := make([]string, 0)
|
||||
|
||||
if ginkgo.RandomSeed > 0 {
|
||||
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
|
||||
}
|
||||
|
||||
if ginkgo.RandomizeAllSpecs {
|
||||
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.SkipMeasurements {
|
||||
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FailOnPending {
|
||||
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FailFast {
|
||||
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.DryRun {
|
||||
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.FocusString != "" {
|
||||
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
|
||||
}
|
||||
|
||||
if ginkgo.SkipString != "" {
|
||||
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
|
||||
}
|
||||
|
||||
if ginkgo.EmitSpecProgress {
|
||||
result = append(result, fmt.Sprintf("--%sprogress", prefix))
|
||||
}
|
||||
|
||||
if ginkgo.ParallelNode != 0 {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
|
||||
}
|
||||
|
||||
if ginkgo.ParallelTotal != 0 {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
|
||||
}
|
||||
|
||||
if ginkgo.StreamHost != "" {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
|
||||
}
|
||||
|
||||
if ginkgo.SyncHost != "" {
|
||||
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
|
||||
}
|
||||
|
||||
if reporter.NoColor {
|
||||
result = append(result, fmt.Sprintf("--%snoColor", prefix))
|
||||
}
|
||||
|
||||
if reporter.SlowSpecThreshold > 0 {
|
||||
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
|
||||
}
|
||||
|
||||
if !reporter.NoisyPendings {
|
||||
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
|
||||
}
|
||||
|
||||
if reporter.Verbose {
|
||||
result = append(result, fmt.Sprintf("--%sv", prefix))
|
||||
}
|
||||
|
||||
if reporter.Succinct {
|
||||
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
|
||||
}
|
||||
|
||||
if reporter.FullTrace {
|
||||
result = append(result, fmt.Sprintf("--%strace", prefix))
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
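A minimal sketch of how the two halves of this file fit together: `Flags` registers the ginkgo flags onto a `FlagSet` (writing into the package-level `GinkgoConfig` / `DefaultReporterConfig`), and `BuildFlagArgs` serializes those structs back into CLI arguments, which is how test binaries can be re-invoked with the same configuration. The flag values below are illustrative only:

```golang
package main

import (
	"flag"
	"fmt"
	"log"

	"github.com/onsi/ginkgo/config"
)

func main() {
	// Register the ginkgo.* flags on a throwaway FlagSet, parallel flags included.
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	config.Flags(fs, "ginkgo", true)

	// Parsing populates config.GinkgoConfig and config.DefaultReporterConfig.
	if err := fs.Parse([]string{"--ginkgo.focus=Networking", "--ginkgo.v"}); err != nil {
		log.Fatalf("parsing flags: %v", err)
	}

	// Round-trip the populated structs back into command-line arguments.
	args := config.BuildFlagArgs("ginkgo", config.GinkgoConfig, config.DefaultReporterConfig)
	fmt.Println(args) // e.g. [--ginkgo.seed=... --ginkgo.focus=Networking --ginkgo.v]
}
```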
536
vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
generated
vendored
Normal file
@ -0,0 +1,536 @@
|
||||
/*
|
||||
Ginkgo is a BDD-style testing framework for Golang
|
||||
|
||||
The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
|
||||
|
||||
Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
|
||||
|
||||
Ginkgo on Github: http://github.com/onsi/ginkgo
|
||||
|
||||
Ginkgo is MIT-Licensed
|
||||
*/
|
||||
package ginkgo
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/internal/remote"
|
||||
"github.com/onsi/ginkgo/internal/suite"
|
||||
"github.com/onsi/ginkgo/internal/testingtproxy"
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const GINKGO_VERSION = config.VERSION
|
||||
const GINKGO_PANIC = `
|
||||
Your test failed.
|
||||
Ginkgo panics to prevent subsequent assertions from running.
|
||||
Normally Ginkgo rescues this panic so you shouldn't see it.
|
||||
|
||||
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
|
||||
To circumvent this, you should call
|
||||
|
||||
defer GinkgoRecover()
|
||||
|
||||
at the top of the goroutine that caused this panic.
|
||||
`
|
||||
const defaultTimeout = 1
|
||||
|
||||
var globalSuite *suite.Suite
|
||||
var globalFailer *failer.Failer
|
||||
|
||||
func init() {
|
||||
config.Flags(flag.CommandLine, "ginkgo", true)
|
||||
GinkgoWriter = writer.New(os.Stdout)
|
||||
globalFailer = failer.New()
|
||||
globalSuite = suite.New(globalFailer)
|
||||
}
|
||||
|
||||
//GinkgoWriter implements an io.Writer
|
||||
//When running in verbose mode any writes to GinkgoWriter will be immediately printed
|
||||
//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
|
||||
//only if the current test fails.
|
||||
var GinkgoWriter io.Writer
|
||||
|
||||
//The interface by which Ginkgo receives *testing.T
|
||||
type GinkgoTestingT interface {
|
||||
Fail()
|
||||
}
|
||||
|
||||
//GinkgoParallelNode returns the parallel node number for the current ginkgo process
|
||||
//The node number is 1-indexed
|
||||
func GinkgoParallelNode() int {
|
||||
return config.GinkgoConfig.ParallelNode
|
||||
}
|
||||
|
||||
//Some matcher libraries or legacy codebases require a *testing.T
|
||||
//GinkgoT implements an interface analogous to *testing.T and can be used if
|
||||
//the library in question accepts *testing.T through an interface
|
||||
//
|
||||
// For example, with testify:
|
||||
// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
|
||||
//
|
||||
// Or with gomock:
|
||||
// gomock.NewController(GinkgoT())
|
||||
//
|
||||
// GinkgoT() takes an optional offset argument that can be used to get the
|
||||
// correct line number associated with the failure.
|
||||
func GinkgoT(optionalOffset ...int) GinkgoTInterface {
|
||||
offset := 3
|
||||
if len(optionalOffset) > 0 {
|
||||
offset = optionalOffset[0]
|
||||
}
|
||||
return testingtproxy.New(GinkgoWriter, Fail, offset)
|
||||
}
|
||||
|
||||
//The interface returned by GinkgoT(). This covers most of the methods
|
||||
//in the testing package's T.
|
||||
type GinkgoTInterface interface {
|
||||
Fail()
|
||||
Error(args ...interface{})
|
||||
Errorf(format string, args ...interface{})
|
||||
FailNow()
|
||||
Fatal(args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
Log(args ...interface{})
|
||||
Logf(format string, args ...interface{})
|
||||
Failed() bool
|
||||
Parallel()
|
||||
Skip(args ...interface{})
|
||||
Skipf(format string, args ...interface{})
|
||||
SkipNow()
|
||||
Skipped() bool
|
||||
}
|
||||
|
||||
//Custom Ginkgo test reporters must implement the Reporter interface.
|
||||
//
|
||||
//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
|
||||
//and a SpecSummary just before a spec begins and just after a spec ends
|
||||
type Reporter reporters.Reporter
|
||||
|
||||
//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
|
||||
//to tell Ginkgo that your async test is done.
|
||||
type Done chan<- interface{}
|
||||
|
||||
//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
|
||||
// FullTestText: a concatenation of ComponentTexts and the TestText
|
||||
// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
|
||||
// TestText: the text in the actual It or Measure node
|
||||
// IsMeasurement: true if the current test is a measurement
|
||||
// FileName: the name of the file containing the current test
|
||||
// LineNumber: the line number for the current test
|
||||
// Failed: if the current test has failed, this will be true (useful in an AfterEach)
|
||||
type GinkgoTestDescription struct {
|
||||
FullTestText string
|
||||
ComponentTexts []string
|
||||
TestText string
|
||||
|
||||
IsMeasurement bool
|
||||
|
||||
FileName string
|
||||
LineNumber int
|
||||
|
||||
Failed bool
|
||||
}
|
||||
|
||||
//CurrentGinkgoTestDescription returns information about the currently running test.
|
||||
func CurrentGinkgoTestDescription() GinkgoTestDescription {
|
||||
summary, ok := globalSuite.CurrentRunningSpecSummary()
|
||||
if !ok {
|
||||
return GinkgoTestDescription{}
|
||||
}
|
||||
|
||||
subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
|
||||
|
||||
return GinkgoTestDescription{
|
||||
ComponentTexts: summary.ComponentTexts[1:],
|
||||
FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
|
||||
TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
|
||||
IsMeasurement: summary.IsMeasurement,
|
||||
FileName: subjectCodeLocation.FileName,
|
||||
LineNumber: subjectCodeLocation.LineNumber,
|
||||
Failed: summary.HasFailureState(),
|
||||
}
|
||||
}
|
||||
|
||||
//Measurement tests receive a Benchmarker.
|
||||
//
|
||||
//You use the Time() function to time how long the passed in body function takes to run
|
||||
//You use the RecordValue() function to track arbitrary numerical measurements.
|
||||
//The optional info argument is passed to the test reporter and can be used to
|
||||
// provide the measurement data to a custom reporter with context.
|
||||
//
|
||||
//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
|
||||
type Benchmarker interface {
|
||||
Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
|
||||
RecordValue(name string, value float64, info ...interface{})
|
||||
}
|
||||
|
||||
//RunSpecs is the entry point for the Ginkgo test runner.
|
||||
//You must call this within a Golang testing TestX(t *testing.T) function.
|
||||
//
|
||||
//To bootstrap a test suite you can use the Ginkgo CLI:
|
||||
//
|
||||
// ginkgo bootstrap
|
||||
func RunSpecs(t GinkgoTestingT, description string) bool {
|
||||
specReporters := []Reporter{buildDefaultReporter()}
|
||||
return RunSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
|
||||
|
||||
//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
|
||||
//RunSpecs() with this method.
|
||||
func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
|
||||
return RunSpecsWithCustomReporters(t, description, specReporters)
|
||||
}
|
||||
|
||||
//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
|
||||
//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
|
||||
func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
|
||||
writer := GinkgoWriter.(*writer.Writer)
|
||||
writer.SetStream(config.DefaultReporterConfig.Verbose)
|
||||
reporters := make([]reporters.Reporter, len(specReporters))
|
||||
for i, reporter := range specReporters {
|
||||
reporters[i] = reporter
|
||||
}
|
||||
passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
|
||||
if passed && hasFocusedTests {
|
||||
fmt.Println("PASS | FOCUSED")
|
||||
os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
|
||||
}
|
||||
return passed
|
||||
}
|
||||
|
||||
func buildDefaultReporter() Reporter {
|
||||
remoteReportingServer := config.GinkgoConfig.StreamHost
|
||||
if remoteReportingServer == "" {
|
||||
stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor)
|
||||
return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
|
||||
} else {
|
||||
return remote.NewForwardingReporter(remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor())
|
||||
}
|
||||
}
|
||||
|
||||
//Skip notifies Ginkgo that the current spec should be skipped.
|
||||
func Skip(message string, callerSkip ...int) {
|
||||
skip := 0
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
|
||||
globalFailer.Skip(message, codelocation.New(skip+1))
|
||||
panic(GINKGO_PANIC)
|
||||
}
|
||||
|
||||
//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
|
||||
func Fail(message string, callerSkip ...int) {
|
||||
skip := 0
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
|
||||
globalFailer.Fail(message, codelocation.New(skip+1))
|
||||
panic(GINKGO_PANIC)
|
||||
}
|
||||
|
||||
//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
|
||||
//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
|
||||
//calls out to Gomega
|
||||
//
|
||||
//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
|
||||
//further assertions from running. This panic must be recovered. Ginkgo does this for you
|
||||
//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
|
||||
//
|
||||
//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
|
||||
//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
|
||||
func GinkgoRecover() {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
globalFailer.Panic(codelocation.New(1), e)
|
||||
}
|
||||
}
|
||||
|
||||
//Describe blocks allow you to organize your specs. A Describe block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts.
|
||||
func Describe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FDescribe
|
||||
func FDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PDescribe
|
||||
func PDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XDescribe
|
||||
func XDescribe(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//Context blocks allow you to organize your specs. A Context block can contain any number of
|
||||
//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
|
||||
//
|
||||
//In addition you can nest Describe and Context blocks. Describe and Context blocks are functionally
|
||||
//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
|
||||
//or method and, within that Describe, outline a number of Contexts.
|
||||
func Context(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus the tests within a describe block using FContext
|
||||
func FContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using PContext
|
||||
func PContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark the tests within a describe block as pending using XContext
|
||||
func XContext(text string, body func()) bool {
|
||||
globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
|
||||
return true
|
||||
}
|
||||
|
||||
//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
|
||||
//within an It block.
|
||||
//
|
||||
//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
|
||||
//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
|
||||
func It(text string, body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus individual Its using FIt
|
||||
func FIt(text string, body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Its as pending using PIt
|
||||
func PIt(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Its as pending using XIt
|
||||
func XIt(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//By allows you to better document large Its.
|
||||
//
|
||||
//Generally you should try to keep your Its short and to the point. This is not always possible, however,
|
||||
//especially in the context of integration tests that capture a particular workflow.
|
||||
//
|
||||
//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
|
||||
//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
|
||||
func By(text string, callbacks ...func()) {
|
||||
preamble := "\x1b[1mSTEP\x1b[0m"
|
||||
if config.DefaultReporterConfig.NoColor {
|
||||
preamble = "STEP"
|
||||
}
|
||||
fmt.Fprintln(GinkgoWriter, preamble+": "+text)
|
||||
if len(callbacks) == 1 {
|
||||
callbacks[0]()
|
||||
}
|
||||
if len(callbacks) > 1 {
|
||||
panic("just one callback per By, please")
|
||||
}
|
||||
}
|
||||
|
||||
//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
|
||||
//and accumulate metrics provided to the Benchmarker by the body function.
|
||||
//
|
||||
//The body function must have the signature:
|
||||
// func(b Benchmarker)
|
||||
func Measure(text string, body interface{}, samples int) bool {
|
||||
globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can focus individual Measures using FMeasure
|
||||
func FMeasure(text string, body interface{}, samples int) bool {
|
||||
globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Measurements as pending using PMeasure
|
||||
func PMeasure(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//You can mark Measurements as pending using XMeasure
|
||||
func XMeasure(text string, _ ...interface{}) bool {
|
||||
globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
|
||||
return true
|
||||
}
|
||||
|
||||
//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
|
||||
//parallel node process will call BeforeSuite.
|
||||
//
|
||||
//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||
//
|
||||
//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||
func BeforeSuite(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
|
||||
//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
|
||||
//
|
||||
//When running in parallel, each parallel node process will call AfterSuite.
|
||||
//
|
||||
//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
|
||||
//
|
||||
//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
|
||||
func AfterSuite(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
|
||||
//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
|
||||
//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
|
||||
//until that node is done before running.
|
||||
//
|
||||
//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
|
||||
//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
|
||||
//to the second function (on all the other nodes).
|
||||
//
|
||||
//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
|
||||
//
|
||||
// func() []byte
|
||||
//
|
||||
//or, to run asynchronously:
|
||||
//
|
||||
// func(done Done) []byte
|
||||
//
|
||||
//The byte array returned by the first function is then passed to the second function, which has the signature:
|
||||
//
|
||||
// func(data []byte)
|
||||
//
|
||||
//or, to run asynchronously:
|
||||
//
|
||||
// func(data []byte, done Done)
|
||||
//
|
||||
//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
|
||||
//
|
||||
// var dbClient db.Client
|
||||
// var dbRunner db.Runner
|
||||
//
|
||||
// var _ = SynchronizedBeforeSuite(func() []byte {
|
||||
// dbRunner = db.NewRunner()
|
||||
// err := dbRunner.Start()
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
// return []byte(dbRunner.URL)
|
||||
// }, func(data []byte) {
|
||||
// dbClient = db.NewClient()
|
||||
// err := dbClient.Connect(string(data))
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
// })
|
||||
func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetSynchronizedBeforeSuiteNode(
|
||||
node1Body,
|
||||
allNodesBody,
|
||||
codelocation.New(1),
|
||||
parseTimeout(timeout...),
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
|
||||
//external singleton resources shared across nodes when running tests in parallel.
|
||||
//
|
||||
//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
|
||||
//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
|
||||
//all other nodes are finished.
|
||||
//
|
||||
//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
|
||||
//
|
||||
//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
|
||||
//only after all nodes have finished:
|
||||
//
|
||||
// var _ = SynchronizedAfterSuite(func() {
|
||||
// dbClient.Cleanup()
|
||||
// }, func() {
|
||||
// dbRunner.Stop()
|
||||
// })
|
||||
func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
|
||||
globalSuite.SetSynchronizedAfterSuiteNode(
|
||||
allNodesBody,
|
||||
node1Body,
|
||||
codelocation.New(1),
|
||||
parseTimeout(timeout...),
|
||||
)
|
||||
return true
|
||||
}
|
||||
|
||||
//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
|
||||
//Describe and Context blocks the outermost BeforeEach blocks are run first.
|
||||
//
|
||||
//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func BeforeEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
|
||||
//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
|
||||
//
|
||||
//Like It blocks, JustBeforeEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func JustBeforeEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
|
||||
//Describe and Context blocks the innermost AfterEach blocks are run first.
|
||||
//
|
||||
//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
|
||||
//a Done channel
|
||||
func AfterEach(body interface{}, timeout ...float64) bool {
|
||||
globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
|
||||
return true
|
||||
}
|
||||
|
||||
func parseTimeout(timeout ...float64) time.Duration {
|
||||
if len(timeout) == 0 {
|
||||
return time.Duration(defaultTimeout * int64(time.Second))
|
||||
} else {
|
||||
return time.Duration(timeout[0] * float64(time.Second))
|
||||
}
|
||||
}
|
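A minimal, hypothetical spec file exercising the DSL defined above: the `TestBooks` function bootstraps the suite (as `ginkgo bootstrap` would generate it in a `_test.go` file), and the container holds a synchronous spec plus an asynchronous one that receives a `Done` channel with an explicit 0.5-second timeout:

```golang
package books_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
)

// Suite bootstrap: RunSpecs must be called from a standard Go test function.
func TestBooks(t *testing.T) {
	RunSpecs(t, "Books Suite")
}

var _ = Describe("the shelf", func() {
	var shelf []string

	// Outermost BeforeEach blocks run before every It in this container.
	BeforeEach(func() {
		shelf = []string{"Les Miserables"}
	})

	It("holds the books placed on it", func() {
		if len(shelf) != 1 {
			Fail("expected exactly one book on the shelf")
		}
	})

	// Asynchronous spec: close (or write to) the Done channel to signal
	// completion; the trailing argument is the timeout in seconds.
	It("eventually settles", func(done Done) {
		go func() {
			defer GinkgoRecover()
			close(done)
		}()
	}, 0.5)
})
```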
32
vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
package codelocation
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func New(skip int) types.CodeLocation {
|
||||
_, file, line, _ := runtime.Caller(skip + 1)
|
||||
stackTrace := PruneStack(string(debug.Stack()), skip)
|
||||
return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
|
||||
}
|
||||
|
||||
func PruneStack(fullStackTrace string, skip int) string {
|
||||
stack := strings.Split(fullStackTrace, "\n")
|
||||
if len(stack) > 2*(skip+1) {
|
||||
stack = stack[2*(skip+1):]
|
||||
}
|
||||
prunedStack := []string{}
|
||||
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
|
||||
for i := 0; i < len(stack)/2; i++ {
|
||||
if !re.Match([]byte(stack[i*2])) {
|
||||
prunedStack = append(prunedStack, stack[i*2])
|
||||
prunedStack = append(prunedStack, stack[i*2+1])
|
||||
}
|
||||
}
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
|
151
vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
generated
vendored
Normal file
@ -0,0 +1,151 @@
|
||||
package containernode
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sort"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type subjectOrContainerNode struct {
|
||||
containerNode *ContainerNode
|
||||
subjectNode leafnodes.SubjectNode
|
||||
}
|
||||
|
||||
func (n subjectOrContainerNode) text() string {
|
||||
if n.containerNode != nil {
|
||||
return n.containerNode.Text()
|
||||
} else {
|
||||
return n.subjectNode.Text()
|
||||
}
|
||||
}
|
||||
|
||||
type CollatedNodes struct {
|
||||
Containers []*ContainerNode
|
||||
Subject leafnodes.SubjectNode
|
||||
}
|
||||
|
||||
type ContainerNode struct {
|
||||
text string
|
||||
flag types.FlagType
|
||||
codeLocation types.CodeLocation
|
||||
|
||||
setupNodes []leafnodes.BasicNode
|
||||
subjectAndContainerNodes []subjectOrContainerNode
|
||||
}
|
||||
|
||||
func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
|
||||
return &ContainerNode{
|
||||
text: text,
|
||||
flag: flag,
|
||||
codeLocation: codeLocation,
|
||||
}
|
||||
}
|
||||
|
||||
func (container *ContainerNode) Shuffle(r *rand.Rand) {
|
||||
sort.Sort(container)
|
||||
permutation := r.Perm(len(container.subjectAndContainerNodes))
|
||||
shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
|
||||
for i, j := range permutation {
|
||||
shuffledNodes[i] = container.subjectAndContainerNodes[j]
|
||||
}
|
||||
container.subjectAndContainerNodes = shuffledNodes
|
||||
}
|
||||
|
||||
func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
|
||||
if node.flag == types.FlagTypePending {
|
||||
return false
|
||||
}
|
||||
|
||||
shouldUnfocus := false
|
||||
for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
|
||||
if subjectOrContainerNode.containerNode != nil {
|
||||
shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
|
||||
} else {
|
||||
shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
|
||||
}
|
||||
}
|
||||
|
||||
if shouldUnfocus {
|
||||
if node.flag == types.FlagTypeFocused {
|
||||
node.flag = types.FlagTypeNone
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return node.flag == types.FlagTypeFocused
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Collate() []CollatedNodes {
|
||||
return node.collate([]*ContainerNode{})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
|
||||
collated := make([]CollatedNodes, 0)
|
||||
|
||||
containers := make([]*ContainerNode, len(enclosingContainers))
|
||||
copy(containers, enclosingContainers)
|
||||
containers = append(containers, node)
|
||||
|
||||
for _, subjectOrContainer := range node.subjectAndContainerNodes {
|
||||
if subjectOrContainer.containerNode != nil {
|
||||
collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
|
||||
} else {
|
||||
collated = append(collated, CollatedNodes{
|
||||
Containers: containers,
|
||||
Subject: subjectOrContainer.subjectNode,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return collated
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
|
||||
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
|
||||
node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
|
||||
}
|
||||
|
||||
func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
|
||||
node.setupNodes = append(node.setupNodes, setupNode)
|
||||
}
|
||||
|
||||
func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
|
||||
nodes := []leafnodes.BasicNode{}
|
||||
for _, setupNode := range node.setupNodes {
|
||||
if setupNode.Type() == nodeType {
|
||||
nodes = append(nodes, setupNode)
|
||||
}
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *ContainerNode) CodeLocation() types.CodeLocation {
|
||||
return node.codeLocation
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
//sort.Interface
|
||||
|
||||
func (node *ContainerNode) Len() int {
|
||||
return len(node.subjectAndContainerNodes)
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Less(i, j int) bool {
|
||||
return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
|
||||
}
|
||||
|
||||
func (node *ContainerNode) Swap(i, j int) {
|
||||
node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
|
||||
}
|
92
vendor/github.com/onsi/ginkgo/internal/failer/failer.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
package failer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Failer struct {
|
||||
lock *sync.Mutex
|
||||
failure types.SpecFailure
|
||||
state types.SpecState
|
||||
}
|
||||
|
||||
func New() *Failer {
|
||||
return &Failer{
|
||||
lock: &sync.Mutex{},
|
||||
state: types.SpecStatePassed,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.state == types.SpecStatePassed {
|
||||
f.state = types.SpecStatePanicked
|
||||
f.failure = types.SpecFailure{
|
||||
Message: "Test Panicked",
|
||||
Location: location,
|
||||
ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Failer) Timeout(location types.CodeLocation) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.state == types.SpecStatePassed {
|
||||
f.state = types.SpecStateTimedOut
|
||||
f.failure = types.SpecFailure{
|
||||
Message: "Timed out",
|
||||
Location: location,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Failer) Fail(message string, location types.CodeLocation) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.state == types.SpecStatePassed {
|
||||
f.state = types.SpecStateFailed
|
||||
f.failure = types.SpecFailure{
|
||||
Message: message,
|
||||
Location: location,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
failure := f.failure
|
||||
outcome := f.state
|
||||
if outcome != types.SpecStatePassed {
|
||||
failure.ComponentType = componentType
|
||||
failure.ComponentIndex = componentIndex
|
||||
failure.ComponentCodeLocation = componentCodeLocation
|
||||
}
|
||||
|
||||
f.state = types.SpecStatePassed
|
||||
f.failure = types.SpecFailure{}
|
||||
|
||||
return failure, outcome
|
||||
}
|
||||
|
||||
func (f *Failer) Skip(message string, location types.CodeLocation) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
if f.state == types.SpecStatePassed {
|
||||
f.state = types.SpecStateSkipped
|
||||
f.failure = types.SpecFailure{
|
||||
Message: message,
|
||||
Location: location,
|
||||
}
|
||||
}
|
||||
}
|
95
vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type benchmarker struct {
|
||||
mu sync.Mutex
|
||||
measurements map[string]*types.SpecMeasurement
|
||||
orderCounter int
|
||||
}
|
||||
|
||||
func newBenchmarker() *benchmarker {
|
||||
return &benchmarker{
|
||||
measurements: make(map[string]*types.SpecMeasurement, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
|
||||
t := time.Now()
|
||||
body()
|
||||
elapsedTime = time.Since(t)
|
||||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", info...)
|
||||
measurement.Results = append(measurement.Results, elapsedTime.Seconds())
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
|
||||
measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", info...)
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
measurement.Results = append(measurement.Results, value)
|
||||
}
|
||||
|
||||
func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, info ...interface{}) *types.SpecMeasurement {
|
||||
measurement, ok := b.measurements[name]
|
||||
if !ok {
|
||||
var computedInfo interface{}
|
||||
computedInfo = nil
|
||||
if len(info) > 0 {
|
||||
computedInfo = info[0]
|
||||
}
|
||||
measurement = &types.SpecMeasurement{
|
||||
Name: name,
|
||||
Info: computedInfo,
|
||||
Order: b.orderCounter,
|
||||
SmallestLabel: smallestLabel,
|
||||
LargestLabel: largestLabel,
|
||||
AverageLabel: averageLabel,
|
||||
Units: units,
|
||||
Results: make([]float64, 0),
|
||||
}
|
||||
b.measurements[name] = measurement
|
||||
b.orderCounter++
|
||||
}
|
||||
|
||||
return measurement
|
||||
}
|
||||
|
||||
func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
for _, measurement := range b.measurements {
|
||||
measurement.Smallest = math.MaxFloat64
|
||||
measurement.Largest = -math.MaxFloat64
|
||||
sum := float64(0)
|
||||
sumOfSquares := float64(0)
|
||||
|
||||
for _, result := range measurement.Results {
|
||||
if result > measurement.Largest {
|
||||
measurement.Largest = result
|
||||
}
|
||||
if result < measurement.Smallest {
|
||||
measurement.Smallest = result
|
||||
}
|
||||
sum += result
|
||||
sumOfSquares += result * result
|
||||
}
|
||||
|
||||
n := float64(len(measurement.Results))
|
||||
measurement.Average = sum / n
|
||||
measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
|
||||
}
|
||||
|
||||
return b.measurements
|
||||
}
|
19
vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type BasicNode interface {
|
||||
Type() types.SpecComponentType
|
||||
Run() (types.SpecState, types.SpecFailure)
|
||||
CodeLocation() types.CodeLocation
|
||||
}
|
||||
|
||||
type SubjectNode interface {
|
||||
BasicNode
|
||||
|
||||
Text() string
|
||||
Flag() types.FlagType
|
||||
Samples() int
|
||||
}
|
46
vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ItNode struct {
|
||||
runner *runner
|
||||
|
||||
flag types.FlagType
|
||||
text string
|
||||
}
|
||||
|
||||
func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
|
||||
return &ItNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
|
||||
flag: flag,
|
||||
text: text,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
return node.runner.run()
|
||||
}
|
||||
|
||||
func (node *ItNode) Type() types.SpecComponentType {
|
||||
return types.SpecComponentTypeIt
|
||||
}
|
||||
|
||||
func (node *ItNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *ItNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
func (node *ItNode) CodeLocation() types.CodeLocation {
|
||||
return node.runner.codeLocation
|
||||
}
|
||||
|
||||
func (node *ItNode) Samples() int {
|
||||
return 1
|
||||
}
|
61
vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type MeasureNode struct {
|
||||
runner *runner
|
||||
|
||||
text string
|
||||
flag types.FlagType
|
||||
samples int
|
||||
benchmarker *benchmarker
|
||||
}
|
||||
|
||||
func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
|
||||
benchmarker := newBenchmarker()
|
||||
|
||||
wrappedBody := func() {
|
||||
reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
|
||||
}
|
||||
|
||||
return &MeasureNode{
|
||||
runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
|
||||
|
||||
text: text,
|
||||
flag: flag,
|
||||
samples: samples,
|
||||
benchmarker: benchmarker,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
return node.runner.run()
|
||||
}
|
||||
|
||||
func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
|
||||
return node.benchmarker.measurementsReport()
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Type() types.SpecComponentType {
|
||||
return types.SpecComponentTypeMeasure
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Text() string {
|
||||
return node.text
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Flag() types.FlagType {
|
||||
return node.flag
|
||||
}
|
||||
|
||||
func (node *MeasureNode) CodeLocation() types.CodeLocation {
|
||||
return node.runner.codeLocation
|
||||
}
|
||||
|
||||
func (node *MeasureNode) Samples() int {
|
||||
return node.samples
|
||||
}
|
113
vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/onsi/ginkgo/internal/codelocation"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type runner struct {
|
||||
isAsync bool
|
||||
asyncFunc func(chan<- interface{})
|
||||
syncFunc func()
|
||||
codeLocation types.CodeLocation
|
||||
timeoutThreshold time.Duration
|
||||
nodeType types.SpecComponentType
|
||||
componentIndex int
|
||||
failer *failer.Failer
|
||||
}
|
||||
|
||||
func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
|
||||
bodyType := reflect.TypeOf(body)
|
||||
if bodyType.Kind() != reflect.Func {
|
||||
panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
|
||||
}
|
||||
|
||||
runner := &runner{
|
||||
codeLocation: codeLocation,
|
||||
timeoutThreshold: timeout,
|
||||
failer: failer,
|
||||
nodeType: nodeType,
|
||||
componentIndex: componentIndex,
|
||||
}
|
||||
|
||||
switch bodyType.NumIn() {
|
||||
case 0:
|
||||
runner.syncFunc = body.(func())
|
||||
return runner
|
||||
case 1:
|
||||
if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
|
||||
panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
|
||||
}
|
||||
|
||||
wrappedBody := func(done chan<- interface{}) {
|
||||
bodyValue := reflect.ValueOf(body)
|
||||
bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
|
||||
}
|
||||
|
||||
runner.isAsync = true
|
||||
runner.asyncFunc = wrappedBody
|
||||
return runner
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
|
||||
}
|
||||
|
||||
func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
if r.isAsync {
|
||||
return r.runAsync()
|
||||
} else {
|
||||
return r.runSync()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
done := make(chan interface{}, 1)
|
||||
|
||||
go func() {
|
||||
finished := false
|
||||
|
||||
defer func() {
|
||||
if e := recover(); e != nil || !finished {
|
||||
r.failer.Panic(codelocation.New(2), e)
|
||||
select {
|
||||
case <-done:
|
||||
break
|
||||
default:
|
||||
close(done)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
r.asyncFunc(done)
|
||||
finished = true
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(r.timeoutThreshold):
|
||||
r.failer.Timeout(r.codeLocation)
|
||||
}
|
||||
|
||||
failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
|
||||
return
|
||||
}
|
||||
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
finished := false
|
||||
|
||||
defer func() {
|
||||
if e := recover(); e != nil || !finished {
|
||||
r.failer.Panic(codelocation.New(2), e)
|
||||
}
|
||||
|
||||
failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
|
||||
}()
|
||||
|
||||
r.syncFunc()
|
||||
finished = true
|
||||
|
||||
return
|
||||
}
|
41
vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"time"
|
||||
)
|
||||
|
||||
type SetupNode struct {
|
||||
runner *runner
|
||||
}
|
||||
|
||||
func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
|
||||
return node.runner.run()
|
||||
}
|
||||
|
||||
func (node *SetupNode) Type() types.SpecComponentType {
|
||||
return node.runner.nodeType
|
||||
}
|
||||
|
||||
func (node *SetupNode) CodeLocation() types.CodeLocation {
|
||||
return node.runner.codeLocation
|
||||
}
|
||||
|
||||
func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||
return &SetupNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
|
||||
}
|
||||
}
|
||||
|
||||
func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||
return &SetupNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
|
||||
}
|
||||
}
|
||||
|
||||
func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
|
||||
return &SetupNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
|
||||
}
|
||||
}
|
54
vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"time"
|
||||
)
|
||||
|
||||
type SuiteNode interface {
|
||||
Run(parallelNode int, parallelTotal int, syncHost string) bool
|
||||
Passed() bool
|
||||
Summary() *types.SetupSummary
|
||||
}
|
||||
|
||||
type simpleSuiteNode struct {
|
||||
runner *runner
|
||||
outcome types.SpecState
|
||||
failure types.SpecFailure
|
||||
runTime time.Duration
|
||||
}
|
||||
|
||||
func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||
t := time.Now()
|
||||
node.outcome, node.failure = node.runner.run()
|
||||
node.runTime = time.Since(t)
|
||||
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *simpleSuiteNode) Passed() bool {
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *simpleSuiteNode) Summary() *types.SetupSummary {
|
||||
return &types.SetupSummary{
|
||||
ComponentType: node.runner.nodeType,
|
||||
CodeLocation: node.runner.codeLocation,
|
||||
State: node.outcome,
|
||||
RunTime: node.runTime,
|
||||
Failure: node.failure,
|
||||
}
|
||||
}
|
||||
|
||||
func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
return &simpleSuiteNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
return &simpleSuiteNode{
|
||||
runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||
}
|
||||
}
|
89
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
generated
vendored
Normal file
@ -0,0 +1,89 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type synchronizedAfterSuiteNode struct {
|
||||
runnerA *runner
|
||||
runnerB *runner
|
||||
|
||||
outcome types.SpecState
|
||||
failure types.SpecFailure
|
||||
runTime time.Duration
|
||||
}
|
||||
|
||||
func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
return &synchronizedAfterSuiteNode{
|
||||
runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||
runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||
node.outcome, node.failure = node.runnerA.run()
|
||||
|
||||
if parallelNode == 1 {
|
||||
if parallelTotal > 1 {
|
||||
node.waitUntilOtherNodesAreDone(syncHost)
|
||||
}
|
||||
|
||||
outcome, failure := node.runnerB.run()
|
||||
|
||||
if node.outcome == types.SpecStatePassed {
|
||||
node.outcome, node.failure = outcome, failure
|
||||
}
|
||||
}
|
||||
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Passed() bool {
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
|
||||
return &types.SetupSummary{
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
CodeLocation: node.runnerA.codeLocation,
|
||||
State: node.outcome,
|
||||
RunTime: node.runTime,
|
||||
Failure: node.failure,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
|
||||
for {
|
||||
if node.canRun(syncHost) {
|
||||
return
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
|
||||
resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
return false
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
afterSuiteData := types.RemoteAfterSuiteData{}
|
||||
err = json.Unmarshal(body, &afterSuiteData)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return afterSuiteData.CanRun
|
||||
}
|
182
vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
package leafnodes
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type synchronizedBeforeSuiteNode struct {
|
||||
runnerA *runner
|
||||
runnerB *runner
|
||||
|
||||
data []byte
|
||||
|
||||
outcome types.SpecState
|
||||
failure types.SpecFailure
|
||||
runTime time.Duration
|
||||
}
|
||||
|
||||
func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
|
||||
node := &synchronizedBeforeSuiteNode{}
|
||||
|
||||
node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||
node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
|
||||
|
||||
return node
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
|
||||
t := time.Now()
|
||||
defer func() {
|
||||
node.runTime = time.Since(t)
|
||||
}()
|
||||
|
||||
if parallelNode == 1 {
|
||||
node.outcome, node.failure = node.runA(parallelTotal, syncHost)
|
||||
} else {
|
||||
node.outcome, node.failure = node.waitForA(syncHost)
|
||||
}
|
||||
|
||||
if node.outcome != types.SpecStatePassed {
|
||||
return false
|
||||
}
|
||||
node.outcome, node.failure = node.runnerB.run()
|
||||
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
|
||||
outcome, failure := node.runnerA.run()
|
||||
|
||||
if parallelTotal > 1 {
|
||||
state := types.RemoteBeforeSuiteStatePassed
|
||||
if outcome != types.SpecStatePassed {
|
||||
state = types.RemoteBeforeSuiteStateFailed
|
||||
}
|
||||
json := (types.RemoteBeforeSuiteData{
|
||||
Data: node.data,
|
||||
State: state,
|
||||
}).ToJSON()
|
||||
http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
|
||||
}
|
||||
|
||||
return outcome, failure
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
|
||||
failure := func(message string) types.SpecFailure {
|
||||
return types.SpecFailure{
|
||||
Message: message,
|
||||
Location: node.runnerA.codeLocation,
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
ComponentIndex: node.runnerA.componentIndex,
|
||||
ComponentCodeLocation: node.runnerA.codeLocation,
|
||||
}
|
||||
}
|
||||
for {
|
||||
resp, err := http.Get(syncHost + "/BeforeSuiteState")
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
beforeSuiteData := types.RemoteBeforeSuiteData{}
|
||||
err = json.Unmarshal(body, &beforeSuiteData)
|
||||
if err != nil {
|
||||
return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
|
||||
}
|
||||
|
||||
switch beforeSuiteData.State {
|
||||
case types.RemoteBeforeSuiteStatePassed:
|
||||
node.data = beforeSuiteData.Data
|
||||
return types.SpecStatePassed, types.SpecFailure{}
|
||||
case types.RemoteBeforeSuiteStateFailed:
|
||||
return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
|
||||
case types.RemoteBeforeSuiteStateDisappeared:
|
||||
return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
|
||||
}
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
|
||||
return types.SpecStateFailed, failure("Shouldn't get here!")
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Passed() bool {
|
||||
return node.outcome == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
|
||||
return &types.SetupSummary{
|
||||
ComponentType: node.runnerA.nodeType,
|
||||
CodeLocation: node.runnerA.codeLocation,
|
||||
State: node.outcome,
|
||||
RunTime: node.runTime,
|
||||
Failure: node.failure,
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
|
||||
typeA := reflect.TypeOf(bodyA)
|
||||
if typeA.Kind() != reflect.Func {
|
||||
panic("SynchronizedBeforeSuite expects a function as its first argument")
|
||||
}
|
||||
|
||||
takesNothing := typeA.NumIn() == 0
|
||||
takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
|
||||
returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
|
||||
|
||||
if !((takesNothing || takesADoneChannel) && returnsBytes) {
|
||||
panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
|
||||
}
|
||||
|
||||
if takesADoneChannel {
|
||||
return func(done chan<- interface{}) {
|
||||
out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
|
||||
node.data = out[0].Interface().([]byte)
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
|
||||
node.data = out[0].Interface().([]byte)
|
||||
}
|
||||
}
|
||||
|
||||
func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
|
||||
typeB := reflect.TypeOf(bodyB)
|
||||
if typeB.Kind() != reflect.Func {
|
||||
panic("SynchronizedBeforeSuite expects a function as its second argument")
|
||||
}
|
||||
|
||||
returnsNothing := typeB.NumOut() == 0
|
||||
takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
|
||||
takesBytesAndDone := typeB.NumIn() == 2 &&
|
||||
typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
|
||||
typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
|
||||
|
||||
if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
|
||||
panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
|
||||
}
|
||||
|
||||
if takesBytesAndDone {
|
||||
return func(done chan<- interface{}) {
|
||||
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
|
||||
}
|
||||
}
|
||||
|
||||
return func() {
|
||||
reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
|
||||
}
|
||||
}
|
250
vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
generated
vendored
Normal file
@ -0,0 +1,250 @@
|
||||
/*
|
||||
|
||||
Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
|
||||
coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
|
||||
|
||||
ginkgo -nodes=N
|
||||
|
||||
where N is the number of nodes you desire.
|
||||
*/
|
||||
package remote
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type configAndSuite struct {
|
||||
config config.GinkgoConfigType
|
||||
summary *types.SuiteSummary
|
||||
}
|
||||
|
||||
type Aggregator struct {
|
||||
nodeCount int
|
||||
config config.DefaultReporterConfigType
|
||||
stenographer stenographer.Stenographer
|
||||
result chan bool
|
||||
|
||||
suiteBeginnings chan configAndSuite
|
||||
aggregatedSuiteBeginnings []configAndSuite
|
||||
|
||||
beforeSuites chan *types.SetupSummary
|
||||
aggregatedBeforeSuites []*types.SetupSummary
|
||||
|
||||
afterSuites chan *types.SetupSummary
|
||||
aggregatedAfterSuites []*types.SetupSummary
|
||||
|
||||
specCompletions chan *types.SpecSummary
|
||||
completedSpecs []*types.SpecSummary
|
||||
|
||||
suiteEndings chan *types.SuiteSummary
|
||||
aggregatedSuiteEndings []*types.SuiteSummary
|
||||
specs []*types.SpecSummary
|
||||
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
|
||||
aggregator := &Aggregator{
|
||||
nodeCount: nodeCount,
|
||||
result: result,
|
||||
config: config,
|
||||
stenographer: stenographer,
|
||||
|
||||
suiteBeginnings: make(chan configAndSuite, 0),
|
||||
beforeSuites: make(chan *types.SetupSummary, 0),
|
||||
afterSuites: make(chan *types.SetupSummary, 0),
|
||||
specCompletions: make(chan *types.SpecSummary, 0),
|
||||
suiteEndings: make(chan *types.SuiteSummary, 0),
|
||||
}
|
||||
|
||||
go aggregator.mux()
|
||||
|
||||
return aggregator
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
aggregator.suiteBeginnings <- configAndSuite{config, summary}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
aggregator.beforeSuites <- setupSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
aggregator.afterSuites <- setupSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
//noop
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
aggregator.specCompletions <- specSummary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
aggregator.suiteEndings <- summary
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) mux() {
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case configAndSuite := <-aggregator.suiteBeginnings:
|
||||
aggregator.registerSuiteBeginning(configAndSuite)
|
||||
case setupSummary := <-aggregator.beforeSuites:
|
||||
aggregator.registerBeforeSuite(setupSummary)
|
||||
case setupSummary := <-aggregator.afterSuites:
|
||||
aggregator.registerAfterSuite(setupSummary)
|
||||
case specSummary := <-aggregator.specCompletions:
|
||||
aggregator.registerSpecCompletion(specSummary)
|
||||
case suite := <-aggregator.suiteEndings:
|
||||
finished, passed := aggregator.registerSuiteEnding(suite)
|
||||
if finished {
|
||||
aggregator.result <- passed
|
||||
break loop
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
|
||||
aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
|
||||
|
||||
if len(aggregator.aggregatedSuiteBeginnings) == 1 {
|
||||
aggregator.startTime = time.Now()
|
||||
}
|
||||
|
||||
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||
return
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
|
||||
|
||||
numberOfSpecsToRun := 0
|
||||
totalNumberOfSpecs := 0
|
||||
for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
|
||||
numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
|
||||
totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
|
||||
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
|
||||
aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
|
||||
aggregator.specs = append(aggregator.specs, specSummary)
|
||||
aggregator.flushCompletedSpecs()
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) flushCompletedSpecs() {
|
||||
if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
|
||||
return
|
||||
}
|
||||
|
||||
for _, setupSummary := range aggregator.aggregatedBeforeSuites {
|
||||
aggregator.announceBeforeSuite(setupSummary)
|
||||
}
|
||||
|
||||
for _, specSummary := range aggregator.completedSpecs {
|
||||
aggregator.announceSpec(specSummary)
|
||||
}
|
||||
|
||||
for _, setupSummary := range aggregator.aggregatedAfterSuites {
|
||||
aggregator.announceAfterSuite(setupSummary)
|
||||
}
|
||||
|
||||
aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
|
||||
aggregator.completedSpecs = []*types.SpecSummary{}
|
||||
aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
|
||||
aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
|
||||
if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||
aggregator.stenographer.AnnounceSpecWillRun(specSummary)
|
||||
}
|
||||
|
||||
aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
|
||||
|
||||
switch specSummary.State {
|
||||
case types.SpecStatePassed:
|
||||
if specSummary.IsMeasurement {
|
||||
aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
|
||||
} else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
|
||||
aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
|
||||
} else {
|
||||
aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
|
||||
}
|
||||
|
||||
case types.SpecStatePending:
|
||||
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
|
||||
case types.SpecStateSkipped:
|
||||
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
case types.SpecStateTimedOut:
|
||||
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
case types.SpecStatePanicked:
|
||||
aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
case types.SpecStateFailed:
|
||||
aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
|
||||
aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
|
||||
if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
|
||||
return false, false
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary := &types.SuiteSummary{}
|
||||
aggregatedSuiteSummary.SuiteSucceeded = true
|
||||
|
||||
for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
|
||||
if suiteSummary.SuiteSucceeded == false {
|
||||
aggregatedSuiteSummary.SuiteSucceeded = false
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
|
||||
aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
|
||||
aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
|
||||
aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
|
||||
aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
|
||||
aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
|
||||
}
|
||||
|
||||
aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
|
||||
|
||||
aggregator.stenographer.SummarizeFailures(aggregator.specs)
|
||||
aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
|
||||
|
||||
return true, aggregatedSuiteSummary.SuiteSucceeded
|
||||
}
|
90
vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
//An interface to net/http's client to allow the injection of fakes under test
|
||||
type Poster interface {
|
||||
Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
|
||||
}
|
||||
|
||||
/*
|
||||
The ForwardingReporter is a Ginkgo reporter that forwards information to
|
||||
a Ginkgo remote server.
|
||||
|
||||
When streaming parallel test output, this reporter is automatically installed by Ginkgo.
|
||||
|
||||
This is accomplished by passing the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`; the Ginkgo test runner
|
||||
detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
|
||||
in place of Ginkgo's DefaultReporter.
|
||||
*/
|
||||
|
||||
type ForwardingReporter struct {
|
||||
serverHost string
|
||||
poster Poster
|
||||
outputInterceptor OutputInterceptor
|
||||
}
|
||||
|
||||
func NewForwardingReporter(serverHost string, poster Poster, outputInterceptor OutputInterceptor) *ForwardingReporter {
|
||||
return &ForwardingReporter{
|
||||
serverHost: serverHost,
|
||||
poster: poster,
|
||||
outputInterceptor: outputInterceptor,
|
||||
}
|
||||
}
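// Illustrative sketch (not part of the vendored source): how the pieces above
// could be wired together by hand; normally the ginkgo CLI does this. Note that
// *http.Client already provides Post(url, bodyType, body) and therefore satisfies
// the Poster interface, and NewServer / NewOutputInterceptor live in this package.
func exampleForwardingReporterWiring() *ForwardingReporter {
	server, err := NewServer(1) // aggregation server on an auto-selected port
	if err != nil {
		panic(err)
	}
	server.Start() // caller would eventually server.Close()

	return NewForwardingReporter(server.Address(), &http.Client{}, NewOutputInterceptor())
}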
|
||||
|
||||
func (reporter *ForwardingReporter) post(path string, data interface{}) {
|
||||
encoded, _ := json.Marshal(data)
|
||||
buffer := bytes.NewBuffer(encoded)
|
||||
reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
data := struct {
|
||||
Config config.GinkgoConfigType `json:"config"`
|
||||
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||
}{
|
||||
conf,
|
||||
summary,
|
||||
}
|
||||
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
reporter.post("/SpecSuiteWillBegin", data)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
reporter.post("/BeforeSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
reporter.post("/SpecWillRun", specSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
specSummary.CapturedOutput = output
|
||||
reporter.post("/SpecDidComplete", specSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.outputInterceptor.StartInterceptingOutput()
|
||||
setupSummary.CapturedOutput = output
|
||||
reporter.post("/AfterSuiteDidRun", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.outputInterceptor.StopInterceptingAndReturnOutput()
|
||||
reporter.post("/SpecSuiteDidEnd", summary)
|
||||
}
|
10
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
package remote
|
||||
|
||||
/*
|
||||
The OutputInterceptor is used by the ForwardingReporter to
|
||||
intercept and capture all stdout and stderr output during a test run.
|
||||
*/
|
||||
type OutputInterceptor interface {
|
||||
StartInterceptingOutput() error
|
||||
StopInterceptingAndReturnOutput() (string, error)
|
||||
}
|
52
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
// +build freebsd openbsd netbsd dragonfly darwin linux
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func NewOutputInterceptor() OutputInterceptor {
|
||||
return &outputInterceptor{}
|
||||
}
|
||||
|
||||
type outputInterceptor struct {
|
||||
redirectFile *os.File
|
||||
intercepting bool
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||
if interceptor.intercepting {
|
||||
return errors.New("Already intercepting output!")
|
||||
}
|
||||
interceptor.intercepting = true
|
||||
|
||||
var err error
|
||||
|
||||
interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
|
||||
syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||
if !interceptor.intercepting {
|
||||
return "", errors.New("Not intercepting output!")
|
||||
}
|
||||
|
||||
interceptor.redirectFile.Close()
|
||||
output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
|
||||
os.Remove(interceptor.redirectFile.Name())
|
||||
|
||||
interceptor.intercepting = false
|
||||
|
||||
return string(output), err
|
||||
}
|
33
vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// +build windows
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
func NewOutputInterceptor() OutputInterceptor {
|
||||
return &outputInterceptor{}
|
||||
}
|
||||
|
||||
type outputInterceptor struct {
|
||||
intercepting bool
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
|
||||
if interceptor.intercepting {
|
||||
return errors.New("Already intercepting output!")
|
||||
}
|
||||
interceptor.intercepting = true
|
||||
|
||||
// not working on windows...
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
|
||||
// not working on windows...
|
||||
interceptor.intercepting = false
|
||||
|
||||
return "", nil
|
||||
}
|
204
vendor/github.com/onsi/ginkgo/internal/remote/server.go
generated
vendored
Normal file
@ -0,0 +1,204 @@
|
||||
/*
|
||||
|
||||
The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
|
||||
This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
|
||||
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
/*
|
||||
Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
|
||||
It then forwards that communication to attached reporters.
|
||||
*/
|
||||
type Server struct {
|
||||
listener net.Listener
|
||||
reporters []reporters.Reporter
|
||||
alives []func() bool
|
||||
lock *sync.Mutex
|
||||
beforeSuiteData types.RemoteBeforeSuiteData
|
||||
parallelTotal int
|
||||
}
|
||||
|
||||
//Create a new server, automatically selecting a port
|
||||
func NewServer(parallelTotal int) (*Server, error) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Server{
|
||||
listener: listener,
|
||||
lock: &sync.Mutex{},
|
||||
alives: make([]func() bool, parallelTotal),
|
||||
beforeSuiteData: types.RemoteBeforeSuiteData{nil, types.RemoteBeforeSuiteStatePending},
|
||||
parallelTotal: parallelTotal,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//Start the server. You don't need to `go s.Start()`, just `s.Start()`
|
||||
func (server *Server) Start() {
|
||||
httpServer := &http.Server{}
|
||||
mux := http.NewServeMux()
|
||||
httpServer.Handler = mux
|
||||
|
||||
//streaming endpoints
|
||||
mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
|
||||
mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
|
||||
mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
|
||||
mux.HandleFunc("/SpecWillRun", server.specWillRun)
|
||||
mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
|
||||
mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
|
||||
|
||||
//synchronization endpoints
|
||||
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
|
||||
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
|
||||
|
||||
go httpServer.Serve(server.listener)
|
||||
}
|
||||
|
||||
//Stop the server
|
||||
func (server *Server) Close() {
|
||||
server.listener.Close()
|
||||
}
|
||||
|
||||
//The address at which the server can be reached. Pass this into the `ForwardingReporter`.
|
||||
func (server *Server) Address() string {
|
||||
return "http://" + server.listener.Addr().String()
|
||||
}
|
||||
|
||||
//
|
||||
// Streaming Endpoints
|
||||
//
|
||||
|
||||
//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
|
||||
func (server *Server) readAll(request *http.Request) []byte {
|
||||
defer request.Body.Close()
|
||||
body, _ := ioutil.ReadAll(request.Body)
|
||||
return body
|
||||
}
|
||||
|
||||
func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
|
||||
server.reporters = reporters
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
|
||||
var data struct {
|
||||
Config config.GinkgoConfigType `json:"config"`
|
||||
Summary *types.SuiteSummary `json:"suite-summary"`
|
||||
}
|
||||
|
||||
json.Unmarshal(body, &data)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteWillBegin(data.Config, data.Summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.BeforeSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var setupSummary *types.SetupSummary
|
||||
json.Unmarshal(body, &setupSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.AfterSuiteDidRun(setupSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var specSummary *types.SpecSummary
|
||||
json.Unmarshal(body, &specSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecDidComplete(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
|
||||
body := server.readAll(request)
|
||||
var suiteSummary *types.SuiteSummary
|
||||
json.Unmarshal(body, &suiteSummary)
|
||||
|
||||
for _, reporter := range server.reporters {
|
||||
reporter.SpecSuiteDidEnd(suiteSummary)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Synchronization Endpoints
|
||||
//
|
||||
|
||||
func (server *Server) RegisterAlive(node int, alive func() bool) {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
server.alives[node-1] = alive
|
||||
}
|
||||
|
||||
func (server *Server) nodeIsAlive(node int) bool {
|
||||
server.lock.Lock()
|
||||
defer server.lock.Unlock()
|
||||
alive := server.alives[node-1]
|
||||
if alive == nil {
|
||||
return true
|
||||
}
|
||||
return alive()
|
||||
}
|
||||
|
||||
func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
|
||||
if request.Method == "POST" {
|
||||
dec := json.NewDecoder(request.Body)
|
||||
dec.Decode(&(server.beforeSuiteData))
|
||||
} else {
|
||||
beforeSuiteData := server.beforeSuiteData
|
||||
if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
|
||||
beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
|
||||
}
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(beforeSuiteData)
|
||||
}
|
||||
}
|
||||
|
||||
func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
|
||||
afterSuiteData := types.RemoteAfterSuiteData{
|
||||
CanRun: true,
|
||||
}
|
||||
for i := 2; i <= server.parallelTotal; i++ {
|
||||
afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
|
||||
}
|
||||
|
||||
enc := json.NewEncoder(writer)
|
||||
enc.Encode(afterSuiteData)
|
||||
}
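To make the synchronization contract above concrete, here is a minimal sketch of a client that polls /RemoteAfterSuiteData until every other node has exited, which is what gates the SynchronizedAfterSuite body on node 1. The endpoint path and the types.RemoteAfterSuiteData shape come from the handler above; the base URL, poll interval, and program scaffolding are assumptions for illustration, not the actual Ginkgo client code.

// Illustrative sketch only, not part of the vendored source: poll the server's
// /RemoteAfterSuiteData endpoint until CanRun flips to true (all other nodes done).
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"github.com/onsi/ginkgo/types"
)

func waitUntilAfterSuiteCanRun(serverAddress string) error {
	for {
		resp, err := http.Get(serverAddress + "/RemoteAfterSuiteData")
		if err != nil {
			return err
		}
		var data types.RemoteAfterSuiteData
		err = json.NewDecoder(resp.Body).Decode(&data)
		resp.Body.Close()
		if err != nil {
			return err
		}
		if data.CanRun {
			return nil
		}
		time.Sleep(50 * time.Millisecond) // assumed poll interval
	}
}

func main() {
	// Assumed address; in practice this would be Server.Address() from above.
	if err := waitUntilAfterSuiteCanRun("http://127.0.0.1:8080"); err != nil {
		panic(err)
	}
	fmt.Println("all other nodes have exited; safe to run the AfterSuite body")
}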
|
55
vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
package spec
|
||||
|
||||
func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
|
||||
if length == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// We have more nodes than tests. Trivial case.
|
||||
if parallelTotal >= length {
|
||||
if parallelNode > length {
|
||||
return 0, 0
|
||||
} else {
|
||||
return parallelNode - 1, 1
|
||||
}
|
||||
}
|
||||
|
||||
// This is the minimum number of tests that a node will be required to run
|
||||
minTestsPerNode := length / parallelTotal
|
||||
|
||||
// This is the maximum number of tests that a node will be required to run
|
||||
// The algorithm guarantees that this will be at least the minimum number
|
||||
// and at most one more
|
||||
maxTestsPerNode := minTestsPerNode
|
||||
if length%parallelTotal != 0 {
|
||||
maxTestsPerNode++
|
||||
}
|
||||
|
||||
// Number of nodes that will have to run the maximum number of tests per node
|
||||
numMaxLoadNodes := length % parallelTotal
|
||||
|
||||
// Number of nodes that precede the current node and will have to run the maximum number of tests per node
|
||||
var numPrecedingMaxLoadNodes int
|
||||
if parallelNode > numMaxLoadNodes {
|
||||
numPrecedingMaxLoadNodes = numMaxLoadNodes
|
||||
} else {
|
||||
numPrecedingMaxLoadNodes = parallelNode - 1
|
||||
}
|
||||
|
||||
// Number of nodes that precede the current node and will have to run the minimum number of tests per node
|
||||
var numPrecedingMinLoadNodes int
|
||||
if parallelNode <= numMaxLoadNodes {
|
||||
numPrecedingMinLoadNodes = 0
|
||||
} else {
|
||||
numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
|
||||
}
|
||||
|
||||
// Evaluate the test start index and number of tests to run
|
||||
startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
|
||||
if parallelNode > numMaxLoadNodes {
|
||||
count = minTestsPerNode
|
||||
} else {
|
||||
count = maxTestsPerNode
|
||||
}
|
||||
return
|
||||
}
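As a quick sanity check of the partitioning above, here is a hypothetical in-package example test (not part of the vendored file) that splits 10 specs across 3 nodes: the single remainder spec goes to node 1, so the ranges are [0,4), [4,7), [7,10).

// Illustrative in-package example test; assumes it lives next to index_computer.go.
package spec

import "fmt"

func ExampleParallelizedIndexRange() {
	for node := 1; node <= 3; node++ {
		start, count := ParallelizedIndexRange(10, 3, node)
		fmt.Printf("node %d runs specs [%d, %d)\n", node, start, start+count)
	}
	// Output:
	// node 1 runs specs [0, 4)
	// node 2 runs specs [4, 7)
	// node 3 runs specs [7, 10)
}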
|
197
vendor/github.com/onsi/ginkgo/internal/spec/spec.go
generated
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
package spec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Spec struct {
|
||||
subject leafnodes.SubjectNode
|
||||
focused bool
|
||||
announceProgress bool
|
||||
|
||||
containers []*containernode.ContainerNode
|
||||
|
||||
state types.SpecState
|
||||
runTime time.Duration
|
||||
failure types.SpecFailure
|
||||
}
|
||||
|
||||
func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
|
||||
spec := &Spec{
|
||||
subject: subject,
|
||||
containers: containers,
|
||||
focused: subject.Flag() == types.FlagTypeFocused,
|
||||
announceProgress: announceProgress,
|
||||
}
|
||||
|
||||
spec.processFlag(subject.Flag())
|
||||
for i := len(containers) - 1; i >= 0; i-- {
|
||||
spec.processFlag(containers[i].Flag())
|
||||
}
|
||||
|
||||
return spec
|
||||
}
|
||||
|
||||
func (spec *Spec) processFlag(flag types.FlagType) {
|
||||
if flag == types.FlagTypeFocused {
|
||||
spec.focused = true
|
||||
} else if flag == types.FlagTypePending {
|
||||
spec.state = types.SpecStatePending
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) Skip() {
|
||||
spec.state = types.SpecStateSkipped
|
||||
}
|
||||
|
||||
func (spec *Spec) Failed() bool {
|
||||
return spec.state == types.SpecStateFailed || spec.state == types.SpecStatePanicked || spec.state == types.SpecStateTimedOut
|
||||
}
|
||||
|
||||
func (spec *Spec) Passed() bool {
|
||||
return spec.state == types.SpecStatePassed
|
||||
}
|
||||
|
||||
func (spec *Spec) Pending() bool {
|
||||
return spec.state == types.SpecStatePending
|
||||
}
|
||||
|
||||
func (spec *Spec) Skipped() bool {
|
||||
return spec.state == types.SpecStateSkipped
|
||||
}
|
||||
|
||||
func (spec *Spec) Focused() bool {
|
||||
return spec.focused
|
||||
}
|
||||
|
||||
func (spec *Spec) IsMeasurement() bool {
|
||||
return spec.subject.Type() == types.SpecComponentTypeMeasure
|
||||
}
|
||||
|
||||
func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
|
||||
componentTexts := make([]string, len(spec.containers)+1)
|
||||
componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
|
||||
|
||||
for i, container := range spec.containers {
|
||||
componentTexts[i] = container.Text()
|
||||
componentCodeLocations[i] = container.CodeLocation()
|
||||
}
|
||||
|
||||
componentTexts[len(spec.containers)] = spec.subject.Text()
|
||||
componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
|
||||
|
||||
return &types.SpecSummary{
|
||||
IsMeasurement: spec.IsMeasurement(),
|
||||
NumberOfSamples: spec.subject.Samples(),
|
||||
ComponentTexts: componentTexts,
|
||||
ComponentCodeLocations: componentCodeLocations,
|
||||
State: spec.state,
|
||||
RunTime: spec.runTime,
|
||||
Failure: spec.failure,
|
||||
Measurements: spec.measurementsReport(),
|
||||
SuiteID: suiteID,
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) ConcatenatedString() string {
|
||||
s := ""
|
||||
for _, container := range spec.containers {
|
||||
s += container.Text() + " "
|
||||
}
|
||||
|
||||
return s + spec.subject.Text()
|
||||
}
|
||||
|
||||
func (spec *Spec) Run(writer io.Writer) {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
spec.runTime = time.Since(startTime)
|
||||
}()
|
||||
|
||||
for sample := 0; sample < spec.subject.Samples(); sample++ {
|
||||
spec.runSample(sample, writer)
|
||||
|
||||
if spec.state != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) runSample(sample int, writer io.Writer) {
|
||||
spec.state = types.SpecStatePassed
|
||||
spec.failure = types.SpecFailure{}
|
||||
innerMostContainerIndexToUnwind := -1
|
||||
|
||||
defer func() {
|
||||
for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
|
||||
container := spec.containers[i]
|
||||
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
|
||||
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
|
||||
afterEachState, afterEachFailure := afterEach.Run()
|
||||
if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
|
||||
spec.state = afterEachState
|
||||
spec.failure = afterEachFailure
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for i, container := range spec.containers {
|
||||
innerMostContainerIndexToUnwind = i
|
||||
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
|
||||
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
|
||||
spec.state, spec.failure = beforeEach.Run()
|
||||
if spec.state != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, container := range spec.containers {
|
||||
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
|
||||
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
|
||||
spec.state, spec.failure = justBeforeEach.Run()
|
||||
if spec.state != types.SpecStatePassed {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spec.announceSubject(writer, spec.subject)
|
||||
spec.state, spec.failure = spec.subject.Run()
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
|
||||
if spec.announceProgress {
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
|
||||
if spec.announceProgress {
|
||||
nodeType := ""
|
||||
switch subject.Type() {
|
||||
case types.SpecComponentTypeIt:
|
||||
nodeType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
nodeType = "Measure"
|
||||
}
|
||||
s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
|
||||
writer.Write([]byte(s))
|
||||
}
|
||||
}
|
||||
|
||||
func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
|
||||
if !spec.IsMeasurement() || spec.Failed() {
|
||||
return map[string]*types.SpecMeasurement{}
|
||||
}
|
||||
|
||||
return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
|
||||
}
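The deferred loop in runSample above unwinds AfterEach blocks from the innermost entered container outward, even when a BeforeEach fails partway down. A standalone sketch of that control flow (plain Go, not part of the vendored file):

// Illustrative sketch of the unwind order used by runSample: AfterEach runs for every
// container whose BeforeEach was reached, innermost first.
package main

import "fmt"

func main() {
	containers := []string{"outer Describe", "middle Context", "inner Context"}
	innerMostContainerIndexToUnwind := -1

	defer func() {
		for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
			fmt.Println("AfterEach:", containers[i])
		}
	}()

	for i, name := range containers {
		innerMostContainerIndexToUnwind = i
		fmt.Println("BeforeEach:", name)
		if name == "middle Context" {
			return // simulate a failing BeforeEach: the inner container is never set up
		}
	}
}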
|
122
vendor/github.com/onsi/ginkgo/internal/spec/specs.go
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
package spec
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"regexp"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type Specs struct {
|
||||
specs []*Spec
|
||||
numberOfOriginalSpecs int
|
||||
hasProgrammaticFocus bool
|
||||
}
|
||||
|
||||
func NewSpecs(specs []*Spec) *Specs {
|
||||
return &Specs{
|
||||
specs: specs,
|
||||
numberOfOriginalSpecs: len(specs),
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) Specs() []*Spec {
|
||||
return e.specs
|
||||
}
|
||||
|
||||
func (e *Specs) NumberOfOriginalSpecs() int {
|
||||
return e.numberOfOriginalSpecs
|
||||
}
|
||||
|
||||
func (e *Specs) HasProgrammaticFocus() bool {
|
||||
return e.hasProgrammaticFocus
|
||||
}
|
||||
|
||||
func (e *Specs) Shuffle(r *rand.Rand) {
|
||||
sort.Sort(e)
|
||||
permutation := r.Perm(len(e.specs))
|
||||
shuffledSpecs := make([]*Spec, len(e.specs))
|
||||
for i, j := range permutation {
|
||||
shuffledSpecs[i] = e.specs[j]
|
||||
}
|
||||
e.specs = shuffledSpecs
|
||||
}
|
||||
|
||||
func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
|
||||
if focusString == "" && skipString == "" {
|
||||
e.applyProgrammaticFocus()
|
||||
} else {
|
||||
e.applyRegExpFocus(description, focusString, skipString)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyProgrammaticFocus() {
|
||||
e.hasProgrammaticFocus = false
|
||||
for _, spec := range e.specs {
|
||||
if spec.Focused() && !spec.Pending() {
|
||||
e.hasProgrammaticFocus = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if e.hasProgrammaticFocus {
|
||||
for _, spec := range e.specs {
|
||||
if !spec.Focused() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) applyRegExpFocus(description string, focusString string, skipString string) {
|
||||
for _, spec := range e.specs {
|
||||
matchesFocus := true
|
||||
matchesSkip := false
|
||||
|
||||
toMatch := []byte(description + " " + spec.ConcatenatedString())
|
||||
|
||||
if focusString != "" {
|
||||
focusFilter := regexp.MustCompile(focusString)
|
||||
matchesFocus = focusFilter.Match([]byte(toMatch))
|
||||
}
|
||||
|
||||
if skipString != "" {
|
||||
skipFilter := regexp.MustCompile(skipString)
|
||||
matchesSkip = skipFilter.Match([]byte(toMatch))
|
||||
}
|
||||
|
||||
if !matchesFocus || matchesSkip {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) SkipMeasurements() {
|
||||
for _, spec := range e.specs {
|
||||
if spec.IsMeasurement() {
|
||||
spec.Skip()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Specs) TrimForParallelization(total int, node int) {
|
||||
startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
|
||||
if count == 0 {
|
||||
e.specs = make([]*Spec, 0)
|
||||
} else {
|
||||
e.specs = e.specs[startIndex : startIndex+count]
|
||||
}
|
||||
}
|
||||
|
||||
//sort.Interface
|
||||
|
||||
func (e *Specs) Len() int {
|
||||
return len(e.specs)
|
||||
}
|
||||
|
||||
func (e *Specs) Less(i, j int) bool {
|
||||
return e.specs[i].ConcatenatedString() < e.specs[j].ConcatenatedString()
|
||||
}
|
||||
|
||||
func (e *Specs) Swap(i, j int) {
|
||||
e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
|
||||
}
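applyRegExpFocus above skips a spec when it fails the focus pattern or matches the skip pattern, with both patterns tested against the suite description concatenated with the spec's container and subject texts. A small standalone sketch of that decision (the description and spec text are made-up examples, not taken from this repository):

// Illustrative sketch, not part of the vendored file: the focus/skip decision applied
// to one concatenated spec description.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	description := "Multus CNI"
	concatenatedSpec := "when two networks are requested attaches a second interface"
	toMatch := description + " " + concatenatedSpec

	focusFilter := regexp.MustCompile("second interface") // --focus
	skipFilter := regexp.MustCompile("flaky")             // --skip

	matchesFocus := focusFilter.MatchString(toMatch)
	matchesSkip := skipFilter.MatchString(toMatch)

	// Mirrors: if !matchesFocus || matchesSkip { spec.Skip() }
	fmt.Println("spec will run:", matchesFocus && !matchesSkip)
}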
|
15
vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
package specrunner
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func randomID() string {
|
||||
b := make([]byte, 8)
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
|
||||
}
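randomID above is what stamps each suite run with a SuiteID. A hypothetical in-package snippet (not part of the vendored file) showing the shape of the value; when crypto/rand succeeds the ID is four 2-byte groups in hex, 19 characters long:

// Illustrative in-package snippet; assumes it sits alongside random_id.go.
package specrunner

import "fmt"

func demoRandomID() {
	id := randomID() // e.g. "a1b2-c3d4-e5f6-0789"
	fmt.Printf("suite id %q (%d characters)\n", id, len(id))
}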
|
324
vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
generated
vendored
Normal file
@ -0,0 +1,324 @@
|
||||
package specrunner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
Writer "github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
|
||||
"time"
|
||||
)
|
||||
|
||||
type SpecRunner struct {
|
||||
description string
|
||||
beforeSuiteNode leafnodes.SuiteNode
|
||||
specs *spec.Specs
|
||||
afterSuiteNode leafnodes.SuiteNode
|
||||
reporters []reporters.Reporter
|
||||
startTime time.Time
|
||||
suiteID string
|
||||
runningSpec *spec.Spec
|
||||
writer Writer.WriterInterface
|
||||
config config.GinkgoConfigType
|
||||
interrupted bool
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
|
||||
return &SpecRunner{
|
||||
description: description,
|
||||
beforeSuiteNode: beforeSuiteNode,
|
||||
specs: specs,
|
||||
afterSuiteNode: afterSuiteNode,
|
||||
reporters: reporters,
|
||||
writer: writer,
|
||||
config: config,
|
||||
suiteID: randomID(),
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) Run() bool {
|
||||
if runner.config.DryRun {
|
||||
runner.performDryRun()
|
||||
return true
|
||||
}
|
||||
|
||||
runner.reportSuiteWillBegin()
|
||||
go runner.registerForInterrupts()
|
||||
|
||||
suitePassed := runner.runBeforeSuite()
|
||||
|
||||
if suitePassed {
|
||||
suitePassed = runner.runSpecs()
|
||||
}
|
||||
|
||||
runner.blockForeverIfInterrupted()
|
||||
|
||||
suitePassed = runner.runAfterSuite() && suitePassed
|
||||
|
||||
runner.reportSuiteDidEnd(suitePassed)
|
||||
|
||||
return suitePassed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) performDryRun() {
|
||||
runner.reportSuiteWillBegin()
|
||||
|
||||
if runner.beforeSuiteNode != nil {
|
||||
summary := runner.beforeSuiteNode.Summary()
|
||||
summary.State = types.SpecStatePassed
|
||||
runner.reportBeforeSuite(summary)
|
||||
}
|
||||
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
summary := spec.Summary(runner.suiteID)
|
||||
runner.reportSpecWillRun(summary)
|
||||
if summary.State == types.SpecStateInvalid {
|
||||
summary.State = types.SpecStatePassed
|
||||
}
|
||||
runner.reportSpecDidComplete(summary, false)
|
||||
}
|
||||
|
||||
if runner.afterSuiteNode != nil {
|
||||
summary := runner.afterSuiteNode.Summary()
|
||||
summary.State = types.SpecStatePassed
|
||||
runner.reportAfterSuite(summary)
|
||||
}
|
||||
|
||||
runner.reportSuiteDidEnd(true)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runBeforeSuite() bool {
|
||||
if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
|
||||
return true
|
||||
}
|
||||
|
||||
runner.writer.Truncate()
|
||||
conf := runner.config
|
||||
passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||
if !passed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
|
||||
return passed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runAfterSuite() bool {
|
||||
if runner.afterSuiteNode == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
runner.writer.Truncate()
|
||||
conf := runner.config
|
||||
passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
|
||||
if !passed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
runner.reportAfterSuite(runner.afterSuiteNode.Summary())
|
||||
return passed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) runSpecs() bool {
|
||||
suiteFailed := false
|
||||
skipRemainingSpecs := false
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
if runner.wasInterrupted() {
|
||||
return suiteFailed
|
||||
}
|
||||
if skipRemainingSpecs {
|
||||
spec.Skip()
|
||||
}
|
||||
runner.reportSpecWillRun(spec.Summary(runner.suiteID))
|
||||
|
||||
if !spec.Skipped() && !spec.Pending() {
|
||||
runner.runningSpec = spec
|
||||
spec.Run(runner.writer)
|
||||
runner.runningSpec = nil
|
||||
if spec.Failed() {
|
||||
suiteFailed = true
|
||||
}
|
||||
} else if spec.Pending() && runner.config.FailOnPending {
|
||||
suiteFailed = true
|
||||
}
|
||||
|
||||
runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
|
||||
|
||||
if spec.Failed() && runner.config.FailFast {
|
||||
skipRemainingSpecs = true
|
||||
}
|
||||
}
|
||||
|
||||
return !suiteFailed
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
|
||||
if runner.runningSpec == nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return runner.runningSpec.Summary(runner.suiteID), true
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) registerForInterrupts() {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-c
|
||||
signal.Stop(c)
|
||||
runner.markInterrupted()
|
||||
go runner.registerForHardInterrupts()
|
||||
runner.writer.DumpOutWithHeader(`
|
||||
Received interrupt. Emitting contents of GinkgoWriter...
|
||||
---------------------------------------------------------
|
||||
`)
|
||||
if runner.afterSuiteNode != nil {
|
||||
fmt.Fprint(os.Stderr, `
|
||||
---------------------------------------------------------
|
||||
Received interrupt. Running AfterSuite...
|
||||
^C again to terminate immediately
|
||||
`)
|
||||
runner.runAfterSuite()
|
||||
}
|
||||
runner.reportSuiteDidEnd(false)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) registerForHardInterrupts() {
|
||||
c := make(chan os.Signal, 1)
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
<-c
|
||||
fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) blockForeverIfInterrupted() {
|
||||
runner.lock.Lock()
|
||||
interrupted := runner.interrupted
|
||||
runner.lock.Unlock()
|
||||
|
||||
if interrupted {
|
||||
select {}
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) markInterrupted() {
|
||||
runner.lock.Lock()
|
||||
defer runner.lock.Unlock()
|
||||
runner.interrupted = true
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) wasInterrupted() bool {
|
||||
runner.lock.Lock()
|
||||
defer runner.lock.Unlock()
|
||||
return runner.interrupted
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSuiteWillBegin() {
|
||||
runner.startTime = time.Now()
|
||||
summary := runner.summary(true)
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecSuiteWillBegin(runner.config, summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.BeforeSuiteDidRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.AfterSuiteDidRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
|
||||
runner.writer.Truncate()
|
||||
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecWillRun(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
|
||||
for i := len(runner.reporters) - 1; i >= 1; i-- {
|
||||
runner.reporters[i].SpecDidComplete(summary)
|
||||
}
|
||||
|
||||
if failed {
|
||||
runner.writer.DumpOut()
|
||||
}
|
||||
|
||||
runner.reporters[0].SpecDidComplete(summary)
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
|
||||
summary := runner.summary(success)
|
||||
summary.RunTime = time.Since(runner.startTime)
|
||||
for _, reporter := range runner.reporters {
|
||||
reporter.SpecSuiteDidEnd(summary)
|
||||
}
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
|
||||
count = 0
|
||||
|
||||
for _, spec := range runner.specs.Specs() {
|
||||
if filter(spec) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
return count
|
||||
}
|
||||
|
||||
func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
|
||||
numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return !ex.Skipped() && !ex.Pending()
|
||||
})
|
||||
|
||||
numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Pending()
|
||||
})
|
||||
|
||||
numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Skipped()
|
||||
})
|
||||
|
||||
numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Passed()
|
||||
})
|
||||
|
||||
numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
|
||||
return ex.Failed()
|
||||
})
|
||||
|
||||
if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
|
||||
numberOfFailedSpecs = numberOfSpecsThatWillBeRun
|
||||
}
|
||||
|
||||
return &types.SuiteSummary{
|
||||
SuiteDescription: runner.description,
|
||||
SuiteSucceeded: success,
|
||||
SuiteID: runner.suiteID,
|
||||
|
||||
NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
|
||||
NumberOfTotalSpecs: len(runner.specs.Specs()),
|
||||
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
|
||||
NumberOfPendingSpecs: numberOfPendingSpecs,
|
||||
NumberOfSkippedSpecs: numberOfSkippedSpecs,
|
||||
NumberOfPassedSpecs: numberOfPassedSpecs,
|
||||
NumberOfFailedSpecs: numberOfFailedSpecs,
|
||||
}
|
||||
}
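registerForInterrupts above turns the first SIGINT/SIGTERM into an orderly shutdown (dump GinkgoWriter output, run AfterSuite, report, exit) while a second signal aborts immediately via registerForHardInterrupts. A standalone sketch of that two-stage idiom, not part of the vendored file; the sleep stands in for the AfterSuite work:

// Illustrative sketch of the first/second-interrupt pattern used by the spec runner.
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	first := make(chan os.Signal, 1)
	signal.Notify(first, os.Interrupt, syscall.SIGTERM)
	<-first
	signal.Stop(first)

	// From here on, a second interrupt is handled by a separate goroutine and exits hard.
	go func() {
		second := make(chan os.Signal, 1)
		signal.Notify(second, os.Interrupt, syscall.SIGTERM)
		<-second
		fmt.Fprintln(os.Stderr, "received second interrupt, shutting down immediately")
		os.Exit(1)
	}()

	fmt.Fprintln(os.Stderr, "received interrupt, running cleanup (interrupt again to abort)")
	time.Sleep(2 * time.Second) // stand-in for the AfterSuite run
	os.Exit(1)
}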
|
171
vendor/github.com/onsi/ginkgo/internal/suite/suite.go
generated
vendored
Normal file
@ -0,0 +1,171 @@
|
||||
package suite
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/internal/containernode"
|
||||
"github.com/onsi/ginkgo/internal/failer"
|
||||
"github.com/onsi/ginkgo/internal/leafnodes"
|
||||
"github.com/onsi/ginkgo/internal/spec"
|
||||
"github.com/onsi/ginkgo/internal/specrunner"
|
||||
"github.com/onsi/ginkgo/internal/writer"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type ginkgoTestingT interface {
|
||||
Fail()
|
||||
}
|
||||
|
||||
type Suite struct {
|
||||
topLevelContainer *containernode.ContainerNode
|
||||
currentContainer *containernode.ContainerNode
|
||||
containerIndex int
|
||||
beforeSuiteNode leafnodes.SuiteNode
|
||||
afterSuiteNode leafnodes.SuiteNode
|
||||
runner *specrunner.SpecRunner
|
||||
failer *failer.Failer
|
||||
running bool
|
||||
}
|
||||
|
||||
func New(failer *failer.Failer) *Suite {
|
||||
topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
|
||||
|
||||
return &Suite{
|
||||
topLevelContainer: topLevelContainer,
|
||||
currentContainer: topLevelContainer,
|
||||
failer: failer,
|
||||
containerIndex: 1,
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
|
||||
if config.ParallelTotal < 1 {
|
||||
panic("ginkgo.parallel.total must be >= 1")
|
||||
}
|
||||
|
||||
if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
|
||||
panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
|
||||
}
|
||||
|
||||
r := rand.New(rand.NewSource(config.RandomSeed))
|
||||
suite.topLevelContainer.Shuffle(r)
|
||||
specs := suite.generateSpecs(description, config)
|
||||
suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)
|
||||
|
||||
suite.running = true
|
||||
success := suite.runner.Run()
|
||||
if !success {
|
||||
t.Fail()
|
||||
}
|
||||
return success, specs.HasProgrammaticFocus()
|
||||
}
|
||||
|
||||
func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
|
||||
specsSlice := []*spec.Spec{}
|
||||
suite.topLevelContainer.BackPropagateProgrammaticFocus()
|
||||
for _, collatedNodes := range suite.topLevelContainer.Collate() {
|
||||
specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
|
||||
}
|
||||
|
||||
specs := spec.NewSpecs(specsSlice)
|
||||
|
||||
if config.RandomizeAllSpecs {
|
||||
specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
|
||||
}
|
||||
|
||||
specs.ApplyFocus(description, config.FocusString, config.SkipString)
|
||||
|
||||
if config.SkipMeasurements {
|
||||
specs.SkipMeasurements()
|
||||
}
|
||||
|
||||
if config.ParallelTotal > 1 {
|
||||
specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
|
||||
}
|
||||
|
||||
return specs
|
||||
}
|
||||
|
||||
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
|
||||
return suite.runner.CurrentSpecSummary()
|
||||
}
|
||||
|
||||
func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.beforeSuiteNode != nil {
|
||||
panic("You may only call BeforeSuite once!")
|
||||
}
|
||||
suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.afterSuiteNode != nil {
|
||||
panic("You may only call AfterSuite once!")
|
||||
}
|
||||
suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.beforeSuiteNode != nil {
|
||||
panic("You may only call BeforeSuite once!")
|
||||
}
|
||||
suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.afterSuiteNode != nil {
|
||||
panic("You may only call AfterSuite once!")
|
||||
}
|
||||
suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
|
||||
}
|
||||
|
||||
func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
|
||||
container := containernode.New(text, flag, codeLocation)
|
||||
suite.currentContainer.PushContainerNode(container)
|
||||
|
||||
previousContainer := suite.currentContainer
|
||||
suite.currentContainer = container
|
||||
suite.containerIndex++
|
||||
|
||||
body()
|
||||
|
||||
suite.containerIndex--
|
||||
suite.currentContainer = previousContainer
|
||||
}
|
||||
|
||||
func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call It from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call Measure from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call BeforeEach from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call JustBeforeEach from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
||||
|
||||
func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
|
||||
if suite.running {
|
||||
suite.failer.Fail("You may only call AfterEach from within a Describe or Context", codeLocation)
|
||||
}
|
||||
suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
|
||||
}
|
76
vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
package testingtproxy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type failFunc func(message string, callerSkip ...int)
|
||||
|
||||
func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
|
||||
return &ginkgoTestingTProxy{
|
||||
fail: fail,
|
||||
offset: offset,
|
||||
writer: writer,
|
||||
}
|
||||
}
|
||||
|
||||
type ginkgoTestingTProxy struct {
|
||||
fail failFunc
|
||||
offset int
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
|
||||
t.fail(fmt.Sprintln(args...), t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
|
||||
t.fail(fmt.Sprintf(format, args...), t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Fail() {
|
||||
t.fail("failed", t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) FailNow() {
|
||||
t.fail("failed", t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
|
||||
t.fail(fmt.Sprintln(args...), t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
|
||||
t.fail(fmt.Sprintf(format, args...), t.offset)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
|
||||
fmt.Fprintln(t.writer, args...)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
|
||||
fmt.Fprintf(t.writer, format, args...)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Failed() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Parallel() {
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
|
||||
fmt.Println(args...)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
|
||||
fmt.Printf(format, args...)
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) SkipNow() {
|
||||
}
|
||||
|
||||
func (t *ginkgoTestingTProxy) Skipped() bool {
|
||||
return false
|
||||
}
|
31
vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
package writer
|
||||
|
||||
type FakeGinkgoWriter struct {
|
||||
EventStream []string
|
||||
}
|
||||
|
||||
func NewFake() *FakeGinkgoWriter {
|
||||
return &FakeGinkgoWriter{
|
||||
EventStream: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) AddEvent(event string) {
|
||||
writer.EventStream = append(writer.EventStream, event)
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) Truncate() {
|
||||
writer.EventStream = append(writer.EventStream, "TRUNCATE")
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) DumpOut() {
|
||||
writer.EventStream = append(writer.EventStream, "DUMP")
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
|
||||
writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
|
||||
}
|
||||
|
||||
func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
|
||||
return 0, nil
|
||||
}
|
71
vendor/github.com/onsi/ginkgo/internal/writer/writer.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
package writer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type WriterInterface interface {
|
||||
io.Writer
|
||||
|
||||
Truncate()
|
||||
DumpOut()
|
||||
DumpOutWithHeader(header string)
|
||||
}
|
||||
|
||||
type Writer struct {
|
||||
buffer *bytes.Buffer
|
||||
outWriter io.Writer
|
||||
lock *sync.Mutex
|
||||
stream bool
|
||||
}
|
||||
|
||||
func New(outWriter io.Writer) *Writer {
|
||||
return &Writer{
|
||||
buffer: &bytes.Buffer{},
|
||||
lock: &sync.Mutex{},
|
||||
outWriter: outWriter,
|
||||
stream: true,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) SetStream(stream bool) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
w.stream = stream
|
||||
}
|
||||
|
||||
func (w *Writer) Write(b []byte) (n int, err error) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
|
||||
if w.stream {
|
||||
return w.outWriter.Write(b)
|
||||
} else {
|
||||
return w.buffer.Write(b)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) Truncate() {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
w.buffer.Reset()
|
||||
}
|
||||
|
||||
func (w *Writer) DumpOut() {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
if !w.stream {
|
||||
w.buffer.WriteTo(w.outWriter)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) DumpOutWithHeader(header string) {
|
||||
w.lock.Lock()
|
||||
defer w.lock.Unlock()
|
||||
if !w.stream && w.buffer.Len() > 0 {
|
||||
w.outWriter.Write([]byte(header))
|
||||
w.buffer.WriteTo(w.outWriter)
|
||||
}
|
||||
}
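Writer above is the backing store for GinkgoWriter: with streaming disabled, writes accumulate in the buffer, Truncate discards them when a spec passes, and DumpOut flushes them when a spec fails. A hypothetical in-package snippet (not part of the vendored file) showing that lifecycle:

// Illustrative in-package snippet; assumes it sits alongside writer.go.
package writer

import (
	"fmt"
	"os"
)

func demoBufferedWriter() {
	w := New(os.Stdout)
	w.SetStream(false) // buffer instead of streaming straight through

	fmt.Fprintln(w, "diagnostics for a passing spec")
	w.Truncate() // spec passed: the buffered output is dropped

	fmt.Fprintln(w, "diagnostics for a failing spec")
	w.DumpOut() // spec failed: only now does the output reach os.Stdout
}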
|
83
vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
/*
|
||||
Ginkgo's Default Reporter
|
||||
|
||||
A number of command line flags are available to tweak Ginkgo's default output.
|
||||
|
||||
These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
|
||||
*/
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters/stenographer"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type DefaultReporter struct {
|
||||
config config.DefaultReporterConfigType
|
||||
stenographer stenographer.Stenographer
|
||||
specSummaries []*types.SpecSummary
|
||||
}
|
||||
|
||||
func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
|
||||
return &DefaultReporter{
|
||||
config: config,
|
||||
stenographer: stenographer,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
|
||||
if config.ParallelTotal > 1 {
|
||||
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
|
||||
}
|
||||
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
|
||||
reporter.stenographer.AnnounceSpecWillRun(specSummary)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
switch specSummary.State {
|
||||
case types.SpecStatePassed:
|
||||
if specSummary.IsMeasurement {
|
||||
reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
|
||||
} else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
|
||||
reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
|
||||
} else {
|
||||
reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
|
||||
}
|
||||
case types.SpecStatePending:
|
||||
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
|
||||
case types.SpecStateSkipped:
|
||||
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStateTimedOut:
|
||||
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStatePanicked:
|
||||
reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
case types.SpecStateFailed:
|
||||
reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
|
||||
}
|
||||
|
||||
reporter.specSummaries = append(reporter.specSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.stenographer.SummarizeFailures(reporter.specSummaries)
|
||||
reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
|
||||
}
|
59
vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
//FakeReporter is useful for testing purposes
|
||||
type FakeReporter struct {
|
||||
Config config.GinkgoConfigType
|
||||
|
||||
BeginSummary *types.SuiteSummary
|
||||
BeforeSuiteSummary *types.SetupSummary
|
||||
SpecWillRunSummaries []*types.SpecSummary
|
||||
SpecSummaries []*types.SpecSummary
|
||||
AfterSuiteSummary *types.SetupSummary
|
||||
EndSummary *types.SuiteSummary
|
||||
|
||||
SpecWillRunStub func(specSummary *types.SpecSummary)
|
||||
SpecDidCompleteStub func(specSummary *types.SpecSummary)
|
||||
}
|
||||
|
||||
func NewFakeReporter() *FakeReporter {
|
||||
return &FakeReporter{
|
||||
SpecWillRunSummaries: make([]*types.SpecSummary, 0),
|
||||
SpecSummaries: make([]*types.SpecSummary, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
fakeR.Config = config
|
||||
fakeR.BeginSummary = summary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
fakeR.BeforeSuiteSummary = setupSummary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
if fakeR.SpecWillRunStub != nil {
|
||||
fakeR.SpecWillRunStub(specSummary)
|
||||
}
|
||||
fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
if fakeR.SpecDidCompleteStub != nil {
|
||||
fakeR.SpecDidCompleteStub(specSummary)
|
||||
}
|
||||
fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
fakeR.AfterSuiteSummary = setupSummary
|
||||
}
|
||||
|
||||
func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
fakeR.EndSummary = summary
|
||||
}
|
139
vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
generated
vendored
Normal file
@ -0,0 +1,139 @@
|
||||
/*
|
||||
|
||||
JUnit XML Reporter for Ginkgo
|
||||
|
||||
For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
|
||||
|
||||
*/
|
||||
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type JUnitTestSuite struct {
|
||||
XMLName xml.Name `xml:"testsuite"`
|
||||
TestCases []JUnitTestCase `xml:"testcase"`
|
||||
Tests int `xml:"tests,attr"`
|
||||
Failures int `xml:"failures,attr"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
}
|
||||
|
||||
type JUnitTestCase struct {
|
||||
Name string `xml:"name,attr"`
|
||||
ClassName string `xml:"classname,attr"`
|
||||
FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
|
||||
Skipped *JUnitSkipped `xml:"skipped,omitempty"`
|
||||
Time float64 `xml:"time,attr"`
|
||||
}
|
||||
|
||||
type JUnitFailureMessage struct {
|
||||
Type string `xml:"type,attr"`
|
||||
Message string `xml:",chardata"`
|
||||
}
|
||||
|
||||
type JUnitSkipped struct {
|
||||
XMLName xml.Name `xml:"skipped"`
|
||||
}
|
||||
|
||||
type JUnitReporter struct {
|
||||
suite JUnitTestSuite
|
||||
filename string
|
||||
testSuiteName string
|
||||
}
|
||||
|
||||
//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
|
||||
func NewJUnitReporter(filename string) *JUnitReporter {
|
||||
return &JUnitReporter{
|
||||
filename: filename,
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
|
||||
reporter.suite = JUnitTestSuite{
|
||||
Tests: summary.NumberOfSpecsThatWillBeRun,
|
||||
TestCases: []JUnitTestCase{},
|
||||
}
|
||||
reporter.testSuiteName = summary.SuiteDescription
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("BeforeSuite", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
|
||||
reporter.handleSetupSummary("AfterSuite", setupSummary)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
|
||||
if setupSummary.State != types.SpecStatePassed {
|
||||
testCase := JUnitTestCase{
|
||||
Name: name,
|
||||
ClassName: reporter.testSuiteName,
|
||||
}
|
||||
|
||||
testCase.FailureMessage = &JUnitFailureMessage{
|
||||
Type: reporter.failureTypeForState(setupSummary.State),
|
||||
Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message),
|
||||
}
|
||||
testCase.Time = setupSummary.RunTime.Seconds()
|
||||
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
|
||||
testCase := JUnitTestCase{
|
||||
Name: strings.Join(specSummary.ComponentTexts[1:], " "),
|
||||
ClassName: reporter.testSuiteName,
|
||||
}
|
||||
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
|
||||
testCase.FailureMessage = &JUnitFailureMessage{
|
||||
Type: reporter.failureTypeForState(specSummary.State),
|
||||
Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message),
|
||||
}
|
||||
}
|
||||
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
|
||||
testCase.Skipped = &JUnitSkipped{}
|
||||
}
|
||||
testCase.Time = specSummary.RunTime.Seconds()
|
||||
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
|
||||
reporter.suite.Time = summary.RunTime.Seconds()
|
||||
reporter.suite.Failures = summary.NumberOfFailedSpecs
|
||||
file, err := os.Create(reporter.filename)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to create JUnit report file: %s\n\t%s", reporter.filename, err.Error())
|
||||
}
|
||||
defer file.Close()
|
||||
file.WriteString(xml.Header)
|
||||
encoder := xml.NewEncoder(file)
|
||||
encoder.Indent(" ", " ")
|
||||
err = encoder.Encode(reporter.suite)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to generate JUnit report\n\t%s", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
|
||||
switch state {
|
||||
case types.SpecStateFailed:
|
||||
return "Failure"
|
||||
case types.SpecStateTimedOut:
|
||||
return "Timeout"
|
||||
case types.SpecStatePanicked:
|
||||
return "Panic"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
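The JUnit reporter above is part of Ginkgo's public reporters package, so a suite can opt into XML output by passing it to the test entry point. A sketch of the usual wiring in a *_test.go bootstrap file; the suite name, output file name, and the gomega fail-handler line are assumptions based on the conventional pattern rather than anything in this repository:

// Illustrative sketch of wiring the JUnit reporter into a suite (not part of the vendored source).
package mysuite_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	. "github.com/onsi/gomega"
)

func TestMySuite(t *testing.T) {
	RegisterFailHandler(Fail)
	junitReporter := reporters.NewJUnitReporter("junit.xml")
	RunSpecsWithDefaultAndCustomReporters(t, "My Suite", []Reporter{junitReporter})
}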
|
15
vendor/github.com/onsi/ginkgo/reporters/reporter.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
package reporters
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
type Reporter interface {
|
||||
SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
|
||||
BeforeSuiteDidRun(setupSummary *types.SetupSummary)
|
||||
SpecWillRun(specSummary *types.SpecSummary)
|
||||
SpecDidComplete(specSummary *types.SpecSummary)
|
||||
AfterSuiteDidRun(setupSummary *types.SetupSummary)
|
||||
SpecSuiteDidEnd(summary *types.SuiteSummary)
|
||||
}
|
64
vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
package stenographer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
|
||||
var out string
|
||||
|
||||
if len(args) > 0 {
|
||||
out = fmt.Sprintf(format, args...)
|
||||
} else {
|
||||
out = format
|
||||
}
|
||||
|
||||
if s.color {
|
||||
return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
|
||||
} else {
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
|
||||
fmt.Println(text)
|
||||
fmt.Println(strings.Repeat(bannerCharacter, len(text)))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printNewLine() {
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printDelimiter() {
|
||||
fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
|
||||
fmt.Print(s.indent(indentation, format, args...))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
|
||||
fmt.Println(s.indent(indentation, format, args...))
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
|
||||
var text string
|
||||
|
||||
if len(args) > 0 {
|
||||
text = fmt.Sprintf(format, args...)
|
||||
} else {
|
||||
text = format
|
||||
}
|
||||
|
||||
stringArray := strings.Split(text, "\n")
|
||||
padding := ""
|
||||
if indentation >= 0 {
|
||||
padding = strings.Repeat(" ", indentation)
|
||||
}
|
||||
for i, s := range stringArray {
|
||||
stringArray[i] = fmt.Sprintf("%s%s", padding, s)
|
||||
}
|
||||
|
||||
return strings.Join(stringArray, "\n")
|
||||
}
|
138
vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
|
||||
package stenographer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
|
||||
return FakeStenographerCall{
|
||||
Method: method,
|
||||
Args: args,
|
||||
}
|
||||
}
|
||||
|
||||
type FakeStenographer struct {
|
||||
calls []FakeStenographerCall
|
||||
lock *sync.Mutex
|
||||
}
|
||||
|
||||
type FakeStenographerCall struct {
|
||||
Method string
|
||||
Args []interface{}
|
||||
}
|
||||
|
||||
func NewFakeStenographer() *FakeStenographer {
|
||||
stenographer := &FakeStenographer{
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
stenographer.Reset()
|
||||
return stenographer
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
return stenographer.calls
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) Reset() {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
stenographer.calls = make([]FakeStenographerCall, 0)
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
results := make([]FakeStenographerCall, 0)
|
||||
for _, call := range stenographer.calls {
|
||||
if call.Method == method {
|
||||
results = append(results, call)
|
||||
}
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
|
||||
stenographer.lock.Lock()
|
||||
defer stenographer.lock.Unlock()
|
||||
|
||||
	stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
}

func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
	stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
}

func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
	stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
}

func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
	stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct)
}

func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
	stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
}

func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
	stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
}

func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
	stenographer.registerCall("AnnounceSpecWillRun", spec)
}

func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
	stenographer.registerCall("AnnounceCapturedOutput", output)
}

func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
	stenographer.registerCall("AnnounceSuccesfulSpec", spec)
}

func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
	stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
}

func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
	stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
}

func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
	stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
}

func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
}

func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
}

func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
	stenographer.registerCall("SummarizeFailures", summaries)
}
549
vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
generated
vendored
Normal file
@ -0,0 +1,549 @@
|
||||
/*
|
||||
The stenographer is used by Ginkgo's reporters to generate output.
|
||||
|
||||
Move along, nothing to see here.
|
||||
*/
|
||||
|
||||
package stenographer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/ginkgo/types"
|
||||
)
|
||||
|
||||
const defaultStyle = "\x1b[0m"
|
||||
const boldStyle = "\x1b[1m"
|
||||
const redColor = "\x1b[91m"
|
||||
const greenColor = "\x1b[32m"
|
||||
const yellowColor = "\x1b[33m"
|
||||
const cyanColor = "\x1b[36m"
|
||||
const grayColor = "\x1b[90m"
|
||||
const lightGrayColor = "\x1b[37m"
|
||||
|
||||
type cursorStateType int
|
||||
|
||||
const (
|
||||
cursorStateTop cursorStateType = iota
|
||||
cursorStateStreaming
|
||||
cursorStateMidBlock
|
||||
cursorStateEndBlock
|
||||
)
|
||||
|
||||
type Stenographer interface {
|
||||
AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
|
||||
AnnounceAggregatedParallelRun(nodes int, succinct bool)
|
||||
AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool)
|
||||
AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
|
||||
AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
|
||||
|
||||
AnnounceSpecWillRun(spec *types.SpecSummary)
|
||||
AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||
AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
|
||||
|
||||
AnnounceCapturedOutput(output string)
|
||||
|
||||
AnnounceSuccesfulSpec(spec *types.SpecSummary)
|
||||
AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
|
||||
AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
|
||||
|
||||
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
|
||||
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
|
||||
AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
|
||||
|
||||
SummarizeFailures(summaries []*types.SpecSummary)
|
||||
}
|
||||
|
||||
func New(color bool) Stenographer {
|
||||
denoter := "•"
|
||||
if runtime.GOOS == "windows" {
|
||||
denoter = "+"
|
||||
}
|
||||
return &consoleStenographer{
|
||||
color: color,
|
||||
denoter: denoter,
|
||||
cursorState: cursorStateTop,
|
||||
}
|
||||
}
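// Illustrative note (not part of the vendored source): Ginkgo's reporters are
// expected to construct a Stenographer with New and call the Announce* methods
// defined above as the suite progresses. The suite name, seed, and spec counts
// below are made-up values.
//
//	s := stenographer.New(true)
//	s.AnnounceSuite("Multus CNI", 4242, false, false)
//	s.AnnounceNumberOfSpecs(12, 20, false)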
|
||||
|
||||
type consoleStenographer struct {
|
||||
color bool
|
||||
denoter string
|
||||
cursorState cursorStateType
|
||||
}
|
||||
|
||||
var alternatingColors = []string{defaultStyle, grayColor}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
|
||||
return
|
||||
}
|
||||
s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
|
||||
s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
|
||||
if randomizingAll {
|
||||
s.print(0, " - Will randomize all specs")
|
||||
}
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- node #%d ", node)
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Parallel test node %s/%s. Assigned %s of %s specs.",
|
||||
s.colorize(boldStyle, "%d", node),
|
||||
s.colorize(boldStyle, "%d", nodes),
|
||||
s.colorize(boldStyle, "%d", specsToRun),
|
||||
s.colorize(boldStyle, "%d", totalSpecs),
|
||||
)
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d nodes ", nodes)
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Running in parallel across %s nodes",
|
||||
s.colorize(boldStyle, "%d", nodes),
|
||||
)
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
|
||||
if succinct {
|
||||
s.print(0, "- %d/%d specs ", specsToRun, total)
|
||||
s.stream()
|
||||
return
|
||||
}
|
||||
s.println(0,
|
||||
"Will run %s of %s specs",
|
||||
s.colorize(boldStyle, "%d", specsToRun),
|
||||
s.colorize(boldStyle, "%d", total),
|
||||
)
|
||||
|
||||
s.printNewLine()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
|
||||
if succinct && summary.SuiteSucceeded {
|
||||
s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
|
||||
return
|
||||
}
|
||||
s.printNewLine()
|
||||
color := greenColor
|
||||
if !summary.SuiteSucceeded {
|
||||
color = redColor
|
||||
}
|
||||
s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
|
||||
|
||||
status := ""
|
||||
if summary.SuiteSucceeded {
|
||||
status = s.colorize(boldStyle+greenColor, "SUCCESS!")
|
||||
} else {
|
||||
status = s.colorize(boldStyle+redColor, "FAIL!")
|
||||
}
|
||||
|
||||
s.print(0,
|
||||
"%s -- %s | %s | %s | %s ",
|
||||
status,
|
||||
s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
|
||||
s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs),
|
||||
s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
|
||||
s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
|
||||
s.startBlock()
|
||||
for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
|
||||
s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
|
||||
}
|
||||
|
||||
indentation := 0
|
||||
if len(spec.ComponentTexts) > 2 {
|
||||
indentation = 1
|
||||
s.printNewLine()
|
||||
}
|
||||
index := len(spec.ComponentTexts) - 1
|
||||
s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
|
||||
s.printNewLine()
|
||||
s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
|
||||
s.printNewLine()
|
||||
s.midBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
|
||||
s.startBlock()
|
||||
var message string
|
||||
switch summary.State {
|
||||
case types.SpecStateFailed:
|
||||
message = "Failure"
|
||||
case types.SpecStatePanicked:
|
||||
message = "Panic"
|
||||
case types.SpecStateTimedOut:
|
||||
message = "Timeout"
|
||||
}
|
||||
|
||||
s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
|
||||
|
||||
s.printNewLine()
|
||||
s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
|
||||
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
|
||||
if output == "" {
|
||||
return
|
||||
}
|
||||
|
||||
s.startBlock()
|
||||
s.println(0, output)
|
||||
s.midBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
|
||||
s.print(0, s.colorize(greenColor, s.denoter))
|
||||
s.stream()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
|
||||
"",
|
||||
spec,
|
||||
succinct,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
|
||||
s.measurementReport(spec, succinct),
|
||||
spec,
|
||||
succinct,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
|
||||
if noisy {
|
||||
s.printBlockWithMessage(
|
||||
s.colorize(yellowColor, "P [PENDING]"),
|
||||
"",
|
||||
spec,
|
||||
false,
|
||||
)
|
||||
} else {
|
||||
s.print(0, s.colorize(yellowColor, "P"))
|
||||
s.stream()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
|
||||
if succinct || spec.Failure == (types.SpecFailure{}) {
|
||||
s.print(0, s.colorize(cyanColor, "S"))
|
||||
s.stream()
|
||||
} else {
|
||||
s.startBlock()
|
||||
s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||
|
||||
s.printNewLine()
|
||||
s.printSkip(indentation, spec.Failure)
|
||||
s.endBlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
|
||||
failingSpecs := []*types.SpecSummary{}
|
||||
|
||||
for _, summary := range summaries {
|
||||
if summary.HasFailureState() {
|
||||
failingSpecs = append(failingSpecs, summary)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failingSpecs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
s.printNewLine()
|
||||
s.printNewLine()
|
||||
plural := "s"
|
||||
if len(failingSpecs) == 1 {
|
||||
plural = ""
|
||||
}
|
||||
s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
|
||||
for _, summary := range failingSpecs {
|
||||
s.printNewLine()
|
||||
if summary.HasFailureState() {
|
||||
if summary.TimedOut() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
|
||||
} else if summary.Panicked() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
|
||||
} else if summary.Failed() {
|
||||
s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
|
||||
}
|
||||
s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
|
||||
s.printNewLine()
|
||||
s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) startBlock() {
|
||||
if s.cursorState == cursorStateStreaming {
|
||||
s.printNewLine()
|
||||
s.printDelimiter()
|
||||
} else if s.cursorState == cursorStateMidBlock {
|
||||
s.printNewLine()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) midBlock() {
|
||||
s.cursorState = cursorStateMidBlock
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) endBlock() {
|
||||
s.printDelimiter()
|
||||
s.cursorState = cursorStateEndBlock
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) stream() {
|
||||
s.cursorState = cursorStateStreaming
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
|
||||
s.startBlock()
|
||||
s.println(0, header)
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
|
||||
|
||||
if message != "" {
|
||||
s.printNewLine()
|
||||
s.println(indentation, message)
|
||||
}
|
||||
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
|
||||
s.startBlock()
|
||||
s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
|
||||
|
||||
indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
|
||||
|
||||
s.printNewLine()
|
||||
s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
|
||||
s.endBlock()
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
|
||||
switch failedComponentType {
|
||||
case types.SpecComponentTypeBeforeSuite:
|
||||
return " in Suite Setup (BeforeSuite)"
|
||||
case types.SpecComponentTypeAfterSuite:
|
||||
return " in Suite Teardown (AfterSuite)"
|
||||
case types.SpecComponentTypeBeforeEach:
|
||||
return " in Spec Setup (BeforeEach)"
|
||||
case types.SpecComponentTypeJustBeforeEach:
|
||||
return " in Spec Setup (JustBeforeEach)"
|
||||
case types.SpecComponentTypeAfterEach:
|
||||
return " in Spec Teardown (AfterEach)"
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
|
||||
s.println(indentation, s.colorize(cyanColor, spec.Message))
|
||||
s.printNewLine()
|
||||
s.println(indentation, spec.Location.String())
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
|
||||
if state == types.SpecStatePanicked {
|
||||
s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
|
||||
s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
|
||||
s.println(indentation, failure.Location.String())
|
||||
s.printNewLine()
|
||||
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||
s.println(indentation, failure.Location.FullStackTrace)
|
||||
} else {
|
||||
s.println(indentation, s.colorize(redColor, failure.Message))
|
||||
s.printNewLine()
|
||||
s.println(indentation, failure.Location.String())
|
||||
if fullTrace {
|
||||
s.printNewLine()
|
||||
s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
|
||||
s.println(indentation, failure.Location.FullStackTrace)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||
startIndex := 1
|
||||
indentation := 0
|
||||
|
||||
if len(componentTexts) == 1 {
|
||||
startIndex = 0
|
||||
}
|
||||
|
||||
for i := startIndex; i < len(componentTexts); i++ {
|
||||
if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
|
||||
color := redColor
|
||||
if state == types.SpecStateSkipped {
|
||||
color = cyanColor
|
||||
}
|
||||
blockType := ""
|
||||
switch failedComponentType {
|
||||
case types.SpecComponentTypeBeforeSuite:
|
||||
blockType = "BeforeSuite"
|
||||
case types.SpecComponentTypeAfterSuite:
|
||||
blockType = "AfterSuite"
|
||||
case types.SpecComponentTypeBeforeEach:
|
||||
blockType = "BeforeEach"
|
||||
case types.SpecComponentTypeJustBeforeEach:
|
||||
blockType = "JustBeforeEach"
|
||||
case types.SpecComponentTypeAfterEach:
|
||||
blockType = "AfterEach"
|
||||
case types.SpecComponentTypeIt:
|
||||
blockType = "It"
|
||||
case types.SpecComponentTypeMeasure:
|
||||
blockType = "Measurement"
|
||||
}
|
||||
if succinct {
|
||||
s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
|
||||
} else {
|
||||
s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
|
||||
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||
}
|
||||
} else {
|
||||
if succinct {
|
||||
s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
|
||||
} else {
|
||||
s.println(indentation, componentTexts[i])
|
||||
s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
|
||||
}
|
||||
}
|
||||
indentation++
|
||||
}
|
||||
|
||||
return indentation
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
|
||||
indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
|
||||
|
||||
if succinct {
|
||||
if len(componentTexts) > 0 {
|
||||
s.printNewLine()
|
||||
s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
|
||||
}
|
||||
s.printNewLine()
|
||||
indentation = 1
|
||||
} else {
|
||||
indentation--
|
||||
}
|
||||
|
||||
return indentation
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
|
||||
orderedKeys := make([]string, len(measurements))
|
||||
for key, measurement := range measurements {
|
||||
orderedKeys[measurement.Order] = key
|
||||
}
|
||||
return orderedKeys
|
||||
}
|
||||
|
||||
func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
|
||||
if len(spec.Measurements) == 0 {
|
||||
return "Found no measurements"
|
||||
}
|
||||
|
||||
message := []string{}
|
||||
orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
|
||||
|
||||
if succinct {
|
||||
message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
|
||||
for _, key := range orderedKeys {
|
||||
measurement := spec.Measurements[key]
|
||||
message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
|
||||
s.colorize(boldStyle, "%s", measurement.Name),
|
||||
measurement.SmallestLabel,
|
||||
s.colorize(greenColor, "%.3f", measurement.Smallest),
|
||||
measurement.Units,
|
||||
measurement.AverageLabel,
|
||||
s.colorize(cyanColor, "%.3f", measurement.Average),
|
||||
measurement.Units,
|
||||
s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
|
||||
measurement.Units,
|
||||
measurement.LargestLabel,
|
||||
s.colorize(redColor, "%.3f", measurement.Largest),
|
||||
measurement.Units,
|
||||
))
|
||||
}
|
||||
} else {
|
||||
message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
|
||||
for _, key := range orderedKeys {
|
||||
measurement := spec.Measurements[key]
|
||||
info := ""
|
||||
if measurement.Info != nil {
|
||||
message = append(message, fmt.Sprintf("%v", measurement.Info))
|
||||
}
|
||||
|
||||
message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
|
||||
s.colorize(boldStyle, "%s", measurement.Name),
|
||||
info,
|
||||
measurement.SmallestLabel,
|
||||
s.colorize(greenColor, "%.3f", measurement.Smallest),
|
||||
measurement.Units,
|
||||
measurement.LargestLabel,
|
||||
s.colorize(redColor, "%.3f", measurement.Largest),
|
||||
measurement.Units,
|
||||
measurement.AverageLabel,
|
||||
s.colorize(cyanColor, "%.3f", measurement.Average),
|
||||
measurement.Units,
|
||||
s.colorize(cyanColor, "%.3f", measurement.StdDeviation),
|
||||
measurement.Units,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(message, "\n")
|
||||
}
|
92
vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
/*

TeamCity Reporter for Ginkgo

Makes use of TeamCity's support for Service Messages
http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
*/

package reporters

import (
	"fmt"
	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
	"io"
	"strings"
)

const (
	messageId = "##teamcity"
)

type TeamCityReporter struct {
	writer        io.Writer
	testSuiteName string
}

func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
	return &TeamCityReporter{
		writer: writer,
	}
}

func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
	reporter.testSuiteName = escape(summary.SuiteDescription)
	fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']", messageId, reporter.testSuiteName)
}

func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("BeforeSuite", setupSummary)
}

func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
	reporter.handleSetupSummary("AfterSuite", setupSummary)
}

func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
	if setupSummary.State != types.SpecStatePassed {
		testName := escape(name)
		fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
		message := escape(setupSummary.Failure.ComponentCodeLocation.String())
		details := escape(setupSummary.Failure.Message)
		fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
		durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
		fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
	}
}

func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
	fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']", messageId, testName)
}

func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
	testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))

	if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
		message := escape(specSummary.Failure.ComponentCodeLocation.String())
		details := escape(specSummary.Failure.Message)
		fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']", messageId, testName, message, details)
	}
	if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
		fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']", messageId, testName)
	}

	durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
	fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']", messageId, testName, durationInMilliseconds)
}

func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
	fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']", messageId, reporter.testSuiteName)
}

func escape(output string) string {
	output = strings.Replace(output, "|", "||", -1)
	output = strings.Replace(output, "'", "|'", -1)
	output = strings.Replace(output, "\n", "|n", -1)
	output = strings.Replace(output, "\r", "|r", -1)
	output = strings.Replace(output, "[", "|[", -1)
	output = strings.Replace(output, "]", "|]", -1)
	return output
}
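A hedged wiring sketch (not part of this commit): hooking the TeamCity reporter above into a Ginkgo suite bootstrap so that service messages such as ##teamcity[testStarted name='...'] are written to stdout. The package and suite names are hypothetical; only APIs visible in this vendored code and Ginkgo/Gomega's documented entry points are used.

// teamcity_example_test.go (hypothetical)
package example_test

import (
	"os"
	"testing"

	. "github.com/onsi/ginkgo"
	"github.com/onsi/ginkgo/reporters"
	. "github.com/onsi/gomega"
)

func TestExampleSuite(t *testing.T) {
	// Route Gomega failures through Ginkgo.
	RegisterFailHandler(Fail)
	// Emit TeamCity service messages alongside the default reporter output.
	teamCity := reporters.NewTeamCityReporter(os.Stdout)
	RunSpecsWithDefaultAndCustomReporters(t, "Example Suite", []Reporter{teamCity})
}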
15
vendor/github.com/onsi/ginkgo/types/code_location.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
package types

import (
	"fmt"
)

type CodeLocation struct {
	FileName       string
	LineNumber     int
	FullStackTrace string
}

func (codeLocation CodeLocation) String() string {
	return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
}
30
vendor/github.com/onsi/ginkgo/types/synchronization.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
package types

import (
	"encoding/json"
)

type RemoteBeforeSuiteState int

const (
	RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota

	RemoteBeforeSuiteStatePending
	RemoteBeforeSuiteStatePassed
	RemoteBeforeSuiteStateFailed
	RemoteBeforeSuiteStateDisappeared
)

type RemoteBeforeSuiteData struct {
	Data  []byte
	State RemoteBeforeSuiteState
}

func (r RemoteBeforeSuiteData) ToJSON() []byte {
	data, _ := json.Marshal(r)
	return data
}

type RemoteAfterSuiteData struct {
	CanRun bool
}
143
vendor/github.com/onsi/ginkgo/types/types.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
||||
package types
|
||||
|
||||
import "time"
|
||||
|
||||
const GINKGO_FOCUS_EXIT_CODE = 197
|
||||
|
||||
type SuiteSummary struct {
|
||||
SuiteDescription string
|
||||
SuiteSucceeded bool
|
||||
SuiteID string
|
||||
|
||||
NumberOfSpecsBeforeParallelization int
|
||||
NumberOfTotalSpecs int
|
||||
NumberOfSpecsThatWillBeRun int
|
||||
NumberOfPendingSpecs int
|
||||
NumberOfSkippedSpecs int
|
||||
NumberOfPassedSpecs int
|
||||
NumberOfFailedSpecs int
|
||||
RunTime time.Duration
|
||||
}
|
||||
|
||||
type SpecSummary struct {
|
||||
ComponentTexts []string
|
||||
ComponentCodeLocations []CodeLocation
|
||||
|
||||
State SpecState
|
||||
RunTime time.Duration
|
||||
Failure SpecFailure
|
||||
IsMeasurement bool
|
||||
NumberOfSamples int
|
||||
Measurements map[string]*SpecMeasurement
|
||||
|
||||
CapturedOutput string
|
||||
SuiteID string
|
||||
}
|
||||
|
||||
func (s SpecSummary) HasFailureState() bool {
|
||||
return s.State.IsFailure()
|
||||
}
|
||||
|
||||
func (s SpecSummary) TimedOut() bool {
|
||||
return s.State == SpecStateTimedOut
|
||||
}
|
||||
|
||||
func (s SpecSummary) Panicked() bool {
|
||||
return s.State == SpecStatePanicked
|
||||
}
|
||||
|
||||
func (s SpecSummary) Failed() bool {
|
||||
return s.State == SpecStateFailed
|
||||
}
|
||||
|
||||
func (s SpecSummary) Passed() bool {
|
||||
return s.State == SpecStatePassed
|
||||
}
|
||||
|
||||
func (s SpecSummary) Skipped() bool {
|
||||
return s.State == SpecStateSkipped
|
||||
}
|
||||
|
||||
func (s SpecSummary) Pending() bool {
|
||||
return s.State == SpecStatePending
|
||||
}
|
||||
|
||||
type SetupSummary struct {
|
||||
ComponentType SpecComponentType
|
||||
CodeLocation CodeLocation
|
||||
|
||||
State SpecState
|
||||
RunTime time.Duration
|
||||
Failure SpecFailure
|
||||
|
||||
CapturedOutput string
|
||||
SuiteID string
|
||||
}
|
||||
|
||||
type SpecFailure struct {
|
||||
Message string
|
||||
Location CodeLocation
|
||||
ForwardedPanic string
|
||||
|
||||
ComponentIndex int
|
||||
ComponentType SpecComponentType
|
||||
ComponentCodeLocation CodeLocation
|
||||
}
|
||||
|
||||
type SpecMeasurement struct {
|
||||
Name string
|
||||
Info interface{}
|
||||
Order int
|
||||
|
||||
Results []float64
|
||||
|
||||
Smallest float64
|
||||
Largest float64
|
||||
Average float64
|
||||
StdDeviation float64
|
||||
|
||||
SmallestLabel string
|
||||
LargestLabel string
|
||||
AverageLabel string
|
||||
Units string
|
||||
}
|
||||
|
||||
type SpecState uint
|
||||
|
||||
const (
|
||||
SpecStateInvalid SpecState = iota
|
||||
|
||||
SpecStatePending
|
||||
SpecStateSkipped
|
||||
SpecStatePassed
|
||||
SpecStateFailed
|
||||
SpecStatePanicked
|
||||
SpecStateTimedOut
|
||||
)
|
||||
|
||||
func (state SpecState) IsFailure() bool {
|
||||
return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
|
||||
}
|
||||
|
||||
type SpecComponentType uint
|
||||
|
||||
const (
|
||||
SpecComponentTypeInvalid SpecComponentType = iota
|
||||
|
||||
SpecComponentTypeContainer
|
||||
SpecComponentTypeBeforeSuite
|
||||
SpecComponentTypeAfterSuite
|
||||
SpecComponentTypeBeforeEach
|
||||
SpecComponentTypeJustBeforeEach
|
||||
SpecComponentTypeAfterEach
|
||||
SpecComponentTypeIt
|
||||
SpecComponentTypeMeasure
|
||||
)
|
||||
|
||||
type FlagType uint
|
||||
|
||||
const (
|
||||
FlagTypeNone FlagType = iota
|
||||
FlagTypeFocused
|
||||
FlagTypePending
|
||||
)
|
3
vendor/github.com/onsi/gomega/.gitignore
generated
vendored
Normal file
@ -0,0 +1,3 @@
.DS_Store
*.test
.
11
vendor/github.com/onsi/gomega/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,11 @@
language: go
go:
  - 1.4
  - 1.5

install:
  - go get -v ./...
  - go get github.com/onsi/ginkgo
  - go install github.com/onsi/ginkgo/ginkgo

script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
70
vendor/github.com/onsi/gomega/CHANGELOG.md
generated
vendored
Normal file
@ -0,0 +1,70 @@
## HEAD

Improvements:

- Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout.
- `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. This is always a mistake that can hide failing tests.
- `Receive()` no longer errors when passed a closed channel; it's perfectly fine to attempt to read from a closed channel, so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel.
- Added `HavePrefix` and `HaveSuffix` matchers.
- `ghttp` can now handle concurrent requests.
- Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`.
- Improved `ghttp`'s behavior around failing assertions and panics:
    - If a registered handler makes a failing assertion `ghttp` will return `500`.
    - If a registered handler panics, `ghttp` will return `500` *and* fail the test. This is new behavior that may cause existing code to break. This code is almost certainly incorrect and creating a false positive.
- `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives.
- Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher
- Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers

Bug Fixes:
- gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure.
- `ContainElement` no longer bails if a passed-in matcher errors.

## 1.0 (8/2/2014)

No changes. Dropping "beta" from the version number.

## 1.0.0-beta (7/8/2014)
Breaking Changes:

- Changed OmegaMatcher interface. Instead of having `Match` return failure messages, two new methods `FailureMessage` and `NegatedFailureMessage` are called instead.
- Moved and renamed OmegaFailHandler to types.GomegaFailHandler and OmegaMatcher to types.GomegaMatcher. Any references to OmegaMatcher in any custom matchers will need to be changed to point to types.GomegaMatcher

New Test-Support Features:

- `ghttp`: supports testing http clients
    - Provides a flexible fake http server
    - Provides a collection of chainable http handlers that perform assertions.
- `gbytes`: supports making ordered assertions against streams of data
    - Provides a `gbytes.Buffer`
    - Provides a `Say` matcher to perform ordered assertions against output data
- `gexec`: supports testing external processes
    - Provides support for building Go binaries
    - Wraps and starts `exec.Cmd` commands
    - Makes it easy to assert against stdout and stderr
    - Makes it easy to send signals and wait for processes to exit
    - Provides an `Exit` matcher to assert against exit code.

DSL Changes:

- `Eventually` and `Consistently` can accept `time.Duration` interval and polling inputs.
- The default timeouts for `Eventually` and `Consistently` are now configurable.

New Matchers:

- `ConsistOf`: order-independent assertion against the elements of an array/slice or keys of a map.
- `BeTemporally`: like `BeNumerically` but for `time.Time`
- `HaveKeyWithValue`: asserts a map has a given key with the given value.

Updated Matchers:

- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher.
- Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed.

Misc:

- Start using semantic versioning
- Start maintaining changelog

Major refactor:

- Pull out Gomega's internal to `internal`
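A hedged illustration of the `BeSent` + `Eventually` pairing described in the changelog entry above; the test name, channel, and values are made up, and only matchers named in this vendored copy of Gomega are used.

// besent_example_test.go (hypothetical)
package example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestBeSentWithEventually(t *testing.T) {
	RegisterTestingT(t)

	c := make(chan string) // unbuffered: a bare send blocks until someone receives

	received := make(chan string, 1)
	go func() {
		// Simulate a consumer that is not ready immediately.
		time.Sleep(10 * time.Millisecond)
		received <- <-c
	}()

	// Eventually retries the non-blocking send until it succeeds or the default 1s timeout elapses.
	Eventually(c).Should(BeSent("hello"))
	Eventually(received).Should(Receive(Equal("hello")))
}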
20
vendor/github.com/onsi/gomega/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
Copyright (c) 2013-2014 Onsi Fakhouri

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
17
vendor/github.com/onsi/gomega/README.md
generated
vendored
Normal file
@ -0,0 +1,17 @@


[](https://travis-ci.org/onsi/gomega)

Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).

To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega).

## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang

Learn more about Ginkgo [here](http://onsi.github.io/ginkgo/)

## License

Gomega is MIT-Licensed

The `ConsistOf` matcher uses [goraph](https://github.com/amitkgupta/goraph) which is embedded in the source to simplify distribution. goraph has an MIT license.
276
vendor/github.com/onsi/gomega/format/format.go
generated
vendored
Normal file
@ -0,0 +1,276 @@
|
||||
/*
|
||||
Gomega's format package pretty-prints objects. It explores input objects recursively and generates formatted, indented output with type information.
|
||||
*/
|
||||
package format
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
|
||||
var MaxDepth = uint(10)
|
||||
|
||||
/*
|
||||
By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
|
||||
|
||||
Set UseStringerRepresentation = true to use GoString (for fmt.GoStringers) or String (for fmt.Stringer) instead.
|
||||
|
||||
Note that GoString and String don't always have all the information you need to understand why a test failed!
|
||||
*/
|
||||
var UseStringerRepresentation = false
|
||||
|
||||
//The default indentation string emitted by the format package
|
||||
var Indent = " "
|
||||
|
||||
var longFormThreshold = 20
|
||||
|
||||
/*
|
||||
Generates a formatted matcher success/failure message of the form:
|
||||
|
||||
Expected
|
||||
<pretty printed actual>
|
||||
<message>
|
||||
<pretty printed expected>
|
||||
|
||||
If expected is omitted, then the message looks like:
|
||||
|
||||
Expected
|
||||
<pretty printed actual>
|
||||
<message>
|
||||
*/
|
||||
func Message(actual interface{}, message string, expected ...interface{}) string {
|
||||
if len(expected) == 0 {
|
||||
return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message)
|
||||
} else {
|
||||
return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1))
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Pretty prints the passed in object at the passed in indentation level.
|
||||
|
||||
Object recurses into deeply nested objects emitting pretty-printed representations of their components.
|
||||
|
||||
Modify format.MaxDepth to control how deep the recursion is allowed to go
|
||||
Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of
|
||||
recursing into the object.
|
||||
*/
|
||||
func Object(object interface{}, indentation uint) string {
|
||||
indent := strings.Repeat(Indent, int(indentation))
|
||||
value := reflect.ValueOf(object)
|
||||
return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
|
||||
}
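// Illustrative note (not part of the vendored source): given the definitions
// above, a call such as
//
//	format.Object(struct{ Name string }{Name: "eth0"}, 1)
//
// yields an indented, type-annotated rendering roughly of the form
//
//	    <struct { Name string }>: {Name: "eth0"}
//
// (exact spacing depends on Indent and longFormThreshold), while
// format.Message(actual, "to equal", expected) wraps two such renderings in the
// "Expected ... <message> ..." failure text described above.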
|
||||
|
||||
/*
|
||||
IndentString takes a string and indents each line by the specified amount.
|
||||
*/
|
||||
func IndentString(s string, indentation uint) string {
|
||||
components := strings.Split(s, "\n")
|
||||
result := ""
|
||||
indent := strings.Repeat(Indent, int(indentation))
|
||||
for i, component := range components {
|
||||
result += indent + component
|
||||
if i < len(components)-1 {
|
||||
result += "\n"
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func formatType(object interface{}) string {
|
||||
t := reflect.TypeOf(object)
|
||||
if t == nil {
|
||||
return "nil"
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Chan:
|
||||
v := reflect.ValueOf(object)
|
||||
return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
|
||||
case reflect.Ptr:
|
||||
return fmt.Sprintf("%T | %p", object, object)
|
||||
case reflect.Slice:
|
||||
v := reflect.ValueOf(object)
|
||||
return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
|
||||
case reflect.Map:
|
||||
v := reflect.ValueOf(object)
|
||||
return fmt.Sprintf("%T | len:%d", object, v.Len())
|
||||
default:
|
||||
return fmt.Sprintf("%T", object)
|
||||
}
|
||||
}
|
||||
|
||||
func formatValue(value reflect.Value, indentation uint) string {
|
||||
if indentation > MaxDepth {
|
||||
return "..."
|
||||
}
|
||||
|
||||
if isNilValue(value) {
|
||||
return "nil"
|
||||
}
|
||||
|
||||
if UseStringerRepresentation {
|
||||
if value.CanInterface() {
|
||||
obj := value.Interface()
|
||||
switch x := obj.(type) {
|
||||
case fmt.GoStringer:
|
||||
return x.GoString()
|
||||
case fmt.Stringer:
|
||||
return x.String()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch value.Kind() {
|
||||
case reflect.Bool:
|
||||
return fmt.Sprintf("%v", value.Bool())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return fmt.Sprintf("%v", value.Int())
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return fmt.Sprintf("%v", value.Uint())
|
||||
case reflect.Uintptr:
|
||||
return fmt.Sprintf("0x%x", value.Uint())
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return fmt.Sprintf("%v", value.Float())
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return fmt.Sprintf("%v", value.Complex())
|
||||
case reflect.Chan:
|
||||
return fmt.Sprintf("0x%x", value.Pointer())
|
||||
case reflect.Func:
|
||||
return fmt.Sprintf("0x%x", value.Pointer())
|
||||
case reflect.Ptr:
|
||||
return formatValue(value.Elem(), indentation)
|
||||
case reflect.Slice:
|
||||
if value.Type().Elem().Kind() == reflect.Uint8 {
|
||||
return formatString(value.Bytes(), indentation)
|
||||
}
|
||||
return formatSlice(value, indentation)
|
||||
case reflect.String:
|
||||
return formatString(value.String(), indentation)
|
||||
case reflect.Array:
|
||||
return formatSlice(value, indentation)
|
||||
case reflect.Map:
|
||||
return formatMap(value, indentation)
|
||||
case reflect.Struct:
|
||||
return formatStruct(value, indentation)
|
||||
case reflect.Interface:
|
||||
return formatValue(value.Elem(), indentation)
|
||||
default:
|
||||
if value.CanInterface() {
|
||||
return fmt.Sprintf("%#v", value.Interface())
|
||||
} else {
|
||||
return fmt.Sprintf("%#v", value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatString(object interface{}, indentation uint) string {
|
||||
if indentation == 1 {
|
||||
s := fmt.Sprintf("%s", object)
|
||||
components := strings.Split(s, "\n")
|
||||
result := ""
|
||||
for i, component := range components {
|
||||
if i == 0 {
|
||||
result += component
|
||||
} else {
|
||||
result += Indent + component
|
||||
}
|
||||
if i < len(components)-1 {
|
||||
result += "\n"
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s", result)
|
||||
} else {
|
||||
return fmt.Sprintf("%q", object)
|
||||
}
|
||||
}
|
||||
|
||||
func formatSlice(v reflect.Value, indentation uint) string {
|
||||
l := v.Len()
|
||||
result := make([]string, l)
|
||||
longest := 0
|
||||
for i := 0; i < l; i++ {
|
||||
result[i] = formatValue(v.Index(i), indentation+1)
|
||||
if len(result[i]) > longest {
|
||||
longest = len(result[i])
|
||||
}
|
||||
}
|
||||
|
||||
if longest > longFormThreshold {
|
||||
indenter := strings.Repeat(Indent, int(indentation))
|
||||
return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
|
||||
} else {
|
||||
return fmt.Sprintf("[%s]", strings.Join(result, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func formatMap(v reflect.Value, indentation uint) string {
|
||||
l := v.Len()
|
||||
result := make([]string, l)
|
||||
|
||||
longest := 0
|
||||
for i, key := range v.MapKeys() {
|
||||
value := v.MapIndex(key)
|
||||
result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1))
|
||||
if len(result[i]) > longest {
|
||||
longest = len(result[i])
|
||||
}
|
||||
}
|
||||
|
||||
if longest > longFormThreshold {
|
||||
indenter := strings.Repeat(Indent, int(indentation))
|
||||
return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
|
||||
} else {
|
||||
return fmt.Sprintf("{%s}", strings.Join(result, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func formatStruct(v reflect.Value, indentation uint) string {
|
||||
t := v.Type()
|
||||
|
||||
l := v.NumField()
|
||||
result := []string{}
|
||||
longest := 0
|
||||
for i := 0; i < l; i++ {
|
||||
structField := t.Field(i)
|
||||
fieldEntry := v.Field(i)
|
||||
representation := fmt.Sprintf("%s: %s", structField.Name, formatValue(fieldEntry, indentation+1))
|
||||
result = append(result, representation)
|
||||
if len(representation) > longest {
|
||||
longest = len(representation)
|
||||
}
|
||||
}
|
||||
if longest > longFormThreshold {
|
||||
indenter := strings.Repeat(Indent, int(indentation))
|
||||
return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter)
|
||||
} else {
|
||||
return fmt.Sprintf("{%s}", strings.Join(result, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func isNilValue(a reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Invalid:
|
||||
return true
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return a.IsNil()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func isNil(a interface{}) bool {
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
switch reflect.TypeOf(a).Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return reflect.ValueOf(a).IsNil()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
335
vendor/github.com/onsi/gomega/gomega_dsl.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
|
||||
/*
|
||||
Gomega is the Ginkgo BDD-style testing framework's preferred matcher library.
|
||||
|
||||
The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/
|
||||
|
||||
Gomega on Github: http://github.com/onsi/gomega
|
||||
|
||||
Learn more about Ginkgo online: http://onsi.github.io/ginkgo
|
||||
|
||||
Ginkgo on Github: http://github.com/onsi/ginkgo
|
||||
|
||||
Gomega is MIT-Licensed
|
||||
*/
|
||||
package gomega
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/gomega/internal/assertion"
|
||||
"github.com/onsi/gomega/internal/asyncassertion"
|
||||
"github.com/onsi/gomega/internal/testingtsupport"
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
const GOMEGA_VERSION = "1.0"
|
||||
|
||||
const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
|
||||
If you're using Ginkgo then you probably forgot to put your assertion in an It().
|
||||
Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
|
||||
`
|
||||
|
||||
var globalFailHandler types.GomegaFailHandler
|
||||
|
||||
var defaultEventuallyTimeout = time.Second
|
||||
var defaultEventuallyPollingInterval = 10 * time.Millisecond
|
||||
var defaultConsistentlyDuration = 100 * time.Millisecond
|
||||
var defaultConsistentlyPollingInterval = 10 * time.Millisecond
|
||||
|
||||
//RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
|
||||
//the fail handler passed into RegisterFailHandler is called.
|
||||
func RegisterFailHandler(handler types.GomegaFailHandler) {
|
||||
globalFailHandler = handler
|
||||
}
|
||||
|
||||
//RegisterTestingT connects Gomega to Golang's XUnit style
|
||||
//Testing.T tests. You'll need to call this at the top of each XUnit style test:
|
||||
//
|
||||
// func TestFarmHasCow(t *testing.T) {
|
||||
// RegisterTestingT(t)
|
||||
//
|
||||
// f := farm.New([]string{"Cow", "Horse"})
|
||||
// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
|
||||
// }
|
||||
//
|
||||
// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
|
||||
// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
|
||||
// in parallel as the global fail handler cannot point to more than one testing.T at a time.
|
||||
//
|
||||
// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
|
||||
func RegisterTestingT(t types.GomegaTestingT) {
|
||||
RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t))
|
||||
}
|
||||
|
||||
//InterceptGomegaFailures runs a given callback and returns an array of
|
||||
//failure messages generated by any Gomega assertions within the callback.
|
||||
//
|
||||
//This is accomplished by temporarily replacing the *global* fail handler
|
||||
//with a fail handler that simply annotates failures. The original fail handler
|
||||
//is reset when InterceptGomegaFailures returns.
|
||||
//
|
||||
//This is most useful when testing custom matchers, but can also be used to check
|
||||
//on a value using a Gomega assertion without causing a test failure.
|
||||
func InterceptGomegaFailures(f func()) []string {
|
||||
originalHandler := globalFailHandler
|
||||
failures := []string{}
|
||||
RegisterFailHandler(func(message string, callerSkip ...int) {
|
||||
failures = append(failures, message)
|
||||
})
|
||||
f()
|
||||
RegisterFailHandler(originalHandler)
|
||||
return failures
|
||||
}
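// Illustrative sketch (not part of the vendored source): InterceptGomegaFailures
// is handy when testing custom matchers, because the failure text can be
// captured instead of failing the surrounding test. The strings below are made up.
//
//	failures := InterceptGomegaFailures(func() {
//		Ω("actual").Should(Equal("expected")) // deliberately fails
//	})
//	Ω(failures).Should(HaveLen(1))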
|
||||
|
||||
//Ω wraps an actual value allowing assertions to be made on it:
|
||||
// Ω("foo").Should(Equal("foo"))
|
||||
//
|
||||
//If Ω is passed more than one argument it will pass the *first* argument to the matcher.
|
||||
//All subsequent arguments will be required to be nil/zero.
|
||||
//
|
||||
//This is convenient if you want to make an assertion on a method/function that returns
|
||||
//a value and an error - a common pattern in Go.
|
||||
//
|
||||
//For example, given a function with signature:
|
||||
// func MyAmazingThing() (int, error)
|
||||
//
|
||||
//Then:
|
||||
// Ω(MyAmazingThing()).Should(Equal(3))
|
||||
//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
|
||||
//
|
||||
//Ω and Expect are identical
|
||||
func Ω(actual interface{}, extra ...interface{}) GomegaAssertion {
|
||||
return ExpectWithOffset(0, actual, extra...)
|
||||
}
|
||||
|
||||
//Expect wraps an actual value allowing assertions to be made on it:
|
||||
// Expect("foo").To(Equal("foo"))
|
||||
//
|
||||
//If Expect is passed more than one argument it will pass the *first* argument to the matcher.
|
||||
//All subsequent arguments will be required to be nil/zero.
|
||||
//
|
||||
//This is convenient if you want to make an assertion on a method/function that returns
|
||||
//a value and an error - a common pattern in Go.
|
||||
//
|
||||
//For example, given a function with signature:
|
||||
// func MyAmazingThing() (int, error)
|
||||
//
|
||||
//Then:
|
||||
// Expect(MyAmazingThing()).Should(Equal(3))
|
||||
//Will succeed only if `MyAmazingThing()` returns `(3, nil)`
|
||||
//
|
||||
//Expect and Ω are identical
|
||||
func Expect(actual interface{}, extra ...interface{}) GomegaAssertion {
|
||||
return ExpectWithOffset(0, actual, extra...)
|
||||
}
|
||||
|
||||
//ExpectWithOffset wraps an actual value allowing assertions to be made on it:
|
||||
// ExpectWithOffset(1, "foo").To(Equal("foo"))
|
||||
//
|
||||
//Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
|
||||
//this is used to modify the call-stack offset when computing line numbers.
|
||||
//
|
||||
//This is most useful in helper functions that make assertions. If you want Gomega's
|
||||
//error message to refer to the calling line in the test (as opposed to the line in the helper function)
|
||||
//set the first argument of `ExpectWithOffset` appropriately.
|
||||
func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion {
|
||||
if globalFailHandler == nil {
|
||||
panic(nilFailHandlerPanic)
|
||||
}
|
||||
return assertion.New(actual, globalFailHandler, offset, extra...)
|
||||
}
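// Illustrative sketch (not part of the vendored source): a helper that uses the
// offset so failures are reported at the helper's caller rather than inside it.
// The helper name is hypothetical.
//
//	func expectNoError(err error) {
//		ExpectWithOffset(1, err).ShouldNot(HaveOccurred())
//	}
//
// A test can then call expectNoError(doSomething()) and any failure will point
// at that call site.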
|
||||
|
||||
//Eventually wraps an actual value allowing assertions to be made on it.
|
||||
//The assertion is tried periodically until it passes or a timeout occurs.
|
||||
//
|
||||
//Both the timeout and polling interval are configurable as optional arguments:
|
||||
//The first optional argument is the timeout
|
||||
//The second optional argument is the polling interval
|
||||
//
|
||||
//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
|
||||
//last case they are interpreted as seconds.
|
||||
//
|
||||
//If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
|
||||
//then Eventually will call the function periodically and try the matcher against the function's first return value.
|
||||
//
|
||||
//Example:
|
||||
//
|
||||
// Eventually(func() int {
|
||||
// return thingImPolling.Count()
|
||||
// }).Should(BeNumerically(">=", 17))
|
||||
//
|
||||
//Note that this example could be rewritten:
|
||||
//
|
||||
// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
|
||||
//
|
||||
//If the function returns more than one value, then Eventually will pass the first value to the matcher and
|
||||
//assert that all other values are nil/zero.
|
||||
//This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
|
||||
//
|
||||
//For example, consider a method that returns a value and an error:
|
||||
// func FetchFromDB() (string, error)
|
||||
//
|
||||
//Then
|
||||
// Eventually(FetchFromDB).Should(Equal("hasselhoff"))
|
||||
//
|
||||
//Will pass only if the returned error is nil and the returned string passes the matcher.
|
||||
//
|
||||
//Eventually's default timeout is 1 second, and its default polling interval is 10ms
|
||||
func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
|
||||
return EventuallyWithOffset(0, actual, intervals...)
|
||||
}
|
||||
|
||||
//EventuallyWithOffset operates like Eventually but takes an additional
|
||||
//initial argument to indicate an offset in the call stack. This is useful when building helper
|
||||
//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
|
||||
func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
|
||||
if globalFailHandler == nil {
|
||||
panic(nilFailHandlerPanic)
|
||||
}
|
||||
timeoutInterval := defaultEventuallyTimeout
|
||||
pollingInterval := defaultEventuallyPollingInterval
|
||||
if len(intervals) > 0 {
|
||||
timeoutInterval = toDuration(intervals[0])
|
||||
}
|
||||
if len(intervals) > 1 {
|
||||
pollingInterval = toDuration(intervals[1])
|
||||
}
|
||||
return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
|
||||
}
|
||||
|
||||
//Consistently wraps an actual value allowing assertions to be made on it.
|
||||
//The assertion is tried periodically and is required to pass for a period of time.
|
||||
//
|
||||
//Both the total time and polling interval are configurable as optional arguments:
|
||||
//The first optional argument is the duration that Consistently will run for
|
||||
//The second optional argument is the polling interval
|
||||
//
|
||||
//Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
|
||||
//last case they are interpreted as seconds.
|
||||
//
|
||||
//If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
|
||||
//then Consistently will call the function periodically and try the matcher against the function's first return value.
|
||||
//
|
||||
//If the function returns more than one value, then Consistently will pass the first value to the matcher and
|
||||
//assert that all other values are nil/zero.
|
||||
//This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
|
||||
//
|
||||
//Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
|
||||
//For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
|
||||
//
|
||||
// Consistently(channel).ShouldNot(Receive())
|
||||
//
|
||||
//Consistently's default duration is 100ms, and its default polling interval is 10ms
|
||||
func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
|
||||
return ConsistentlyWithOffset(0, actual, intervals...)
|
||||
}
|
||||
|
||||
//ConsistentlyWithOffset operates like Consistently but takes an additional
|
||||
//initial argument to indicate an offset in the call stack. This is useful when building helper
|
||||
//functions that contain matchers. To learn more, read about `ExpectWithOffset`.
|
||||
func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion {
|
||||
if globalFailHandler == nil {
|
||||
panic(nilFailHandlerPanic)
|
||||
}
|
||||
timeoutInterval := defaultConsistentlyDuration
|
||||
pollingInterval := defaultConsistentlyPollingInterval
|
||||
if len(intervals) > 0 {
|
||||
timeoutInterval = toDuration(intervals[0])
|
||||
}
|
||||
if len(intervals) > 1 {
|
||||
pollingInterval = toDuration(intervals[1])
|
||||
}
|
||||
return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset)
|
||||
}
|
||||
|
||||
//Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
|
||||
func SetDefaultEventuallyTimeout(t time.Duration) {
|
||||
defaultEventuallyTimeout = t
|
||||
}
|
||||
|
||||
//Set the default polling interval for Eventually.
|
||||
func SetDefaultEventuallyPollingInterval(t time.Duration) {
|
||||
defaultEventuallyPollingInterval = t
|
||||
}
|
||||
|
||||
//Set the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
|
||||
func SetDefaultConsistentlyDuration(t time.Duration) {
|
||||
defaultConsistentlyDuration = t
|
||||
}
|
||||
|
||||
//Set the default polling interval for Consistently.
|
||||
func SetDefaultConsistentlyPollingInterval(t time.Duration) {
|
||||
defaultConsistentlyPollingInterval = t
|
||||
}
|
||||
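//Illustrative sketch (not part of the vendored source): these setters are typically called once,
//in suite setup, before any assertions run. The BeforeSuite block (a Ginkgo helper) and the
//specific values are assumptions chosen for the example.
//
//	var _ = BeforeSuite(func() {
//		SetDefaultEventuallyTimeout(5 * time.Second)
//		SetDefaultEventuallyPollingInterval(50 * time.Millisecond)
//		SetDefaultConsistentlyDuration(200 * time.Millisecond)
//		SetDefaultConsistentlyPollingInterval(20 * time.Millisecond)
//	})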
|
||||
//GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
|
||||
//the matcher passed to the Should and ShouldNot methods.
|
||||
//
|
||||
//Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to
|
||||
//fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more
|
||||
//descriptive
|
||||
//
|
||||
//Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed.
|
||||
//
|
||||
//Example:
|
||||
//
|
||||
// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
|
||||
// Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.")
|
||||
type GomegaAsyncAssertion interface {
|
||||
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
}
|
||||
|
||||
//GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher
|
||||
//passed to the Should/ShouldNot and To/ToNot/NotTo methods.
|
||||
//
|
||||
//Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect
|
||||
//though this is not enforced.
|
||||
//
|
||||
//All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf()
|
||||
//and is used to annotate failure messages.
|
||||
//
|
||||
//All methods return a bool that is true if the assertion passed and false if it failed.
|
||||
//
|
||||
//Example:
|
||||
//
|
||||
// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
|
||||
type GomegaAssertion interface {
|
||||
Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
|
||||
To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
|
||||
}
|
||||
|
||||
//OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
|
||||
type OmegaMatcher types.GomegaMatcher
|
||||
|
||||
func toDuration(input interface{}) time.Duration {
|
||||
duration, ok := input.(time.Duration)
|
||||
if ok {
|
||||
return duration
|
||||
}
|
||||
|
||||
value := reflect.ValueOf(input)
|
||||
kind := reflect.TypeOf(input).Kind()
|
||||
|
||||
if reflect.Int <= kind && kind <= reflect.Int64 {
|
||||
return time.Duration(value.Int()) * time.Second
|
||||
} else if reflect.Uint <= kind && kind <= reflect.Uint64 {
|
||||
return time.Duration(value.Uint()) * time.Second
|
||||
} else if reflect.Float32 <= kind && kind <= reflect.Float64 {
|
||||
return time.Duration(value.Float() * float64(time.Second))
|
||||
} else if reflect.String == kind {
|
||||
duration, err := time.ParseDuration(value.String())
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
|
||||
}
|
||||
return duration
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input))
|
||||
}
|
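//Illustrative sketch (not part of the vendored source): toDuration accepts a time.Duration, a
//parsable duration string, or a bare number (interpreted as seconds), so the following three
//calls all yield the same interval:
//
//	toDuration(2 * time.Second)
//	toDuration("2s")
//	toDuration(2)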
98
vendor/github.com/onsi/gomega/internal/assertion/assertion.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
package assertion
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
type Assertion struct {
|
||||
actualInput interface{}
|
||||
fail types.GomegaFailHandler
|
||||
offset int
|
||||
extra []interface{}
|
||||
}
|
||||
|
||||
func New(actualInput interface{}, fail types.GomegaFailHandler, offset int, extra ...interface{}) *Assertion {
|
||||
return &Assertion{
|
||||
actualInput: actualInput,
|
||||
fail: fail,
|
||||
offset: offset,
|
||||
extra: extra,
|
||||
}
|
||||
}
|
||||
|
||||
func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
|
||||
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
|
||||
}
|
||||
|
||||
func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
|
||||
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
|
||||
}
|
||||
|
||||
func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
|
||||
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
|
||||
}
|
||||
|
||||
func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
|
||||
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
|
||||
}
|
||||
|
||||
func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
|
||||
return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
|
||||
}
|
||||
|
||||
func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
|
||||
switch len(optionalDescription) {
|
||||
case 0:
|
||||
return ""
|
||||
default:
|
||||
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
|
||||
}
|
||||
}
|
||||
|
||||
func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
|
||||
matches, err := matcher.Match(assertion.actualInput)
|
||||
description := assertion.buildDescription(optionalDescription...)
|
||||
if err != nil {
|
||||
assertion.fail(description+err.Error(), 2+assertion.offset)
|
||||
return false
|
||||
}
|
||||
if matches != desiredMatch {
|
||||
var message string
|
||||
if desiredMatch {
|
||||
message = matcher.FailureMessage(assertion.actualInput)
|
||||
} else {
|
||||
message = matcher.NegatedFailureMessage(assertion.actualInput)
|
||||
}
|
||||
assertion.fail(description+message, 2+assertion.offset)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
|
||||
success, message := vetExtras(assertion.extra)
|
||||
if success {
|
||||
return true
|
||||
}
|
||||
|
||||
description := assertion.buildDescription(optionalDescription...)
|
||||
assertion.fail(description+message, 2+assertion.offset)
|
||||
return false
|
||||
}
|
||||
|
||||
func vetExtras(extras []interface{}) (bool, string) {
|
||||
for i, extra := range extras {
|
||||
if extra != nil {
|
||||
zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
|
||||
if !reflect.DeepEqual(zeroValue, extra) {
|
||||
message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
|
||||
return false, message
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, ""
|
||||
}
|
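//Illustrative sketch (not part of the vendored source): vetExtras is what lets Ω/Expect be called
//directly on a multi-return function such as the FetchFromDB example from the DSL docs -- the
//trailing error lands in `extra` and the assertion fails early if it is non-nil/non-zero:
//
//	Ω(FetchFromDB()).Should(Equal("hasselhoff")) // fails immediately if the returned error is non-nil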
189
vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
generated
vendored
Normal file
@ -0,0 +1,189 @@
|
||||
package asyncassertion
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/gomega/internal/oraclematcher"
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
type AsyncAssertionType uint
|
||||
|
||||
const (
|
||||
AsyncAssertionTypeEventually AsyncAssertionType = iota
|
||||
AsyncAssertionTypeConsistently
|
||||
)
|
||||
|
||||
type AsyncAssertion struct {
|
||||
asyncType AsyncAssertionType
|
||||
actualInput interface{}
|
||||
timeoutInterval time.Duration
|
||||
pollingInterval time.Duration
|
||||
fail types.GomegaFailHandler
|
||||
offset int
|
||||
}
|
||||
|
||||
func New(asyncType AsyncAssertionType, actualInput interface{}, fail types.GomegaFailHandler, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion {
|
||||
actualType := reflect.TypeOf(actualInput)
|
||||
if actualType.Kind() == reflect.Func {
|
||||
if actualType.NumIn() != 0 || actualType.NumOut() == 0 {
|
||||
panic("Expected a function with no arguments and one or more return values.")
|
||||
}
|
||||
}
|
||||
|
||||
return &AsyncAssertion{
|
||||
asyncType: asyncType,
|
||||
actualInput: actualInput,
|
||||
fail: fail,
|
||||
timeoutInterval: timeoutInterval,
|
||||
pollingInterval: pollingInterval,
|
||||
offset: offset,
|
||||
}
|
||||
}
|
||||
|
||||
func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
|
||||
return assertion.match(matcher, true, optionalDescription...)
|
||||
}
|
||||
|
||||
func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
|
||||
return assertion.match(matcher, false, optionalDescription...)
|
||||
}
|
||||
|
||||
func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
|
||||
switch len(optionalDescription) {
|
||||
case 0:
|
||||
return ""
|
||||
default:
|
||||
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
|
||||
}
|
||||
}
|
||||
|
||||
func (assertion *AsyncAssertion) actualInputIsAFunction() bool {
|
||||
actualType := reflect.TypeOf(assertion.actualInput)
|
||||
return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0
|
||||
}
|
||||
|
||||
func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
|
||||
if assertion.actualInputIsAFunction() {
|
||||
values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{})
|
||||
|
||||
extras := []interface{}{}
|
||||
for _, value := range values[1:] {
|
||||
extras = append(extras, value.Interface())
|
||||
}
|
||||
|
||||
success, message := vetExtras(extras)
|
||||
|
||||
if !success {
|
||||
return nil, errors.New(message)
|
||||
}
|
||||
|
||||
return values[0].Interface(), nil
|
||||
}
|
||||
|
||||
return assertion.actualInput, nil
|
||||
}
|
||||
|
||||
func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool {
|
||||
if assertion.actualInputIsAFunction() {
|
||||
return true
|
||||
}
|
||||
|
||||
return oraclematcher.MatchMayChangeInTheFuture(matcher, value)
|
||||
}
|
||||
|
||||
func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
|
||||
timer := time.Now()
|
||||
timeout := time.After(assertion.timeoutInterval)
|
||||
|
||||
description := assertion.buildDescription(optionalDescription...)
|
||||
|
||||
var matches bool
|
||||
var err error
|
||||
mayChange := true
|
||||
value, err := assertion.pollActual()
|
||||
if err == nil {
|
||||
mayChange = assertion.matcherMayChange(matcher, value)
|
||||
matches, err = matcher.Match(value)
|
||||
}
|
||||
|
||||
fail := func(preamble string) {
|
||||
errMsg := ""
|
||||
message := ""
|
||||
if err != nil {
|
||||
errMsg = "Error: " + err.Error()
|
||||
} else {
|
||||
if desiredMatch {
|
||||
message = matcher.FailureMessage(value)
|
||||
} else {
|
||||
message = matcher.NegatedFailureMessage(value)
|
||||
}
|
||||
}
|
||||
assertion.fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
|
||||
}
|
||||
|
||||
if assertion.asyncType == AsyncAssertionTypeEventually {
|
||||
for {
|
||||
if err == nil && matches == desiredMatch {
|
||||
return true
|
||||
}
|
||||
|
||||
if !mayChange {
|
||||
fail("No future change is possible. Bailing out early")
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(assertion.pollingInterval):
|
||||
value, err = assertion.pollActual()
|
||||
if err == nil {
|
||||
mayChange = assertion.matcherMayChange(matcher, value)
|
||||
matches, err = matcher.Match(value)
|
||||
}
|
||||
case <-timeout:
|
||||
fail("Timed out")
|
||||
return false
|
||||
}
|
||||
}
|
||||
} else if assertion.asyncType == AsyncAssertionTypeConsistently {
|
||||
for {
|
||||
if !(err == nil && matches == desiredMatch) {
|
||||
fail("Failed")
|
||||
return false
|
||||
}
|
||||
|
||||
if !mayChange {
|
||||
return true
|
||||
}
|
||||
|
||||
select {
|
||||
case <-time.After(assertion.pollingInterval):
|
||||
value, err = assertion.pollActual()
|
||||
if err == nil {
|
||||
mayChange = assertion.matcherMayChange(matcher, value)
|
||||
matches, err = matcher.Match(value)
|
||||
}
|
||||
case <-timeout:
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func vetExtras(extras []interface{}) (bool, string) {
|
||||
for i, extra := range extras {
|
||||
if extra != nil {
|
||||
zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
|
||||
if !reflect.DeepEqual(zeroValue, extra) {
|
||||
message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
|
||||
return false, message
|
||||
}
|
||||
}
|
||||
}
|
||||
return true, ""
|
||||
}
|
25
vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
package oraclematcher
|
||||
|
||||
import "github.com/onsi/gomega/types"
|
||||
|
||||
/*
|
||||
GomegaMatchers that also match the OracleMatcher interface can convey information about
|
||||
whether or not their result will change upon future attempts.
|
||||
|
||||
This allows `Eventually` and `Consistently` to short circuit if success becomes impossible.
|
||||
|
||||
For example, a process' exit code can never change. So, gexec's Exit matcher returns `true`
|
||||
for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
|
||||
*/
|
||||
type OracleMatcher interface {
|
||||
MatchMayChangeInTheFuture(actual interface{}) bool
|
||||
}
|
||||
|
||||
func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool {
|
||||
oracleMatcher, ok := matcher.(OracleMatcher)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
||||
return oracleMatcher.MatchMayChangeInTheFuture(value)
|
||||
}
|
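//Illustrative sketch (not part of the vendored source): a custom matcher can opt in to this
//short-circuiting by implementing OracleMatcher. The exitedMatcher type and its hasExited field
//are hypothetical, and a real matcher would also implement types.GomegaMatcher.
//
//	type exitedMatcher struct{ hasExited bool }
//
//	func (m *exitedMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
//		return !m.hasExited // once the process has exited, the result can never change
//	}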
40
vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
package testingtsupport
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
type gomegaTestingT interface {
|
||||
Errorf(format string, args ...interface{})
|
||||
}
|
||||
|
||||
func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler {
|
||||
return func(message string, callerSkip ...int) {
|
||||
skip := 1
|
||||
if len(callerSkip) > 0 {
|
||||
skip = callerSkip[0]
|
||||
}
|
||||
stackTrace := pruneStack(string(debug.Stack()), skip)
|
||||
t.Errorf("\n%s\n%s", stackTrace, message)
|
||||
}
|
||||
}
|
||||
|
||||
func pruneStack(fullStackTrace string, skip int) string {
|
||||
stack := strings.Split(fullStackTrace, "\n")
|
||||
if len(stack) > 2*(skip+1) {
|
||||
stack = stack[2*(skip+1):]
|
||||
}
|
||||
prunedStack := []string{}
|
||||
re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
|
||||
for i := 0; i < len(stack)/2; i++ {
|
||||
if !re.Match([]byte(stack[i*2])) {
|
||||
prunedStack = append(prunedStack, stack[i*2])
|
||||
prunedStack = append(prunedStack, stack[i*2+1])
|
||||
}
|
||||
}
|
||||
return strings.Join(prunedStack, "\n")
|
||||
}
|
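//Illustrative sketch (not part of the vendored source): this fail handler is typically installed
//via gomega.RegisterTestingT, which lets Gomega report failures through a standard *testing.T.
//TestSomething is a hypothetical test function.
//
//	func TestSomething(t *testing.T) {
//		gomega.RegisterTestingT(t)
//		gomega.Ω(1 + 1).Should(gomega.Equal(2))
//	}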
393
vendor/github.com/onsi/gomega/matchers.go
generated
vendored
Normal file
@ -0,0 +1,393 @@
|
||||
package gomega
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/onsi/gomega/matchers"
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
|
||||
//types when performing comparisons.
|
||||
//It is an error for both actual and expected to be nil. Use BeNil() instead.
|
||||
func Equal(expected interface{}) types.GomegaMatcher {
|
||||
return &matchers.EqualMatcher{
|
||||
Expected: expected,
|
||||
}
|
||||
}
|
||||
|
||||
//BeEquivalentTo is more lax than Equal, allowing equality between different types.
|
||||
//This is done by converting actual to have the type of expected before
|
||||
//attempting equality with reflect.DeepEqual.
|
||||
//It is an error for actual and expected to be nil. Use BeNil() instead.
|
||||
func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
|
||||
return &matchers.BeEquivalentToMatcher{
|
||||
Expected: expected,
|
||||
}
|
||||
}
|
||||
|
||||
//BeNil succeeds if actual is nil
|
||||
func BeNil() types.GomegaMatcher {
|
||||
return &matchers.BeNilMatcher{}
|
||||
}
|
||||
|
||||
//BeTrue succeeds if actual is true
|
||||
func BeTrue() types.GomegaMatcher {
|
||||
return &matchers.BeTrueMatcher{}
|
||||
}
|
||||
|
||||
//BeFalse succeeds if actual is false
|
||||
func BeFalse() types.GomegaMatcher {
|
||||
return &matchers.BeFalseMatcher{}
|
||||
}
|
||||
|
||||
//HaveOccurred succeeds if actual is a non-nil error
|
||||
//The typical Go error checking pattern looks like:
|
||||
// err := SomethingThatMightFail()
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
func HaveOccurred() types.GomegaMatcher {
|
||||
return &matchers.HaveOccurredMatcher{}
|
||||
}
|
||||
|
||||
//Succeed passes if actual is a nil error
|
||||
//Succeed is intended to be used with functions that return a single error value. Instead of
|
||||
// err := SomethingThatMightFail()
|
||||
// Ω(err).ShouldNot(HaveOccurred())
|
||||
//
|
||||
//You can write:
|
||||
// Ω(SomethingThatMightFail()).Should(Succeed())
|
||||
//
|
||||
//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
|
||||
//functions automatically trigger failure if any return value after the first is non-zero/non-nil.
|
||||
//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
|
||||
func Succeed() types.GomegaMatcher {
|
||||
return &matchers.SucceedMatcher{}
|
||||
}
|
||||
|
||||
//MatchError succeeds if actual is a non-nil error that matches the passed in string/error.
|
||||
//
|
||||
//These are valid use-cases:
|
||||
// Ω(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
|
||||
// Ω(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
|
||||
//
|
||||
//It is an error for err to be nil or an object that does not implement the Error interface
|
||||
func MatchError(expected interface{}) types.GomegaMatcher {
|
||||
return &matchers.MatchErrorMatcher{
|
||||
Expected: expected,
|
||||
}
|
||||
}
|
||||
|
||||
//BeClosed succeeds if actual is a closed channel.
|
||||
//It is an error to pass a non-channel to BeClosed; it is also an error to pass nil
|
||||
//
|
||||
//In order to check whether or not the channel is closed, Gomega must try to read from the channel
|
||||
//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
|
||||
//values coming down the channel.
|
||||
//
|
||||
//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
|
||||
//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
|
||||
//
|
||||
//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
|
||||
func BeClosed() types.GomegaMatcher {
|
||||
return &matchers.BeClosedMatcher{}
|
||||
}
|
||||
|
||||
//Receive succeeds if there is a value to be received on actual.
|
||||
//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
|
||||
//
|
||||
//Receive returns immediately and never blocks:
|
||||
//
|
||||
//- If there is nothing on the channel `c` then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
|
||||
//
|
||||
//- If the channel `c` is closed then Ω(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
|
||||
//
|
||||
//- If there is something on the channel `c` ready to be read, then Ω(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
|
||||
//
|
||||
//If you have a go-routine running in the background that will write to channel `c` you can:
|
||||
// Eventually(c).Should(Receive())
|
||||
//
|
||||
//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
|
||||
//
|
||||
//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
|
||||
// Consistently(c).ShouldNot(Receive())
|
||||
//
|
||||
//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
|
||||
// Ω(c).Should(Receive(Equal("foo")))
|
||||
//
|
||||
//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
|
||||
//
|
||||
//Passing Receive a matcher is especially useful when paired with Eventually:
|
||||
//
|
||||
// Eventually(c).Should(Receive(ContainSubstring("bar")))
|
||||
//
|
||||
//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
|
||||
//
|
||||
//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
|
||||
// var myThing thing
|
||||
// Eventually(thingChan).Should(Receive(&myThing))
|
||||
// Ω(myThing.Sprocket).Should(Equal("foo"))
|
||||
// Ω(myThing.IsValid()).Should(BeTrue())
|
||||
func Receive(args ...interface{}) types.GomegaMatcher {
|
||||
var arg interface{}
|
||||
if len(args) > 0 {
|
||||
arg = args[0]
|
||||
}
|
||||
|
||||
return &matchers.ReceiveMatcher{
|
||||
Arg: arg,
|
||||
}
|
||||
}
|
||||
|
||||
//BeSent succeeds if a value can be sent to actual.
|
||||
//Actual must be a channel (and cannot be a receive-only channel) that can send the type of the value passed into BeSent -- anything else is an error.
|
||||
//In addition, actual must not be closed.
|
||||
//
|
||||
//BeSent never blocks:
|
||||
//
|
||||
//- If the channel `c` is not ready to receive then Ω(c).Should(BeSent("foo")) will fail immediately
|
||||
//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed, presuming the channel becomes ready to receive before Eventually's timeout
|
||||
//- If the channel `c` is closed then Ω(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
|
||||
//
|
||||
//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
|
||||
//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
|
||||
func BeSent(arg interface{}) types.GomegaMatcher {
|
||||
return &matchers.BeSentMatcher{
|
||||
Arg: arg,
|
||||
}
|
||||
}
|
||||
|
||||
//MatchRegexp succeeds if actual is a string or stringer that matches the
|
||||
//passed-in regexp. Optional arguments can be provided to construct a regexp
|
||||
//via fmt.Sprintf().
|
||||
func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
|
||||
return &matchers.MatchRegexpMatcher{
|
||||
Regexp: regexp,
|
||||
Args: args,
|
||||
}
|
||||
}
|
||||
|
||||
//ContainSubstring succeeds if actual is a string or stringer that contains the
|
||||
//passed-in substring. Optional arguments can be provided to construct the substring
|
||||
//via fmt.Sprintf().
|
||||
func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
|
||||
return &matchers.ContainSubstringMatcher{
|
||||
Substr: substr,
|
||||
Args: args,
|
||||
}
|
||||
}
|
||||
|
||||
//HavePrefix succeeds if actual is a string or stringer that contains the
|
||||
//passed-in string as a prefix. Optional arguments can be provided to construct the prefix
|
||||
//via fmt.Sprintf().
|
||||
func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
|
||||
return &matchers.HavePrefixMatcher{
|
||||
Prefix: prefix,
|
||||
Args: args,
|
||||
}
|
||||
}
|
||||
|
||||
//HaveSuffix succeeds if actual is a string or stringer that contains the
|
||||
//passed-in string as a suffix. Optional arguments can be provided to construct the suffix
|
||||
//via fmt.Sprintf().
|
||||
func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
|
||||
return &matchers.HaveSuffixMatcher{
|
||||
Suffix: suffix,
|
||||
Args: args,
|
||||
}
|
||||
}
|
||||
|
||||
//MatchJSON succeeds if actual is a string or stringer of JSON that matches
|
||||
//the expected JSON. The JSONs are decoded and the resulting objects are compared via
|
||||
//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
|
||||
func MatchJSON(json interface{}) types.GomegaMatcher {
|
||||
return &matchers.MatchJSONMatcher{
|
||||
JSONToMatch: json,
|
||||
}
|
||||
}
|
||||
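//Illustrative sketch (not part of the vendored source): because the JSON is decoded before
//comparison, key ordering and whitespace differences do not matter:
//
//	Ω(`{"a": 1, "b": 2}`).Should(MatchJSON(`{ "b":2, "a":1 }`))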
|
||||
//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
|
||||
func BeEmpty() types.GomegaMatcher {
|
||||
return &matchers.BeEmptyMatcher{}
|
||||
}
|
||||
|
||||
//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
|
||||
func HaveLen(count int) types.GomegaMatcher {
|
||||
return &matchers.HaveLenMatcher{
|
||||
Count: count,
|
||||
}
|
||||
}
|
||||
|
||||
//BeZero succeeds if actual is the zero value for its type or if actual is nil.
|
||||
func BeZero() types.GomegaMatcher {
|
||||
return &matchers.BeZeroMatcher{}
|
||||
}
|
||||
|
||||
//ContainElement succeeds if actual contains the passed in element.
|
||||
//By default ContainElement() uses Equal() to perform the match, however a
|
||||
//matcher can be passed in instead:
|
||||
// Ω([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
|
||||
//
|
||||
//Actual must be an array, slice or map.
|
||||
//For maps, ContainElement searches through the map's values.
|
||||
func ContainElement(element interface{}) types.GomegaMatcher {
|
||||
return &matchers.ContainElementMatcher{
|
||||
Element: element,
|
||||
}
|
||||
}
|
||||
|
||||
//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
|
||||
//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
|
||||
//
|
||||
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
|
||||
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
|
||||
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
|
||||
//
|
||||
//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
|
||||
//
|
||||
//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can, provided that it
|
||||
//is the only element passed in to ConsistOf:
|
||||
//
|
||||
// Ω([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
|
||||
//
|
||||
//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
|
||||
func ConsistOf(elements ...interface{}) types.GomegaMatcher {
|
||||
return &matchers.ConsistOfMatcher{
|
||||
Elements: elements,
|
||||
}
|
||||
}
|
||||
|
||||
//HaveKey succeeds if actual is a map with the passed in key.
|
||||
//By default HaveKey uses Equal() to perform the match, however a
|
||||
//matcher can be passed in instead:
|
||||
// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
|
||||
func HaveKey(key interface{}) types.GomegaMatcher {
|
||||
return &matchers.HaveKeyMatcher{
|
||||
Key: key,
|
||||
}
|
||||
}
|
||||
|
||||
//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
|
||||
//By default HaveKeyWithValue uses Equal() to perform the match, however a
|
||||
//matcher can be passed in instead:
|
||||
// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
|
||||
// Ω(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
|
||||
func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
|
||||
return &matchers.HaveKeyWithValueMatcher{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
|
||||
//BeNumerically performs numerical assertions in a type-agnostic way.
|
||||
//Actual and expected should be numbers, though the specific type of
|
||||
//number is irrelevant (float32, float64, uint8, etc...).
|
||||
//
|
||||
//There are six, self-explanatory, supported comparators:
|
||||
// Ω(1.0).Should(BeNumerically("==", 1))
|
||||
// Ω(1.0).Should(BeNumerically("~", 0.999, 0.01))
|
||||
// Ω(1.0).Should(BeNumerically(">", 0.9))
|
||||
// Ω(1.0).Should(BeNumerically(">=", 1.0))
|
||||
// Ω(1.0).Should(BeNumerically("<", 3))
|
||||
// Ω(1.0).Should(BeNumerically("<=", 1.0))
|
||||
func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
|
||||
return &matchers.BeNumericallyMatcher{
|
||||
Comparator: comparator,
|
||||
CompareTo: compareTo,
|
||||
}
|
||||
}
|
||||
|
||||
//BeTemporally compares time.Time's like BeNumerically
|
||||
//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
|
||||
// Ω(time.Now()).Should(BeTemporally(">", time.Time{}))
|
||||
// Ω(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
|
||||
func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
|
||||
return &matchers.BeTemporallyMatcher{
|
||||
Comparator: comparator,
|
||||
CompareTo: compareTo,
|
||||
Threshold: threshold,
|
||||
}
|
||||
}
|
||||
|
||||
//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
|
||||
//It will return an error when one of the values is nil.
|
||||
// Ω(0).Should(BeAssignableToTypeOf(0)) // Same values
|
||||
// Ω(5).Should(BeAssignableToTypeOf(-1)) // different values same type
|
||||
// Ω("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
|
||||
// Ω(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
|
||||
func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
|
||||
return &matchers.AssignableToTypeOfMatcher{
|
||||
Expected: expected,
|
||||
}
|
||||
}
|
||||
|
||||
//Panic succeeds if actual is a function that, when invoked, panics.
|
||||
//Actual must be a function that takes no arguments and returns no results.
|
||||
func Panic() types.GomegaMatcher {
|
||||
return &matchers.PanicMatcher{}
|
||||
}
|
||||
|
||||
//BeAnExistingFile succeeds if a file exists.
|
||||
//Actual must be a string representing the abs path to the file being checked.
|
||||
func BeAnExistingFile() types.GomegaMatcher {
|
||||
return &matchers.BeAnExistingFileMatcher{}
|
||||
}
|
||||
|
||||
//BeARegularFile succeeds iff a file exists and is a regular file.
|
||||
//Actual must be a string representing the abs path to the file being checked.
|
||||
func BeARegularFile() types.GomegaMatcher {
|
||||
return &matchers.BeARegularFileMatcher{}
|
||||
}
|
||||
|
||||
//BeADirectory succeeds iff a file exists and is a directory.
|
||||
//Actual must be a string representing the abs path to the file being checked.
|
||||
func BeADirectory() types.GomegaMatcher {
|
||||
return &matchers.BeADirectoryMatcher{}
|
||||
}
|
||||
|
||||
//And succeeds only if all of the given matchers succeed.
|
||||
//The matchers are tried in order, and will fail-fast if one doesn't succeed.
|
||||
// Expect("hi").To(And(HaveLen(2), Equal("hi"))
|
||||
//
|
||||
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
|
||||
func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
|
||||
return &matchers.AndMatcher{Matchers: ms}
|
||||
}
|
||||
|
||||
//SatisfyAll is an alias for And().
|
||||
// Ω("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
|
||||
func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
|
||||
return And(matchers...)
|
||||
}
|
||||
|
||||
//Or succeeds if any of the given matchers succeed.
|
||||
//The matchers are tried in order and will return immediately upon the first successful match.
|
||||
// Expect("hi").To(Or(HaveLen(3), HaveLen(2))
|
||||
//
|
||||
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
|
||||
func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
|
||||
return &matchers.OrMatcher{Matchers: ms}
|
||||
}
|
||||
|
||||
//SatisfyAny is an alias for Or().
|
||||
// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
|
||||
func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
|
||||
return Or(matchers...)
|
||||
}
|
||||
|
||||
//Not negates the given matcher; it succeeds if the given matcher fails.
|
||||
// Expect(1).To(Not(Equal(2)))
|
||||
//
|
||||
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
|
||||
func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
|
||||
return &matchers.NotMatcher{Matcher: matcher}
|
||||
}
|
||||
|
||||
//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
|
||||
//The given transform must be a function of one parameter that returns one value.
|
||||
// var plus1 = func(i int) int { return i + 1 }
|
||||
// Expect(1).To(WithTransform(plus1, Equal(2)))
|
||||
//
|
||||
//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
|
||||
func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
|
||||
return matchers.NewWithTransformMatcher(transform, matcher)
|
||||
}
|
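//Illustrative sketch (not part of the vendored source): And, Or, Not and WithTransform compose
//into arbitrarily nested expressions. strings.TrimSpace is used here as the transform; the value
//being asserted on is an assumption for the example.
//
//	Expect("  hi  ").To(And(
//		Not(BeEmpty()),
//		WithTransform(strings.TrimSpace, Equal("hi")),
//	))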
64
vendor/github.com/onsi/gomega/matchers/and.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
"github.com/onsi/gomega/internal/oraclematcher"
|
||||
"github.com/onsi/gomega/types"
|
||||
)
|
||||
|
||||
type AndMatcher struct {
|
||||
Matchers []types.GomegaMatcher
|
||||
|
||||
// state
|
||||
firstFailedMatcher types.GomegaMatcher
|
||||
}
|
||||
|
||||
func (m *AndMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
m.firstFailedMatcher = nil
|
||||
for _, matcher := range m.Matchers {
|
||||
success, err := matcher.Match(actual)
|
||||
if !success || err != nil {
|
||||
m.firstFailedMatcher = matcher
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (m *AndMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return m.firstFailedMatcher.FailureMessage(actual)
|
||||
}
|
||||
|
||||
func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
// not the most beautiful list of matchers, but not bad either...
|
||||
return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers))
|
||||
}
|
||||
|
||||
func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
|
||||
/*
|
||||
Example with 3 matchers: A, B, C
|
||||
|
||||
Match evaluates them: T, F, <?> => F
|
||||
So match is currently F, what should MatchMayChangeInTheFuture() return?
|
||||
Seems like it only depends on B, since currently B MUST change to allow the result to become T
|
||||
|
||||
Match eval: T, T, T => T
|
||||
So match is currently T, what should MatchMayChangeInTheFuture() return?
|
||||
Seems to depend on ANY of them being able to change to F.
|
||||
*/
|
||||
|
||||
if m.firstFailedMatcher == nil {
|
||||
// so all matchers succeeded. Any one of them changing would change the result.
|
||||
for _, matcher := range m.Matchers {
|
||||
if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false // none of them were going to change
|
||||
} else {
|
||||
// one of the matchers failed.. it must be able to change in order to affect the result
|
||||
return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
|
||||
}
|
||||
}
|
31
vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
)
|
||||
|
||||
type AssignableToTypeOfMatcher struct {
|
||||
Expected interface{}
|
||||
}
|
||||
|
||||
func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if actual == nil || matcher.Expected == nil {
|
||||
return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
|
||||
}
|
||||
|
||||
actualType := reflect.TypeOf(actual)
|
||||
expectedType := reflect.TypeOf(matcher.Expected)
|
||||
|
||||
return actualType.AssignableTo(expectedType), nil
|
||||
}
|
||||
|
||||
func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string {
|
||||
return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected))
|
||||
}
|
||||
|
||||
func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string {
|
||||
return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected))
|
||||
}
|
54
vendor/github.com/onsi/gomega/matchers/be_a_directory.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
)
|
||||
|
||||
type notADirectoryError struct {
|
||||
os.FileInfo
|
||||
}
|
||||
|
||||
func (t notADirectoryError) Error() string {
|
||||
fileInfo := os.FileInfo(t)
|
||||
switch {
|
||||
case fileInfo.Mode().IsRegular():
|
||||
return "file is a regular file"
|
||||
default:
|
||||
return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
|
||||
}
|
||||
}
|
||||
|
||||
type BeADirectoryMatcher struct {
|
||||
expected interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
actualFilename, ok := actual.(string)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path")
|
||||
}
|
||||
|
||||
fileInfo, err := os.Stat(actualFilename)
|
||||
if err != nil {
|
||||
matcher.err = err
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !fileInfo.Mode().IsDir() {
|
||||
matcher.err = notADirectoryError{fileInfo}
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err))
|
||||
}
|
||||
|
||||
func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, fmt.Sprintf("not be a directory"))
|
||||
}
|
54
vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
)
|
||||
|
||||
type notARegularFileError struct {
|
||||
os.FileInfo
|
||||
}
|
||||
|
||||
func (t notARegularFileError) Error() string {
|
||||
fileInfo := os.FileInfo(t)
|
||||
switch {
|
||||
case fileInfo.IsDir():
|
||||
return "file is a directory"
|
||||
default:
|
||||
return fmt.Sprintf("file mode is: %s", fileInfo.Mode().String())
|
||||
}
|
||||
}
|
||||
|
||||
type BeARegularFileMatcher struct {
|
||||
expected interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
actualFilename, ok := actual.(string)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path")
|
||||
}
|
||||
|
||||
fileInfo, err := os.Stat(actualFilename)
|
||||
if err != nil {
|
||||
matcher.err = err
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !fileInfo.Mode().IsRegular() {
|
||||
matcher.err = notARegularFileError{fileInfo}
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err))
|
||||
}
|
||||
|
||||
func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, fmt.Sprintf("not be a regular file"))
|
||||
}
|
38
vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/onsi/gomega/format"
|
||||
)
|
||||
|
||||
type BeAnExistingFileMatcher struct {
|
||||
expected interface{}
|
||||
}
|
||||
|
||||
func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
actualFilename, ok := actual.(string)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path")
|
||||
}
|
||||
|
||||
if _, err = os.Stat(actualFilename); err != nil {
|
||||
switch {
|
||||
case os.IsNotExist(err):
|
||||
return false, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, fmt.Sprintf("to exist"))
|
||||
}
|
||||
|
||||
func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, fmt.Sprintf("not to exist"))
|
||||
}
|
45
vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/onsi/gomega/format"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type BeClosedMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if !isChan(actual) {
|
||||
return false, fmt.Errorf("BeClosed matcher expects a channel. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
channelType := reflect.TypeOf(actual)
|
||||
channelValue := reflect.ValueOf(actual)
|
||||
|
||||
if channelType.ChanDir() == reflect.SendDir {
|
||||
return false, fmt.Errorf("BeClosed matcher cannot determine if a send-only channel is closed or open. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
winnerIndex, _, open := reflect.Select([]reflect.SelectCase{
|
||||
reflect.SelectCase{Dir: reflect.SelectRecv, Chan: channelValue},
|
||||
reflect.SelectCase{Dir: reflect.SelectDefault},
|
||||
})
|
||||
|
||||
var closed bool
|
||||
if winnerIndex == 0 {
|
||||
closed = !open
|
||||
} else if winnerIndex == 1 {
|
||||
closed = false
|
||||
}
|
||||
|
||||
return closed, nil
|
||||
}
|
||||
|
||||
func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "to be closed")
|
||||
}
|
||||
|
||||
func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "to be open")
|
||||
}
|
26
vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/onsi/gomega/format"
|
||||
)
|
||||
|
||||
type BeEmptyMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
length, ok := lengthOf(actual)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("BeEmpty matcher expects a string/array/map/channel/slice. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
return length == 0, nil
|
||||
}
|
||||
|
||||
func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "to be empty")
|
||||
}
|
||||
|
||||
func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "not to be empty")
|
||||
}
|
33
vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/onsi/gomega/format"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type BeEquivalentToMatcher struct {
|
||||
Expected interface{}
|
||||
}
|
||||
|
||||
func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if actual == nil && matcher.Expected == nil {
|
||||
return false, fmt.Errorf("Both actual and expected must not be nil.")
|
||||
}
|
||||
|
||||
convertedActual := actual
|
||||
|
||||
if actual != nil && matcher.Expected != nil && reflect.TypeOf(actual).ConvertibleTo(reflect.TypeOf(matcher.Expected)) {
|
||||
convertedActual = reflect.ValueOf(actual).Convert(reflect.TypeOf(matcher.Expected)).Interface()
|
||||
}
|
||||
|
||||
return reflect.DeepEqual(convertedActual, matcher.Expected), nil
|
||||
}
|
||||
|
||||
func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "to be equivalent to", matcher.Expected)
|
||||
}
|
||||
|
||||
func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "not to be equivalent to", matcher.Expected)
|
||||
}
|
25
vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/onsi/gomega/format"
|
||||
)
|
||||
|
||||
type BeFalseMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if !isBool(actual) {
|
||||
return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
|
||||
return actual == false, nil
|
||||
}
|
||||
|
||||
func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "to be false")
|
||||
}
|
||||
|
||||
func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "not to be false")
|
||||
}
|
18
vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
package matchers
|
||||
|
||||
import "github.com/onsi/gomega/format"
|
||||
|
||||
type BeNilMatcher struct {
|
||||
}
|
||||
|
||||
func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
return isNil(actual), nil
|
||||
}
|
||||
|
||||
func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "to be nil")
|
||||
}
|
||||
|
||||
func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, "not to be nil")
|
||||
}
|
119
vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
package matchers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/onsi/gomega/format"
|
||||
"math"
|
||||
)
|
||||
|
||||
type BeNumericallyMatcher struct {
|
||||
Comparator string
|
||||
CompareTo []interface{}
|
||||
}
|
||||
|
||||
func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo[0])
|
||||
}
|
||||
|
||||
func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
|
||||
return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo[0])
|
||||
}
|
||||
|
||||
func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) {
|
||||
if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 {
|
||||
return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1))
|
||||
}
|
||||
if !isNumber(actual) {
|
||||
return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(actual, 1))
|
||||
}
|
||||
if !isNumber(matcher.CompareTo[0]) {
|
||||
return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1))
|
||||
}
|
||||
if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) {
|
||||
return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1))
|
||||
}
|
||||
|
||||
switch matcher.Comparator {
|
||||
case "==", "~", ">", ">=", "<", "<=":
|
||||
default:
|
||||
return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
|
||||
}
|
||||
|
||||
if isFloat(actual) || isFloat(matcher.CompareTo[0]) {
|
||||
var secondOperand float64 = 1e-8
|
||||
if len(matcher.CompareTo) == 2 {
|
||||
secondOperand = toFloat(matcher.CompareTo[1])
|
||||
}
|
||||
success = matcher.matchFloats(toFloat(actual), toFloat(matcher.CompareTo[0]), secondOperand)
|
||||
} else if isInteger(actual) {
|
||||
var secondOperand int64 = 0
|
||||
if len(matcher.CompareTo) == 2 {
|
||||
secondOperand = toInteger(matcher.CompareTo[1])
|
||||
}
|
||||
success = matcher.matchIntegers(toInteger(actual), toInteger(matcher.CompareTo[0]), secondOperand)
|
||||
} else if isUnsignedInteger(actual) {
|
||||
var secondOperand uint64 = 0
|
||||
if len(matcher.CompareTo) == 2 {
|
||||
secondOperand = toUnsignedInteger(matcher.CompareTo[1])
|
||||
}
|
||||
success = matcher.matchUnsignedIntegers(toUnsignedInteger(actual), toUnsignedInteger(matcher.CompareTo[0]), secondOperand)
|
||||
} else {
|
||||
return false, fmt.Errorf("Failed to compare:\n%s\n%s:\n%s", format.Object(actual, 1), matcher.Comparator, format.Object(matcher.CompareTo[0], 1))
|
||||
}
|
||||
|
||||
return success, nil
|
||||
}
|
||||
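//Illustrative sketch (not part of the vendored source): for integers the "~" threshold defaults
//to 0 (exact match) and for floats it defaults to 1e-8, so a tolerance usually has to be passed
//explicitly as the second CompareTo argument:
//
//	Ω(5).Should(BeNumerically("~", 7, 2))          // passes: |5 - 7| <= 2
//	Ω(5).Should(BeNumerically("~", 7))             // fails: integer threshold defaults to 0
//	Ω(1.0).Should(BeNumerically("~", 0.999, 0.01)) // passes: within the given float tolerance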
|
||||
func (matcher *BeNumericallyMatcher) matchIntegers(actual, compareTo, threshold int64) (success bool) {
|
||||
switch matcher.Comparator {
|
||||
case "==", "~":
|
||||
diff := actual - compareTo
|
||||
return -threshold <= diff && diff <= threshold
|
||||
case ">":
|
||||
return (actual > compareTo)
|
||||
case ">=":
|
||||
return (actual >= compareTo)
|
||||
case "<":
|
||||
return (actual < compareTo)
|
||||
case "<=":
|
||||
return (actual <= compareTo)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (matcher *BeNumericallyMatcher) matchUnsignedIntegers(actual, compareTo, threshold uint64) (success bool) {
|
||||
switch matcher.Comparator {
|
||||
case "==", "~":
|
||||
if actual < compareTo {
|
||||
actual, compareTo = compareTo, actual
|
||||
}
|
||||
return actual-compareTo <= threshold
|
||||
case ">":
|
||||
return (actual > compareTo)
|
||||
case ">=":
|
||||
return (actual >= compareTo)
|
||||
case "<":
|
||||
return (actual < compareTo)
|
||||
case "<=":
|
||||
return (actual <= compareTo)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (matcher *BeNumericallyMatcher) matchFloats(actual, compareTo, threshold float64) (success bool) {
|
||||
switch matcher.Comparator {
|
||||
case "~":
|
||||
return math.Abs(actual-compareTo) <= threshold
|
||||
case "==":
|
||||
return (actual == compareTo)
|
||||
case ">":
|
||||
return (actual > compareTo)
|
||||
case ">=":
|
||||
return (actual >= compareTo)
|
||||
case "<":
|
||||
return (actual < compareTo)
|
||||
case "<=":
|
||||
return (actual <= compareTo)
|
||||
}
|
||||
return false
|
||||
}
|
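A minimal usage sketch for the matcher above, assuming the public Gomega entry points (RegisterTestingT, Expect, BeNumerically) from github.com/onsi/gomega; the package and test names here are illustrative only, not part of the vendored code:

package matchers_example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// Shows how the comparator string and the optional second CompareTo
// argument (the comparison threshold) handled by Match are supplied.
func TestBeNumericallySketch(t *testing.T) {
	RegisterTestingT(t)

	Expect(5).To(BeNumerically(">", 3))                   // integer path: matchIntegers
	Expect(1.1).To(BeNumerically("~", 1.0, 0.2))          // float path with an explicit threshold
	Expect(uint64(7)).To(BeNumerically("==", uint64(7))) // unsigned-integer path
}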
71
vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
)

type BeSentMatcher struct {
	Arg           interface{}
	channelClosed bool
}

func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) {
	if !isChan(actual) {
		return false, fmt.Errorf("BeSent expects a channel. Got:\n%s", format.Object(actual, 1))
	}

	channelType := reflect.TypeOf(actual)
	channelValue := reflect.ValueOf(actual)

	if channelType.ChanDir() == reflect.RecvDir {
		return false, fmt.Errorf("BeSent matcher cannot be passed a receive-only channel. Got:\n%s", format.Object(actual, 1))
	}

	argType := reflect.TypeOf(matcher.Arg)
	assignable := argType.AssignableTo(channelType.Elem())

	if !assignable {
		return false, fmt.Errorf("Cannot pass:\n%s to the channel:\n%s\nThe types don't match.", format.Object(matcher.Arg, 1), format.Object(actual, 1))
	}

	argValue := reflect.ValueOf(matcher.Arg)

	defer func() {
		if e := recover(); e != nil {
			success = false
			err = fmt.Errorf("Cannot send to a closed channel")
			matcher.channelClosed = true
		}
	}()

	winnerIndex, _, _ := reflect.Select([]reflect.SelectCase{
		reflect.SelectCase{Dir: reflect.SelectSend, Chan: channelValue, Send: argValue},
		reflect.SelectCase{Dir: reflect.SelectDefault},
	})

	var didSend bool
	if winnerIndex == 0 {
		didSend = true
	}

	return didSend, nil
}

func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to send:", matcher.Arg)
}

func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to send:", matcher.Arg)
}

func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
	if !isChan(actual) {
		return false
	}

	return !matcher.channelClosed
}
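A minimal usage sketch, assuming the standard Gomega helpers (RegisterTestingT, Expect, BeSent, Equal); package and test names are illustrative:

package matchers_example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// Illustrates the non-blocking send attempted by BeSentMatcher.Match:
// the reflect.Select with a default case only succeeds if a receiver
// (or spare buffer capacity) is ready.
func TestBeSentSketch(t *testing.T) {
	RegisterTestingT(t)

	ch := make(chan string, 1) // buffered, so the send can complete immediately
	Expect(ch).To(BeSent("hello"))
	Expect(<-ch).To(Equal("hello")) // the value really was placed on the channel
}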
65
vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
package matchers

import (
	"fmt"
	"github.com/onsi/gomega/format"
	"time"
)

type BeTemporallyMatcher struct {
	Comparator string
	CompareTo  time.Time
	Threshold  []time.Duration
}

func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo)
}

func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo)
}

func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) {
	// predicate to test for time.Time type
	isTime := func(t interface{}) bool {
		_, ok := t.(time.Time)
		return ok
	}

	if !isTime(actual) {
		return false, fmt.Errorf("Expected a time.Time. Got:\n%s", format.Object(actual, 1))
	}

	switch matcher.Comparator {
	case "==", "~", ">", ">=", "<", "<=":
	default:
		return false, fmt.Errorf("Unknown comparator: %s", matcher.Comparator)
	}

	var threshold = time.Millisecond
	if len(matcher.Threshold) == 1 {
		threshold = matcher.Threshold[0]
	}

	return matcher.matchTimes(actual.(time.Time), matcher.CompareTo, threshold), nil
}

func (matcher *BeTemporallyMatcher) matchTimes(actual, compareTo time.Time, threshold time.Duration) (success bool) {
	switch matcher.Comparator {
	case "==":
		return actual.Equal(compareTo)
	case "~":
		diff := actual.Sub(compareTo)
		return -threshold <= diff && diff <= threshold
	case ">":
		return actual.After(compareTo)
	case ">=":
		return !actual.Before(compareTo)
	case "<":
		return actual.Before(compareTo)
	case "<=":
		return !actual.After(compareTo)
	}
	return false
}
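A minimal usage sketch, assuming the standard Gomega helpers (RegisterTestingT, Expect, BeTemporally); package and test names are illustrative:

package matchers_example_test

import (
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

// Illustrates the comparator plus optional threshold handled by
// BeTemporallyMatcher.Match; "~" falls back to a 1ms window when no
// Threshold argument is given.
func TestBeTemporallySketch(t *testing.T) {
	RegisterTestingT(t)

	now := time.Now()
	Expect(now.Add(time.Second)).To(BeTemporally(">", now))
	Expect(now.Add(10 * time.Millisecond)).To(BeTemporally("~", now, 50*time.Millisecond))
}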
25
vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
package matchers

import (
	"fmt"
	"github.com/onsi/gomega/format"
)

type BeTrueMatcher struct {
}

func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
	if !isBool(actual) {
		return false, fmt.Errorf("Expected a boolean. Got:\n%s", format.Object(actual, 1))
	}

	return actual.(bool), nil
}

func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to be true")
}

func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to be true")
}
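A minimal usage sketch, assuming the standard Gomega helpers (RegisterTestingT, Expect, BeTrue); package and test names are illustrative:

package matchers_example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// BeTrue only accepts a bool actual; Match rejects any other type.
func TestBeTrueSketch(t *testing.T) {
	RegisterTestingT(t)

	Expect(1 < 2).To(BeTrue())
	Expect(false).NotTo(BeTrue())
}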
27
vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go
generated
vendored
Normal file
@ -0,0 +1,27 @@
package matchers

import (
	"github.com/onsi/gomega/format"
	"reflect"
)

type BeZeroMatcher struct {
}

func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) {
	if actual == nil {
		return true, nil
	}
	zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()

	return reflect.DeepEqual(zeroValue, actual), nil
}

func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to be zero-valued")
}

func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to be zero-valued")
}
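A minimal usage sketch, assuming the standard Gomega helpers (RegisterTestingT, Expect, BeZero); package and test names are illustrative:

package matchers_example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// BeZero compares against reflect.Zero of the actual value's type and
// treats a nil interface as zero, as Match above implements.
func TestBeZeroSketch(t *testing.T) {
	RegisterTestingT(t)

	var s string
	Expect(s).To(BeZero())  // "" is the zero string
	Expect(0).To(BeZero())  // 0 is the zero int
	Expect(nil).To(BeZero())
	Expect(42).NotTo(BeZero())
}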
80
vendor/github.com/onsi/gomega/matchers/consist_of.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
	"github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
)

type ConsistOfMatcher struct {
	Elements []interface{}
}

func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
	if !isArrayOrSlice(actual) && !isMap(actual) {
		return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
	}

	elements := matcher.Elements
	if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) {
		elements = []interface{}{}
		value := reflect.ValueOf(matcher.Elements[0])
		for i := 0; i < value.Len(); i++ {
			elements = append(elements, value.Index(i).Interface())
		}
	}

	matchers := []interface{}{}
	for _, element := range elements {
		matcher, isMatcher := element.(omegaMatcher)
		if !isMatcher {
			matcher = &EqualMatcher{Expected: element}
		}
		matchers = append(matchers, matcher)
	}

	values := matcher.valuesOf(actual)

	if len(values) != len(matchers) {
		return false, nil
	}

	neighbours := func(v, m interface{}) (bool, error) {
		match, err := m.(omegaMatcher).Match(v)
		return match && err == nil, nil
	}

	bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours)
	if err != nil {
		return false, err
	}

	return len(bipartiteGraph.LargestMatching()) == len(values), nil
}

func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} {
	value := reflect.ValueOf(actual)
	values := []interface{}{}
	if isMap(actual) {
		keys := value.MapKeys()
		for i := 0; i < value.Len(); i++ {
			values = append(values, value.MapIndex(keys[i]).Interface())
		}
	} else {
		for i := 0; i < value.Len(); i++ {
			values = append(values, value.Index(i).Interface())
		}
	}

	return values
}

func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to consist of", matcher.Elements)
}

func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to consist of", matcher.Elements)
}
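A minimal usage sketch, assuming the standard Gomega helpers (RegisterTestingT, Expect, ConsistOf, BeNumerically); package and test names are illustrative:

package matchers_example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// ConsistOf requires an exact, order-independent pairing between the
// actual elements and the expectations (plain values or nested matchers),
// found via the bipartite matching in Match above.
func TestConsistOfSketch(t *testing.T) {
	RegisterTestingT(t)

	Expect([]string{"b", "a", "c"}).To(ConsistOf("a", "b", "c"))      // order does not matter
	Expect([]int{1, 2, 3}).To(ConsistOf(1, 2, BeNumerically(">", 2))) // matchers may stand in for values
	Expect([]int{1, 2}).NotTo(ConsistOf(1, 2, 3))                     // lengths must agree exactly
}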
56
vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
package matchers

import (
	"fmt"
	"reflect"

	"github.com/onsi/gomega/format"
)

type ContainElementMatcher struct {
	Element interface{}
}

func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
	if !isArrayOrSlice(actual) && !isMap(actual) {
		return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
	}

	elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
	if !elementIsMatcher {
		elemMatcher = &EqualMatcher{Expected: matcher.Element}
	}

	value := reflect.ValueOf(actual)
	var keys []reflect.Value
	if isMap(actual) {
		keys = value.MapKeys()
	}
	var lastError error
	for i := 0; i < value.Len(); i++ {
		var success bool
		var err error
		if isMap(actual) {
			success, err = elemMatcher.Match(value.MapIndex(keys[i]).Interface())
		} else {
			success, err = elemMatcher.Match(value.Index(i).Interface())
		}
		if err != nil {
			lastError = err
			continue
		}
		if success {
			return true, nil
		}
	}

	return false, lastError
}

func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "to contain element matching", matcher.Element)
}

func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) {
	return format.Message(actual, "not to contain element matching", matcher.Element)
}
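A minimal usage sketch, assuming the standard Gomega helpers (RegisterTestingT, Expect, ContainElement, BeNumerically); package and test names are illustrative:

package matchers_example_test

import (
	"testing"

	. "github.com/onsi/gomega"
)

// ContainElement succeeds if any element (or, for maps, any value)
// satisfies the expectation, which may itself be a matcher.
func TestContainElementSketch(t *testing.T) {
	RegisterTestingT(t)

	Expect([]string{"foo", "bar"}).To(ContainElement("bar"))
	Expect(map[string]int{"a": 1, "b": 2}).To(ContainElement(2))      // maps are matched on their values
	Expect([]int{1, 2, 3}).To(ContainElement(BeNumerically(">=", 3))) // nested matcher
}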
Some files were not shown because too many files have changed in this diff.