Add vendor dependencies as part of git repo
203 vendor/github.com/google/gnostic/LICENSE generated vendored Normal file
@@ -0,0 +1,203 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
4 vendor/github.com/google/gnostic/compiler/README.md generated vendored Normal file
@@ -0,0 +1,4 @@
# Compiler support code

This directory contains compiler support code used by Gnostic and Gnostic
extensions.
49 vendor/github.com/google/gnostic/compiler/context.go generated vendored Normal file
@@ -0,0 +1,49 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

import (
	yaml "gopkg.in/yaml.v3"
)

// Context contains state of the compiler as it traverses a document.
type Context struct {
	Parent            *Context
	Name              string
	Node              *yaml.Node
	ExtensionHandlers *[]ExtensionHandler
}

// NewContextWithExtensions returns a new object representing the compiler state
func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
	return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers}
}

// NewContext returns a new object representing the compiler state
func NewContext(name string, node *yaml.Node, parent *Context) *Context {
	if parent != nil {
		return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
	}
	return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
}

// Description returns a text description of the compiler state
func (context *Context) Description() string {
	name := context.Name
	if context.Parent != nil {
		name = context.Parent.Description() + "." + name
	}
	return name
}
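Description walks the Parent chain and joins the context names with dots, so a nested traversal reports a path-like location. A minimal usage sketch (assuming the vendored package is imported as github.com/google/gnostic/compiler; the names "components" and "schemas" are only illustrative):

```go
package main

import (
	"fmt"

	"github.com/google/gnostic/compiler"
)

func main() {
	// Each child context keeps a pointer to its parent; Description
	// concatenates the names from the root down, separated by dots.
	root := compiler.NewContext("components", nil, nil)
	child := compiler.NewContext("schemas", nil, root)
	fmt.Println(child.Description()) // prints "components.schemas"
}
```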
70 vendor/github.com/google/gnostic/compiler/error.go generated vendored Normal file
@@ -0,0 +1,70 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

import "fmt"

// Error represents compiler errors and their location in the document.
type Error struct {
	Context *Context
	Message string
}

// NewError creates an Error.
func NewError(context *Context, message string) *Error {
	return &Error{Context: context, Message: message}
}

func (err *Error) locationDescription() string {
	if err.Context.Node != nil {
		return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description())
	}
	return err.Context.Description()
}

// Error returns the string value of an Error.
func (err *Error) Error() string {
	if err.Context == nil {
		return err.Message
	}
	return err.locationDescription() + " " + err.Message
}

// ErrorGroup is a container for groups of Error values.
type ErrorGroup struct {
	Errors []error
}

// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty.
func NewErrorGroupOrNil(errors []error) error {
	if len(errors) == 0 {
		return nil
	} else if len(errors) == 1 {
		return errors[0]
	} else {
		return &ErrorGroup{Errors: errors}
	}
}

func (group *ErrorGroup) Error() string {
	result := ""
	for i, err := range group.Errors {
		if i > 0 {
			result += "\n"
		}
		result += err.Error()
	}
	return result
}
86 vendor/github.com/google/gnostic/compiler/extensions.go generated vendored Normal file
@@ -0,0 +1,86 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compiler

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
	yaml "gopkg.in/yaml.v3"

	extensions "github.com/google/gnostic/extensions"
)

// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
type ExtensionHandler struct {
	Name string
}

// CallExtension calls a binary extension handler.
func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) {
	if context == nil || context.ExtensionHandlers == nil {
		return false, nil, nil
	}
	handled = false
	for _, handler := range *(context.ExtensionHandlers) {
		response, err = handler.handle(in, extensionName)
		if response == nil {
			continue
		} else {
			handled = true
			break
		}
	}
	return handled, response, err
}

func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) {
	if extensionHandlers.Name != "" {
		yamlData, _ := yaml.Marshal(in)
		request := &extensions.ExtensionHandlerRequest{
			CompilerVersion: &extensions.Version{
				Major: 0,
				Minor: 1,
				Patch: 0,
			},
			Wrapper: &extensions.Wrapper{
				Version:       "unknown", // TODO: set this to the type/version of spec being parsed.
				Yaml:          string(yamlData),
				ExtensionName: extensionName,
			},
		}
		requestBytes, _ := proto.Marshal(request)
		cmd := exec.Command(extensionHandlers.Name)
		cmd.Stdin = bytes.NewReader(requestBytes)
		output, err := cmd.Output()
		if err != nil {
			return nil, err
		}
		response := &extensions.ExtensionHandlerResponse{}
		err = proto.Unmarshal(output, response)
		if err != nil || !response.Handled {
			return nil, err
		}
		if len(response.Errors) != 0 {
			return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ","))
		}
		return response.Value, nil
	}
	return nil, nil
}
397 vendor/github.com/google/gnostic/compiler/helpers.go generated vendored Normal file
@@ -0,0 +1,397 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/google/gnostic/jsonschema"
|
||||
)
|
||||
|
||||
// compiler helper functions, usually called from generated code
|
||||
|
||||
// UnpackMap gets a *yaml.Node if possible.
|
||||
func UnpackMap(in *yaml.Node) (*yaml.Node, bool) {
|
||||
if in == nil {
|
||||
return nil, false
|
||||
}
|
||||
return in, true
|
||||
}
|
||||
|
||||
// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice.
|
||||
func SortedKeysForMap(m *yaml.Node) []string {
|
||||
keys := make([]string, 0)
|
||||
if m.Kind == yaml.MappingNode {
|
||||
for i := 0; i < len(m.Content); i += 2 {
|
||||
keys = append(keys, m.Content[i].Value)
|
||||
}
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// MapHasKey returns true if a yamlv2.MapSlice contains a specified key.
|
||||
func MapHasKey(m *yaml.Node, key string) bool {
|
||||
if m == nil {
|
||||
return false
|
||||
}
|
||||
if m.Kind == yaml.MappingNode {
|
||||
for i := 0; i < len(m.Content); i += 2 {
|
||||
itemKey := m.Content[i].Value
|
||||
if key == itemKey {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// MapValueForKey gets the value of a map value for a specified key.
|
||||
func MapValueForKey(m *yaml.Node, key string) *yaml.Node {
|
||||
if m == nil {
|
||||
return nil
|
||||
}
|
||||
if m.Kind == yaml.MappingNode {
|
||||
for i := 0; i < len(m.Content); i += 2 {
|
||||
itemKey := m.Content[i].Value
|
||||
if key == itemKey {
|
||||
return m.Content[i+1]
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible.
|
||||
func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
|
||||
stringArray := make([]string, 0)
|
||||
for _, item := range interfaceArray {
|
||||
v, ok := item.(string)
|
||||
if ok {
|
||||
stringArray = append(stringArray, v)
|
||||
}
|
||||
}
|
||||
return stringArray
|
||||
}
|
||||
|
||||
// SequenceNodeForNode returns a node if it is a SequenceNode.
|
||||
func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) {
|
||||
if node.Kind != yaml.SequenceNode {
|
||||
return nil, false
|
||||
}
|
||||
return node, true
|
||||
}
|
||||
|
||||
// BoolForScalarNode returns the bool value of a node.
|
||||
func BoolForScalarNode(node *yaml.Node) (bool, bool) {
|
||||
if node == nil {
|
||||
return false, false
|
||||
}
|
||||
if node.Kind == yaml.DocumentNode {
|
||||
return BoolForScalarNode(node.Content[0])
|
||||
}
|
||||
if node.Kind != yaml.ScalarNode {
|
||||
return false, false
|
||||
}
|
||||
if node.Tag != "!!bool" {
|
||||
return false, false
|
||||
}
|
||||
v, err := strconv.ParseBool(node.Value)
|
||||
if err != nil {
|
||||
return false, false
|
||||
}
|
||||
return v, true
|
||||
}
|
||||
|
||||
// IntForScalarNode returns the integer value of a node.
|
||||
func IntForScalarNode(node *yaml.Node) (int64, bool) {
|
||||
if node == nil {
|
||||
return 0, false
|
||||
}
|
||||
if node.Kind == yaml.DocumentNode {
|
||||
return IntForScalarNode(node.Content[0])
|
||||
}
|
||||
if node.Kind != yaml.ScalarNode {
|
||||
return 0, false
|
||||
}
|
||||
if node.Tag != "!!int" {
|
||||
return 0, false
|
||||
}
|
||||
v, err := strconv.ParseInt(node.Value, 10, 64)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return v, true
|
||||
}
|
||||
|
||||
// FloatForScalarNode returns the float value of a node.
|
||||
func FloatForScalarNode(node *yaml.Node) (float64, bool) {
|
||||
if node == nil {
|
||||
return 0.0, false
|
||||
}
|
||||
if node.Kind == yaml.DocumentNode {
|
||||
return FloatForScalarNode(node.Content[0])
|
||||
}
|
||||
if node.Kind != yaml.ScalarNode {
|
||||
return 0.0, false
|
||||
}
|
||||
if (node.Tag != "!!int") && (node.Tag != "!!float") {
|
||||
return 0.0, false
|
||||
}
|
||||
v, err := strconv.ParseFloat(node.Value, 64)
|
||||
if err != nil {
|
||||
return 0.0, false
|
||||
}
|
||||
return v, true
|
||||
}
|
||||
|
||||
// StringForScalarNode returns the string value of a node.
|
||||
func StringForScalarNode(node *yaml.Node) (string, bool) {
|
||||
if node == nil {
|
||||
return "", false
|
||||
}
|
||||
if node.Kind == yaml.DocumentNode {
|
||||
return StringForScalarNode(node.Content[0])
|
||||
}
|
||||
switch node.Kind {
|
||||
case yaml.ScalarNode:
|
||||
switch node.Tag {
|
||||
case "!!int":
|
||||
return node.Value, true
|
||||
case "!!str":
|
||||
return node.Value, true
|
||||
case "!!timestamp":
|
||||
return node.Value, true
|
||||
case "!!null":
|
||||
return "", true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible.
|
||||
func StringArrayForSequenceNode(node *yaml.Node) []string {
|
||||
stringArray := make([]string, 0)
|
||||
for _, item := range node.Content {
|
||||
v, ok := StringForScalarNode(item)
|
||||
if ok {
|
||||
stringArray = append(stringArray, v)
|
||||
}
|
||||
}
|
||||
return stringArray
|
||||
}
|
||||
|
||||
// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
|
||||
func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string {
|
||||
missingKeys := make([]string, 0)
|
||||
for _, k := range requiredKeys {
|
||||
if !MapHasKey(m, k) {
|
||||
missingKeys = append(missingKeys, k)
|
||||
}
|
||||
}
|
||||
return missingKeys
|
||||
}
|
||||
|
||||
// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
|
||||
func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
|
||||
invalidKeys := make([]string, 0)
|
||||
if m == nil || m.Kind != yaml.MappingNode {
|
||||
return invalidKeys
|
||||
}
|
||||
for i := 0; i < len(m.Content); i += 2 {
|
||||
key := m.Content[i].Value
|
||||
found := false
|
||||
// does the key match an allowed key?
|
||||
for _, allowedKey := range allowedKeys {
|
||||
if key == allowedKey {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
// does the key match an allowed pattern?
|
||||
for _, allowedPattern := range allowedPatterns {
|
||||
if allowedPattern.MatchString(key) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
invalidKeys = append(invalidKeys, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
return invalidKeys
|
||||
}
|
||||
|
||||
// NewNullNode creates a new Null node.
|
||||
func NewNullNode() *yaml.Node {
|
||||
node := &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!null",
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// NewMappingNode creates a new Mapping node.
|
||||
func NewMappingNode() *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.MappingNode,
|
||||
Content: make([]*yaml.Node, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// NewSequenceNode creates a new Sequence node.
|
||||
func NewSequenceNode() *yaml.Node {
|
||||
node := &yaml.Node{
|
||||
Kind: yaml.SequenceNode,
|
||||
Content: make([]*yaml.Node, 0),
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// NewScalarNodeForString creates a new node to hold a string.
|
||||
func NewScalarNodeForString(s string) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!str",
|
||||
Value: s,
|
||||
}
|
||||
}
|
||||
|
||||
// NewSequenceNodeForStringArray creates a new node to hold an array of strings.
|
||||
func NewSequenceNodeForStringArray(strings []string) *yaml.Node {
|
||||
node := &yaml.Node{
|
||||
Kind: yaml.SequenceNode,
|
||||
Content: make([]*yaml.Node, 0),
|
||||
}
|
||||
for _, s := range strings {
|
||||
node.Content = append(node.Content, NewScalarNodeForString(s))
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// NewScalarNodeForBool creates a new node to hold a bool.
|
||||
func NewScalarNodeForBool(b bool) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!bool",
|
||||
Value: fmt.Sprintf("%t", b),
|
||||
}
|
||||
}
|
||||
|
||||
// NewScalarNodeForFloat creates a new node to hold a float.
|
||||
func NewScalarNodeForFloat(f float64) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!float",
|
||||
Value: fmt.Sprintf("%g", f),
|
||||
}
|
||||
}
|
||||
|
||||
// NewScalarNodeForInt creates a new node to hold an integer.
|
||||
func NewScalarNodeForInt(i int64) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!int",
|
||||
Value: fmt.Sprintf("%d", i),
|
||||
}
|
||||
}
|
||||
|
||||
// PluralProperties returns the string "properties" pluralized.
|
||||
func PluralProperties(count int) string {
|
||||
if count == 1 {
|
||||
return "property"
|
||||
}
|
||||
return "properties"
|
||||
}
|
||||
|
||||
// StringArrayContainsValue returns true if a string array contains a specified value.
|
||||
func StringArrayContainsValue(array []string, value string) bool {
|
||||
for _, item := range array {
|
||||
if item == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// StringArrayContainsValues returns true if a string array contains all of a list of specified values.
|
||||
func StringArrayContainsValues(array []string, values []string) bool {
|
||||
for _, value := range values {
|
||||
if !StringArrayContainsValue(array, value) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// StringValue returns the string value of an item.
|
||||
func StringValue(item interface{}) (value string, ok bool) {
|
||||
value, ok = item.(string)
|
||||
if ok {
|
||||
return value, ok
|
||||
}
|
||||
intValue, ok := item.(int)
|
||||
if ok {
|
||||
return strconv.Itoa(intValue), true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// Description returns a human-readable representation of an item.
|
||||
func Description(item interface{}) string {
|
||||
value, ok := item.(*yaml.Node)
|
||||
if ok {
|
||||
return jsonschema.Render(value)
|
||||
}
|
||||
return fmt.Sprintf("%+v", item)
|
||||
}
|
||||
|
||||
// Display returns a description of a node for use in error messages.
|
||||
func Display(node *yaml.Node) string {
|
||||
switch node.Kind {
|
||||
case yaml.ScalarNode:
|
||||
switch node.Tag {
|
||||
case "!!str":
|
||||
return fmt.Sprintf("%s (string)", node.Value)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%+v (%T)", node, node)
|
||||
}
|
||||
|
||||
// Marshal creates a yaml version of a structure in our preferred style
|
||||
func Marshal(in *yaml.Node) []byte {
|
||||
clearStyle(in)
|
||||
//bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}})
|
||||
bytes, _ := yaml.Marshal(in)
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
func clearStyle(node *yaml.Node) {
|
||||
node.Style = 0
|
||||
for _, c := range node.Content {
|
||||
clearStyle(c)
|
||||
}
|
||||
}
|
16 vendor/github.com/google/gnostic/compiler/main.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package compiler provides support functions to generated compiler code.
package compiler
307 vendor/github.com/google/gnostic/compiler/reader.go generated vendored Normal file
@@ -0,0 +1,307 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
var verboseReader = false
|
||||
|
||||
var fileCache map[string][]byte
|
||||
var infoCache map[string]*yaml.Node
|
||||
|
||||
var fileCacheEnable = true
|
||||
var infoCacheEnable = true
|
||||
|
||||
// These locks are used to synchronize accesses to the fileCache and infoCache
|
||||
// maps (above). They are global state and can throw thread-related errors
|
||||
// when modified from separate goroutines. The general strategy is to protect
|
||||
// all public functions in this file with mutex Lock() calls. As a result, to
|
||||
// avoid deadlock, these public functions should not call other public
|
||||
// functions, so some public functions have private equivalents.
|
||||
// In the future, we might consider replacing the maps with sync.Map and
|
||||
// eliminating these mutexes.
|
||||
var fileCacheMutex sync.Mutex
|
||||
var infoCacheMutex sync.Mutex
|
||||
|
||||
func initializeFileCache() {
|
||||
if fileCache == nil {
|
||||
fileCache = make(map[string][]byte, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func initializeInfoCache() {
|
||||
if infoCache == nil {
|
||||
infoCache = make(map[string]*yaml.Node, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// EnableFileCache turns on file caching.
|
||||
func EnableFileCache() {
|
||||
fileCacheMutex.Lock()
|
||||
defer fileCacheMutex.Unlock()
|
||||
fileCacheEnable = true
|
||||
}
|
||||
|
||||
// EnableInfoCache turns on parsed info caching.
|
||||
func EnableInfoCache() {
|
||||
infoCacheMutex.Lock()
|
||||
defer infoCacheMutex.Unlock()
|
||||
infoCacheEnable = true
|
||||
}
|
||||
|
||||
// DisableFileCache turns off file caching.
|
||||
func DisableFileCache() {
|
||||
fileCacheMutex.Lock()
|
||||
defer fileCacheMutex.Unlock()
|
||||
fileCacheEnable = false
|
||||
}
|
||||
|
||||
// DisableInfoCache turns off parsed info caching.
|
||||
func DisableInfoCache() {
|
||||
infoCacheMutex.Lock()
|
||||
defer infoCacheMutex.Unlock()
|
||||
infoCacheEnable = false
|
||||
}
|
||||
|
||||
// RemoveFromFileCache removes an entry from the file cache.
|
||||
func RemoveFromFileCache(fileurl string) {
|
||||
fileCacheMutex.Lock()
|
||||
defer fileCacheMutex.Unlock()
|
||||
if !fileCacheEnable {
|
||||
return
|
||||
}
|
||||
initializeFileCache()
|
||||
delete(fileCache, fileurl)
|
||||
}
|
||||
|
||||
// RemoveFromInfoCache removes an entry from the info cache.
|
||||
func RemoveFromInfoCache(filename string) {
|
||||
infoCacheMutex.Lock()
|
||||
defer infoCacheMutex.Unlock()
|
||||
if !infoCacheEnable {
|
||||
return
|
||||
}
|
||||
initializeInfoCache()
|
||||
delete(infoCache, filename)
|
||||
}
|
||||
|
||||
// GetInfoCache returns the info cache map.
|
||||
func GetInfoCache() map[string]*yaml.Node {
|
||||
infoCacheMutex.Lock()
|
||||
defer infoCacheMutex.Unlock()
|
||||
if infoCache == nil {
|
||||
initializeInfoCache()
|
||||
}
|
||||
return infoCache
|
||||
}
|
||||
|
||||
// ClearFileCache clears the file cache.
|
||||
func ClearFileCache() {
|
||||
fileCacheMutex.Lock()
|
||||
defer fileCacheMutex.Unlock()
|
||||
fileCache = make(map[string][]byte, 0)
|
||||
}
|
||||
|
||||
// ClearInfoCache clears the info cache.
|
||||
func ClearInfoCache() {
|
||||
infoCacheMutex.Lock()
|
||||
defer infoCacheMutex.Unlock()
|
||||
infoCache = make(map[string]*yaml.Node)
|
||||
}
|
||||
|
||||
// ClearCaches clears all caches.
|
||||
func ClearCaches() {
|
||||
ClearFileCache()
|
||||
ClearInfoCache()
|
||||
}
|
||||
|
||||
// FetchFile gets a specified file from the local filesystem or a remote location.
|
||||
func FetchFile(fileurl string) ([]byte, error) {
|
||||
fileCacheMutex.Lock()
|
||||
defer fileCacheMutex.Unlock()
|
||||
return fetchFile(fileurl)
|
||||
}
|
||||
|
||||
func fetchFile(fileurl string) ([]byte, error) {
|
||||
var bytes []byte
|
||||
initializeFileCache()
|
||||
if fileCacheEnable {
|
||||
bytes, ok := fileCache[fileurl]
|
||||
if ok {
|
||||
if verboseReader {
|
||||
log.Printf("Cache hit %s", fileurl)
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
if verboseReader {
|
||||
log.Printf("Fetching %s", fileurl)
|
||||
}
|
||||
}
|
||||
response, err := http.Get(fileurl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status)
|
||||
}
|
||||
bytes, err = ioutil.ReadAll(response.Body)
|
||||
if fileCacheEnable && err == nil {
|
||||
fileCache[fileurl] = bytes
|
||||
}
|
||||
return bytes, err
|
||||
}
|
||||
|
||||
// ReadBytesForFile reads the bytes of a file.
|
||||
func ReadBytesForFile(filename string) ([]byte, error) {
|
||||
fileCacheMutex.Lock()
|
||||
defer fileCacheMutex.Unlock()
|
||||
return readBytesForFile(filename)
|
||||
}
|
||||
|
||||
func readBytesForFile(filename string) ([]byte, error) {
|
||||
// is the filename a url?
|
||||
fileurl, _ := url.Parse(filename)
|
||||
if fileurl.Scheme != "" {
|
||||
// yes, fetch it
|
||||
bytes, err := fetchFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
// no, it's a local filename
|
||||
bytes, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes, nil
|
||||
}
|
||||
|
||||
// ReadInfoFromBytes unmarshals a file as a *yaml.Node.
|
||||
func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
|
||||
infoCacheMutex.Lock()
|
||||
defer infoCacheMutex.Unlock()
|
||||
return readInfoFromBytes(filename, bytes)
|
||||
}
|
||||
|
||||
func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) {
|
||||
initializeInfoCache()
|
||||
if infoCacheEnable {
|
||||
cachedInfo, ok := infoCache[filename]
|
||||
if ok {
|
||||
if verboseReader {
|
||||
log.Printf("Cache hit info for file %s", filename)
|
||||
}
|
||||
return cachedInfo, nil
|
||||
}
|
||||
if verboseReader {
|
||||
log.Printf("Reading info for file %s", filename)
|
||||
}
|
||||
}
|
||||
var info yaml.Node
|
||||
err := yaml.Unmarshal(bytes, &info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if infoCacheEnable && len(filename) > 0 {
|
||||
infoCache[filename] = &info
|
||||
}
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref.
|
||||
func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) {
|
||||
fileCacheMutex.Lock()
|
||||
defer fileCacheMutex.Unlock()
|
||||
infoCacheMutex.Lock()
|
||||
defer infoCacheMutex.Unlock()
|
||||
initializeInfoCache()
|
||||
if infoCacheEnable {
|
||||
info, ok := infoCache[ref]
|
||||
if ok {
|
||||
if verboseReader {
|
||||
log.Printf("Cache hit for ref %s#%s", basefile, ref)
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
if verboseReader {
|
||||
log.Printf("Reading info for ref %s#%s", basefile, ref)
|
||||
}
|
||||
}
|
||||
basedir, _ := filepath.Split(basefile)
|
||||
parts := strings.Split(ref, "#")
|
||||
var filename string
|
||||
if parts[0] != "" {
|
||||
filename = parts[0]
|
||||
if _, err := url.ParseRequestURI(parts[0]); err != nil {
|
||||
// It is not a URL, so the file is local
|
||||
filename = basedir + parts[0]
|
||||
}
|
||||
} else {
|
||||
filename = basefile
|
||||
}
|
||||
bytes, err := readBytesForFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info, err := readInfoFromBytes(filename, bytes)
|
||||
if info != nil && info.Kind == yaml.DocumentNode {
|
||||
info = info.Content[0]
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("File error: %v\n", err)
|
||||
} else {
|
||||
if info == nil {
|
||||
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
|
||||
}
|
||||
if len(parts) > 1 {
|
||||
path := strings.Split(parts[1], "/")
|
||||
for i, key := range path {
|
||||
if i > 0 {
|
||||
m := info
|
||||
if true {
|
||||
found := false
|
||||
for i := 0; i < len(m.Content); i += 2 {
|
||||
if m.Content[i].Value == key {
|
||||
info = m.Content[i+1]
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
infoCache[ref] = nil
|
||||
return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if infoCacheEnable {
|
||||
infoCache[ref] = info
|
||||
}
|
||||
return info, nil
|
||||
}
|
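FetchFile, ReadBytesForFile and ReadInfoForRef cooperate through the two caches above, so a $ref can be resolved without re-reading or re-parsing its source document. A minimal sketch of resolving a fragment of a local file (the file name and the $ref below are placeholders, not part of the vendored code):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/gnostic/compiler"
)

func main() {
	// Read "openapi.yaml" once and descend into the node addressed by the ref.
	node, err := compiler.ReadInfoForRef("openapi.yaml", "#/components/schemas/Pet")
	if err != nil {
		log.Fatal(err)
	}
	// Display prints scalar string nodes with their type; other nodes fall
	// back to a %+v dump (see helpers.go above).
	fmt.Println(compiler.Display(node))
}
```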
13 vendor/github.com/google/gnostic/extensions/README.md generated vendored Normal file
@@ -0,0 +1,13 @@
# Extensions

**Extension Support is experimental.**

This directory contains support code for building Gnostic extension handlers and
associated examples.

Extension handlers can be used to compile vendor or specification extensions
into protocol buffer structures.

Like plugins, extension handlers are built as separate executables. Extension
bodies are written to extension handlers as serialized
ExtensionHandlerRequests.
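As the README notes, a handler is a standalone executable: it receives a serialized ExtensionHandlerRequest on stdin and answers with a serialized ExtensionHandlerResponse on stdout, which is the same exchange that compiler/extensions.go drives from the caller side. A minimal handler sketch; the behaviour and the extension name "x-sample" are purely illustrative:

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"

	extensions "github.com/google/gnostic/extensions"
)

func main() {
	// The compiler writes one serialized ExtensionHandlerRequest to stdin.
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("unable to read request: %v", err)
	}
	request := &extensions.ExtensionHandlerRequest{}
	if err := proto.Unmarshal(data, request); err != nil {
		log.Fatalf("unable to parse request: %v", err)
	}

	response := &extensions.ExtensionHandlerResponse{}
	// A real handler would parse request.GetWrapper().GetYaml() and set
	// response.Value to an Any-wrapped message; this sketch only reports
	// whether it recognizes the extension name.
	if request.GetWrapper().GetExtensionName() == "x-sample" {
		response.Handled = true
	}

	// The serialized response goes back to the compiler on stdout.
	out, err := proto.Marshal(response)
	if err != nil {
		log.Fatalf("unable to serialize response: %v", err)
	}
	os.Stdout.Write(out)
}
```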
461 vendor/github.com/google/gnostic/extensions/extension.pb.go generated vendored Normal file
@@ -0,0 +1,461 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.18.1
|
||||
// source: extensions/extension.proto
|
||||
|
||||
package gnostic_extension_v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// The version number of Gnostic.
|
||||
type Version struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
|
||||
Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
|
||||
Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
|
||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
||||
// be empty for mainline stable releases.
|
||||
Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Version) Reset() {
|
||||
*x = Version{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_extensions_extension_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Version) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Version) ProtoMessage() {}
|
||||
|
||||
func (x *Version) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_extensions_extension_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Version.ProtoReflect.Descriptor instead.
|
||||
func (*Version) Descriptor() ([]byte, []int) {
|
||||
return file_extensions_extension_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Version) GetMajor() int32 {
|
||||
if x != nil {
|
||||
return x.Major
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Version) GetMinor() int32 {
|
||||
if x != nil {
|
||||
return x.Minor
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Version) GetPatch() int32 {
|
||||
if x != nil {
|
||||
return x.Patch
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Version) GetSuffix() string {
|
||||
if x != nil {
|
||||
return x.Suffix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// An encoded Request is written to the ExtensionHandler's stdin.
|
||||
type ExtensionHandlerRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The extension to process.
|
||||
Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"`
|
||||
// The version number of Gnostic.
|
||||
CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerRequest) Reset() {
|
||||
*x = ExtensionHandlerRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_extensions_extension_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExtensionHandlerRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_extensions_extension_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
|
||||
return file_extensions_extension_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper {
|
||||
if x != nil {
|
||||
return x.Wrapper
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version {
|
||||
if x != nil {
|
||||
return x.CompilerVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// The extension writes an encoded ExtensionHandlerResponse to stdout.
|
||||
type ExtensionHandlerResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// true if the extension is handled by the extension handler; false otherwise
|
||||
Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"`
|
||||
// Error message(s). If non-empty, the extension handling failed.
|
||||
// The extension handler process should exit with status code zero
|
||||
// even if it reports an error in this way.
|
||||
//
|
||||
// This should be used to indicate errors which prevent the extension from
|
||||
// operating as intended. Errors which indicate a problem in gnostic
|
||||
// itself -- such as the input Document being unparseable -- should be
|
||||
// reported by writing a message to stderr and exiting with a non-zero
|
||||
// status code.
|
||||
Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"`
|
||||
// text output
|
||||
Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerResponse) Reset() {
|
||||
*x = ExtensionHandlerResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_extensions_extension_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExtensionHandlerResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_extensions_extension_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
|
||||
return file_extensions_extension_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerResponse) GetHandled() bool {
|
||||
if x != nil {
|
||||
return x.Handled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerResponse) GetErrors() []string {
|
||||
if x != nil {
|
||||
return x.Errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExtensionHandlerResponse) GetValue() *anypb.Any {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Wrapper struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// version of the OpenAPI specification in which this extension was written.
|
||||
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
|
||||
// Name of the extension.
|
||||
ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"`
|
||||
// YAML-formatted extension value.
|
||||
Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Wrapper) Reset() {
|
||||
*x = Wrapper{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_extensions_extension_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Wrapper) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Wrapper) ProtoMessage() {}
|
||||
|
||||
func (x *Wrapper) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_extensions_extension_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead.
|
||||
func (*Wrapper) Descriptor() ([]byte, []int) {
|
||||
return file_extensions_extension_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *Wrapper) GetVersion() string {
|
||||
if x != nil {
|
||||
return x.Version
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Wrapper) GetExtensionName() string {
|
||||
if x != nil {
|
||||
return x.ExtensionName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Wrapper) GetYaml() string {
|
||||
if x != nil {
|
||||
return x.Yaml
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var File_extensions_extension_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_extensions_extension_proto_rawDesc = []byte{
|
||||
0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74,
|
||||
0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e,
|
||||
0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e,
|
||||
0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a,
|
||||
0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f,
|
||||
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d,
|
||||
0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75,
|
||||
0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66,
|
||||
0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37,
|
||||
0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
|
||||
0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07,
|
||||
0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69,
|
||||
0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65,
|
||||
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
|
||||
0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61,
|
||||
0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a,
|
||||
0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
|
||||
0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72,
|
||||
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12,
|
||||
0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
|
||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
||||
0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57,
|
||||
0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
|
||||
0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
|
||||
0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
|
||||
0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18,
|
||||
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f,
|
||||
0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47,
|
||||
0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50,
|
||||
0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b,
|
||||
0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
|
||||
0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_extensions_extension_proto_rawDescOnce sync.Once
|
||||
file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_extensions_extension_proto_rawDescGZIP() []byte {
|
||||
file_extensions_extension_proto_rawDescOnce.Do(func() {
|
||||
file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData)
|
||||
})
|
||||
return file_extensions_extension_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_extensions_extension_proto_goTypes = []interface{}{
|
||||
(*Version)(nil), // 0: gnostic.extension.v1.Version
|
||||
(*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
|
||||
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
|
||||
(*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper
|
||||
(*anypb.Any)(nil), // 4: google.protobuf.Any
|
||||
}
|
||||
var file_extensions_extension_proto_depIdxs = []int32{
|
||||
3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper
|
||||
0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version
|
||||
4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any
|
||||
3, // [3:3] is the sub-list for method output_type
|
||||
3, // [3:3] is the sub-list for method input_type
|
||||
3, // [3:3] is the sub-list for extension type_name
|
||||
3, // [3:3] is the sub-list for extension extendee
|
||||
0, // [0:3] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_extensions_extension_proto_init() }
|
||||
func file_extensions_extension_proto_init() {
|
||||
if File_extensions_extension_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Version); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExtensionHandlerRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExtensionHandlerResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Wrapper); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_extensions_extension_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_extensions_extension_proto_goTypes,
|
||||
DependencyIndexes: file_extensions_extension_proto_depIdxs,
|
||||
MessageInfos: file_extensions_extension_proto_msgTypes,
|
||||
}.Build()
|
||||
File_extensions_extension_proto = out.File
|
||||
file_extensions_extension_proto_rawDesc = nil
|
||||
file_extensions_extension_proto_goTypes = nil
|
||||
file_extensions_extension_proto_depIdxs = nil
|
||||
}
|
97
vendor/github.com/google/gnostic/extensions/extension.proto
generated
vendored
Normal file
97
vendor/github.com/google/gnostic/extensions/extension.proto
generated
vendored
Normal file
@ -0,0 +1,97 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package gnostic.extension.v1;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
// This option lets the proto compiler generate Java code inside the package
|
||||
// name (see below) instead of inside an outer class. It creates a simpler
|
||||
// developer experience by reducing one level of name nesting and being
|
||||
// consistent with most programming languages that don't support outer classes.
|
||||
option java_multiple_files = true;
|
||||
|
||||
// The Java outer classname should be the filename in UpperCamelCase. This
|
||||
// class is only used to hold proto descriptor, so developers don't need to
|
||||
// work with it directly.
|
||||
option java_outer_classname = "GnosticExtension";
|
||||
|
||||
// The Java package name must be proto package name with proper prefix.
|
||||
option java_package = "org.gnostic.v1";
|
||||
|
||||
// A reasonable prefix for the Objective-C symbols generated from the package.
|
||||
// It should at a minimum be 3 characters long, all uppercase, and convention
|
||||
// is to use an abbreviation of the package name. Something short, but
|
||||
// hopefully unique enough to not conflict with things that may come along in
|
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
|
||||
//
|
||||
// "Gnostic Extension"
|
||||
option objc_class_prefix = "GNX";
|
||||
|
||||
// The Go package name.
|
||||
option go_package = "./extensions;gnostic_extension_v1";
|
||||
|
||||
// The version number of Gnostic.
|
||||
message Version {
|
||||
int32 major = 1;
|
||||
int32 minor = 2;
|
||||
int32 patch = 3;
|
||||
// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
|
||||
// be empty for mainline stable releases.
|
||||
string suffix = 4;
|
||||
}
|
||||
|
||||
// An encoded Request is written to the ExtensionHandler's stdin.
|
||||
message ExtensionHandlerRequest {
|
||||
|
||||
// The extension to process.
|
||||
Wrapper wrapper = 1;
|
||||
|
||||
// The version number of Gnostic.
|
||||
Version compiler_version = 2;
|
||||
}
|
||||
|
||||
// The extension writes an encoded ExtensionHandlerResponse to stdout.
|
||||
message ExtensionHandlerResponse {
|
||||
|
||||
// true if the extension is handled by the extension handler; false otherwise
|
||||
bool handled = 1;
|
||||
|
||||
// Error message(s). If non-empty, the extension handling failed.
|
||||
// The extension handler process should exit with status code zero
|
||||
// even if it reports an error in this way.
|
||||
//
|
||||
// This should be used to indicate errors which prevent the extension from
|
||||
// operating as intended. Errors which indicate a problem in gnostic
|
||||
// itself -- such as the input Document being unparseable -- should be
|
||||
// reported by writing a message to stderr and exiting with a non-zero
|
||||
// status code.
|
||||
repeated string errors = 2;
|
||||
|
||||
// The output of the extension handler, wrapped as an Any message.
|
||||
google.protobuf.Any value = 3;
|
||||
}
|
||||
|
||||
message Wrapper {
|
||||
// version of the OpenAPI specification in which this extension was written.
|
||||
string version = 1;
|
||||
|
||||
// Name of the extension.
|
||||
string extension_name = 2;
|
||||
|
||||
// YAML-formatted extension value.
|
||||
string yaml = 3;
|
||||
}
|
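The comments in this file describe the wire protocol between gnostic and an extension handler: the compiler writes an encoded ExtensionHandlerRequest to the handler's stdin and reads an encoded ExtensionHandlerResponse back from its stdout. The following is an illustrative sketch (not part of the vendored sources) of a caller driving a handler binary over that protocol, using the generated Go types shown earlier; the binary name "gnostic-x-sample" and the sample field values are hypothetical.

package main

import (
	"bytes"
	"log"
	"os/exec"

	"github.com/golang/protobuf/proto"
	ext "github.com/google/gnostic/extensions"
)

func main() {
	// Build a request for a hypothetical extension named "x-sample".
	request := &ext.ExtensionHandlerRequest{
		Wrapper: &ext.Wrapper{
			Version:       "v2",
			ExtensionName: "x-sample",
			Yaml:          "key: value",
		},
		CompilerVersion: &ext.Version{Major: 0, Minor: 6, Patch: 0},
	}
	in, err := proto.Marshal(request)
	if err != nil {
		log.Fatal(err)
	}
	// The handler reads the encoded request from stdin and writes the
	// encoded response to stdout.
	cmd := exec.Command("gnostic-x-sample")
	cmd.Stdin = bytes.NewReader(in)
	out, err := cmd.Output()
	if err != nil {
		log.Fatal(err)
	}
	response := &ext.ExtensionHandlerResponse{}
	if err := proto.Unmarshal(out, response); err != nil {
		log.Fatal(err)
	}
	log.Printf("handled=%v errors=%v", response.Handled, response.Errors)
}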
64
vendor/github.com/google/gnostic/extensions/extensions.go
generated
vendored
Normal file
64
vendor/github.com/google/gnostic/extensions/extensions.go
generated
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gnostic_extension_v1
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
)
|
||||
|
||||
type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
|
||||
|
||||
// Main implements the main program of an extension handler.
|
||||
func Main(handler extensionHandler) {
|
||||
// unpack the request
|
||||
data, err := ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
log.Println("File error:", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
log.Println("No input data.")
|
||||
os.Exit(1)
|
||||
}
|
||||
request := &ExtensionHandlerRequest{}
|
||||
err = proto.Unmarshal(data, request)
|
||||
if err != nil {
|
||||
log.Println("Input error:", err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
// call the handler
|
||||
handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml)
|
||||
// respond with the output of the handler
|
||||
response := &ExtensionHandlerResponse{
|
||||
Handled: false, // default assumption
|
||||
Errors: make([]string, 0),
|
||||
}
|
||||
if err != nil {
|
||||
response.Errors = append(response.Errors, err.Error())
|
||||
} else if handled {
|
||||
response.Handled = true
|
||||
response.Value, err = ptypes.MarshalAny(output)
|
||||
if err != nil {
|
||||
response.Errors = append(response.Errors, err.Error())
|
||||
}
|
||||
}
|
||||
responseBytes, _ := proto.Marshal(response)
|
||||
os.Stdout.Write(responseBytes)
|
||||
}
|
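For the other side of that protocol, Main above does the stdin/stdout plumbing, so an extension binary only needs to supply a handler function. A minimal sketch follows (not part of the vendored sources); the extension name "x-sample" is hypothetical, and a real handler would normally return its own generated message type rather than reusing Version.

package main

import (
	"github.com/golang/protobuf/proto"
	ext "github.com/google/gnostic/extensions"
)

func main() {
	// Main unmarshals the request, calls the handler, and writes the response.
	ext.Main(func(name string, yamlInput string) (bool, proto.Message, error) {
		if name != "x-sample" {
			// Not our extension; report it as unhandled.
			return false, nil, nil
		}
		// Parse yamlInput as needed and return a message describing it.
		return true, &ext.Version{Major: 1}, nil
	})
}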
4
vendor/github.com/google/gnostic/jsonschema/README.md
generated
vendored
Normal file
4
vendor/github.com/google/gnostic/jsonschema/README.md
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
# jsonschema
|
||||
|
||||
This directory contains code for reading, writing, and manipulating JSON
|
||||
schemas.
|
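As a quick illustration of the reading and display code in this directory (a sketch, not part of the vendored sources), the package can load the embedded JSON Schema meta-schema and print it using the Schema.String method defined in display.go:

package main

import (
	"fmt"
	"log"

	"github.com/google/gnostic/jsonschema"
)

func main() {
	// NewBaseSchema (reader.go) decodes the embedded draft-04 meta-schema.
	schema, err := jsonschema.NewBaseSchema()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(schema.String())
}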
84
vendor/github.com/google/gnostic/jsonschema/base.go
generated
vendored
Normal file
84
vendor/github.com/google/gnostic/jsonschema/base.go
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED.
|
||||
|
||||
package jsonschema
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
)
|
||||
|
||||
func baseSchemaBytes() ([]byte, error) {
|
||||
return base64.StdEncoding.DecodeString(
|
||||
`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi
|
||||
JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl
|
||||
c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK
|
||||
ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg
|
||||
ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg
|
||||
ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp
|
||||
bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp
|
||||
dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl
|
||||
ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK
|
||||
ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v
|
||||
bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg
|
||||
ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki
|
||||
LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p
|
||||
bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s
|
||||
CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog
|
||||
ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg
|
||||
ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog
|
||||
ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg
|
||||
ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog
|
||||
ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6
|
||||
IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog
|
||||
ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1
|
||||
ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl
|
||||
ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw
|
||||
ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg
|
||||
ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg
|
||||
ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg
|
||||
ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg
|
||||
IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0
|
||||
aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg
|
||||
ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg
|
||||
ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg
|
||||
ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK
|
||||
ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi
|
||||
ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP
|
||||
ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy
|
||||
ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg
|
||||
ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv
|
||||
ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi
|
||||
OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl
|
||||
SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs
|
||||
dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k
|
||||
ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk
|
||||
cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl
|
||||
cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh
|
||||
ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg
|
||||
ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg
|
||||
ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk
|
||||
ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk
|
||||
ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6
|
||||
IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi
|
||||
b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9
|
||||
LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl
|
||||
cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv
|
||||
bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog
|
||||
ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq
|
||||
ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg
|
||||
ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg
|
||||
ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg
|
||||
ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg
|
||||
ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu
|
||||
aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh
|
||||
bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl
|
||||
cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs
|
||||
CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs
|
||||
ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg
|
||||
ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg
|
||||
ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy
|
||||
YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5
|
||||
IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg
|
||||
fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6
|
||||
IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1
|
||||
c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)}
|
229
vendor/github.com/google/gnostic/jsonschema/display.go
generated
vendored
Normal file
229
vendor/github.com/google/gnostic/jsonschema/display.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jsonschema
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//
|
||||
// DISPLAY
|
||||
// The following methods display Schemas.
|
||||
//
|
||||
|
||||
// Description returns a string representation of a string or string array.
|
||||
func (s *StringOrStringArray) Description() string {
|
||||
if s.String != nil {
|
||||
return *s.String
|
||||
}
|
||||
if s.StringArray != nil {
|
||||
return strings.Join(*s.StringArray, ", ")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// String returns a string representation of a Schema.
|
||||
func (schema *Schema) String() string {
|
||||
return schema.describeSchema("")
|
||||
}
|
||||
|
||||
// Helper: Returns a string representation of a Schema indented by a specified string.
|
||||
func (schema *Schema) describeSchema(indent string) string {
|
||||
result := ""
|
||||
if schema.Schema != nil {
|
||||
result += indent + "$schema: " + *(schema.Schema) + "\n"
|
||||
}
|
||||
if schema.ID != nil {
|
||||
result += indent + "id: " + *(schema.ID) + "\n"
|
||||
}
|
||||
if schema.MultipleOf != nil {
|
||||
result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf))
|
||||
}
|
||||
if schema.Maximum != nil {
|
||||
result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum))
|
||||
}
|
||||
if schema.ExclusiveMaximum != nil {
|
||||
result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum))
|
||||
}
|
||||
if schema.Minimum != nil {
|
||||
result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum))
|
||||
}
|
||||
if schema.ExclusiveMinimum != nil {
|
||||
result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum))
|
||||
}
|
||||
if schema.MaxLength != nil {
|
||||
result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength))
|
||||
}
|
||||
if schema.MinLength != nil {
|
||||
result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength))
|
||||
}
|
||||
if schema.Pattern != nil {
|
||||
result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern))
|
||||
}
|
||||
if schema.AdditionalItems != nil {
|
||||
s := schema.AdditionalItems.Schema
|
||||
if s != nil {
|
||||
result += indent + "additionalItems:\n"
|
||||
result += s.describeSchema(indent + " ")
|
||||
} else {
|
||||
b := *(schema.AdditionalItems.Boolean)
|
||||
result += indent + fmt.Sprintf("additionalItems: %+v\n", b)
|
||||
}
|
||||
}
|
||||
if schema.Items != nil {
|
||||
result += indent + "items:\n"
|
||||
items := schema.Items
|
||||
if items.SchemaArray != nil {
|
||||
for i, s := range *(items.SchemaArray) {
|
||||
result += indent + " " + fmt.Sprintf("%d", i) + ":\n"
|
||||
result += s.describeSchema(indent + " " + " ")
|
||||
}
|
||||
} else if items.Schema != nil {
|
||||
result += items.Schema.describeSchema(indent + " " + " ")
|
||||
}
|
||||
}
|
||||
if schema.MaxItems != nil {
|
||||
result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems))
|
||||
}
|
||||
if schema.MinItems != nil {
|
||||
result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems))
|
||||
}
|
||||
if schema.UniqueItems != nil {
|
||||
result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems))
|
||||
}
|
||||
if schema.MaxProperties != nil {
|
||||
result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties))
|
||||
}
|
||||
if schema.MinProperties != nil {
|
||||
result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties))
|
||||
}
|
||||
if schema.Required != nil {
|
||||
result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required))
|
||||
}
|
||||
if schema.AdditionalProperties != nil {
|
||||
s := schema.AdditionalProperties.Schema
|
||||
if s != nil {
|
||||
result += indent + "additionalProperties:\n"
|
||||
result += s.describeSchema(indent + " ")
|
||||
} else {
|
||||
b := *(schema.AdditionalProperties.Boolean)
|
||||
result += indent + fmt.Sprintf("additionalProperties: %+v\n", b)
|
||||
}
|
||||
}
|
||||
if schema.Properties != nil {
|
||||
result += indent + "properties:\n"
|
||||
for _, pair := range *(schema.Properties) {
|
||||
name := pair.Name
|
||||
s := pair.Value
|
||||
result += indent + " " + name + ":\n"
|
||||
result += s.describeSchema(indent + " " + " ")
|
||||
}
|
||||
}
|
||||
if schema.PatternProperties != nil {
|
||||
result += indent + "patternProperties:\n"
|
||||
for _, pair := range *(schema.PatternProperties) {
|
||||
name := pair.Name
|
||||
s := pair.Value
|
||||
result += indent + " " + name + ":\n"
|
||||
result += s.describeSchema(indent + " " + " ")
|
||||
}
|
||||
}
|
||||
if schema.Dependencies != nil {
|
||||
result += indent + "dependencies:\n"
|
||||
for _, pair := range *(schema.Dependencies) {
|
||||
name := pair.Name
|
||||
schemaOrStringArray := pair.Value
|
||||
s := schemaOrStringArray.Schema
|
||||
if s != nil {
|
||||
result += indent + " " + name + ":\n"
|
||||
result += s.describeSchema(indent + " " + " ")
|
||||
} else {
|
||||
a := schemaOrStringArray.StringArray
|
||||
if a != nil {
|
||||
result += indent + " " + name + ":\n"
|
||||
for _, s2 := range *a {
|
||||
result += indent + " " + " " + s2 + "\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
if schema.Enumeration != nil {
|
||||
result += indent + "enumeration:\n"
|
||||
for _, value := range *(schema.Enumeration) {
|
||||
if value.String != nil {
|
||||
result += indent + " " + fmt.Sprintf("%+v\n", *value.String)
|
||||
} else {
|
||||
result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool)
|
||||
}
|
||||
}
|
||||
}
|
||||
if schema.Type != nil {
|
||||
result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description())
|
||||
}
|
||||
if schema.AllOf != nil {
|
||||
result += indent + "allOf:\n"
|
||||
for _, s := range *(schema.AllOf) {
|
||||
result += s.describeSchema(indent + " ")
|
||||
result += indent + "-\n"
|
||||
}
|
||||
}
|
||||
if schema.AnyOf != nil {
|
||||
result += indent + "anyOf:\n"
|
||||
for _, s := range *(schema.AnyOf) {
|
||||
result += s.describeSchema(indent + " ")
|
||||
result += indent + "-\n"
|
||||
}
|
||||
}
|
||||
if schema.OneOf != nil {
|
||||
result += indent + "oneOf:\n"
|
||||
for _, s := range *(schema.OneOf) {
|
||||
result += s.describeSchema(indent + " ")
|
||||
result += indent + "-\n"
|
||||
}
|
||||
}
|
||||
if schema.Not != nil {
|
||||
result += indent + "not:\n"
|
||||
result += schema.Not.describeSchema(indent + " ")
|
||||
}
|
||||
if schema.Definitions != nil {
|
||||
result += indent + "definitions:\n"
|
||||
for _, pair := range *(schema.Definitions) {
|
||||
name := pair.Name
|
||||
s := pair.Value
|
||||
result += indent + " " + name + ":\n"
|
||||
result += s.describeSchema(indent + " " + " ")
|
||||
}
|
||||
}
|
||||
if schema.Title != nil {
|
||||
result += indent + "title: " + *(schema.Title) + "\n"
|
||||
}
|
||||
if schema.Description != nil {
|
||||
result += indent + "description: " + *(schema.Description) + "\n"
|
||||
}
|
||||
if schema.Default != nil {
|
||||
result += indent + "default:\n"
|
||||
result += indent + fmt.Sprintf(" %+v\n", *(schema.Default))
|
||||
}
|
||||
if schema.Format != nil {
|
||||
result += indent + "format: " + *(schema.Format) + "\n"
|
||||
}
|
||||
if schema.Ref != nil {
|
||||
result += indent + "$ref: " + *(schema.Ref) + "\n"
|
||||
}
|
||||
return result
|
||||
}
|
228
vendor/github.com/google/gnostic/jsonschema/models.go
generated
vendored
Normal file
228
vendor/github.com/google/gnostic/jsonschema/models.go
generated
vendored
Normal file
@ -0,0 +1,228 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package jsonschema supports the reading, writing, and manipulation
|
||||
// of JSON Schemas.
|
||||
package jsonschema
|
||||
|
||||
import "gopkg.in/yaml.v3"
|
||||
|
||||
// The Schema struct models a JSON Schema and, because schemas are
|
||||
// defined hierarchically, contains many references to itself.
|
||||
// All fields are pointers and are nil if the associated values
|
||||
// are not specified.
|
||||
type Schema struct {
|
||||
Schema *string // $schema
|
||||
ID *string // id keyword used for $ref resolution scope
|
||||
Ref *string // $ref, i.e. JSON Pointers
|
||||
|
||||
// http://json-schema.org/latest/json-schema-validation.html
|
||||
// 5.1. Validation keywords for numeric instances (number and integer)
|
||||
MultipleOf *SchemaNumber
|
||||
Maximum *SchemaNumber
|
||||
ExclusiveMaximum *bool
|
||||
Minimum *SchemaNumber
|
||||
ExclusiveMinimum *bool
|
||||
|
||||
// 5.2. Validation keywords for strings
|
||||
MaxLength *int64
|
||||
MinLength *int64
|
||||
Pattern *string
|
||||
|
||||
// 5.3. Validation keywords for arrays
|
||||
AdditionalItems *SchemaOrBoolean
|
||||
Items *SchemaOrSchemaArray
|
||||
MaxItems *int64
|
||||
MinItems *int64
|
||||
UniqueItems *bool
|
||||
|
||||
// 5.4. Validation keywords for objects
|
||||
MaxProperties *int64
|
||||
MinProperties *int64
|
||||
Required *[]string
|
||||
AdditionalProperties *SchemaOrBoolean
|
||||
Properties *[]*NamedSchema
|
||||
PatternProperties *[]*NamedSchema
|
||||
Dependencies *[]*NamedSchemaOrStringArray
|
||||
|
||||
// 5.5. Validation keywords for any instance type
|
||||
Enumeration *[]SchemaEnumValue
|
||||
Type *StringOrStringArray
|
||||
AllOf *[]*Schema
|
||||
AnyOf *[]*Schema
|
||||
OneOf *[]*Schema
|
||||
Not *Schema
|
||||
Definitions *[]*NamedSchema
|
||||
|
||||
// 6. Metadata keywords
|
||||
Title *string
|
||||
Description *string
|
||||
Default *yaml.Node
|
||||
|
||||
// 7. Semantic validation with "format"
|
||||
Format *string
|
||||
}
|
||||
|
||||
// These helper structs represent "combination" types that generally can
|
||||
// have values of one type or another. All are used to represent parts
|
||||
// of Schemas.
|
||||
|
||||
// SchemaNumber represents a value that can be either an Integer or a Float.
|
||||
type SchemaNumber struct {
|
||||
Integer *int64
|
||||
Float *float64
|
||||
}
|
||||
|
||||
// NewSchemaNumberWithInteger creates and returns a new object
|
||||
func NewSchemaNumberWithInteger(i int64) *SchemaNumber {
|
||||
result := &SchemaNumber{}
|
||||
result.Integer = &i
|
||||
return result
|
||||
}
|
||||
|
||||
// NewSchemaNumberWithFloat creates and returns a new object
|
||||
func NewSchemaNumberWithFloat(f float64) *SchemaNumber {
|
||||
result := &SchemaNumber{}
|
||||
result.Float = &f
|
||||
return result
|
||||
}
|
||||
|
||||
// SchemaOrBoolean represents a value that can be either a Schema or a Boolean.
|
||||
type SchemaOrBoolean struct {
|
||||
Schema *Schema
|
||||
Boolean *bool
|
||||
}
|
||||
|
||||
// NewSchemaOrBooleanWithSchema creates and returns a new object
|
||||
func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean {
|
||||
result := &SchemaOrBoolean{}
|
||||
result.Schema = s
|
||||
return result
|
||||
}
|
||||
|
||||
// NewSchemaOrBooleanWithBoolean creates and returns a new object
|
||||
func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean {
|
||||
result := &SchemaOrBoolean{}
|
||||
result.Boolean = &b
|
||||
return result
|
||||
}
|
||||
|
||||
// StringOrStringArray represents a value that can be either
|
||||
// a String or an Array of Strings.
|
||||
type StringOrStringArray struct {
|
||||
String *string
|
||||
StringArray *[]string
|
||||
}
|
||||
|
||||
// NewStringOrStringArrayWithString creates and returns a new object
|
||||
func NewStringOrStringArrayWithString(s string) *StringOrStringArray {
|
||||
result := &StringOrStringArray{}
|
||||
result.String = &s
|
||||
return result
|
||||
}
|
||||
|
||||
// NewStringOrStringArrayWithStringArray creates and returns a new object
|
||||
func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray {
|
||||
result := &StringOrStringArray{}
|
||||
result.StringArray = &a
|
||||
return result
|
||||
}
|
||||
|
||||
// SchemaOrStringArray represents a value that can be either
|
||||
// a Schema or an Array of Strings.
|
||||
type SchemaOrStringArray struct {
|
||||
Schema *Schema
|
||||
StringArray *[]string
|
||||
}
|
||||
|
||||
// SchemaOrSchemaArray represents a value that can be either
|
||||
// a Schema or an Array of Schemas.
|
||||
type SchemaOrSchemaArray struct {
|
||||
Schema *Schema
|
||||
SchemaArray *[]*Schema
|
||||
}
|
||||
|
||||
// NewSchemaOrSchemaArrayWithSchema creates and returns a new object
|
||||
func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray {
|
||||
result := &SchemaOrSchemaArray{}
|
||||
result.Schema = s
|
||||
return result
|
||||
}
|
||||
|
||||
// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object
|
||||
func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray {
|
||||
result := &SchemaOrSchemaArray{}
|
||||
result.SchemaArray = &a
|
||||
return result
|
||||
}
|
||||
|
||||
// SchemaEnumValue represents a value that can be part of an
|
||||
// enumeration in a Schema.
|
||||
type SchemaEnumValue struct {
|
||||
String *string
|
||||
Bool *bool
|
||||
}
|
||||
|
||||
// NamedSchema is a name-value pair that is used to emulate maps
|
||||
// with ordered keys.
|
||||
type NamedSchema struct {
|
||||
Name string
|
||||
Value *Schema
|
||||
}
|
||||
|
||||
// NewNamedSchema creates and returns a new object
|
||||
func NewNamedSchema(name string, value *Schema) *NamedSchema {
|
||||
return &NamedSchema{Name: name, Value: value}
|
||||
}
|
||||
|
||||
// NamedSchemaOrStringArray is a name-value pair that is used
|
||||
// to emulate maps with ordered keys.
|
||||
type NamedSchemaOrStringArray struct {
|
||||
Name string
|
||||
Value *SchemaOrStringArray
|
||||
}
|
||||
|
||||
// Access named subschemas by name
|
||||
|
||||
func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema {
|
||||
if array == nil {
|
||||
return nil
|
||||
}
|
||||
for _, pair := range *array {
|
||||
if pair.Name == name {
|
||||
return pair.Value
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PropertyWithName returns the selected element.
|
||||
func (s *Schema) PropertyWithName(name string) *Schema {
|
||||
return namedSchemaArrayElementWithName(s.Properties, name)
|
||||
}
|
||||
|
||||
// PatternPropertyWithName returns the selected element.
|
||||
func (s *Schema) PatternPropertyWithName(name string) *Schema {
|
||||
return namedSchemaArrayElementWithName(s.PatternProperties, name)
|
||||
}
|
||||
|
||||
// DefinitionWithName returns the selected element.
|
||||
func (s *Schema) DefinitionWithName(name string) *Schema {
|
||||
return namedSchemaArrayElementWithName(s.Definitions, name)
|
||||
}
|
||||
|
||||
// AddProperty adds a named property.
|
||||
func (s *Schema) AddProperty(name string, property *Schema) {
|
||||
*s.Properties = append(*s.Properties, NewNamedSchema(name, property))
|
||||
}
|
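A small sketch (not part of the vendored sources) of how these models and helper constructors compose when building a schema by hand. Note that AddProperty dereferences s.Properties, so the slice must be initialized before the first call.

package main

import (
	"fmt"

	"github.com/google/gnostic/jsonschema"
)

func main() {
	s := &jsonschema.Schema{}
	s.Type = jsonschema.NewStringOrStringArrayWithString("object")

	// AddProperty assumes Properties is non-nil, so initialize it first.
	props := make([]*jsonschema.NamedSchema, 0)
	s.Properties = &props

	name := &jsonschema.Schema{Type: jsonschema.NewStringOrStringArrayWithString("string")}
	s.AddProperty("name", name)

	required := []string{"name"}
	s.Required = &required

	fmt.Println(s.String())
}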
394
vendor/github.com/google/gnostic/jsonschema/operations.go
generated
vendored
Normal file
394
vendor/github.com/google/gnostic/jsonschema/operations.go
generated
vendored
Normal file
@ -0,0 +1,394 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jsonschema
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//
|
||||
// OPERATIONS
|
||||
// The following methods perform operations on Schemas.
|
||||
//
|
||||
|
||||
// IsEmpty returns true if no members of the Schema are specified.
|
||||
func (schema *Schema) IsEmpty() bool {
|
||||
return (schema.Schema == nil) &&
|
||||
(schema.ID == nil) &&
|
||||
(schema.MultipleOf == nil) &&
|
||||
(schema.Maximum == nil) &&
|
||||
(schema.ExclusiveMaximum == nil) &&
|
||||
(schema.Minimum == nil) &&
|
||||
(schema.ExclusiveMinimum == nil) &&
|
||||
(schema.MaxLength == nil) &&
|
||||
(schema.MinLength == nil) &&
|
||||
(schema.Pattern == nil) &&
|
||||
(schema.AdditionalItems == nil) &&
|
||||
(schema.Items == nil) &&
|
||||
(schema.MaxItems == nil) &&
|
||||
(schema.MinItems == nil) &&
|
||||
(schema.UniqueItems == nil) &&
|
||||
(schema.MaxProperties == nil) &&
|
||||
(schema.MinProperties == nil) &&
|
||||
(schema.Required == nil) &&
|
||||
(schema.AdditionalProperties == nil) &&
|
||||
(schema.Properties == nil) &&
|
||||
(schema.PatternProperties == nil) &&
|
||||
(schema.Dependencies == nil) &&
|
||||
(schema.Enumeration == nil) &&
|
||||
(schema.Type == nil) &&
|
||||
(schema.AllOf == nil) &&
|
||||
(schema.AnyOf == nil) &&
|
||||
(schema.OneOf == nil) &&
|
||||
(schema.Not == nil) &&
|
||||
(schema.Definitions == nil) &&
|
||||
(schema.Title == nil) &&
|
||||
(schema.Description == nil) &&
|
||||
(schema.Default == nil) &&
|
||||
(schema.Format == nil) &&
|
||||
(schema.Ref == nil)
|
||||
}
|
||||
|
||||
// IsEqual returns true if two schemas are equal.
|
||||
func (schema *Schema) IsEqual(schema2 *Schema) bool {
|
||||
return schema.String() == schema2.String()
|
||||
}
|
||||
|
||||
// SchemaOperation represents a function that can be applied to a Schema.
|
||||
type SchemaOperation func(schema *Schema, context string)
|
||||
|
||||
// applyToSchemas applies a specified function to a Schema and all of the Schemas that it contains.
|
||||
func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) {
|
||||
|
||||
if schema.AdditionalItems != nil {
|
||||
s := schema.AdditionalItems.Schema
|
||||
if s != nil {
|
||||
s.applyToSchemas(operation, "AdditionalItems")
|
||||
}
|
||||
}
|
||||
|
||||
if schema.Items != nil {
|
||||
if schema.Items.SchemaArray != nil {
|
||||
for _, s := range *(schema.Items.SchemaArray) {
|
||||
s.applyToSchemas(operation, "Items.SchemaArray")
|
||||
}
|
||||
} else if schema.Items.Schema != nil {
|
||||
schema.Items.Schema.applyToSchemas(operation, "Items.Schema")
|
||||
}
|
||||
}
|
||||
|
||||
if schema.AdditionalProperties != nil {
|
||||
s := schema.AdditionalProperties.Schema
|
||||
if s != nil {
|
||||
s.applyToSchemas(operation, "AdditionalProperties")
|
||||
}
|
||||
}
|
||||
|
||||
if schema.Properties != nil {
|
||||
for _, pair := range *(schema.Properties) {
|
||||
s := pair.Value
|
||||
s.applyToSchemas(operation, "Properties")
|
||||
}
|
||||
}
|
||||
if schema.PatternProperties != nil {
|
||||
for _, pair := range *(schema.PatternProperties) {
|
||||
s := pair.Value
|
||||
s.applyToSchemas(operation, "PatternProperties")
|
||||
}
|
||||
}
|
||||
|
||||
if schema.Dependencies != nil {
|
||||
for _, pair := range *(schema.Dependencies) {
|
||||
schemaOrStringArray := pair.Value
|
||||
s := schemaOrStringArray.Schema
|
||||
if s != nil {
|
||||
s.applyToSchemas(operation, "Dependencies")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if schema.AllOf != nil {
|
||||
for _, s := range *(schema.AllOf) {
|
||||
s.applyToSchemas(operation, "AllOf")
|
||||
}
|
||||
}
|
||||
if schema.AnyOf != nil {
|
||||
for _, s := range *(schema.AnyOf) {
|
||||
s.applyToSchemas(operation, "AnyOf")
|
||||
}
|
||||
}
|
||||
if schema.OneOf != nil {
|
||||
for _, s := range *(schema.OneOf) {
|
||||
s.applyToSchemas(operation, "OneOf")
|
||||
}
|
||||
}
|
||||
if schema.Not != nil {
|
||||
schema.Not.applyToSchemas(operation, "Not")
|
||||
}
|
||||
|
||||
if schema.Definitions != nil {
|
||||
for _, pair := range *(schema.Definitions) {
|
||||
s := pair.Value
|
||||
s.applyToSchemas(operation, "Definitions")
|
||||
}
|
||||
}
|
||||
|
||||
operation(schema, context)
|
||||
}
|
||||
|
||||
// CopyProperties copies all non-nil properties from the source Schema to the receiver Schema.
|
||||
func (schema *Schema) CopyProperties(source *Schema) {
|
||||
if source.Schema != nil {
|
||||
schema.Schema = source.Schema
|
||||
}
|
||||
if source.ID != nil {
|
||||
schema.ID = source.ID
|
||||
}
|
||||
if source.MultipleOf != nil {
|
||||
schema.MultipleOf = source.MultipleOf
|
||||
}
|
||||
if source.Maximum != nil {
|
||||
schema.Maximum = source.Maximum
|
||||
}
|
||||
if source.ExclusiveMaximum != nil {
|
||||
schema.ExclusiveMaximum = source.ExclusiveMaximum
|
||||
}
|
||||
if source.Minimum != nil {
|
||||
schema.Minimum = source.Minimum
|
||||
}
|
||||
if source.ExclusiveMinimum != nil {
|
||||
schema.ExclusiveMinimum = source.ExclusiveMinimum
|
||||
}
|
||||
if source.MaxLength != nil {
|
||||
schema.MaxLength = source.MaxLength
|
||||
}
|
||||
if source.MinLength != nil {
|
||||
schema.MinLength = source.MinLength
|
||||
}
|
||||
if source.Pattern != nil {
|
||||
schema.Pattern = source.Pattern
|
||||
}
|
||||
if source.AdditionalItems != nil {
|
||||
schema.AdditionalItems = source.AdditionalItems
|
||||
}
|
||||
if source.Items != nil {
|
||||
schema.Items = source.Items
|
||||
}
|
||||
if source.MaxItems != nil {
|
||||
schema.MaxItems = source.MaxItems
|
||||
}
|
||||
if source.MinItems != nil {
|
||||
schema.MinItems = source.MinItems
|
||||
}
|
||||
if source.UniqueItems != nil {
|
||||
schema.UniqueItems = source.UniqueItems
|
||||
}
|
||||
if source.MaxProperties != nil {
|
||||
schema.MaxProperties = source.MaxProperties
|
||||
}
|
||||
if source.MinProperties != nil {
|
||||
schema.MinProperties = source.MinProperties
|
||||
}
|
||||
if source.Required != nil {
|
||||
schema.Required = source.Required
|
||||
}
|
||||
if source.AdditionalProperties != nil {
|
||||
schema.AdditionalProperties = source.AdditionalProperties
|
||||
}
|
||||
if source.Properties != nil {
|
||||
schema.Properties = source.Properties
|
||||
}
|
||||
if source.PatternProperties != nil {
|
||||
schema.PatternProperties = source.PatternProperties
|
||||
}
|
||||
if source.Dependencies != nil {
|
||||
schema.Dependencies = source.Dependencies
|
||||
}
|
||||
if source.Enumeration != nil {
|
||||
schema.Enumeration = source.Enumeration
|
||||
}
|
||||
if source.Type != nil {
|
||||
schema.Type = source.Type
|
||||
}
|
||||
if source.AllOf != nil {
|
||||
schema.AllOf = source.AllOf
|
||||
}
|
||||
if source.AnyOf != nil {
|
||||
schema.AnyOf = source.AnyOf
|
||||
}
|
||||
if source.OneOf != nil {
|
||||
schema.OneOf = source.OneOf
|
||||
}
|
||||
if source.Not != nil {
|
||||
schema.Not = source.Not
|
||||
}
|
||||
if source.Definitions != nil {
|
||||
schema.Definitions = source.Definitions
|
||||
}
|
||||
if source.Title != nil {
|
||||
schema.Title = source.Title
|
||||
}
|
||||
if source.Description != nil {
|
||||
schema.Description = source.Description
|
||||
}
|
||||
if source.Default != nil {
|
||||
schema.Default = source.Default
|
||||
}
|
||||
if source.Format != nil {
|
||||
schema.Format = source.Format
|
||||
}
|
||||
if source.Ref != nil {
|
||||
schema.Ref = source.Ref
|
||||
}
|
||||
}
|
||||
|
||||
// TypeIs returns true if the Type of a Schema includes the specified type
|
||||
func (schema *Schema) TypeIs(typeName string) bool {
|
||||
if schema.Type != nil {
|
||||
// the schema Type is either a string or an array of strings
|
||||
if schema.Type.String != nil {
|
||||
return (*(schema.Type.String) == typeName)
|
||||
} else if schema.Type.StringArray != nil {
|
||||
for _, n := range *(schema.Type.StringArray) {
|
||||
if n == typeName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ResolveRefs resolves "$ref" elements in a Schema and its children.
|
||||
// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf,
|
||||
// the reference is kept and we expect downstream tools to separately model these
|
||||
// referenced schemas.
|
||||
func (schema *Schema) ResolveRefs() {
|
||||
rootSchema := schema
|
||||
count := 1
|
||||
for count > 0 {
|
||||
count = 0
|
||||
schema.applyToSchemas(
|
||||
func(schema *Schema, context string) {
|
||||
if schema.Ref != nil {
|
||||
resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref))
|
||||
if err != nil {
|
||||
log.Printf("%+v", err)
|
||||
} else if resolvedRef.TypeIs("object") {
|
||||
// don't substitute for objects, we'll model the referenced schema with a class
|
||||
} else if context == "OneOf" {
|
||||
// don't substitute for references inside oneOf declarations
|
||||
} else if resolvedRef.OneOf != nil {
|
||||
// don't substitute for references that contain oneOf declarations
|
||||
} else if resolvedRef.AdditionalProperties != nil {
|
||||
// don't substitute for references that look like objects
|
||||
} else {
|
||||
schema.Ref = nil
|
||||
schema.CopyProperties(resolvedRef)
|
||||
count++
|
||||
}
|
||||
}
|
||||
}, "")
|
||||
}
|
||||
}
|
||||
|
||||
// resolveJSONPointer resolves JSON pointers.
|
||||
// This current implementation is very crude and custom for OpenAPI 2.0 schemas.
|
||||
// It returns an error for any pointer that it is unable to resolve.
|
||||
func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) {
|
||||
parts := strings.Split(ref, "#")
|
||||
if len(parts) == 2 {
|
||||
documentName := parts[0] + "#"
|
||||
if documentName == "#" && schema.ID != nil {
|
||||
documentName = *(schema.ID)
|
||||
}
|
||||
path := parts[1]
|
||||
document := schemas[documentName]
|
||||
pathParts := strings.Split(path, "/")
|
||||
|
||||
// we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases
|
||||
if len(pathParts) == 1 {
|
||||
return document, nil
|
||||
} else if len(pathParts) == 3 {
|
||||
switch pathParts[1] {
|
||||
case "definitions":
|
||||
dictionary := document.Definitions
|
||||
for _, pair := range *dictionary {
|
||||
if pair.Name == pathParts[2] {
|
||||
result = pair.Value
|
||||
}
|
||||
}
|
||||
case "properties":
|
||||
dictionary := document.Properties
|
||||
for _, pair := range *dictionary {
|
||||
if pair.Name == pathParts[2] {
|
||||
result = pair.Value
|
||||
}
|
||||
}
|
||||
default:
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if result == nil {
|
||||
return nil, fmt.Errorf("unresolved pointer: %+v", ref)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema.
|
||||
func (schema *Schema) ResolveAllOfs() {
|
||||
schema.applyToSchemas(
|
||||
func(schema *Schema, context string) {
|
||||
if schema.AllOf != nil {
|
||||
for _, allOf := range *(schema.AllOf) {
|
||||
schema.CopyProperties(allOf)
|
||||
}
|
||||
schema.AllOf = nil
|
||||
}
|
||||
}, "resolveAllOfs")
|
||||
}
|
||||
|
||||
// ResolveAnyOfs replaces all "anyOf" elements with "oneOf".
|
||||
func (schema *Schema) ResolveAnyOfs() {
|
||||
schema.applyToSchemas(
|
||||
func(schema *Schema, context string) {
|
||||
if schema.AnyOf != nil {
|
||||
schema.OneOf = schema.AnyOf
|
||||
schema.AnyOf = nil
|
||||
}
|
||||
}, "resolveAnyOfs")
|
||||
}
|
||||
|
||||
// return a pointer to a copy of a passed-in string
|
||||
func stringptr(input string) (output *string) {
|
||||
return &input
|
||||
}
|
||||
|
||||
// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition
|
||||
func (schema *Schema) CopyOfficialSchemaProperty(name string) {
|
||||
*schema.Properties = append(*schema.Properties,
|
||||
NewNamedSchema(name,
|
||||
&Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)}))
|
||||
}
|
||||
|
||||
// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition
|
||||
func (schema *Schema) CopyOfficialSchemaProperties(names []string) {
|
||||
for _, name := range names {
|
||||
schema.CopyOfficialSchemaProperty(name)
|
||||
}
|
||||
}
|
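Putting these operations together, a typical normalization pass over a loaded schema might look like the following sketch (not part of the vendored sources). The file name "api-schema.json" is hypothetical, and $ref resolution relies on the schema declaring an "id" so that it is registered in the package-level schemas map consulted by resolveJSONPointer.

package main

import (
	"fmt"
	"log"

	"github.com/google/gnostic/jsonschema"
)

func main() {
	schema, err := jsonschema.NewSchemaFromFile("api-schema.json")
	if err != nil {
		log.Fatal(err)
	}
	schema.ResolveRefs()   // inline non-object $ref elements where possible
	schema.ResolveAllOfs() // merge allOf members into their parents
	schema.ResolveAnyOfs() // rewrite anyOf as oneOf
	fmt.Println(schema.String())
}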
442
vendor/github.com/google/gnostic/jsonschema/reader.go
generated
vendored
Normal file
442
vendor/github.com/google/gnostic/jsonschema/reader.go
generated
vendored
Normal file
@ -0,0 +1,442 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:generate go run generate-base.go
|
||||
|
||||
package jsonschema
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strconv"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// This is a global map of all known Schemas.
|
||||
// It is initialized when the first Schema is created and inserted.
|
||||
var schemas map[string]*Schema
|
||||
|
||||
// NewBaseSchema builds a schema object from an embedded json representation.
|
||||
func NewBaseSchema() (schema *Schema, err error) {
|
||||
b, err := baseSchemaBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var node yaml.Node
|
||||
err = yaml.Unmarshal(b, &node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewSchemaFromObject(&node), nil
|
||||
}
|
||||
|
||||
// NewSchemaFromFile reads a schema from a file.
|
||||
// Currently this assumes that schemas are stored in the source distribution of this project.
|
||||
func NewSchemaFromFile(filename string) (schema *Schema, err error) {
|
||||
file, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var node yaml.Node
|
||||
err = yaml.Unmarshal(file, &node)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewSchemaFromObject(&node), nil
|
||||
}
|
||||
|
||||
// NewSchemaFromObject constructs a schema from a parsed JSON object.
|
||||
// Due to the complexity of the schema representation, this is a
|
||||
// custom reader and not the standard Go JSON reader (encoding/json).
|
||||
func NewSchemaFromObject(jsonData *yaml.Node) *Schema {
|
||||
switch jsonData.Kind {
|
||||
case yaml.DocumentNode:
|
||||
return NewSchemaFromObject(jsonData.Content[0])
|
||||
case yaml.MappingNode:
|
||||
schema := &Schema{}
|
||||
|
||||
for i := 0; i < len(jsonData.Content); i += 2 {
|
||||
k := jsonData.Content[i].Value
|
||||
v := jsonData.Content[i+1]
|
||||
|
||||
switch k {
|
||||
case "$schema":
|
||||
schema.Schema = schema.stringValue(v)
|
||||
case "id":
|
||||
schema.ID = schema.stringValue(v)
|
||||
|
||||
case "multipleOf":
|
||||
schema.MultipleOf = schema.numberValue(v)
|
||||
case "maximum":
|
||||
schema.Maximum = schema.numberValue(v)
|
||||
case "exclusiveMaximum":
|
||||
schema.ExclusiveMaximum = schema.boolValue(v)
|
||||
case "minimum":
|
||||
schema.Minimum = schema.numberValue(v)
|
||||
case "exclusiveMinimum":
|
||||
schema.ExclusiveMinimum = schema.boolValue(v)
|
||||
|
||||
case "maxLength":
|
||||
schema.MaxLength = schema.intValue(v)
|
||||
case "minLength":
|
||||
schema.MinLength = schema.intValue(v)
|
||||
case "pattern":
|
||||
schema.Pattern = schema.stringValue(v)
|
||||
|
||||
case "additionalItems":
|
||||
schema.AdditionalItems = schema.schemaOrBooleanValue(v)
|
||||
case "items":
|
||||
schema.Items = schema.schemaOrSchemaArrayValue(v)
|
||||
case "maxItems":
|
||||
schema.MaxItems = schema.intValue(v)
|
||||
case "minItems":
|
||||
schema.MinItems = schema.intValue(v)
|
||||
case "uniqueItems":
|
||||
schema.UniqueItems = schema.boolValue(v)
|
||||
|
||||
case "maxProperties":
|
||||
schema.MaxProperties = schema.intValue(v)
|
||||
case "minProperties":
|
||||
schema.MinProperties = schema.intValue(v)
|
||||
case "required":
|
||||
schema.Required = schema.arrayOfStringsValue(v)
|
||||
case "additionalProperties":
|
||||
schema.AdditionalProperties = schema.schemaOrBooleanValue(v)
|
||||
case "properties":
|
||||
schema.Properties = schema.mapOfSchemasValue(v)
|
||||
case "patternProperties":
|
||||
schema.PatternProperties = schema.mapOfSchemasValue(v)
|
||||
case "dependencies":
|
||||
schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v)
|
||||
|
||||
case "enum":
|
||||
schema.Enumeration = schema.arrayOfEnumValuesValue(v)
|
||||
|
||||
case "type":
|
||||
schema.Type = schema.stringOrStringArrayValue(v)
|
||||
case "allOf":
|
||||
schema.AllOf = schema.arrayOfSchemasValue(v)
|
||||
case "anyOf":
|
||||
schema.AnyOf = schema.arrayOfSchemasValue(v)
|
||||
case "oneOf":
|
||||
schema.OneOf = schema.arrayOfSchemasValue(v)
|
||||
case "not":
|
||||
schema.Not = NewSchemaFromObject(v)
|
||||
case "definitions":
|
||||
schema.Definitions = schema.mapOfSchemasValue(v)
|
||||
|
||||
case "title":
|
||||
schema.Title = schema.stringValue(v)
|
||||
case "description":
|
||||
schema.Description = schema.stringValue(v)
|
||||
|
||||
case "default":
|
||||
schema.Default = v
|
||||
|
||||
case "format":
|
||||
schema.Format = schema.stringValue(v)
|
||||
case "$ref":
|
||||
schema.Ref = schema.stringValue(v)
|
||||
default:
|
||||
fmt.Printf("UNSUPPORTED (%s)\n", k)
|
||||
}
|
||||
}
|
||||
|
||||
// insert schema in global map
|
||||
if schema.ID != nil {
|
||||
if schemas == nil {
|
||||
schemas = make(map[string]*Schema, 0)
|
||||
}
|
||||
schemas[*(schema.ID)] = schema
|
||||
}
|
||||
return schema
|
||||
|
||||
default:
|
||||
fmt.Printf("schemaValue: unexpected node %+v\n", jsonData)
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//
|
||||
// BUILDERS
|
||||
// The following methods build elements of Schemas from yaml.Node values.
|
||||
// Each returns nil if it is unable to build the desired element.
|
||||
//
|
||||
|
||||
// Gets the string value of a yaml.Node if possible.
|
||||
func (schema *Schema) stringValue(v *yaml.Node) *string {
|
||||
switch v.Kind {
|
||||
case yaml.ScalarNode:
|
||||
return &v.Value
|
||||
default:
|
||||
fmt.Printf("stringValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets the numeric value of a yaml.Node if possible.
|
||||
func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber {
|
||||
number := &SchemaNumber{}
|
||||
switch v.Kind {
|
||||
case yaml.ScalarNode:
|
||||
switch v.Tag {
|
||||
case "!!float":
|
||||
v2, _ := strconv.ParseFloat(v.Value, 64)
|
||||
number.Float = &v2
|
||||
return number
|
||||
case "!!int":
|
||||
v2, _ := strconv.ParseInt(v.Value, 10, 64)
|
||||
number.Integer = &v2
|
||||
return number
|
||||
default:
|
||||
fmt.Printf("stringValue: unexpected node %+v\n", v)
|
||||
}
|
||||
default:
|
||||
fmt.Printf("stringValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets the integer value of a yaml.Node if possible.
|
||||
func (schema *Schema) intValue(v *yaml.Node) *int64 {
|
||||
switch v.Kind {
|
||||
case yaml.ScalarNode:
|
||||
switch v.Tag {
|
||||
case "!!float":
|
||||
v2, _ := strconv.ParseFloat(v.Value, 64)
|
||||
v3 := int64(v2)
|
||||
return &v3
|
||||
case "!!int":
|
||||
v2, _ := strconv.ParseInt(v.Value, 10, 64)
|
||||
return &v2
|
||||
default:
|
||||
fmt.Printf("intValue: unexpected node %+v\n", v)
|
||||
}
|
||||
default:
|
||||
fmt.Printf("intValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets the bool value of a yaml.Node if possible.
|
||||
func (schema *Schema) boolValue(v *yaml.Node) *bool {
|
||||
switch v.Kind {
|
||||
case yaml.ScalarNode:
|
||||
switch v.Tag {
|
||||
case "!!bool":
|
||||
v2, _ := strconv.ParseBool(v.Value)
|
||||
return &v2
|
||||
default:
|
||||
fmt.Printf("boolValue: unexpected node %+v\n", v)
|
||||
}
|
||||
default:
|
||||
fmt.Printf("boolValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets a map of Schemas from a yaml.Node if possible.
|
||||
func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema {
|
||||
switch v.Kind {
|
||||
case yaml.MappingNode:
|
||||
m := make([]*NamedSchema, 0)
|
||||
for i := 0; i < len(v.Content); i += 2 {
|
||||
k2 := v.Content[i].Value
|
||||
v2 := v.Content[i+1]
|
||||
pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)}
|
||||
m = append(m, pair)
|
||||
}
|
||||
return &m
|
||||
default:
|
||||
fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets an array of Schemas from a yaml.Node if possible.
|
||||
func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema {
|
||||
switch v.Kind {
|
||||
case yaml.SequenceNode:
|
||||
m := make([]*Schema, 0)
|
||||
for _, v2 := range v.Content {
|
||||
switch v2.Kind {
|
||||
case yaml.MappingNode:
|
||||
s := NewSchemaFromObject(v2)
|
||||
m = append(m, s)
|
||||
default:
|
||||
fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2)
|
||||
}
|
||||
}
|
||||
return &m
|
||||
case yaml.MappingNode:
|
||||
m := make([]*Schema, 0)
|
||||
s := NewSchemaFromObject(v)
|
||||
m = append(m, s)
|
||||
return &m
|
||||
default:
|
||||
fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets a Schema or an array of Schemas from a yaml.Node if possible.
|
||||
func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray {
|
||||
switch v.Kind {
|
||||
case yaml.SequenceNode:
|
||||
m := make([]*Schema, 0)
|
||||
for _, v2 := range v.Content {
|
||||
switch v2.Kind {
|
||||
case yaml.MappingNode:
|
||||
s := NewSchemaFromObject(v2)
|
||||
m = append(m, s)
|
||||
default:
|
||||
fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2)
|
||||
}
|
||||
}
|
||||
return &SchemaOrSchemaArray{SchemaArray: &m}
|
||||
case yaml.MappingNode:
|
||||
s := NewSchemaFromObject(v)
|
||||
return &SchemaOrSchemaArray{Schema: s}
|
||||
default:
|
||||
fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets an array of strings from a yaml.Node if possible.
|
||||
func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string {
|
||||
switch v.Kind {
|
||||
case yaml.ScalarNode:
|
||||
a := []string{v.Value}
|
||||
return &a
|
||||
case yaml.SequenceNode:
|
||||
a := make([]string, 0)
|
||||
for _, v2 := range v.Content {
|
||||
switch v2.Kind {
|
||||
case yaml.ScalarNode:
|
||||
a = append(a, v2.Value)
|
||||
default:
|
||||
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
|
||||
}
|
||||
}
|
||||
return &a
|
||||
default:
|
||||
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets a string or an array of strings from a yaml.Node if possible.
|
||||
func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray {
|
||||
switch v.Kind {
|
||||
case yaml.ScalarNode:
|
||||
s := &StringOrStringArray{}
|
||||
s.String = &v.Value
|
||||
return s
|
||||
case yaml.SequenceNode:
|
||||
a := make([]string, 0)
|
||||
for _, v2 := range v.Content {
|
||||
switch v2.Kind {
|
||||
case yaml.ScalarNode:
|
||||
a = append(a, v2.Value)
|
||||
default:
|
||||
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2)
|
||||
}
|
||||
}
|
||||
s := &StringOrStringArray{}
|
||||
s.StringArray = &a
|
||||
return s
|
||||
default:
|
||||
fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gets an array of enum values from a yaml.Node if possible.
|
||||
func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue {
|
||||
a := make([]SchemaEnumValue, 0)
|
||||
switch v.Kind {
|
||||
case yaml.SequenceNode:
|
||||
for _, v2 := range v.Content {
|
||||
switch v2.Kind {
|
||||
case yaml.ScalarNode:
|
||||
switch v2.Tag {
|
||||
case "!!str":
|
||||
a = append(a, SchemaEnumValue{String: &v2.Value})
|
||||
case "!!bool":
|
||||
v3, _ := strconv.ParseBool(v2.Value)
|
||||
a = append(a, SchemaEnumValue{Bool: &v3})
|
||||
default:
|
||||
fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag)
|
||||
}
|
||||
default:
|
||||
fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2)
|
||||
}
|
||||
}
|
||||
default:
|
||||
fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return &a
|
||||
}
|
||||
|
||||
// Gets a map of schemas or string arrays from a yaml.Node if possible.
|
||||
func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray {
|
||||
m := make([]*NamedSchemaOrStringArray, 0)
|
||||
switch v.Kind {
|
||||
case yaml.MappingNode:
|
||||
for i := 0; i < len(v.Content); i += 2 {
|
||||
k2 := v.Content[i].Value
|
||||
v2 := v.Content[i+1]
|
||||
switch v2.Kind {
|
||||
case yaml.SequenceNode:
|
||||
a := make([]string, 0)
|
||||
for _, v3 := range v2.Content {
|
||||
switch v3.Kind {
|
||||
case yaml.ScalarNode:
|
||||
a = append(a, v3.Value)
|
||||
default:
|
||||
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3)
|
||||
}
|
||||
}
|
||||
s := &SchemaOrStringArray{}
|
||||
s.StringArray = &a
|
||||
pair := &NamedSchemaOrStringArray{Name: k2, Value: s}
|
||||
m = append(m, pair)
|
||||
default:
|
||||
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2)
|
||||
}
|
||||
}
|
||||
default:
|
||||
fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return &m
|
||||
}
|
||||
|
||||
// Gets a schema or a boolean value from a yaml.Node if possible.
|
||||
func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean {
|
||||
schemaOrBoolean := &SchemaOrBoolean{}
|
||||
switch v.Kind {
|
||||
case yaml.ScalarNode:
|
||||
v2, _ := strconv.ParseBool(v.Value)
|
||||
schemaOrBoolean.Boolean = &v2
|
||||
case yaml.MappingNode:
|
||||
schemaOrBoolean.Schema = NewSchemaFromObject(v)
|
||||
default:
|
||||
fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v)
|
||||
}
|
||||
return schemaOrBoolean
|
||||
}
|
150
vendor/github.com/google/gnostic/jsonschema/schema.json
generated
vendored
Normal file
@ -0,0 +1,150 @@
|
||||
{
|
||||
"id": "http://json-schema.org/draft-04/schema#",
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"description": "Core schema meta-schema",
|
||||
"definitions": {
|
||||
"schemaArray": {
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"items": { "$ref": "#" }
|
||||
},
|
||||
"positiveInteger": {
|
||||
"type": "integer",
|
||||
"minimum": 0
|
||||
},
|
||||
"positiveIntegerDefault0": {
|
||||
"allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
|
||||
},
|
||||
"simpleTypes": {
|
||||
"enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
|
||||
},
|
||||
"stringArray": {
|
||||
"type": "array",
|
||||
"items": { "type": "string" },
|
||||
"minItems": 1,
|
||||
"uniqueItems": true
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
},
|
||||
"$schema": {
|
||||
"type": "string",
|
||||
"format": "uri"
|
||||
},
|
||||
"title": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"default": {},
|
||||
"multipleOf": {
|
||||
"type": "number",
|
||||
"minimum": 0,
|
||||
"exclusiveMinimum": true
|
||||
},
|
||||
"maximum": {
|
||||
"type": "number"
|
||||
},
|
||||
"exclusiveMaximum": {
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"minimum": {
|
||||
"type": "number"
|
||||
},
|
||||
"exclusiveMinimum": {
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"maxLength": { "$ref": "#/definitions/positiveInteger" },
|
||||
"minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
|
||||
"pattern": {
|
||||
"type": "string",
|
||||
"format": "regex"
|
||||
},
|
||||
"additionalItems": {
|
||||
"anyOf": [
|
||||
{ "type": "boolean" },
|
||||
{ "$ref": "#" }
|
||||
],
|
||||
"default": {}
|
||||
},
|
||||
"items": {
|
||||
"anyOf": [
|
||||
{ "$ref": "#" },
|
||||
{ "$ref": "#/definitions/schemaArray" }
|
||||
],
|
||||
"default": {}
|
||||
},
|
||||
"maxItems": { "$ref": "#/definitions/positiveInteger" },
|
||||
"minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
|
||||
"uniqueItems": {
|
||||
"type": "boolean",
|
||||
"default": false
|
||||
},
|
||||
"maxProperties": { "$ref": "#/definitions/positiveInteger" },
|
||||
"minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
|
||||
"required": { "$ref": "#/definitions/stringArray" },
|
||||
"additionalProperties": {
|
||||
"anyOf": [
|
||||
{ "type": "boolean" },
|
||||
{ "$ref": "#" }
|
||||
],
|
||||
"default": {}
|
||||
},
|
||||
"definitions": {
|
||||
"type": "object",
|
||||
"additionalProperties": { "$ref": "#" },
|
||||
"default": {}
|
||||
},
|
||||
"properties": {
|
||||
"type": "object",
|
||||
"additionalProperties": { "$ref": "#" },
|
||||
"default": {}
|
||||
},
|
||||
"patternProperties": {
|
||||
"type": "object",
|
||||
"additionalProperties": { "$ref": "#" },
|
||||
"default": {}
|
||||
},
|
||||
"dependencies": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"anyOf": [
|
||||
{ "$ref": "#" },
|
||||
{ "$ref": "#/definitions/stringArray" }
|
||||
]
|
||||
}
|
||||
},
|
||||
"enum": {
|
||||
"type": "array",
|
||||
"minItems": 1,
|
||||
"uniqueItems": true
|
||||
},
|
||||
"type": {
|
||||
"anyOf": [
|
||||
{ "$ref": "#/definitions/simpleTypes" },
|
||||
{
|
||||
"type": "array",
|
||||
"items": { "$ref": "#/definitions/simpleTypes" },
|
||||
"minItems": 1,
|
||||
"uniqueItems": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"allOf": { "$ref": "#/definitions/schemaArray" },
|
||||
"anyOf": { "$ref": "#/definitions/schemaArray" },
|
||||
"oneOf": { "$ref": "#/definitions/schemaArray" },
|
||||
"not": { "$ref": "#" }
|
||||
},
|
||||
"dependencies": {
|
||||
"exclusiveMaximum": [ "maximum" ],
|
||||
"exclusiveMinimum": [ "minimum" ]
|
||||
},
|
||||
"default": {}
|
||||
}
|
369
vendor/github.com/google/gnostic/jsonschema/writer.go
generated
vendored
Normal file
@ -0,0 +1,369 @@
|
||||
// Copyright 2017 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package jsonschema
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const indentation = " "
|
||||
|
||||
func renderMappingNode(node *yaml.Node, indent string) (result string) {
|
||||
result = "{\n"
|
||||
innerIndent := indent + indentation
|
||||
for i := 0; i < len(node.Content); i += 2 {
|
||||
// first print the key
|
||||
key := node.Content[i].Value
|
||||
result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key)
|
||||
// then the value
|
||||
value := node.Content[i+1]
|
||||
switch value.Kind {
|
||||
case yaml.ScalarNode:
|
||||
result += "\"" + value.Value + "\""
|
||||
case yaml.MappingNode:
|
||||
result += renderMappingNode(value, innerIndent)
|
||||
case yaml.SequenceNode:
|
||||
result += renderSequenceNode(value, innerIndent)
|
||||
default:
|
||||
result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value)
|
||||
}
|
||||
if i < len(node.Content)-2 {
|
||||
result += ","
|
||||
}
|
||||
result += "\n"
|
||||
}
|
||||
|
||||
result += indent + "}"
|
||||
return result
|
||||
}
|
||||
|
||||
func renderSequenceNode(node *yaml.Node, indent string) (result string) {
|
||||
result = "[\n"
|
||||
innerIndent := indent + indentation
|
||||
for i := 0; i < len(node.Content); i++ {
|
||||
item := node.Content[i]
|
||||
switch item.Kind {
|
||||
case yaml.ScalarNode:
|
||||
result += innerIndent + "\"" + item.Value + "\""
|
||||
case yaml.MappingNode:
|
||||
result += innerIndent + renderMappingNode(item, innerIndent) + ""
|
||||
default:
|
||||
result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item)
|
||||
}
|
||||
if i < len(node.Content)-1 {
|
||||
result += ","
|
||||
}
|
||||
result += "\n"
|
||||
}
|
||||
result += indent + "]"
|
||||
return result
|
||||
}
|
||||
|
||||
func renderStringArray(array []string, indent string) (result string) {
|
||||
result = "[\n"
|
||||
innerIndent := indent + indentation
|
||||
for i, item := range array {
|
||||
result += innerIndent + "\"" + item + "\""
|
||||
if i < len(array)-1 {
|
||||
result += ","
|
||||
}
|
||||
result += "\n"
|
||||
}
|
||||
result += indent + "]"
|
||||
return result
|
||||
}
|
||||
|
||||
// Render renders a yaml.Node as JSON
|
||||
func Render(node *yaml.Node) string {
|
||||
if node.Kind == yaml.DocumentNode {
|
||||
if len(node.Content) == 1 {
|
||||
return Render(node.Content[0])
|
||||
}
|
||||
} else if node.Kind == yaml.MappingNode {
|
||||
return renderMappingNode(node, "") + "\n"
|
||||
} else if node.Kind == yaml.SequenceNode {
|
||||
return renderSequenceNode(node, "") + "\n"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (object *SchemaNumber) nodeValue() *yaml.Node {
|
||||
if object.Integer != nil {
|
||||
return nodeForInt64(*object.Integer)
|
||||
} else if object.Float != nil {
|
||||
return nodeForFloat64(*object.Float)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (object *SchemaOrBoolean) nodeValue() *yaml.Node {
|
||||
if object.Schema != nil {
|
||||
return object.Schema.nodeValue()
|
||||
} else if object.Boolean != nil {
|
||||
return nodeForBoolean(*object.Boolean)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func nodeForStringArray(array []string) *yaml.Node {
|
||||
content := make([]*yaml.Node, 0)
|
||||
for _, item := range array {
|
||||
content = append(content, nodeForString(item))
|
||||
}
|
||||
return nodeForSequence(content)
|
||||
}
|
||||
|
||||
func nodeForSchemaArray(array []*Schema) *yaml.Node {
|
||||
content := make([]*yaml.Node, 0)
|
||||
for _, item := range array {
|
||||
content = append(content, item.nodeValue())
|
||||
}
|
||||
return nodeForSequence(content)
|
||||
}
|
||||
|
||||
func (object *StringOrStringArray) nodeValue() *yaml.Node {
|
||||
if object.String != nil {
|
||||
return nodeForString(*object.String)
|
||||
} else if object.StringArray != nil {
|
||||
return nodeForStringArray(*(object.StringArray))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (object *SchemaOrStringArray) nodeValue() *yaml.Node {
|
||||
if object.Schema != nil {
|
||||
return object.Schema.nodeValue()
|
||||
} else if object.StringArray != nil {
|
||||
return nodeForStringArray(*(object.StringArray))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node {
|
||||
if object.Schema != nil {
|
||||
return object.Schema.nodeValue()
|
||||
} else if object.SchemaArray != nil {
|
||||
return nodeForSchemaArray(*(object.SchemaArray))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (object *SchemaEnumValue) nodeValue() *yaml.Node {
|
||||
if object.String != nil {
|
||||
return nodeForString(*object.String)
|
||||
} else if object.Bool != nil {
|
||||
return nodeForBoolean(*object.Bool)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node {
|
||||
content := make([]*yaml.Node, 0)
|
||||
for _, pair := range *(array) {
|
||||
content = appendPair(content, pair.Name, pair.Value.nodeValue())
|
||||
}
|
||||
return nodeForMapping(content)
|
||||
}
|
||||
|
||||
func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node {
|
||||
content := make([]*yaml.Node, 0)
|
||||
for _, pair := range *(array) {
|
||||
content = appendPair(content, pair.Name, pair.Value.nodeValue())
|
||||
}
|
||||
return nodeForMapping(content)
|
||||
}
|
||||
|
||||
func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node {
|
||||
content := make([]*yaml.Node, 0)
|
||||
for _, item := range *array {
|
||||
content = append(content, item.nodeValue())
|
||||
}
|
||||
return nodeForSequence(content)
|
||||
}
|
||||
|
||||
func nodeForMapping(content []*yaml.Node) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.MappingNode,
|
||||
Content: content,
|
||||
}
|
||||
}
|
||||
|
||||
func nodeForSequence(content []*yaml.Node) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.SequenceNode,
|
||||
Content: content,
|
||||
}
|
||||
}
|
||||
|
||||
func nodeForString(value string) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!str",
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
|
||||
func nodeForBoolean(value bool) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!bool",
|
||||
Value: fmt.Sprintf("%t", value),
|
||||
}
|
||||
}
|
||||
|
||||
func nodeForInt64(value int64) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!int",
|
||||
Value: fmt.Sprintf("%d", value),
|
||||
}
|
||||
}
|
||||
|
||||
func nodeForFloat64(value float64) *yaml.Node {
|
||||
return &yaml.Node{
|
||||
Kind: yaml.ScalarNode,
|
||||
Tag: "!!float",
|
||||
Value: fmt.Sprintf("%f", value),
|
||||
}
|
||||
}
|
||||
|
||||
func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node {
|
||||
nodes = append(nodes, nodeForString(name))
|
||||
nodes = append(nodes, value)
|
||||
return nodes
|
||||
}
|
||||
|
||||
func (schema *Schema) nodeValue() *yaml.Node {
|
||||
n := &yaml.Node{Kind: yaml.MappingNode}
|
||||
content := make([]*yaml.Node, 0)
|
||||
if schema.Title != nil {
|
||||
content = appendPair(content, "title", nodeForString(*schema.Title))
|
||||
}
|
||||
if schema.ID != nil {
|
||||
content = appendPair(content, "id", nodeForString(*schema.ID))
|
||||
}
|
||||
if schema.Schema != nil {
|
||||
content = appendPair(content, "$schema", nodeForString(*schema.Schema))
|
||||
}
|
||||
if schema.Type != nil {
|
||||
content = appendPair(content, "type", schema.Type.nodeValue())
|
||||
}
|
||||
if schema.Items != nil {
|
||||
content = appendPair(content, "items", schema.Items.nodeValue())
|
||||
}
|
||||
if schema.Description != nil {
|
||||
content = appendPair(content, "description", nodeForString(*schema.Description))
|
||||
}
|
||||
if schema.Required != nil {
|
||||
content = appendPair(content, "required", nodeForStringArray(*schema.Required))
|
||||
}
|
||||
if schema.AdditionalProperties != nil {
|
||||
content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue())
|
||||
}
|
||||
if schema.PatternProperties != nil {
|
||||
content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties))
|
||||
}
|
||||
if schema.Properties != nil {
|
||||
content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties))
|
||||
}
|
||||
if schema.Dependencies != nil {
|
||||
content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies))
|
||||
}
|
||||
if schema.Ref != nil {
|
||||
content = appendPair(content, "$ref", nodeForString(*schema.Ref))
|
||||
}
|
||||
if schema.MultipleOf != nil {
|
||||
content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue())
|
||||
}
|
||||
if schema.Maximum != nil {
|
||||
content = appendPair(content, "maximum", schema.Maximum.nodeValue())
|
||||
}
|
||||
if schema.ExclusiveMaximum != nil {
|
||||
content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum))
|
||||
}
|
||||
if schema.Minimum != nil {
|
||||
content = appendPair(content, "minimum", schema.Minimum.nodeValue())
|
||||
}
|
||||
if schema.ExclusiveMinimum != nil {
|
||||
content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum))
|
||||
}
|
||||
if schema.MaxLength != nil {
|
||||
content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength))
|
||||
}
|
||||
if schema.MinLength != nil {
|
||||
content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength))
|
||||
}
|
||||
if schema.Pattern != nil {
|
||||
content = appendPair(content, "pattern", nodeForString(*schema.Pattern))
|
||||
}
|
||||
if schema.AdditionalItems != nil {
|
||||
content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue())
|
||||
}
|
||||
if schema.MaxItems != nil {
|
||||
content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems))
|
||||
}
|
||||
if schema.MinItems != nil {
|
||||
content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems))
|
||||
}
|
||||
if schema.UniqueItems != nil {
|
||||
content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems))
|
||||
}
|
||||
if schema.MaxProperties != nil {
|
||||
content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties))
|
||||
}
|
||||
if schema.MinProperties != nil {
|
||||
content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties))
|
||||
}
|
||||
if schema.Enumeration != nil {
|
||||
content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration))
|
||||
}
|
||||
if schema.AllOf != nil {
|
||||
content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf))
|
||||
}
|
||||
if schema.AnyOf != nil {
|
||||
content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf))
|
||||
}
|
||||
if schema.OneOf != nil {
|
||||
content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf))
|
||||
}
|
||||
if schema.Not != nil {
|
||||
content = appendPair(content, "not", schema.Not.nodeValue())
|
||||
}
|
||||
if schema.Definitions != nil {
|
||||
content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions))
|
||||
}
|
||||
if schema.Default != nil {
|
||||
// m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default})
|
||||
}
|
||||
if schema.Format != nil {
|
||||
content = appendPair(content, "format", nodeForString(*schema.Format))
|
||||
}
|
||||
n.Content = content
|
||||
return n
|
||||
}
|
||||
|
||||
// JSONString returns a json representation of a schema.
|
||||
func (schema *Schema) JSONString() string {
|
||||
node := schema.nodeValue()
|
||||
return Render(node)
|
||||
}
|
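The reader (NewSchemaFromObject and its builders) and the writer above meet in Schema.JSONString. A minimal usage sketch, not part of the vendored files, assuming only NewSchemaFromObject, JSONString, and gopkg.in/yaml.v3 as they appear in this package; the schema text itself is invented:

package main

import (
    "fmt"

    "github.com/google/gnostic/jsonschema"
    "gopkg.in/yaml.v3"
)

func main() {
    // A small, made-up JSON Schema written as YAML.
    source := []byte("type: object\nproperties:\n  name:\n    type: string\nrequired:\n  - name\n")

    // yaml.v3 decodes into a DocumentNode; its first child is the root mapping.
    var doc yaml.Node
    if err := yaml.Unmarshal(source, &doc); err != nil {
        panic(err)
    }

    // Build the Schema with the reader, then render it as JSON with the writer.
    schema := jsonschema.NewSchemaFromObject(doc.Content[0])
    fmt.Println(schema.JSONString())
}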
8820
vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go
generated
vendored
Normal file
File diff suppressed because it is too large
7342
vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
666
vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.proto
generated
vendored
Normal file
@ -0,0 +1,666 @@
|
||||
// Copyright 2020 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package openapi.v2;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
// This option lets the proto compiler generate Java code inside the package
|
||||
// name (see below) instead of inside an outer class. It creates a simpler
|
||||
// developer experience by reducing one level of name nesting and being
|
||||
// consistent with most programming languages that don't support outer classes.
|
||||
option java_multiple_files = true;
|
||||
|
||||
// The Java outer classname should be the filename in UpperCamelCase. This
|
||||
// class is only used to hold proto descriptor, so developers don't need to
|
||||
// work with it directly.
|
||||
option java_outer_classname = "OpenAPIProto";
|
||||
|
||||
// The Java package name must be proto package name with proper prefix.
|
||||
option java_package = "org.openapi_v2";
|
||||
|
||||
// A reasonable prefix for the Objective-C symbols generated from the package.
|
||||
// It should at a minimum be 3 characters long, all uppercase, and convention
|
||||
// is to use an abbreviation of the package name. Something short, but
|
||||
// hopefully unique enough to not conflict with things that may come along in
|
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
|
||||
option objc_class_prefix = "OAS";
|
||||
|
||||
// The Go package name.
|
||||
option go_package = "./openapiv2;openapi_v2";
|
||||
|
||||
message AdditionalPropertiesItem {
|
||||
oneof oneof {
|
||||
Schema schema = 1;
|
||||
bool boolean = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message Any {
|
||||
google.protobuf.Any value = 1;
|
||||
string yaml = 2;
|
||||
}
|
||||
|
||||
message ApiKeySecurity {
|
||||
string type = 1;
|
||||
string name = 2;
|
||||
string in = 3;
|
||||
string description = 4;
|
||||
repeated NamedAny vendor_extension = 5;
|
||||
}
|
||||
|
||||
message BasicAuthenticationSecurity {
|
||||
string type = 1;
|
||||
string description = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
message BodyParameter {
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 1;
|
||||
// The name of the parameter.
|
||||
string name = 2;
|
||||
// Determines the location of the parameter.
|
||||
string in = 3;
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 4;
|
||||
Schema schema = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
// Contact information for the owners of the API.
|
||||
message Contact {
|
||||
// The identifying name of the contact person/organization.
|
||||
string name = 1;
|
||||
// The URL pointing to the contact information.
|
||||
string url = 2;
|
||||
// The email address of the contact person/organization.
|
||||
string email = 3;
|
||||
repeated NamedAny vendor_extension = 4;
|
||||
}
|
||||
|
||||
message Default {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
// One or more JSON objects describing the schemas being consumed and produced by the API.
|
||||
message Definitions {
|
||||
repeated NamedSchema additional_properties = 1;
|
||||
}
|
||||
|
||||
message Document {
|
||||
// The Swagger version of this document.
|
||||
string swagger = 1;
|
||||
Info info = 2;
|
||||
// The host (name or ip) of the API. Example: 'swagger.io'
|
||||
string host = 3;
|
||||
// The base path to the API. Example: '/api'.
|
||||
string base_path = 4;
|
||||
// The transfer protocol of the API.
|
||||
repeated string schemes = 5;
|
||||
// A list of MIME types accepted by the API.
|
||||
repeated string consumes = 6;
|
||||
// A list of MIME types the API can produce.
|
||||
repeated string produces = 7;
|
||||
Paths paths = 8;
|
||||
Definitions definitions = 9;
|
||||
ParameterDefinitions parameters = 10;
|
||||
ResponseDefinitions responses = 11;
|
||||
repeated SecurityRequirement security = 12;
|
||||
SecurityDefinitions security_definitions = 13;
|
||||
repeated Tag tags = 14;
|
||||
ExternalDocs external_docs = 15;
|
||||
repeated NamedAny vendor_extension = 16;
|
||||
}
|
||||
|
||||
message Examples {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
// information about external documentation
|
||||
message ExternalDocs {
|
||||
string description = 1;
|
||||
string url = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
// A deterministic version of a JSON Schema object.
|
||||
message FileSchema {
|
||||
string format = 1;
|
||||
string title = 2;
|
||||
string description = 3;
|
||||
Any default = 4;
|
||||
repeated string required = 5;
|
||||
string type = 6;
|
||||
bool read_only = 7;
|
||||
ExternalDocs external_docs = 8;
|
||||
Any example = 9;
|
||||
repeated NamedAny vendor_extension = 10;
|
||||
}
|
||||
|
||||
message FormDataParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
// allows sending a parameter by name only or with an empty value.
|
||||
bool allow_empty_value = 5;
|
||||
string type = 6;
|
||||
string format = 7;
|
||||
PrimitivesItems items = 8;
|
||||
string collection_format = 9;
|
||||
Any default = 10;
|
||||
double maximum = 11;
|
||||
bool exclusive_maximum = 12;
|
||||
double minimum = 13;
|
||||
bool exclusive_minimum = 14;
|
||||
int64 max_length = 15;
|
||||
int64 min_length = 16;
|
||||
string pattern = 17;
|
||||
int64 max_items = 18;
|
||||
int64 min_items = 19;
|
||||
bool unique_items = 20;
|
||||
repeated Any enum = 21;
|
||||
double multiple_of = 22;
|
||||
repeated NamedAny vendor_extension = 23;
|
||||
}
|
||||
|
||||
message Header {
|
||||
string type = 1;
|
||||
string format = 2;
|
||||
PrimitivesItems items = 3;
|
||||
string collection_format = 4;
|
||||
Any default = 5;
|
||||
double maximum = 6;
|
||||
bool exclusive_maximum = 7;
|
||||
double minimum = 8;
|
||||
bool exclusive_minimum = 9;
|
||||
int64 max_length = 10;
|
||||
int64 min_length = 11;
|
||||
string pattern = 12;
|
||||
int64 max_items = 13;
|
||||
int64 min_items = 14;
|
||||
bool unique_items = 15;
|
||||
repeated Any enum = 16;
|
||||
double multiple_of = 17;
|
||||
string description = 18;
|
||||
repeated NamedAny vendor_extension = 19;
|
||||
}
|
||||
|
||||
message HeaderParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
string type = 5;
|
||||
string format = 6;
|
||||
PrimitivesItems items = 7;
|
||||
string collection_format = 8;
|
||||
Any default = 9;
|
||||
double maximum = 10;
|
||||
bool exclusive_maximum = 11;
|
||||
double minimum = 12;
|
||||
bool exclusive_minimum = 13;
|
||||
int64 max_length = 14;
|
||||
int64 min_length = 15;
|
||||
string pattern = 16;
|
||||
int64 max_items = 17;
|
||||
int64 min_items = 18;
|
||||
bool unique_items = 19;
|
||||
repeated Any enum = 20;
|
||||
double multiple_of = 21;
|
||||
repeated NamedAny vendor_extension = 22;
|
||||
}
|
||||
|
||||
message Headers {
|
||||
repeated NamedHeader additional_properties = 1;
|
||||
}
|
||||
|
||||
// General information about the API.
|
||||
message Info {
|
||||
// A unique and precise title of the API.
|
||||
string title = 1;
|
||||
// A semantic version number of the API.
|
||||
string version = 2;
|
||||
// A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The terms of service for the API.
|
||||
string terms_of_service = 4;
|
||||
Contact contact = 5;
|
||||
License license = 6;
|
||||
repeated NamedAny vendor_extension = 7;
|
||||
}
|
||||
|
||||
message ItemsItem {
|
||||
repeated Schema schema = 1;
|
||||
}
|
||||
|
||||
message JsonReference {
|
||||
string _ref = 1;
|
||||
string description = 2;
|
||||
}
|
||||
|
||||
message License {
|
||||
// The name of the license type. It's encouraged to use an OSI compatible license.
|
||||
string name = 1;
|
||||
// The URL pointing to the license.
|
||||
string url = 2;
|
||||
repeated NamedAny vendor_extension = 3;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
|
||||
message NamedAny {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Any value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
|
||||
message NamedHeader {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Header value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
|
||||
message NamedParameter {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Parameter value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
|
||||
message NamedPathItem {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
PathItem value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
|
||||
message NamedResponse {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Response value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
|
||||
message NamedResponseValue {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
ResponseValue value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
|
||||
message NamedSchema {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Schema value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
|
||||
message NamedSecurityDefinitionsItem {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
SecurityDefinitionsItem value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
|
||||
message NamedString {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
string value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
|
||||
message NamedStringArray {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
StringArray value = 2;
|
||||
}
|
||||
|
||||
message NonBodyParameter {
|
||||
oneof oneof {
|
||||
HeaderParameterSubSchema header_parameter_sub_schema = 1;
|
||||
FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
|
||||
QueryParameterSubSchema query_parameter_sub_schema = 3;
|
||||
PathParameterSubSchema path_parameter_sub_schema = 4;
|
||||
}
|
||||
}
|
||||
|
||||
message Oauth2AccessCodeSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string authorization_url = 4;
|
||||
string token_url = 5;
|
||||
string description = 6;
|
||||
repeated NamedAny vendor_extension = 7;
|
||||
}
|
||||
|
||||
message Oauth2ApplicationSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string token_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2ImplicitSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string authorization_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2PasswordSecurity {
|
||||
string type = 1;
|
||||
string flow = 2;
|
||||
Oauth2Scopes scopes = 3;
|
||||
string token_url = 4;
|
||||
string description = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
||||
message Oauth2Scopes {
|
||||
repeated NamedString additional_properties = 1;
|
||||
}
|
||||
|
||||
message Operation {
|
||||
repeated string tags = 1;
|
||||
// A brief summary of the operation.
|
||||
string summary = 2;
|
||||
// A longer description of the operation, GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
ExternalDocs external_docs = 4;
|
||||
// A unique identifier of the operation.
|
||||
string operation_id = 5;
|
||||
// A list of MIME types the API can produce.
|
||||
repeated string produces = 6;
|
||||
// A list of MIME types the API can consume.
|
||||
repeated string consumes = 7;
|
||||
// The parameters needed to send a valid API call.
|
||||
repeated ParametersItem parameters = 8;
|
||||
Responses responses = 9;
|
||||
// The transfer protocol of the API.
|
||||
repeated string schemes = 10;
|
||||
bool deprecated = 11;
|
||||
repeated SecurityRequirement security = 12;
|
||||
repeated NamedAny vendor_extension = 13;
|
||||
}
|
||||
|
||||
message Parameter {
|
||||
oneof oneof {
|
||||
BodyParameter body_parameter = 1;
|
||||
NonBodyParameter non_body_parameter = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// One or more JSON representations for parameters
|
||||
message ParameterDefinitions {
|
||||
repeated NamedParameter additional_properties = 1;
|
||||
}
|
||||
|
||||
message ParametersItem {
|
||||
oneof oneof {
|
||||
Parameter parameter = 1;
|
||||
JsonReference json_reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message PathItem {
|
||||
string _ref = 1;
|
||||
Operation get = 2;
|
||||
Operation put = 3;
|
||||
Operation post = 4;
|
||||
Operation delete = 5;
|
||||
Operation options = 6;
|
||||
Operation head = 7;
|
||||
Operation patch = 8;
|
||||
// The parameters needed to send a valid API call.
|
||||
repeated ParametersItem parameters = 9;
|
||||
repeated NamedAny vendor_extension = 10;
|
||||
}
|
||||
|
||||
message PathParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
string type = 5;
|
||||
string format = 6;
|
||||
PrimitivesItems items = 7;
|
||||
string collection_format = 8;
|
||||
Any default = 9;
|
||||
double maximum = 10;
|
||||
bool exclusive_maximum = 11;
|
||||
double minimum = 12;
|
||||
bool exclusive_minimum = 13;
|
||||
int64 max_length = 14;
|
||||
int64 min_length = 15;
|
||||
string pattern = 16;
|
||||
int64 max_items = 17;
|
||||
int64 min_items = 18;
|
||||
bool unique_items = 19;
|
||||
repeated Any enum = 20;
|
||||
double multiple_of = 21;
|
||||
repeated NamedAny vendor_extension = 22;
|
||||
}
|
||||
|
||||
// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
|
||||
message Paths {
|
||||
repeated NamedAny vendor_extension = 1;
|
||||
repeated NamedPathItem path = 2;
|
||||
}
|
||||
|
||||
message PrimitivesItems {
|
||||
string type = 1;
|
||||
string format = 2;
|
||||
PrimitivesItems items = 3;
|
||||
string collection_format = 4;
|
||||
Any default = 5;
|
||||
double maximum = 6;
|
||||
bool exclusive_maximum = 7;
|
||||
double minimum = 8;
|
||||
bool exclusive_minimum = 9;
|
||||
int64 max_length = 10;
|
||||
int64 min_length = 11;
|
||||
string pattern = 12;
|
||||
int64 max_items = 13;
|
||||
int64 min_items = 14;
|
||||
bool unique_items = 15;
|
||||
repeated Any enum = 16;
|
||||
double multiple_of = 17;
|
||||
repeated NamedAny vendor_extension = 18;
|
||||
}
|
||||
|
||||
message Properties {
|
||||
repeated NamedSchema additional_properties = 1;
|
||||
}
|
||||
|
||||
message QueryParameterSubSchema {
|
||||
// Determines whether or not this parameter is required or optional.
|
||||
bool required = 1;
|
||||
// Determines the location of the parameter.
|
||||
string in = 2;
|
||||
// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
|
||||
string description = 3;
|
||||
// The name of the parameter.
|
||||
string name = 4;
|
||||
// allows sending a parameter by name only or with an empty value.
|
||||
bool allow_empty_value = 5;
|
||||
string type = 6;
|
||||
string format = 7;
|
||||
PrimitivesItems items = 8;
|
||||
string collection_format = 9;
|
||||
Any default = 10;
|
||||
double maximum = 11;
|
||||
bool exclusive_maximum = 12;
|
||||
double minimum = 13;
|
||||
bool exclusive_minimum = 14;
|
||||
int64 max_length = 15;
|
||||
int64 min_length = 16;
|
||||
string pattern = 17;
|
||||
int64 max_items = 18;
|
||||
int64 min_items = 19;
|
||||
bool unique_items = 20;
|
||||
repeated Any enum = 21;
|
||||
double multiple_of = 22;
|
||||
repeated NamedAny vendor_extension = 23;
|
||||
}
|
||||
|
||||
message Response {
|
||||
string description = 1;
|
||||
SchemaItem schema = 2;
|
||||
Headers headers = 3;
|
||||
Examples examples = 4;
|
||||
repeated NamedAny vendor_extension = 5;
|
||||
}
|
||||
|
||||
// One or more JSON representations for responses
|
||||
message ResponseDefinitions {
|
||||
repeated NamedResponse additional_properties = 1;
|
||||
}
|
||||
|
||||
message ResponseValue {
|
||||
oneof oneof {
|
||||
Response response = 1;
|
||||
JsonReference json_reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Response object names can either be any valid HTTP status code or 'default'.
|
||||
message Responses {
|
||||
repeated NamedResponseValue response_code = 1;
|
||||
repeated NamedAny vendor_extension = 2;
|
||||
}
|
||||
|
||||
// A deterministic version of a JSON Schema object.
|
||||
message Schema {
|
||||
string _ref = 1;
|
||||
string format = 2;
|
||||
string title = 3;
|
||||
string description = 4;
|
||||
Any default = 5;
|
||||
double multiple_of = 6;
|
||||
double maximum = 7;
|
||||
bool exclusive_maximum = 8;
|
||||
double minimum = 9;
|
||||
bool exclusive_minimum = 10;
|
||||
int64 max_length = 11;
|
||||
int64 min_length = 12;
|
||||
string pattern = 13;
|
||||
int64 max_items = 14;
|
||||
int64 min_items = 15;
|
||||
bool unique_items = 16;
|
||||
int64 max_properties = 17;
|
||||
int64 min_properties = 18;
|
||||
repeated string required = 19;
|
||||
repeated Any enum = 20;
|
||||
AdditionalPropertiesItem additional_properties = 21;
|
||||
TypeItem type = 22;
|
||||
ItemsItem items = 23;
|
||||
repeated Schema all_of = 24;
|
||||
Properties properties = 25;
|
||||
string discriminator = 26;
|
||||
bool read_only = 27;
|
||||
Xml xml = 28;
|
||||
ExternalDocs external_docs = 29;
|
||||
Any example = 30;
|
||||
repeated NamedAny vendor_extension = 31;
|
||||
}
|
||||
|
||||
message SchemaItem {
|
||||
oneof oneof {
|
||||
Schema schema = 1;
|
||||
FileSchema file_schema = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message SecurityDefinitions {
|
||||
repeated NamedSecurityDefinitionsItem additional_properties = 1;
|
||||
}
|
||||
|
||||
message SecurityDefinitionsItem {
|
||||
oneof oneof {
|
||||
BasicAuthenticationSecurity basic_authentication_security = 1;
|
||||
ApiKeySecurity api_key_security = 2;
|
||||
Oauth2ImplicitSecurity oauth2_implicit_security = 3;
|
||||
Oauth2PasswordSecurity oauth2_password_security = 4;
|
||||
Oauth2ApplicationSecurity oauth2_application_security = 5;
|
||||
Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
|
||||
}
|
||||
}
|
||||
|
||||
message SecurityRequirement {
|
||||
repeated NamedStringArray additional_properties = 1;
|
||||
}
|
||||
|
||||
message StringArray {
|
||||
repeated string value = 1;
|
||||
}
|
||||
|
||||
message Tag {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
ExternalDocs external_docs = 3;
|
||||
repeated NamedAny vendor_extension = 4;
|
||||
}
|
||||
|
||||
message TypeItem {
|
||||
repeated string value = 1;
|
||||
}
|
||||
|
||||
// Any property starting with x- is valid.
|
||||
message VendorExtension {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
message Xml {
|
||||
string name = 1;
|
||||
string namespace = 2;
|
||||
string prefix = 3;
|
||||
bool attribute = 4;
|
||||
bool wrapped = 5;
|
||||
repeated NamedAny vendor_extension = 6;
|
||||
}
|
||||
|
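protoc-gen-go compiles the messages above into plain Go structs in package openapi_v2, so a document can also be assembled directly in code. A rough sketch under that assumption (field names follow the standard proto-to-Go mapping; all literal values are invented):

package main

import (
    "fmt"

    openapi_v2 "github.com/google/gnostic/openapiv2"
)

func main() {
    // Hand-built document corresponding to the Document and Info messages above.
    doc := &openapi_v2.Document{
        Swagger:  "2.0",
        Info:     &openapi_v2.Info{Title: "Example API", Version: "1.0.0"},
        BasePath: "/api",
        Paths:    &openapi_v2.Paths{},
    }
    fmt.Println(doc.Swagger, doc.Info.Title)
}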
14
vendor/github.com/google/gnostic/openapiv2/README.md
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
# OpenAPI v2 Protocol Buffer Models
|
||||
|
||||
This directory contains a Protocol Buffer-language model and related code for
|
||||
supporting OpenAPI v2.
|
||||
|
||||
Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol
|
||||
Buffer support code for their preferred languages.
|
||||
|
||||
OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
|
||||
the Protocol Buffer-based datastructures generated from OpenAPIv2.proto.
|
||||
|
||||
OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler
|
||||
generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer
|
||||
compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.
|
42
vendor/github.com/google/gnostic/openapiv2/document.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright 2020 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package openapi_v2
|
||||
|
||||
import (
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/google/gnostic/compiler"
|
||||
)
|
||||
|
||||
// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
|
||||
func ParseDocument(b []byte) (*Document, error) {
|
||||
info, err := compiler.ReadInfoFromBytes("", b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root := info.Content[0]
|
||||
return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
|
||||
}
|
||||
|
||||
// YAMLValue produces a serialized YAML representation of the document.
|
||||
func (d *Document) YAMLValue(comment string) ([]byte, error) {
|
||||
rawInfo := d.ToRawInfo()
|
||||
rawInfo = &yaml.Node{
|
||||
Kind: yaml.DocumentNode,
|
||||
Content: []*yaml.Node{rawInfo},
|
||||
HeadComment: comment,
|
||||
}
|
||||
return yaml.Marshal(rawInfo)
|
||||
}
|
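ParseDocument and YAMLValue together give a read-and-reserialize round trip. A minimal sketch of that flow, using only the two functions above; the swagger.yaml filename and the head comment are placeholders:

package main

import (
    "fmt"
    "os"

    openapi_v2 "github.com/google/gnostic/openapiv2"
)

func main() {
    // Read an OpenAPI v2 description; the filename is a placeholder.
    data, err := os.ReadFile("swagger.yaml")
    if err != nil {
        panic(err)
    }

    // Parse it into the generated Document message.
    doc, err := openapi_v2.ParseDocument(data)
    if err != nil {
        panic(err)
    }

    // Serialize it back to YAML with a head comment.
    out, err := doc.YAMLValue("# reserialized by gnostic")
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
}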
1610
vendor/github.com/google/gnostic/openapiv2/openapi-2.0.json
generated
vendored
Normal file
File diff suppressed because it is too large
8633
vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go
generated
vendored
Normal file
File diff suppressed because it is too large
8053
vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
672
vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto
generated
vendored
Normal file
@ -0,0 +1,672 @@
|
||||
// Copyright 2020 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// THIS FILE IS AUTOMATICALLY GENERATED.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package openapi.v3;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
// This option lets the proto compiler generate Java code inside the package
|
||||
// name (see below) instead of inside an outer class. It creates a simpler
|
||||
// developer experience by reducing one level of name nesting and being
|
||||
// consistent with most programming languages that don't support outer classes.
|
||||
option java_multiple_files = true;
|
||||
|
||||
// The Java outer classname should be the filename in UpperCamelCase. This
|
||||
// class is only used to hold proto descriptor, so developers don't need to
|
||||
// work with it directly.
|
||||
option java_outer_classname = "OpenAPIProto";
|
||||
|
||||
// The Java package name must be proto package name with proper prefix.
|
||||
option java_package = "org.openapi_v3";
|
||||
|
||||
// A reasonable prefix for the Objective-C symbols generated from the package.
|
||||
// It should at a minimum be 3 characters long, all uppercase, and convention
|
||||
// is to use an abbreviation of the package name. Something short, but
|
||||
// hopefully unique enough to not conflict with things that may come along in
|
||||
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
|
||||
option objc_class_prefix = "OAS";
|
||||
|
||||
// The Go package name.
|
||||
option go_package = "./openapiv3;openapi_v3";
|
||||
|
||||
message AdditionalPropertiesItem {
|
||||
oneof oneof {
|
||||
SchemaOrReference schema_or_reference = 1;
|
||||
bool boolean = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message Any {
|
||||
google.protobuf.Any value = 1;
|
||||
string yaml = 2;
|
||||
}
|
||||
|
||||
message AnyOrExpression {
|
||||
oneof oneof {
|
||||
Any any = 1;
|
||||
Expression expression = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// A map of possible out-of band callbacks related to the parent operation. Each value in the map is a Path Item Object that describes a set of requests that may be initiated by the API provider and the expected responses. The key value used to identify the callback object is an expression, evaluated at runtime, that identifies a URL to use for the callback operation.
|
||||
message Callback {
|
||||
repeated NamedPathItem path = 1;
|
||||
repeated NamedAny specification_extension = 2;
|
||||
}
|
||||
|
||||
message CallbackOrReference {
|
||||
oneof oneof {
|
||||
Callback callback = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message CallbacksOrReferences {
|
||||
repeated NamedCallbackOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// Holds a set of reusable objects for different aspects of the OAS. All objects defined within the components object will have no effect on the API unless they are explicitly referenced from properties outside the components object.
|
||||
message Components {
|
||||
SchemasOrReferences schemas = 1;
|
||||
ResponsesOrReferences responses = 2;
|
||||
ParametersOrReferences parameters = 3;
|
||||
ExamplesOrReferences examples = 4;
|
||||
RequestBodiesOrReferences request_bodies = 5;
|
||||
HeadersOrReferences headers = 6;
|
||||
SecuritySchemesOrReferences security_schemes = 7;
|
||||
LinksOrReferences links = 8;
|
||||
CallbacksOrReferences callbacks = 9;
|
||||
repeated NamedAny specification_extension = 10;
|
||||
}
|
||||
|
||||
// Contact information for the exposed API.
|
||||
message Contact {
|
||||
string name = 1;
|
||||
string url = 2;
|
||||
string email = 3;
|
||||
repeated NamedAny specification_extension = 4;
|
||||
}
|
||||
|
||||
message DefaultType {
|
||||
oneof oneof {
|
||||
double number = 1;
|
||||
bool boolean = 2;
|
||||
string string = 3;
|
||||
}
|
||||
}
|
||||
|
||||
// When request bodies or response payloads may be one of a number of different schemas, a `discriminator` object can be used to aid in serialization, deserialization, and validation. The discriminator is a specific object in a schema which is used to inform the consumer of the specification of an alternative schema based on the value associated with it. When using the discriminator, _inline_ schemas will not be considered.
|
||||
message Discriminator {
|
||||
string property_name = 1;
|
||||
Strings mapping = 2;
|
||||
repeated NamedAny specification_extension = 3;
|
||||
}
|
||||
|
||||
message Document {
|
||||
string openapi = 1;
|
||||
Info info = 2;
|
||||
repeated Server servers = 3;
|
||||
Paths paths = 4;
|
||||
Components components = 5;
|
||||
repeated SecurityRequirement security = 6;
|
||||
repeated Tag tags = 7;
|
||||
ExternalDocs external_docs = 8;
|
||||
repeated NamedAny specification_extension = 9;
|
||||
}
|
||||
|
||||
// A single encoding definition applied to a single schema property.
|
||||
message Encoding {
|
||||
string content_type = 1;
|
||||
HeadersOrReferences headers = 2;
|
||||
string style = 3;
|
||||
bool explode = 4;
|
||||
bool allow_reserved = 5;
|
||||
repeated NamedAny specification_extension = 6;
|
||||
}
|
||||
|
||||
message Encodings {
|
||||
repeated NamedEncoding additional_properties = 1;
|
||||
}
|
||||
|
||||
message Example {
|
||||
string summary = 1;
|
||||
string description = 2;
|
||||
Any value = 3;
|
||||
string external_value = 4;
|
||||
repeated NamedAny specification_extension = 5;
|
||||
}
|
||||
|
||||
message ExampleOrReference {
|
||||
oneof oneof {
|
||||
Example example = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message ExamplesOrReferences {
|
||||
repeated NamedExampleOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
message Expression {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
// Allows referencing an external resource for extended documentation.
|
||||
message ExternalDocs {
|
||||
string description = 1;
|
||||
string url = 2;
|
||||
repeated NamedAny specification_extension = 3;
|
||||
}
|
||||
|
||||
// The Header Object follows the structure of the Parameter Object with the following changes: 1. `name` MUST NOT be specified, it is given in the corresponding `headers` map. 1. `in` MUST NOT be specified, it is implicitly in `header`. 1. All traits that are affected by the location MUST be applicable to a location of `header` (for example, `style`).
|
||||
message Header {
|
||||
string description = 1;
|
||||
bool required = 2;
|
||||
bool deprecated = 3;
|
||||
bool allow_empty_value = 4;
|
||||
string style = 5;
|
||||
bool explode = 6;
|
||||
bool allow_reserved = 7;
|
||||
SchemaOrReference schema = 8;
|
||||
Any example = 9;
|
||||
ExamplesOrReferences examples = 10;
|
||||
MediaTypes content = 11;
|
||||
repeated NamedAny specification_extension = 12;
|
||||
}
|
||||
|
||||
message HeaderOrReference {
|
||||
oneof oneof {
|
||||
Header header = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message HeadersOrReferences {
|
||||
repeated NamedHeaderOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// The object provides metadata about the API. The metadata MAY be used by the clients if needed, and MAY be presented in editing or documentation generation tools for convenience.
|
||||
message Info {
|
||||
string title = 1;
|
||||
string description = 2;
|
||||
string terms_of_service = 3;
|
||||
Contact contact = 4;
|
||||
License license = 5;
|
||||
string version = 6;
|
||||
repeated NamedAny specification_extension = 7;
|
||||
string summary = 8;
|
||||
}
|
||||
|
||||
message ItemsItem {
|
||||
repeated SchemaOrReference schema_or_reference = 1;
|
||||
}
|
||||
|
||||
// License information for the exposed API.
|
||||
message License {
|
||||
string name = 1;
|
||||
string url = 2;
|
||||
repeated NamedAny specification_extension = 3;
|
||||
}
|
||||
|
||||
// The `Link object` represents a possible design-time link for a response. The presence of a link does not guarantee the caller's ability to successfully invoke it, rather it provides a known relationship and traversal mechanism between responses and other operations. Unlike _dynamic_ links (i.e. links provided **in** the response payload), the OAS linking mechanism does not require link information in the runtime response. For computing links, and providing instructions to execute them, a runtime expression is used for accessing values in an operation and using them as parameters while invoking the linked operation.
|
||||
message Link {
|
||||
string operation_ref = 1;
|
||||
string operation_id = 2;
|
||||
AnyOrExpression parameters = 3;
|
||||
AnyOrExpression request_body = 4;
|
||||
string description = 5;
|
||||
Server server = 6;
|
||||
repeated NamedAny specification_extension = 7;
|
||||
}
|
||||
|
||||
message LinkOrReference {
|
||||
oneof oneof {
|
||||
Link link = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message LinksOrReferences {
|
||||
repeated NamedLinkOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// Each Media Type Object provides schema and examples for the media type identified by its key.
|
||||
message MediaType {
|
||||
SchemaOrReference schema = 1;
|
||||
Any example = 2;
|
||||
ExamplesOrReferences examples = 3;
|
||||
Encodings encoding = 4;
|
||||
repeated NamedAny specification_extension = 5;
|
||||
}
|
||||
|
||||
message MediaTypes {
|
||||
repeated NamedMediaType additional_properties = 1;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
|
||||
message NamedAny {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Any value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of CallbackOrReference as ordered (name,value) pairs.
|
||||
message NamedCallbackOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
CallbackOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of Encoding as ordered (name,value) pairs.
|
||||
message NamedEncoding {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
Encoding value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of ExampleOrReference as ordered (name,value) pairs.
|
||||
message NamedExampleOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
ExampleOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of HeaderOrReference as ordered (name,value) pairs.
|
||||
message NamedHeaderOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
HeaderOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of LinkOrReference as ordered (name,value) pairs.
|
||||
message NamedLinkOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
LinkOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of MediaType as ordered (name,value) pairs.
|
||||
message NamedMediaType {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
MediaType value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of ParameterOrReference as ordered (name,value) pairs.
|
||||
message NamedParameterOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
ParameterOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
|
||||
message NamedPathItem {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
PathItem value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of RequestBodyOrReference as ordered (name,value) pairs.
|
||||
message NamedRequestBodyOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
RequestBodyOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of ResponseOrReference as ordered (name,value) pairs.
|
||||
message NamedResponseOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
ResponseOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of SchemaOrReference as ordered (name,value) pairs.
|
||||
message NamedSchemaOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
SchemaOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of SecuritySchemeOrReference as ordered (name,value) pairs.
|
||||
message NamedSecuritySchemeOrReference {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
SecuritySchemeOrReference value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of ServerVariable as ordered (name,value) pairs.
|
||||
message NamedServerVariable {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
ServerVariable value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
|
||||
message NamedString {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
string value = 2;
|
||||
}
|
||||
|
||||
// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
|
||||
message NamedStringArray {
|
||||
// Map key
|
||||
string name = 1;
|
||||
// Mapped value
|
||||
StringArray value = 2;
|
||||
}
|
||||
|
||||
// Configuration details for a supported OAuth Flow
|
||||
message OauthFlow {
|
||||
string authorization_url = 1;
|
||||
string token_url = 2;
|
||||
string refresh_url = 3;
|
||||
Strings scopes = 4;
|
||||
repeated NamedAny specification_extension = 5;
|
||||
}
|
||||
|
||||
// Allows configuration of the supported OAuth Flows.
|
||||
message OauthFlows {
|
||||
OauthFlow implicit = 1;
|
||||
OauthFlow password = 2;
|
||||
OauthFlow client_credentials = 3;
|
||||
OauthFlow authorization_code = 4;
|
||||
repeated NamedAny specification_extension = 5;
|
||||
}
|
||||
|
||||
message Object {
|
||||
repeated NamedAny additional_properties = 1;
|
||||
}
|
||||
|
||||
// Describes a single API operation on a path.
|
||||
message Operation {
|
||||
repeated string tags = 1;
|
||||
string summary = 2;
|
||||
string description = 3;
|
||||
ExternalDocs external_docs = 4;
|
||||
string operation_id = 5;
|
||||
repeated ParameterOrReference parameters = 6;
|
||||
RequestBodyOrReference request_body = 7;
|
||||
Responses responses = 8;
|
||||
CallbacksOrReferences callbacks = 9;
|
||||
bool deprecated = 10;
|
||||
repeated SecurityRequirement security = 11;
|
||||
repeated Server servers = 12;
|
||||
repeated NamedAny specification_extension = 13;
|
||||
}
|
||||
|
||||
// Describes a single operation parameter. A unique parameter is defined by a combination of a name and location.
|
||||
message Parameter {
|
||||
string name = 1;
|
||||
string in = 2;
|
||||
string description = 3;
|
||||
bool required = 4;
|
||||
bool deprecated = 5;
|
||||
bool allow_empty_value = 6;
|
||||
string style = 7;
|
||||
bool explode = 8;
|
||||
bool allow_reserved = 9;
|
||||
SchemaOrReference schema = 10;
|
||||
Any example = 11;
|
||||
ExamplesOrReferences examples = 12;
|
||||
MediaTypes content = 13;
|
||||
repeated NamedAny specification_extension = 14;
|
||||
}
|
||||
|
||||
message ParameterOrReference {
|
||||
oneof oneof {
|
||||
Parameter parameter = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message ParametersOrReferences {
|
||||
repeated NamedParameterOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// Describes the operations available on a single path. A Path Item MAY be empty, due to ACL constraints. The path itself is still exposed to the documentation viewer but they will not know which operations and parameters are available.
|
||||
message PathItem {
|
||||
string _ref = 1;
|
||||
string summary = 2;
|
||||
string description = 3;
|
||||
Operation get = 4;
|
||||
Operation put = 5;
|
||||
Operation post = 6;
|
||||
Operation delete = 7;
|
||||
Operation options = 8;
|
||||
Operation head = 9;
|
||||
Operation patch = 10;
|
||||
Operation trace = 11;
|
||||
repeated Server servers = 12;
|
||||
repeated ParameterOrReference parameters = 13;
|
||||
repeated NamedAny specification_extension = 14;
|
||||
}
|
||||
|
||||
// Holds the relative paths to the individual endpoints and their operations. The path is appended to the URL from the `Server Object` in order to construct the full URL. The Paths MAY be empty, due to ACL constraints.
|
||||
message Paths {
|
||||
repeated NamedPathItem path = 1;
|
||||
repeated NamedAny specification_extension = 2;
|
||||
}
|
||||
|
||||
message Properties {
|
||||
repeated NamedSchemaOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// A simple object to allow referencing other components in the specification, internally and externally. The Reference Object is defined by JSON Reference and follows the same structure, behavior and rules. For this specification, reference resolution is accomplished as defined by the JSON Reference specification and not by the JSON Schema specification.
|
||||
message Reference {
|
||||
string _ref = 1;
|
||||
string summary = 2;
|
||||
string description = 3;
|
||||
}
|
||||
|
||||
message RequestBodiesOrReferences {
|
||||
repeated NamedRequestBodyOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// Describes a single request body.
|
||||
message RequestBody {
|
||||
string description = 1;
|
||||
MediaTypes content = 2;
|
||||
bool required = 3;
|
||||
repeated NamedAny specification_extension = 4;
|
||||
}
|
||||
|
||||
message RequestBodyOrReference {
|
||||
oneof oneof {
|
||||
RequestBody request_body = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Describes a single response from an API Operation, including design-time, static `links` to operations based on the response.
|
||||
message Response {
|
||||
string description = 1;
|
||||
HeadersOrReferences headers = 2;
|
||||
MediaTypes content = 3;
|
||||
LinksOrReferences links = 4;
|
||||
repeated NamedAny specification_extension = 5;
|
||||
}
|
||||
|
||||
message ResponseOrReference {
|
||||
oneof oneof {
|
||||
Response response = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// A container for the expected responses of an operation. The container maps an HTTP response code to the expected response. The documentation is not necessarily expected to cover all possible HTTP response codes because they may not be known in advance. However, documentation is expected to cover a successful operation response and any known errors. The `default` MAY be used as a default response object for all HTTP codes that are not covered individually by the specification. The `Responses Object` MUST contain at least one response code, and it SHOULD be the response for a successful operation call.
|
||||
message Responses {
|
||||
ResponseOrReference default = 1;
|
||||
repeated NamedResponseOrReference response_or_reference = 2;
|
||||
repeated NamedAny specification_extension = 3;
|
||||
}
|
||||
|
||||
message ResponsesOrReferences {
|
||||
repeated NamedResponseOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// The Schema Object allows the definition of input and output data types. These types can be objects, but also primitives and arrays. This object is an extended subset of the JSON Schema Specification Wright Draft 00. For more information about the properties, see JSON Schema Core and JSON Schema Validation. Unless stated otherwise, the property definitions follow the JSON Schema.
|
||||
message Schema {
|
||||
bool nullable = 1;
|
||||
Discriminator discriminator = 2;
|
||||
bool read_only = 3;
|
||||
bool write_only = 4;
|
||||
Xml xml = 5;
|
||||
ExternalDocs external_docs = 6;
|
||||
Any example = 7;
|
||||
bool deprecated = 8;
|
||||
string title = 9;
|
||||
double multiple_of = 10;
|
||||
double maximum = 11;
|
||||
bool exclusive_maximum = 12;
|
||||
double minimum = 13;
|
||||
bool exclusive_minimum = 14;
|
||||
int64 max_length = 15;
|
||||
int64 min_length = 16;
|
||||
string pattern = 17;
|
||||
int64 max_items = 18;
|
||||
int64 min_items = 19;
|
||||
bool unique_items = 20;
|
||||
int64 max_properties = 21;
|
||||
int64 min_properties = 22;
|
||||
repeated string required = 23;
|
||||
repeated Any enum = 24;
|
||||
string type = 25;
|
||||
repeated SchemaOrReference all_of = 26;
|
||||
repeated SchemaOrReference one_of = 27;
|
||||
repeated SchemaOrReference any_of = 28;
|
||||
Schema not = 29;
|
||||
ItemsItem items = 30;
|
||||
Properties properties = 31;
|
||||
AdditionalPropertiesItem additional_properties = 32;
|
||||
DefaultType default = 33;
|
||||
string description = 34;
|
||||
string format = 35;
|
||||
repeated NamedAny specification_extension = 36;
|
||||
}
|
||||
|
||||
message SchemaOrReference {
|
||||
oneof oneof {
|
||||
Schema schema = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message SchemasOrReferences {
|
||||
repeated NamedSchemaOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// Lists the required security schemes to execute this operation. The name used for each property MUST correspond to a security scheme declared in the Security Schemes under the Components Object. Security Requirement Objects that contain multiple schemes require that all schemes MUST be satisfied for a request to be authorized. This enables support for scenarios where multiple query parameters or HTTP headers are required to convey security information. When a list of Security Requirement Objects is defined on the OpenAPI Object or Operation Object, only one of the Security Requirement Objects in the list needs to be satisfied to authorize the request.
|
||||
message SecurityRequirement {
|
||||
repeated NamedStringArray additional_properties = 1;
|
||||
}
|
||||
|
||||
// Defines a security scheme that can be used by the operations. Supported schemes are HTTP authentication, an API key (either as a header, a cookie parameter or as a query parameter), mutual TLS (use of a client certificate), OAuth2's common flows (implicit, password, application and access code) as defined in RFC6749, and OpenID Connect. Please note that as of 2019 the implicit flow is about to be deprecated by the OAuth 2.0 Security Best Current Practice. The recommended flow for most use cases is the Authorization Code Grant flow with PKCE.
|
||||
message SecurityScheme {
|
||||
string type = 1;
|
||||
string description = 2;
|
||||
string name = 3;
|
||||
string in = 4;
|
||||
string scheme = 5;
|
||||
string bearer_format = 6;
|
||||
OauthFlows flows = 7;
|
||||
string open_id_connect_url = 8;
|
||||
repeated NamedAny specification_extension = 9;
|
||||
}
|
||||
|
||||
message SecuritySchemeOrReference {
|
||||
oneof oneof {
|
||||
SecurityScheme security_scheme = 1;
|
||||
Reference reference = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message SecuritySchemesOrReferences {
|
||||
repeated NamedSecuritySchemeOrReference additional_properties = 1;
|
||||
}
|
||||
|
||||
// An object representing a Server.
|
||||
message Server {
|
||||
string url = 1;
|
||||
string description = 2;
|
||||
ServerVariables variables = 3;
|
||||
repeated NamedAny specification_extension = 4;
|
||||
}
|
||||
|
||||
// An object representing a Server Variable for server URL template substitution.
|
||||
message ServerVariable {
|
||||
repeated string enum = 1;
|
||||
string default = 2;
|
||||
string description = 3;
|
||||
repeated NamedAny specification_extension = 4;
|
||||
}
|
||||
|
||||
message ServerVariables {
|
||||
repeated NamedServerVariable additional_properties = 1;
|
||||
}
|
||||
|
||||
// Any property starting with x- is valid.
|
||||
message SpecificationExtension {
|
||||
oneof oneof {
|
||||
double number = 1;
|
||||
bool boolean = 2;
|
||||
string string = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message StringArray {
|
||||
repeated string value = 1;
|
||||
}
|
||||
|
||||
message Strings {
|
||||
repeated NamedString additional_properties = 1;
|
||||
}
|
||||
|
||||
// Adds metadata to a single tag that is used by the Operation Object. It is not mandatory to have a Tag Object per tag defined in the Operation Object instances.
|
||||
message Tag {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
ExternalDocs external_docs = 3;
|
||||
repeated NamedAny specification_extension = 4;
|
||||
}
|
||||
|
||||
// A metadata object that allows for more fine-tuned XML model definitions. When using arrays, XML element names are *not* inferred (for singular/plural forms) and the `name` property SHOULD be used to add that information. See examples for expected behavior.
|
||||
message Xml {
|
||||
string name = 1;
|
||||
string namespace = 2;
|
||||
string prefix = 3;
|
||||
bool attribute = 4;
|
||||
bool wrapped = 5;
|
||||
repeated NamedAny specification_extension = 6;
|
||||
}
|
||||
|
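To make the message layout above concrete, here is a minimal, hedged sketch in Go that assembles a Document value using the bindings protoc generates from this proto (vendored here as package openapi_v3). The title, version, and other field values are illustrative assumptions, not part of the vendored sources.

package main

import (
	"fmt"

	openapi_v3 "github.com/google/gnostic/openapiv3"
)

func main() {
	// Assemble a minimal OpenAPI v3 document from the generated types.
	doc := &openapi_v3.Document{
		Openapi: "3.0.3", // hypothetical spec version
		Info: &openapi_v3.Info{
			Title:   "Example API", // hypothetical title
			Version: "1.0.0",
		},
		Paths: &openapi_v3.Paths{},
	}
	fmt.Println(doc.GetInfo().GetTitle())
}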
21
vendor/github.com/google/gnostic/openapiv3/README.md
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
# OpenAPI v3 Protocol Buffer Models
|
||||
|
||||
This directory contains a Protocol Buffer-language model and related code for
|
||||
supporting OpenAPI v3.
|
||||
|
||||
Gnostic applications and plugins can use OpenAPIv3.proto to generate Protocol
|
||||
Buffer support code for their preferred languages.
|
||||
|
||||
OpenAPIv3.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
|
||||
the Protocol Buffer-based data structures generated from OpenAPIv3.proto.
|
||||
|
||||
OpenAPIv3.proto and OpenAPIv3.go are generated by the Gnostic compiler
|
||||
generator, and OpenAPIv3.pb.go is generated by protoc, the Protocol Buffer
|
||||
compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.
|
||||
|
||||
openapi-3.1.json is a JSON schema for OpenAPI 3.1 that is automatically
|
||||
generated from the OpenAPI 3.1 specification. It is not an official JSON Schema
|
||||
for OpenAPI.
|
||||
|
||||
The schema-generator directory contains support code which generates
|
||||
openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown).
|
42
vendor/github.com/google/gnostic/openapiv3/document.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright 2020 Google LLC. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package openapi_v3
|
||||
|
||||
import (
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/google/gnostic/compiler"
|
||||
)
|
||||
|
||||
// ParseDocument reads an OpenAPI v3 description from a YAML/JSON representation.
|
||||
func ParseDocument(b []byte) (*Document, error) {
|
||||
info, err := compiler.ReadInfoFromBytes("", b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
root := info.Content[0]
|
||||
return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
|
||||
}
|
||||
|
||||
// YAMLValue produces a serialized YAML representation of the document.
|
||||
func (d *Document) YAMLValue(comment string) ([]byte, error) {
|
||||
rawInfo := d.ToRawInfo()
|
||||
rawInfo = &yaml.Node{
|
||||
Kind: yaml.DocumentNode,
|
||||
Content: []*yaml.Node{rawInfo},
|
||||
HeadComment: comment,
|
||||
}
|
||||
return yaml.Marshal(rawInfo)
|
||||
}
|
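As a usage sketch of the two functions in this file: ParseDocument turns YAML or JSON bytes into a Document, and YAMLValue serializes it back. The input file name below is a hypothetical example, not something defined by this package.

package main

import (
	"log"
	"os"

	openapi_v3 "github.com/google/gnostic/openapiv3"
)

func main() {
	// "openapi.yaml" is a hypothetical OpenAPI v3 description on disk.
	b, err := os.ReadFile("openapi.yaml")
	if err != nil {
		log.Fatal(err)
	}
	doc, err := openapi_v3.ParseDocument(b)
	if err != nil {
		log.Fatal(err)
	}
	// Round-trip the document back to YAML with a head comment.
	out, err := doc.YAMLValue("regenerated from openapi.yaml")
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}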
1251
vendor/github.com/google/gnostic/openapiv3/openapi-3.0.json
generated
vendored
Normal file
File diff suppressed because it is too large
1250
vendor/github.com/google/gnostic/openapiv3/openapi-3.1.json
generated
vendored
Normal file
File diff suppressed because it is too large
27
vendor/github.com/google/go-cmp/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2017 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
682
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
Normal file
@ -0,0 +1,682 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cmp determines equality of values.
|
||||
//
|
||||
// This package is intended to be a more powerful and safer alternative to
|
||||
// reflect.DeepEqual for comparing whether two values are semantically equal.
|
||||
// It is intended to only be used in tests, as performance is not a goal and
|
||||
// it may panic if it cannot compare the values. Its propensity towards
|
||||
// panicking means that it's unsuitable for production environments where a
|
||||
// spurious panic may be fatal.
|
||||
//
|
||||
// The primary features of cmp are:
|
||||
//
|
||||
// • When the default behavior of equality does not suit the needs of the test,
|
||||
// custom equality functions can override the equality operation.
|
||||
// For example, an equality function may report floats as equal so long as they
|
||||
// are within some tolerance of each other.
|
||||
//
|
||||
// • Types that have an Equal method may use that method to determine equality.
|
||||
// This allows package authors to determine the equality operation for the types
|
||||
// that they define.
|
||||
//
|
||||
// • If no custom equality functions are used and no Equal method is defined,
|
||||
// equality is determined by recursively comparing the primitive kinds on both
|
||||
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
|
||||
// fields are not compared by default; they result in panics unless suppressed
|
||||
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
|
||||
// compared using the Exporter option.
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// Equal reports whether x and y are equal by recursively applying the
|
||||
// following rules in the given order to x and y and all of their sub-values:
|
||||
//
|
||||
// • Let S be the set of all Ignore, Transformer, and Comparer options that
|
||||
// remain after applying all path filters, value filters, and type filters.
|
||||
// If at least one Ignore exists in S, then the comparison is ignored.
|
||||
// If the number of Transformer and Comparer options in S is greater than one,
|
||||
// then Equal panics because it is ambiguous which option to use.
|
||||
// If S contains a single Transformer, then use that to transform the current
|
||||
// values and recursively call Equal on the output values.
|
||||
// If S contains a single Comparer, then use that to compare the current values.
|
||||
// Otherwise, evaluation proceeds to the next rule.
|
||||
//
|
||||
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
|
||||
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
|
||||
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
|
||||
// evaluation proceeds to the next rule.
|
||||
//
|
||||
// • Lastly, try to compare x and y based on their basic kinds.
|
||||
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
|
||||
// channels are compared using the equivalent of the == operator in Go.
|
||||
// Functions are only equal if they are both nil, otherwise they are unequal.
|
||||
//
|
||||
// Structs are equal if recursively calling Equal on all fields reports equal.
|
||||
// If a struct contains unexported fields, Equal panics unless an Ignore option
|
||||
// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option
|
||||
// explicitly permits comparing the unexported field.
|
||||
//
|
||||
// Slices are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored slice or array elements reports equal.
|
||||
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
|
||||
// consider using cmpopts.EquateEmpty.
|
||||
//
|
||||
// Maps are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored map entries reports equal.
|
||||
// Map keys are equal according to the == operator.
|
||||
// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
|
||||
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
|
||||
// consider using cmpopts.EquateEmpty.
|
||||
//
|
||||
// Pointers and interfaces are equal if they are both nil or both non-nil,
|
||||
// where they have the same underlying concrete type and recursively
|
||||
// calling Equal on the underlying values reports equal.
|
||||
//
|
||||
// Before recursing into a pointer, slice element, or map, the current path
|
||||
// is checked to detect whether the address has already been visited.
|
||||
// If there is a cycle, then the pointed at values are considered equal
|
||||
// only if both addresses were previously visited in the same path step.
|
||||
func Equal(x, y interface{}, opts ...Option) bool {
|
||||
s := newState(opts)
|
||||
s.compareAny(rootStep(x, y))
|
||||
return s.result.Equal()
|
||||
}
|
||||
|
||||
// Diff returns a human-readable report of the differences between two values:
|
||||
// y - x. It returns an empty string if and only if Equal returns true for the
|
||||
// same input values and options.
|
||||
//
|
||||
// The output is displayed as a literal in pseudo-Go syntax.
|
||||
// At the start of each line, a "-" prefix indicates an element removed from x,
|
||||
// a "+" prefix to indicates an element added from y, and the lack of a prefix
|
||||
// indicates an element common to both x and y. If possible, the output
|
||||
// uses fmt.Stringer.String or error.Error methods to produce more humanly
|
||||
// readable outputs. In such cases, the string is prefixed with either an
|
||||
// 's' or 'e' character, respectively, to indicate that the method was called.
|
||||
//
|
||||
// Do not depend on this output being stable. If you need the ability to
|
||||
// programmatically interpret the difference, consider using a custom Reporter.
|
||||
func Diff(x, y interface{}, opts ...Option) string {
|
||||
s := newState(opts)
|
||||
|
||||
// Optimization: If there are no other reporters, we can optimize for the
|
||||
// common case where the result is equal (and thus no reported difference).
|
||||
// This avoids the expensive construction of a difference tree.
|
||||
if len(s.reporters) == 0 {
|
||||
s.compareAny(rootStep(x, y))
|
||||
if s.result.Equal() {
|
||||
return ""
|
||||
}
|
||||
s.result = diff.Result{} // Reset results
|
||||
}
|
||||
|
||||
r := new(defaultReporter)
|
||||
s.reporters = append(s.reporters, reporter{r})
|
||||
s.compareAny(rootStep(x, y))
|
||||
d := r.String()
|
||||
if (d == "") != s.result.Equal() {
|
||||
panic("inconsistent difference and equality results")
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// rootStep constructs the first path step. If x and y have differing types,
|
||||
// then they are stored within an empty interface type.
|
||||
func rootStep(x, y interface{}) PathStep {
|
||||
vx := reflect.ValueOf(x)
|
||||
vy := reflect.ValueOf(y)
|
||||
|
||||
// If the inputs are different types, auto-wrap them in an empty interface
|
||||
// so that they have the same parent type.
|
||||
var t reflect.Type
|
||||
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
|
||||
t = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
if vx.IsValid() {
|
||||
vvx := reflect.New(t).Elem()
|
||||
vvx.Set(vx)
|
||||
vx = vvx
|
||||
}
|
||||
if vy.IsValid() {
|
||||
vvy := reflect.New(t).Elem()
|
||||
vvy.Set(vy)
|
||||
vy = vvy
|
||||
}
|
||||
} else {
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
return &pathStep{t, vx, vy}
|
||||
}
|
||||
|
||||
type state struct {
|
||||
// These fields represent the "comparison state".
|
||||
// Calling statelessCompare must not result in observable changes to these.
|
||||
result diff.Result // The current result of comparison
|
||||
curPath Path // The current path in the value tree
|
||||
curPtrs pointerPath // The current set of visited pointers
|
||||
reporters []reporter // Optional reporters
|
||||
|
||||
// recChecker checks for infinite cycles applying the same set of
|
||||
// transformers upon the output of itself.
|
||||
recChecker recChecker
|
||||
|
||||
// dynChecker triggers pseudo-random checks for option correctness.
|
||||
// It is safe for statelessCompare to mutate this value.
|
||||
dynChecker dynChecker
|
||||
|
||||
// These fields, once set by processOption, will not change.
|
||||
exporters []exporter // List of exporters for structs with unexported fields
|
||||
opts Options // List of all fundamental and filter options
|
||||
}
|
||||
|
||||
func newState(opts []Option) *state {
|
||||
// Always ensure a validator option exists to validate the inputs.
|
||||
s := &state{opts: Options{validator{}}}
|
||||
s.curPtrs.Init()
|
||||
s.processOption(Options(opts))
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *state) processOption(opt Option) {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
case Options:
|
||||
for _, o := range opt {
|
||||
s.processOption(o)
|
||||
}
|
||||
case coreOption:
|
||||
type filtered interface {
|
||||
isFiltered() bool
|
||||
}
|
||||
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
|
||||
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
|
||||
}
|
||||
s.opts = append(s.opts, opt)
|
||||
case exporter:
|
||||
s.exporters = append(s.exporters, opt)
|
||||
case reporter:
|
||||
s.reporters = append(s.reporters, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown option %T", opt))
|
||||
}
|
||||
}
|
||||
|
||||
// statelessCompare compares two values and returns the result.
|
||||
// This function is stateless in that it does not alter the current result,
|
||||
// or output to any registered reporters.
|
||||
func (s *state) statelessCompare(step PathStep) diff.Result {
|
||||
// We do not save and restore curPath and curPtrs because all of the
|
||||
// compareX methods should properly push and pop from them.
|
||||
// It is an implementation bug if the contents of the paths differ from
|
||||
// when calling this function to when returning from it.
|
||||
|
||||
oldResult, oldReporters := s.result, s.reporters
|
||||
s.result = diff.Result{} // Reset result
|
||||
s.reporters = nil // Remove reporters to avoid spurious printouts
|
||||
s.compareAny(step)
|
||||
res := s.result
|
||||
s.result, s.reporters = oldResult, oldReporters
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *state) compareAny(step PathStep) {
|
||||
// Update the path stack.
|
||||
s.curPath.push(step)
|
||||
defer s.curPath.pop()
|
||||
for _, r := range s.reporters {
|
||||
r.PushStep(step)
|
||||
defer r.PopStep()
|
||||
}
|
||||
s.recChecker.Check(s.curPath)
|
||||
|
||||
// Cycle-detection for slice elements (see NOTE in compareSlice).
|
||||
t := step.Type()
|
||||
vx, vy := step.Values()
|
||||
if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
|
||||
px, py := vx.Addr(), vy.Addr()
|
||||
if eq, visited := s.curPtrs.Push(px, py); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(px, py)
|
||||
}
|
||||
|
||||
// Rule 1: Check whether an option applies on this node in the value tree.
|
||||
if s.tryOptions(t, vx, vy) {
|
||||
return
|
||||
}
|
||||
|
||||
// Rule 2: Check whether the type has a valid Equal method.
|
||||
if s.tryMethod(t, vx, vy) {
|
||||
return
|
||||
}
|
||||
|
||||
// Rule 3: Compare based on the underlying kind.
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
s.report(vx.Bool() == vy.Bool(), 0)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
s.report(vx.Int() == vy.Int(), 0)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
s.report(vx.Uint() == vy.Uint(), 0)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
s.report(vx.Float() == vy.Float(), 0)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
s.report(vx.Complex() == vy.Complex(), 0)
|
||||
case reflect.String:
|
||||
s.report(vx.String() == vy.String(), 0)
|
||||
case reflect.Chan, reflect.UnsafePointer:
|
||||
s.report(vx.Pointer() == vy.Pointer(), 0)
|
||||
case reflect.Func:
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
case reflect.Struct:
|
||||
s.compareStruct(t, vx, vy)
|
||||
case reflect.Slice, reflect.Array:
|
||||
s.compareSlice(t, vx, vy)
|
||||
case reflect.Map:
|
||||
s.compareMap(t, vx, vy)
|
||||
case reflect.Ptr:
|
||||
s.comparePtr(t, vx, vy)
|
||||
case reflect.Interface:
|
||||
s.compareInterface(t, vx, vy)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
// Evaluate all filters and apply the remaining options.
|
||||
if opt := s.opts.filter(s, t, vx, vy); opt != nil {
|
||||
opt.apply(s, vx, vy)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
// Check if this type even has an Equal method.
|
||||
m, ok := t.MethodByName("Equal")
|
||||
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
|
||||
return false
|
||||
}
|
||||
|
||||
eq := s.callTTBFunc(m.Func, vx, vy)
|
||||
s.report(eq, reportByMethod)
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
|
||||
v = sanitizeValue(v, f.Type().In(0))
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{v})[0]
|
||||
}
|
||||
|
||||
// Run the function twice and ensure that we get the same results back.
|
||||
// We run in goroutines so that the race detector (if enabled) can detect
|
||||
// unsafe mutations to the input.
|
||||
c := make(chan reflect.Value)
|
||||
go detectRaces(c, f, v)
|
||||
got := <-c
|
||||
want := f.Call([]reflect.Value{v})[0]
|
||||
if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
|
||||
// To avoid false-positives with non-reflexive equality operations,
|
||||
// we sanity check whether a value is equal to itself.
|
||||
if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
|
||||
return want
|
||||
}
|
||||
panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
|
||||
}
|
||||
return want
|
||||
}
|
||||
|
||||
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
|
||||
x = sanitizeValue(x, f.Type().In(0))
|
||||
y = sanitizeValue(y, f.Type().In(1))
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
}
|
||||
|
||||
// Swapping the input arguments is sufficient to check that
|
||||
// f is symmetric and deterministic.
|
||||
// We run in goroutines so that the race detector (if enabled) can detect
|
||||
// unsafe mutations to the input.
|
||||
c := make(chan reflect.Value)
|
||||
go detectRaces(c, f, y, x)
|
||||
got := <-c
|
||||
want := f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
if !got.IsValid() || got.Bool() != want {
|
||||
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
|
||||
}
|
||||
return want
|
||||
}
|
||||
|
||||
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
|
||||
var ret reflect.Value
|
||||
defer func() {
|
||||
recover() // Ignore panics, let the other call to f panic instead
|
||||
c <- ret
|
||||
}()
|
||||
ret = f.Call(vs)[0]
|
||||
}
|
||||
|
||||
// sanitizeValue converts nil interfaces of type T to those of type R,
|
||||
// assuming that T is assignable to R.
|
||||
// Otherwise, it returns the input value as is.
|
||||
func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
|
||||
// TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143).
|
||||
if !flags.AtLeastGo110 {
|
||||
if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
|
||||
return reflect.New(t).Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
|
||||
var addr bool
|
||||
var vax, vay reflect.Value // Addressable versions of vx and vy
|
||||
|
||||
var mayForce, mayForceInit bool
|
||||
step := StructField{&structField{}}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
step.typ = t.Field(i).Type
|
||||
step.vx = vx.Field(i)
|
||||
step.vy = vy.Field(i)
|
||||
step.name = t.Field(i).Name
|
||||
step.idx = i
|
||||
step.unexported = !isExported(step.name)
|
||||
if step.unexported {
|
||||
if step.name == "_" {
|
||||
continue
|
||||
}
|
||||
// Defer checking of unexported fields until later to give an
|
||||
// Ignore a chance to ignore the field.
|
||||
if !vax.IsValid() || !vay.IsValid() {
|
||||
// For retrieveUnexportedField to work, the parent struct must
|
||||
// be addressable. Create a new copy of the values if
|
||||
// necessary to make them addressable.
|
||||
addr = vx.CanAddr() || vy.CanAddr()
|
||||
vax = makeAddressable(vx)
|
||||
vay = makeAddressable(vy)
|
||||
}
|
||||
if !mayForceInit {
|
||||
for _, xf := range s.exporters {
|
||||
mayForce = mayForce || xf(t)
|
||||
}
|
||||
mayForceInit = true
|
||||
}
|
||||
step.mayForce = mayForce
|
||||
step.paddr = addr
|
||||
step.pvx = vax
|
||||
step.pvy = vay
|
||||
step.field = t.Field(i)
|
||||
}
|
||||
s.compareAny(step)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
|
||||
isSlice := t.Kind() == reflect.Slice
|
||||
if isSlice && (vx.IsNil() || vy.IsNil()) {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
|
||||
// since a slice represents a list of pointers, rather than a single pointer.
|
||||
// The pointer checking logic must be handled on a per-element basis
|
||||
// in compareAny.
|
||||
//
|
||||
// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
|
||||
// pointer P, a length N, and a capacity C. Supposing each slice element has
|
||||
// a memory size of M, then the slice is equivalent to the list of pointers:
|
||||
// [P+i*M for i in range(N)]
|
||||
//
|
||||
// For example, v[:0] and v[:1] are slices with the same starting pointer,
|
||||
// but they are clearly different values. Using the slice pointer alone
|
||||
// violates the assumption that equal pointers implies equal values.
|
||||
|
||||
step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
|
||||
withIndexes := func(ix, iy int) SliceIndex {
|
||||
if ix >= 0 {
|
||||
step.vx, step.xkey = vx.Index(ix), ix
|
||||
} else {
|
||||
step.vx, step.xkey = reflect.Value{}, -1
|
||||
}
|
||||
if iy >= 0 {
|
||||
step.vy, step.ykey = vy.Index(iy), iy
|
||||
} else {
|
||||
step.vy, step.ykey = reflect.Value{}, -1
|
||||
}
|
||||
return step
|
||||
}
|
||||
|
||||
// Ignore options are able to ignore missing elements in a slice.
|
||||
// However, detecting these reliably requires an optimal differencing
|
||||
// algorithm, which diff.Difference is not.
|
||||
//
|
||||
// Instead, we first iterate through both slices to detect which elements
|
||||
// would be ignored if standing alone. The indexes of non-discarded elements
|
||||
// are stored in a separate slice, which diffing is then performed on.
|
||||
var indexesX, indexesY []int
|
||||
var ignoredX, ignoredY []bool
|
||||
for ix := 0; ix < vx.Len(); ix++ {
|
||||
ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
|
||||
if !ignored {
|
||||
indexesX = append(indexesX, ix)
|
||||
}
|
||||
ignoredX = append(ignoredX, ignored)
|
||||
}
|
||||
for iy := 0; iy < vy.Len(); iy++ {
|
||||
ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
|
||||
if !ignored {
|
||||
indexesY = append(indexesY, iy)
|
||||
}
|
||||
ignoredY = append(ignoredY, ignored)
|
||||
}
|
||||
|
||||
// Compute an edit-script for slices vx and vy (excluding ignored elements).
|
||||
edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
|
||||
return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
|
||||
})
|
||||
|
||||
// Replay the ignore-scripts and the edit-script.
|
||||
var ix, iy int
|
||||
for ix < vx.Len() || iy < vy.Len() {
|
||||
var e diff.EditType
|
||||
switch {
|
||||
case ix < len(ignoredX) && ignoredX[ix]:
|
||||
e = diff.UniqueX
|
||||
case iy < len(ignoredY) && ignoredY[iy]:
|
||||
e = diff.UniqueY
|
||||
default:
|
||||
e, edits = edits[0], edits[1:]
|
||||
}
|
||||
switch e {
|
||||
case diff.UniqueX:
|
||||
s.compareAny(withIndexes(ix, -1))
|
||||
ix++
|
||||
case diff.UniqueY:
|
||||
s.compareAny(withIndexes(-1, iy))
|
||||
iy++
|
||||
default:
|
||||
s.compareAny(withIndexes(ix, iy))
|
||||
ix++
|
||||
iy++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle-detection for maps.
|
||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(vx, vy)
|
||||
|
||||
// We combine and sort the two map keys so that we can perform the
|
||||
// comparisons in a deterministic order.
|
||||
step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
|
||||
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
|
||||
step.vx = vx.MapIndex(k)
|
||||
step.vy = vy.MapIndex(k)
|
||||
step.key = k
|
||||
if !step.vx.IsValid() && !step.vy.IsValid() {
|
||||
// It is possible for both vx and vy to be invalid if the
|
||||
// key contained a NaN value in it.
|
||||
//
|
||||
// Even with the ability to retrieve NaN keys in Go 1.12,
|
||||
// there still isn't a sensible way to compare the values since
|
||||
// a NaN key may map to multiple unordered values.
|
||||
// The most reasonable way to compare NaNs would be to compare the
|
||||
// set of values. However, this is impossible to do efficiently
|
||||
// since set equality is provably an O(n^2) operation given only
|
||||
// an Equal function. If we had a Less function or Hash function,
|
||||
// this could be done in O(n*log(n)) or O(n), respectively.
|
||||
//
|
||||
// Rather than adding complex logic to deal with NaNs, make it
|
||||
// the user's responsibility to compare such obscure maps.
|
||||
const help = "consider providing a Comparer to compare the map"
|
||||
panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
|
||||
}
|
||||
s.compareAny(step)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle-detection for pointers.
|
||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(vx, vy)
|
||||
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
|
||||
}
|
||||
|
||||
func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
if vx.Type() != vy.Type() {
|
||||
s.report(false, 0)
|
||||
return
|
||||
}
|
||||
s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
|
||||
}
|
||||
|
||||
func (s *state) report(eq bool, rf resultFlags) {
|
||||
if rf&reportByIgnore == 0 {
|
||||
if eq {
|
||||
s.result.NumSame++
|
||||
rf |= reportEqual
|
||||
} else {
|
||||
s.result.NumDiff++
|
||||
rf |= reportUnequal
|
||||
}
|
||||
}
|
||||
for _, r := range s.reporters {
|
||||
r.Report(Result{flags: rf})
|
||||
}
|
||||
}
|
||||
|
||||
// recChecker tracks the state needed to periodically perform checks that
|
||||
// user provided transformers are not stuck in an infinitely recursive cycle.
|
||||
type recChecker struct{ next int }
|
||||
|
||||
// Check scans the Path for any recursive transformers and panics when any
|
||||
// recursive transformers are detected. Note that the presence of a
|
||||
// recursive Transformer does not necessarily imply an infinite cycle.
|
||||
// As such, this check only activates after some minimal number of path steps.
|
||||
func (rc *recChecker) Check(p Path) {
|
||||
const minLen = 1 << 16
|
||||
if rc.next == 0 {
|
||||
rc.next = minLen
|
||||
}
|
||||
if len(p) < rc.next {
|
||||
return
|
||||
}
|
||||
rc.next <<= 1
|
||||
|
||||
// Check whether the same transformer has appeared at least twice.
|
||||
var ss []string
|
||||
m := map[Option]int{}
|
||||
for _, ps := range p {
|
||||
if t, ok := ps.(Transform); ok {
|
||||
t := t.Option()
|
||||
if m[t] == 1 { // Transformer was used exactly once before
|
||||
tf := t.(*transformer).fnc.Type()
|
||||
ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
|
||||
}
|
||||
m[t]++
|
||||
}
|
||||
}
|
||||
if len(ss) > 0 {
|
||||
const warning = "recursive set of Transformers detected"
|
||||
const help = "consider using cmpopts.AcyclicTransformer"
|
||||
set := strings.Join(ss, "\n\t")
|
||||
panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
|
||||
}
|
||||
}
|
||||
|
||||
// dynChecker tracks the state needed to periodically perform checks that
|
||||
// user provided functions are symmetric and deterministic.
|
||||
// The zero value is safe for immediate use.
|
||||
type dynChecker struct{ curr, next int }
|
||||
|
||||
// Next increments the state and reports whether a check should be performed.
|
||||
//
|
||||
// Checks occur every Nth function call, where N is a triangular number:
|
||||
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
|
||||
// See https://en.wikipedia.org/wiki/Triangular_number
|
||||
//
|
||||
// This sequence ensures that the cost of checks drops significantly as
|
||||
// the number of function calls grows larger.
|
||||
func (dc *dynChecker) Next() bool {
|
||||
ok := dc.curr == dc.next
|
||||
if ok {
|
||||
dc.curr = 0
|
||||
dc.next++
|
||||
}
|
||||
dc.curr++
|
||||
return ok
|
||||
}
|
||||
|
||||
// makeAddressable returns a value that is always addressable.
|
||||
// It returns the input verbatim if it is already addressable,
|
||||
// otherwise it creates a new value and returns an addressable copy.
|
||||
func makeAddressable(v reflect.Value) reflect.Value {
|
||||
if v.CanAddr() {
|
||||
return v
|
||||
}
|
||||
vc := reflect.New(v.Type()).Elem()
|
||||
vc.Set(v)
|
||||
return vc
|
||||
}
|
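A test-style sketch of the Equal and Diff entry points defined above; the EquateApprox option comes from the sibling cmpopts package, and the point type and tolerance are assumptions made for illustration only.

package example_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type point struct{ X, Y float64 }

func TestPointsApproxEqual(t *testing.T) {
	got := point{X: 1, Y: 2.0000001}
	want := point{X: 1, Y: 2}

	// Treat float64 values within an absolute margin of 1e-6 as equal.
	opt := cmpopts.EquateApprox(0, 1e-6)

	if diff := cmp.Diff(want, got, opt); diff != "" {
		t.Errorf("points mismatch (-want +got):\n%s", diff)
	}
}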
15
vendor/github.com/google/go-cmp/cmp/export_panic.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build purego
|
||||
|
||||
package cmp
|
||||
|
||||
import "reflect"
|
||||
|
||||
const supportExporters = false
|
||||
|
||||
func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
|
||||
panic("no support for forcibly accessing unexported fields")
|
||||
}
|
35
vendor/github.com/google/go-cmp/cmp/export_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !purego
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const supportExporters = true
|
||||
|
||||
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
|
||||
// a struct such that the value has read-write permissions.
|
||||
//
|
||||
// The parent struct, v, must be addressable, while f must be a StructField
|
||||
// describing the field to retrieve. If addr is false,
|
||||
// then the returned value will be shallow copied to be non-addressable.
|
||||
func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
|
||||
ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
|
||||
if !addr {
|
||||
// A field is addressable if and only if the struct is addressable.
|
||||
// If the original parent value was not addressable, shallow copy the
|
||||
// value to make it non-addressable to avoid leaking an implementation
|
||||
// detail of how forcibly exporting a field works.
|
||||
if ve.Kind() == reflect.Interface && ve.IsNil() {
|
||||
return reflect.Zero(f.Type)
|
||||
}
|
||||
return reflect.ValueOf(ve.Interface()).Convert(f.Type)
|
||||
}
|
||||
return ve
|
||||
}
|
17
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
var debug debugger
|
||||
|
||||
type debugger struct{}
|
||||
|
||||
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
|
||||
return f
|
||||
}
|
||||
func (debugger) Update() {}
|
||||
func (debugger) Finish() {}
|
122
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
generated
vendored
Normal file
@ -0,0 +1,122 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The algorithm can be seen running in real-time by enabling debugging:
|
||||
// go test -tags=cmp_debug -v
|
||||
//
|
||||
// Example output:
|
||||
// === RUN TestDifference/#34
|
||||
// ┌───────────────────────────────┐
|
||||
// │ \ · · · · · · · · · · · · · · │
|
||||
// │ · # · · · · · · · · · · · · · │
|
||||
// │ · \ · · · · · · · · · · · · · │
|
||||
// │ · · \ · · · · · · · · · · · · │
|
||||
// │ · · · X # · · · · · · · · · · │
|
||||
// │ · · · # \ · · · · · · · · · · │
|
||||
// │ · · · · · # # · · · · · · · · │
|
||||
// │ · · · · · # \ · · · · · · · · │
|
||||
// │ · · · · · · · \ · · · · · · · │
|
||||
// │ · · · · · · · · \ · · · · · · │
|
||||
// │ · · · · · · · · · \ · · · · · │
|
||||
// │ · · · · · · · · · · \ · · # · │
|
||||
// │ · · · · · · · · · · · \ # # · │
|
||||
// │ · · · · · · · · · · · # # # · │
|
||||
// │ · · · · · · · · · · # # # # · │
|
||||
// │ · · · · · · · · · # # # # # · │
|
||||
// │ · · · · · · · · · · · · · · \ │
|
||||
// └───────────────────────────────┘
|
||||
// [.Y..M.XY......YXYXY.|]
|
||||
//
|
||||
// The grid represents the edit-graph where the horizontal axis represents
|
||||
// list X and the vertical axis represents list Y. The start of the two lists
|
||||
// is the top-left, while the ends are the bottom-right. The '·' represents
|
||||
// an unexplored node in the graph. The '\' indicates that the two symbols
|
||||
// from list X and Y are equal. The 'X' indicates that two symbols are similar
|
||||
// (but not exactly equal) to each other. The '#' indicates that the two symbols
|
||||
// are different (and not similar). The algorithm traverses this graph trying to
|
||||
// make the paths starting in the top-left and the bottom-right connect.
|
||||
//
|
||||
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
|
||||
// the currently established path from the forward and reverse searches,
|
||||
// separated by a '|' character.
|
||||
|
||||
const (
|
||||
updateDelay = 100 * time.Millisecond
|
||||
finishDelay = 500 * time.Millisecond
|
||||
ansiTerminal = true // ANSI escape codes used to move terminal cursor
|
||||
)
|
||||
|
||||
var debug debugger
|
||||
|
||||
type debugger struct {
|
||||
sync.Mutex
|
||||
p1, p2 EditScript
|
||||
fwdPath, revPath *EditScript
|
||||
grid []byte
|
||||
lines int
|
||||
}
|
||||
|
||||
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
|
||||
dbg.Lock()
|
||||
dbg.fwdPath, dbg.revPath = p1, p2
|
||||
top := "┌─" + strings.Repeat("──", nx) + "┐\n"
|
||||
row := "│ " + strings.Repeat("· ", nx) + "│\n"
|
||||
btm := "└─" + strings.Repeat("──", nx) + "┘\n"
|
||||
dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
|
||||
dbg.lines = strings.Count(dbg.String(), "\n")
|
||||
fmt.Print(dbg)
|
||||
|
||||
// Wrap the EqualFunc so that we can intercept each result.
|
||||
return func(ix, iy int) (r Result) {
|
||||
cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
|
||||
for i := range cell {
|
||||
cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
|
||||
}
|
||||
switch r = f(ix, iy); {
|
||||
case r.Equal():
|
||||
cell[0] = '\\'
|
||||
case r.Similar():
|
||||
cell[0] = 'X'
|
||||
default:
|
||||
cell[0] = '#'
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (dbg *debugger) Update() {
|
||||
dbg.print(updateDelay)
|
||||
}
|
||||
|
||||
func (dbg *debugger) Finish() {
|
||||
dbg.print(finishDelay)
|
||||
dbg.Unlock()
|
||||
}
|
||||
|
||||
func (dbg *debugger) String() string {
|
||||
dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
|
||||
for i := len(*dbg.revPath) - 1; i >= 0; i-- {
|
||||
dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
|
||||
}
|
||||
return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
|
||||
}
|
||||
|
||||
func (dbg *debugger) print(d time.Duration) {
|
||||
if ansiTerminal {
|
||||
fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
|
||||
}
|
||||
fmt.Print(dbg)
|
||||
time.Sleep(d)
|
||||
}
|
398
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
Normal file
@ -0,0 +1,398 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package diff implements an algorithm for producing edit-scripts.
|
||||
// The edit-script is a sequence of operations needed to transform one list
|
||||
// of symbols into another (or vice-versa). The edits allowed are insertions,
|
||||
// deletions, and modifications. The summation of all edits is called the
|
||||
// Levenshtein distance as this problem is well-known in computer science.
|
||||
//
|
||||
// This package prioritizes performance over accuracy. That is, the run time
|
||||
// is more important than obtaining a minimal Levenshtein distance.
|
||||
package diff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
// EditType represents a single operation within an edit-script.
|
||||
type EditType uint8
|
||||
|
||||
const (
|
||||
// Identity indicates that a symbol pair is identical in both list X and Y.
|
||||
Identity EditType = iota
|
||||
// UniqueX indicates that a symbol only exists in X and not Y.
|
||||
UniqueX
|
||||
// UniqueY indicates that a symbol only exists in Y and not X.
|
||||
UniqueY
|
||||
// Modified indicates that a symbol pair is a modification of each other.
|
||||
Modified
|
||||
)
|
||||
|
||||
// EditScript represents the series of differences between two lists.
|
||||
type EditScript []EditType
|
||||
|
||||
// String returns a human-readable string representing the edit-script where
|
||||
// Identity, UniqueX, UniqueY, and Modified are represented by the
|
||||
// '.', 'X', 'Y', and 'M' characters, respectively.
|
||||
func (es EditScript) String() string {
|
||||
b := make([]byte, len(es))
|
||||
for i, e := range es {
|
||||
switch e {
|
||||
case Identity:
|
||||
b[i] = '.'
|
||||
case UniqueX:
|
||||
b[i] = 'X'
|
||||
case UniqueY:
|
||||
b[i] = 'Y'
|
||||
case Modified:
|
||||
b[i] = 'M'
|
||||
default:
|
||||
panic("invalid edit-type")
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// stats returns a histogram of the number of each type of edit operation.
|
||||
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case Identity:
|
||||
s.NI++
|
||||
case UniqueX:
|
||||
s.NX++
|
||||
case UniqueY:
|
||||
s.NY++
|
||||
case Modified:
|
||||
s.NM++
|
||||
default:
|
||||
panic("invalid edit-type")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
|
||||
// lists X and Y are equal.
|
||||
func (es EditScript) Dist() int { return len(es) - es.stats().NI }
|
||||
|
||||
// LenX is the length of the X list.
|
||||
func (es EditScript) LenX() int { return len(es) - es.stats().NY }
|
||||
|
||||
// LenY is the length of the Y list.
|
||||
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
|
||||
|
||||
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
|
||||
// When called by Difference, the index is guaranteed to be within nx and ny.
|
||||
type EqualFunc func(ix int, iy int) Result
|
||||
|
||||
// Result is the result of comparison.
|
||||
// NumSame is the number of sub-elements that are equal.
|
||||
// NumDiff is the number of sub-elements that are not equal.
|
||||
type Result struct{ NumSame, NumDiff int }
|
||||
|
||||
// BoolResult returns a Result that is either Equal or not Equal.
|
||||
func BoolResult(b bool) Result {
|
||||
if b {
|
||||
return Result{NumSame: 1} // Equal, Similar
|
||||
} else {
|
||||
return Result{NumDiff: 2} // Not Equal, not Similar
|
||||
}
|
||||
}
|
||||
|
||||
// Equal indicates whether the symbols are equal. Two symbols are equal
|
||||
// if and only if NumDiff == 0. If Equal, then they are also Similar.
|
||||
func (r Result) Equal() bool { return r.NumDiff == 0 }
|
||||
|
||||
// Similar indicates whether two symbols are similar and may be represented
|
||||
// by using the Modified type. As a special case, we consider binary comparisons
|
||||
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
|
||||
//
|
||||
// The exact ratio of NumSame to NumDiff to determine similarity may change.
|
||||
func (r Result) Similar() bool {
|
||||
// Use NumSame+1 to offset NumSame so that binary comparisons are similar.
|
||||
return r.NumSame+1 >= r.NumDiff
|
||||
}
|
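// Illustrative values for the NumSame+1 >= NumDiff heuristic above:
//	Result{NumSame: 0, NumDiff: 1}: 0+1 >= 1, so Similar (binary mismatch)
//	Result{NumSame: 1, NumDiff: 2}: 1+1 >= 2, so Similar
//	Result{NumSame: 1, NumDiff: 3}: 1+1 < 3, so not Similar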
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
// Difference reports whether two lists of lengths nx and ny are equal
|
||||
// given the definition of equality provided as f.
|
||||
//
|
||||
// This function returns an edit-script, which is a sequence of operations
|
||||
// needed to convert one list into the other. The following invariants for
|
||||
// the edit-script are maintained:
|
||||
// • eq == (es.Dist()==0)
|
||||
// • nx == es.LenX()
|
||||
// • ny == es.LenY()
|
||||
//
|
||||
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
|
||||
// produces an edit-script with a minimal Levenshtein distance). This algorithm
|
||||
// favors performance over optimality. The exact output is not guaranteed to
|
||||
// be stable and may change over time.
|
||||
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
// This algorithm is based on traversing what is known as an "edit-graph".
|
||||
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
|
||||
// by Eugene W. Myers. Since D can be as large as N itself, this is
|
||||
// effectively O(N^2). Unlike the algorithm from that paper, we are not
|
||||
// interested in the optimal path, but at least some "decent" path.
|
||||
//
|
||||
// For example, let X and Y be lists of symbols:
|
||||
// X = [A B C A B B A]
|
||||
// Y = [C B A B A C]
|
||||
//
|
||||
// The edit-graph can be drawn as the following:
|
||||
// A B C A B B A
|
||||
// ┌─────────────┐
|
||||
// C │_|_|\|_|_|_|_│ 0
|
||||
// B │_|\|_|_|\|\|_│ 1
|
||||
// A │\|_|_|\|_|_|\│ 2
|
||||
// B │_|\|_|_|\|\|_│ 3
|
||||
// A │\|_|_|\|_|_|\│ 4
|
||||
// C │ | |\| | | | │ 5
|
||||
// └─────────────┘ 6
|
||||
// 0 1 2 3 4 5 6 7
|
||||
//
|
||||
// List X is written along the horizontal axis, while list Y is written
|
||||
// along the vertical axis. At any point on this grid, if the symbol in
|
||||
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
|
||||
// The goal of any minimal edit-script algorithm is to find a path from the
|
||||
// top-left corner to the bottom-right corner, while traveling through the
|
||||
// fewest horizontal or vertical edges.
|
||||
// A horizontal edge is equivalent to inserting a symbol from list X.
|
||||
// A vertical edge is equivalent to inserting a symbol from list Y.
|
||||
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
||||
|
||||
// Invariants:
|
||||
// • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
||||
// • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
||||
//
|
||||
// In general:
|
||||
// • fwdFrontier.X < revFrontier.X
|
||||
// • fwdFrontier.Y < revFrontier.Y
|
||||
// Unless, it is time for the algorithm to terminate.
|
||||
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
|
||||
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
|
||||
fwdFrontier := fwdPath.point // Forward search frontier
|
||||
revFrontier := revPath.point // Reverse search frontier
|
||||
|
||||
// Search budget bounds the cost of searching for better paths.
|
||||
// The longest sequence of non-matching symbols that can be tolerated is
|
||||
// approximately the square-root of the search budget.
|
||||
searchBudget := 4 * (nx + ny) // O(n)
|
||||
|
||||
// Running the tests with the "cmp_debug" build tag prints a visualization
|
||||
// of the algorithm running in real-time. This is educational for
|
||||
// understanding how the algorithm works. See debug_enable.go.
|
||||
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
|
||||
|
||||
// The algorithm below is a greedy, meet-in-the-middle algorithm for
|
||||
// computing sub-optimal edit-scripts between two lists.
|
||||
//
|
||||
// The algorithm is approximately as follows:
|
||||
// • Searching for differences switches back-and-forth between
|
||||
// a search that starts at the beginning (the top-left corner), and
|
||||
// a search that starts at the end (the bottom-right corner). The goal of
|
||||
// the search is to connect with the search from the opposite corner.
|
||||
// • As we search, we build a path in a greedy manner, where the first
|
||||
// match seen is added to the path (this is sub-optimal, but provides a
|
||||
// decent result in practice). When matches are found, we try the next pair
|
||||
// of symbols in the lists and follow all matches as far as possible.
|
||||
// • When searching for matches, we search along a diagonal going through
|
||||
// through the "frontier" point. If no matches are found, we advance the
|
||||
// frontier towards the opposite corner.
|
||||
// • This algorithm terminates when either the X coordinates or the
|
||||
// Y coordinates of the forward and reverse frontier points ever intersect.
|
||||
|
||||
// This algorithm is correct even if searching only in the forward direction
|
||||
// or in the reverse direction. We do both because it is commonly observed
|
||||
// that two lists commonly differ because elements were added to the front
|
||||
// or end of the other list.
|
||||
//
|
||||
// Non-deterministically start with either the forward or reverse direction
|
||||
// to introduce some deliberate instability so that we have the flexibility
|
||||
// to change this algorithm in the future.
|
||||
if flags.Deterministic || randBool {
|
||||
goto forwardSearch
|
||||
} else {
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
forwardSearch:
|
||||
{
|
||||
// Forward search from the beginning.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
|
||||
switch {
|
||||
case p.X >= revPath.X || p.Y < fwdPath.Y:
|
||||
stop1 = true // Hit top-right corner
|
||||
case p.Y >= revPath.Y || p.X < fwdPath.X:
|
||||
stop2 = true // Hit bottom-left corner
|
||||
case f(p.X, p.Y).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
fwdPath.connect(p, f)
|
||||
fwdPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(fwdPath.X, fwdPath.Y).Equal() {
|
||||
break
|
||||
}
|
||||
fwdPath.append(Identity)
|
||||
}
|
||||
fwdFrontier = fwdPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards reverse point.
|
||||
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
|
||||
fwdFrontier.X++
|
||||
} else {
|
||||
fwdFrontier.Y++
|
||||
}
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
reverseSearch:
|
||||
{
|
||||
// Reverse search from the end.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{revFrontier.X - z, revFrontier.Y + z}
|
||||
switch {
|
||||
case fwdPath.X >= p.X || revPath.Y < p.Y:
|
||||
stop1 = true // Hit bottom-left corner
|
||||
case fwdPath.Y >= p.Y || revPath.X < p.X:
|
||||
stop2 = true // Hit top-right corner
|
||||
case f(p.X-1, p.Y-1).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
revPath.connect(p, f)
|
||||
revPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(revPath.X-1, revPath.Y-1).Equal() {
|
||||
break
|
||||
}
|
||||
revPath.append(Identity)
|
||||
}
|
||||
revFrontier = revPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards forward point.
|
||||
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
|
||||
revFrontier.X--
|
||||
} else {
|
||||
revFrontier.Y--
|
||||
}
|
||||
goto forwardSearch
|
||||
}
|
||||
|
||||
finishSearch:
|
||||
// Join the forward and reverse paths and then append the reverse path.
|
||||
fwdPath.connect(revPath.point, f)
|
||||
for i := len(revPath.es) - 1; i >= 0; i-- {
|
||||
t := revPath.es[i]
|
||||
revPath.es = revPath.es[:i]
|
||||
fwdPath.append(t)
|
||||
}
|
||||
debug.Finish()
|
||||
return fwdPath.es
|
||||
}
|
||||
|
||||
type path struct {
|
||||
dir int // +1 if forward, -1 if reverse
|
||||
point // Leading point of the EditScript path
|
||||
es EditScript
|
||||
}
|
||||
|
||||
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
|
||||
// to the edit-script to connect p.point to dst.
|
||||
func (p *path) connect(dst point, f EqualFunc) {
|
||||
if p.dir > 0 {
|
||||
// Connect in forward direction.
|
||||
for dst.X > p.X && dst.Y > p.Y {
|
||||
switch r := f(p.X, p.Y); {
|
||||
case r.Equal():
|
||||
p.append(Identity)
|
||||
case r.Similar():
|
||||
p.append(Modified)
|
||||
case dst.X-p.X >= dst.Y-p.Y:
|
||||
p.append(UniqueX)
|
||||
default:
|
||||
p.append(UniqueY)
|
||||
}
|
||||
}
|
||||
for dst.X > p.X {
|
||||
p.append(UniqueX)
|
||||
}
|
||||
for dst.Y > p.Y {
|
||||
p.append(UniqueY)
|
||||
}
|
||||
} else {
|
||||
// Connect in reverse direction.
|
||||
for p.X > dst.X && p.Y > dst.Y {
|
||||
switch r := f(p.X-1, p.Y-1); {
|
||||
case r.Equal():
|
||||
p.append(Identity)
|
||||
case r.Similar():
|
||||
p.append(Modified)
|
||||
case p.Y-dst.Y >= p.X-dst.X:
|
||||
p.append(UniqueY)
|
||||
default:
|
||||
p.append(UniqueX)
|
||||
}
|
||||
}
|
||||
for p.X > dst.X {
|
||||
p.append(UniqueX)
|
||||
}
|
||||
for p.Y > dst.Y {
|
||||
p.append(UniqueY)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *path) append(t EditType) {
|
||||
p.es = append(p.es, t)
|
||||
switch t {
|
||||
case Identity, Modified:
|
||||
p.add(p.dir, p.dir)
|
||||
case UniqueX:
|
||||
p.add(p.dir, 0)
|
||||
case UniqueY:
|
||||
p.add(0, p.dir)
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
|
||||
type point struct{ X, Y int }
|
||||
|
||||
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
|
||||
|
||||
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
|
||||
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
|
||||
func zigzag(x int) int {
|
||||
if x&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x >> 1
|
||||
}
|
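A minimal, hypothetical sketch of driving the package above (not part of the vendored file; diffStrings and exampleDiff are illustrative names): the caller hands Difference only the two lengths and an EqualFunc, and the symbols themselves stay behind the closure.

package diff

import "fmt"

// diffStrings builds an edit-script between two string slices using the
// exported API above.
func diffStrings(x, y []string) EditScript {
	return Difference(len(x), len(y), func(ix, iy int) Result {
		return BoolResult(x[ix] == y[iy])
	})
}

// exampleDiff prints the script (rendered with '.', 'X', 'Y', 'M'), its
// edit distance, and checks the documented length invariants.
func exampleDiff() {
	x := []string{"A", "B", "C", "A", "B", "B", "A"}
	y := []string{"C", "B", "A", "B", "A", "C"}
	es := diffStrings(x, y)
	fmt.Println(es.String(), es.Dist(), es.LenX() == len(x), es.LenY() == len(y))
}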
9
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flags

// Deterministic controls whether the output of Diff should be deterministic.
// This is only used for testing.
var Deterministic bool
10
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.10

package flags

// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
const AtLeastGo110 = false
10
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
// Copyright 2019, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.10

package flags

// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
const AtLeastGo110 = true
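The two toolchain files above are a standard build-constraint pair: exactly one of them is compiled into any given build, so AtLeastGo110 is always a compile-time constant. A minimal sketch, assuming a hypothetical consumer inside this module, of branching on it:

package cmp

import "github.com/google/go-cmp/cmp/internal/flags"

// useNewReflectBehavior reports whether the code may rely on reflect
// behavior introduced in Go 1.10; the answer is fixed at build time by
// whichever of the two files above the go1.10 constraint selected.
func useNewReflectBehavior() bool {
	return flags.AtLeastGo110
}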
99
vendor/github.com/google/go-cmp/cmp/internal/function/func.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package function provides functionality for identifying function types.
|
||||
package function
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type funcType int
|
||||
|
||||
const (
|
||||
_ funcType = iota
|
||||
|
||||
tbFunc // func(T) bool
|
||||
ttbFunc // func(T, T) bool
|
||||
trbFunc // func(T, R) bool
|
||||
tibFunc // func(T, I) bool
|
||||
trFunc // func(T) R
|
||||
|
||||
Equal = ttbFunc // func(T, T) bool
|
||||
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
|
||||
Transformer = trFunc // func(T) R
|
||||
ValueFilter = ttbFunc // func(T, T) bool
|
||||
Less = ttbFunc // func(T, T) bool
|
||||
ValuePredicate = tbFunc // func(T) bool
|
||||
KeyValuePredicate = trbFunc // func(T, R) bool
|
||||
)
|
||||
|
||||
var boolType = reflect.TypeOf(true)
|
||||
|
||||
// IsType reports whether the reflect.Type is of the specified function type.
|
||||
func IsType(t reflect.Type, ft funcType) bool {
|
||||
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
|
||||
return false
|
||||
}
|
||||
ni, no := t.NumIn(), t.NumOut()
|
||||
switch ft {
|
||||
case tbFunc: // func(T) bool
|
||||
if ni == 1 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case ttbFunc: // func(T, T) bool
|
||||
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trbFunc: // func(T, R) bool
|
||||
if ni == 2 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case tibFunc: // func(T, I) bool
|
||||
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trFunc: // func(T) R
|
||||
if ni == 1 && no == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
|
||||
|
||||
// NameOf returns the name of the function value.
|
||||
func NameOf(v reflect.Value) string {
|
||||
fnc := runtime.FuncForPC(v.Pointer())
|
||||
if fnc == nil {
|
||||
return "<unknown>"
|
||||
}
|
||||
fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
|
||||
|
||||
// Method closures have a "-fm" suffix.
|
||||
fullName = strings.TrimSuffix(fullName, "-fm")
|
||||
|
||||
var name string
|
||||
for len(fullName) > 0 {
|
||||
inParen := strings.HasSuffix(fullName, ")")
|
||||
fullName = strings.TrimSuffix(fullName, ")")
|
||||
|
||||
s := lastIdentRx.FindString(fullName)
|
||||
if s == "" {
|
||||
break
|
||||
}
|
||||
name = s + "." + name
|
||||
fullName = strings.TrimSuffix(fullName, s)
|
||||
|
||||
if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
|
||||
fullName = fullName[:i]
|
||||
}
|
||||
fullName = strings.TrimSuffix(fullName, ".")
|
||||
}
|
||||
return strings.TrimSuffix(name, ".")
|
||||
}
|
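As a usage sketch for the helpers above (hypothetical code placed in the same package; the expected outputs are illustrative): IsType classifies a function signature against one of the named shapes, and NameOf recovers a readable name from a function value.

package function

import (
	"fmt"
	"reflect"
	"strings"
)

// exampleNameOf drives IsType and NameOf on a standard-library function.
func exampleNameOf() {
	v := reflect.ValueOf(strings.EqualFold) // func(s, t string) bool
	fmt.Println(IsType(v.Type(), Equal))    // expected: true, matches func(T, T) bool
	fmt.Println(NameOf(v))                  // expected: strings.EqualFold
}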
157
vendor/github.com/google/go-cmp/cmp/internal/value/name.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
||||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// TypeString is nearly identical to reflect.Type.String,
|
||||
// but has an additional option to specify that full type names be used.
|
||||
func TypeString(t reflect.Type, qualified bool) string {
|
||||
return string(appendTypeName(nil, t, qualified, false))
|
||||
}
|
||||
|
||||
func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
|
||||
// BUG: Go reflection provides no way to disambiguate two named types
|
||||
// of the same name and within the same package,
|
||||
// but declared within the namespace of different functions.
|
||||
|
||||
// Named type.
|
||||
if t.Name() != "" {
|
||||
if qualified && t.PkgPath() != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, t.PkgPath()...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
b = append(b, t.Name()...)
|
||||
} else {
|
||||
b = append(b, t.String()...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Unnamed type.
|
||||
switch k := t.Kind(); k {
|
||||
case reflect.Bool, reflect.String, reflect.UnsafePointer,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
b = append(b, k.String()...)
|
||||
case reflect.Chan:
|
||||
if t.ChanDir() == reflect.RecvDir {
|
||||
b = append(b, "<-"...)
|
||||
}
|
||||
b = append(b, "chan"...)
|
||||
if t.ChanDir() == reflect.SendDir {
|
||||
b = append(b, "<-"...)
|
||||
}
|
||||
b = append(b, ' ')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Func:
|
||||
if !elideFunc {
|
||||
b = append(b, "func"...)
|
||||
}
|
||||
b = append(b, '(')
|
||||
for i := 0; i < t.NumIn(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
if i == t.NumIn()-1 && t.IsVariadic() {
|
||||
b = append(b, "..."...)
|
||||
b = appendTypeName(b, t.In(i).Elem(), qualified, false)
|
||||
} else {
|
||||
b = appendTypeName(b, t.In(i), qualified, false)
|
||||
}
|
||||
}
|
||||
b = append(b, ')')
|
||||
switch t.NumOut() {
|
||||
case 0:
|
||||
// Do nothing
|
||||
case 1:
|
||||
b = append(b, ' ')
|
||||
b = appendTypeName(b, t.Out(0), qualified, false)
|
||||
default:
|
||||
b = append(b, " ("...)
|
||||
for i := 0; i < t.NumOut(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
b = appendTypeName(b, t.Out(i), qualified, false)
|
||||
}
|
||||
b = append(b, ')')
|
||||
}
|
||||
case reflect.Struct:
|
||||
b = append(b, "struct{ "...)
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, "; "...)
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if !sf.Anonymous {
|
||||
if qualified && sf.PkgPath != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, sf.PkgPath...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
}
|
||||
b = append(b, sf.Name...)
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = appendTypeName(b, sf.Type, qualified, false)
|
||||
if sf.Tag != "" {
|
||||
b = append(b, ' ')
|
||||
b = strconv.AppendQuote(b, string(sf.Tag))
|
||||
}
|
||||
}
|
||||
if b[len(b)-1] == ' ' {
|
||||
b = b[:len(b)-1]
|
||||
} else {
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = append(b, '}')
|
||||
case reflect.Slice, reflect.Array:
|
||||
b = append(b, '[')
|
||||
if k == reflect.Array {
|
||||
b = strconv.AppendUint(b, uint64(t.Len()), 10)
|
||||
}
|
||||
b = append(b, ']')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Map:
|
||||
b = append(b, "map["...)
|
||||
b = appendTypeName(b, t.Key(), qualified, false)
|
||||
b = append(b, ']')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Ptr:
|
||||
b = append(b, '*')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Interface:
|
||||
b = append(b, "interface{ "...)
|
||||
for i := 0; i < t.NumMethod(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, "; "...)
|
||||
}
|
||||
m := t.Method(i)
|
||||
if qualified && m.PkgPath != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, m.PkgPath...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
}
|
||||
b = append(b, m.Name...)
|
||||
b = appendTypeName(b, m.Type, qualified, true)
|
||||
}
|
||||
if b[len(b)-1] == ' ' {
|
||||
b = b[:len(b)-1]
|
||||
} else {
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = append(b, '}')
|
||||
default:
|
||||
panic("invalid kind: " + k.String())
|
||||
}
|
||||
return b
|
||||
}
|
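A small, hypothetical illustration of the qualified flag above: with qualified=false the result matches reflect.Type.String, while qualified=true spells out package paths in quotes (the exact path depends on the declaring package, so the comments are indicative only).

package value

import (
	"fmt"
	"reflect"
)

// exampleTypeString contrasts the two modes of TypeString.
func exampleTypeString() {
	type pair struct{ X, Y int }
	t := reflect.TypeOf(map[string][]pair{})
	fmt.Println(TypeString(t, false)) // e.g. map[string][]value.pair
	fmt.Println(TypeString(t, true))  // e.g. map[string][]"<package path>".pair
}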
33
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build purego

package value

import "reflect"

// Pointer is an opaque typed pointer and is guaranteed to be comparable.
type Pointer struct {
	p uintptr
	t reflect.Type
}

// PointerOf returns a Pointer from v, which must be a
// reflect.Ptr, reflect.Slice, or reflect.Map.
func PointerOf(v reflect.Value) Pointer {
	// NOTE: Storing a pointer as an uintptr is technically incorrect as it
	// assumes that the GC implementation does not use a moving collector.
	return Pointer{v.Pointer(), v.Type()}
}

// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
	return p.p == 0
}

// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
	return p.p
}
36
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
// Copyright 2018, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !purego

package value

import (
	"reflect"
	"unsafe"
)

// Pointer is an opaque typed pointer and is guaranteed to be comparable.
type Pointer struct {
	p unsafe.Pointer
	t reflect.Type
}

// PointerOf returns a Pointer from v, which must be a
// reflect.Ptr, reflect.Slice, or reflect.Map.
func PointerOf(v reflect.Value) Pointer {
	// The proper representation of a pointer is unsafe.Pointer,
	// which is necessary if the GC ever uses a moving collector.
	return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
}

// IsNil reports whether the pointer is nil.
func (p Pointer) IsNil() bool {
	return p.p == nil
}

// Uintptr returns the pointer as a uintptr.
func (p Pointer) Uintptr() uintptr {
	return uintptr(p.p)
}
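Because Pointer is comparable in both the purego and unsafe variants above, it can be used directly as a map key. A minimal sketch, assuming a hypothetical visited helper in this package, of how that supports cycle detection during traversal:

package value

import "reflect"

// visited reports whether v is nil or has been seen before,
// recording it in seen otherwise. v must be a Ptr, Slice, or Map.
func visited(seen map[Pointer]bool, v reflect.Value) bool {
	p := PointerOf(v)
	if p.IsNil() || seen[p] {
		return true
	}
	seen[p] = true
	return false
}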
106
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
generated
vendored
Normal file
@ -0,0 +1,106 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
|
||||
// The type of each value must be comparable.
|
||||
func SortKeys(vs []reflect.Value) []reflect.Value {
|
||||
if len(vs) == 0 {
|
||||
return vs
|
||||
}
|
||||
|
||||
// Sort the map keys.
|
||||
sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
|
||||
|
||||
// Deduplicate keys (fails for NaNs).
|
||||
vs2 := vs[:1]
|
||||
for _, v := range vs[1:] {
|
||||
if isLess(vs2[len(vs2)-1], v) {
|
||||
vs2 = append(vs2, v)
|
||||
}
|
||||
}
|
||||
return vs2
|
||||
}
|
||||
|
||||
// isLess is a generic function for sorting arbitrary map keys.
|
||||
// The inputs must be of the same type and must be comparable.
|
||||
func isLess(x, y reflect.Value) bool {
|
||||
switch x.Type().Kind() {
|
||||
case reflect.Bool:
|
||||
return !x.Bool() && y.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return x.Int() < y.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return x.Uint() < y.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
// NOTE: This does not sort -0 as less than +0
|
||||
// since Go maps treat -0 and +0 as equal keys.
|
||||
fx, fy := x.Float(), y.Float()
|
||||
return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
cx, cy := x.Complex(), y.Complex()
|
||||
rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
|
||||
if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
|
||||
return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
|
||||
}
|
||||
return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
|
||||
case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
|
||||
return x.Pointer() < y.Pointer()
|
||||
case reflect.String:
|
||||
return x.String() < y.String()
|
||||
case reflect.Array:
|
||||
for i := 0; i < x.Len(); i++ {
|
||||
if isLess(x.Index(i), y.Index(i)) {
|
||||
return true
|
||||
}
|
||||
if isLess(y.Index(i), x.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
case reflect.Struct:
|
||||
for i := 0; i < x.NumField(); i++ {
|
||||
if isLess(x.Field(i), y.Field(i)) {
|
||||
return true
|
||||
}
|
||||
if isLess(y.Field(i), x.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
case reflect.Interface:
|
||||
vx, vy := x.Elem(), y.Elem()
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
return !vx.IsValid() && vy.IsValid()
|
||||
}
|
||||
tx, ty := vx.Type(), vy.Type()
|
||||
if tx == ty {
|
||||
return isLess(x.Elem(), y.Elem())
|
||||
}
|
||||
if tx.Kind() != ty.Kind() {
|
||||
return vx.Kind() < vy.Kind()
|
||||
}
|
||||
if tx.String() != ty.String() {
|
||||
return tx.String() < ty.String()
|
||||
}
|
||||
if tx.PkgPath() != ty.PkgPath() {
|
||||
return tx.PkgPath() < ty.PkgPath()
|
||||
}
|
||||
// This can happen in rare situations, so we fallback to just comparing
|
||||
// the unique pointer for a reflect.Type. This guarantees deterministic
|
||||
// ordering within a program, but it is obviously not stable.
|
||||
return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
|
||||
default:
|
||||
// Must be Func, Map, or Slice; which are not comparable.
|
||||
panic(fmt.Sprintf("%T is not comparable", x.Type()))
|
||||
}
|
||||
}
|
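A short, hypothetical driver for SortKeys above, giving map iteration a deterministic order regardless of Go's randomized map traversal:

package value

import (
	"fmt"
	"reflect"
)

// exampleSortKeys prints a map in sorted key order.
func exampleSortKeys() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}
	v := reflect.ValueOf(m)
	for _, k := range SortKeys(v.MapKeys()) {
		fmt.Println(k.String(), v.MapIndex(k).Int()) // a 1, b 2, c 3
	}
}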
48
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
generated
vendored
Normal file
@ -0,0 +1,48 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package value

import (
	"math"
	"reflect"
)

// IsZero reports whether v is the zero value.
// This does not rely on Interface and so can be used on unexported fields.
func IsZero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Bool:
		return v.Bool() == false
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return math.Float64bits(v.Float()) == 0
	case reflect.Complex64, reflect.Complex128:
		return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
	case reflect.String:
		return v.String() == ""
	case reflect.UnsafePointer:
		return v.Pointer() == 0
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
		return v.IsNil()
	case reflect.Array:
		for i := 0; i < v.Len(); i++ {
			if !IsZero(v.Index(i)) {
				return false
			}
		}
		return true
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			if !IsZero(v.Field(i)) {
				return false
			}
		}
		return true
	}
	return false
}
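A hypothetical illustration of IsZero above: because it never calls Interface, it also works on values read from unexported struct fields, where Value.Interface would panic.

package value

import (
	"fmt"
	"reflect"
)

// exampleIsZero checks zero-ness of an unexported field.
func exampleIsZero() {
	type inner struct{ n int }
	type outer struct{ hidden inner }

	fmt.Println(IsZero(reflect.ValueOf(outer{}).Field(0)))                    // true
	fmt.Println(IsZero(reflect.ValueOf(outer{hidden: inner{n: 1}}).Field(0))) // false
}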
552
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
Normal file
@ -0,0 +1,552 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
)
|
||||
|
||||
// Option configures for specific behavior of Equal and Diff. In particular,
|
||||
// the fundamental Option functions (Ignore, Transformer, and Comparer),
|
||||
// configure how equality is determined.
|
||||
//
|
||||
// The fundamental options may be composed with filters (FilterPath and
|
||||
// FilterValues) to control the scope over which they are applied.
|
||||
//
|
||||
// The cmp/cmpopts package provides helper functions for creating options that
|
||||
// may be used with Equal and Diff.
|
||||
type Option interface {
|
||||
// filter applies all filters and returns the option that remains.
|
||||
// Each option may only read s.curPath and call s.callTTBFunc.
|
||||
//
|
||||
// An Options is returned only if multiple comparers or transformers
|
||||
// can apply simultaneously and will only contain values of those types
|
||||
// or sub-Options containing values of those types.
|
||||
filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
|
||||
}
|
||||
|
||||
// applicableOption represents the following types:
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Grouping: Options
|
||||
type applicableOption interface {
|
||||
Option
|
||||
|
||||
// apply executes the option, which may mutate s or panic.
|
||||
apply(s *state, vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
// coreOption represents the following types:
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Filters: *pathFilter | *valuesFilter
|
||||
type coreOption interface {
|
||||
Option
|
||||
isCore()
|
||||
}
|
||||
|
||||
type core struct{}
|
||||
|
||||
func (core) isCore() {}
|
||||
|
||||
// Options is a list of Option values that also satisfies the Option interface.
|
||||
// Helper comparison packages may return an Options value when packing multiple
|
||||
// Option values into a single Option. When this package processes an Options,
|
||||
// it will be implicitly expanded into a flat list.
|
||||
//
|
||||
// Applying a filter on an Options is equivalent to applying that same filter
|
||||
// on all individual options held within.
|
||||
type Options []Option
|
||||
|
||||
func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
|
||||
for _, opt := range opts {
|
||||
switch opt := opt.filter(s, t, vx, vy); opt.(type) {
|
||||
case ignore:
|
||||
return ignore{} // Only ignore can short-circuit evaluation
|
||||
case validator:
|
||||
out = validator{} // Takes precedence over comparer or transformer
|
||||
case *comparer, *transformer, Options:
|
||||
switch out.(type) {
|
||||
case nil:
|
||||
out = opt
|
||||
case validator:
|
||||
// Keep validator
|
||||
case *comparer, *transformer, Options:
|
||||
out = Options{out, opt} // Conflicting comparers or transformers
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (opts Options) apply(s *state, _, _ reflect.Value) {
|
||||
const warning = "ambiguous set of applicable options"
|
||||
const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
|
||||
var ss []string
|
||||
for _, opt := range flattenOptions(nil, opts) {
|
||||
ss = append(ss, fmt.Sprint(opt))
|
||||
}
|
||||
set := strings.Join(ss, "\n\t")
|
||||
panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
|
||||
}
|
||||
|
||||
func (opts Options) String() string {
|
||||
var ss []string
|
||||
for _, opt := range opts {
|
||||
ss = append(ss, fmt.Sprint(opt))
|
||||
}
|
||||
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
|
||||
}
|
||||
|
||||
// FilterPath returns a new Option where opt is only evaluated if filter f
|
||||
// returns true for the current Path in the value tree.
|
||||
//
|
||||
// This filter is called even if a slice element or map entry is missing and
|
||||
// provides an opportunity to ignore such cases. The filter function must be
|
||||
// symmetric such that the filter result is identical regardless of whether the
|
||||
// missing value is from x or y.
|
||||
//
|
||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
||||
// a previously filtered Option.
|
||||
func FilterPath(f func(Path) bool, opt Option) Option {
|
||||
if f == nil {
|
||||
panic("invalid path filter function")
|
||||
}
|
||||
if opt := normalizeOption(opt); opt != nil {
|
||||
return &pathFilter{fnc: f, opt: opt}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type pathFilter struct {
|
||||
core
|
||||
fnc func(Path) bool
|
||||
opt Option
|
||||
}
|
||||
|
||||
func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if f.fnc(s.curPath) {
|
||||
return f.opt.filter(s, t, vx, vy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f pathFilter) String() string {
|
||||
return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
|
||||
}
|
||||
|
||||
// FilterValues returns a new Option where opt is only evaluated if filter f,
|
||||
// which is a function of the form "func(T, T) bool", returns true for the
|
||||
// current pair of values being compared. If either value is invalid or
|
||||
// the type of the values is not assignable to T, then this filter implicitly
|
||||
// returns false.
|
||||
//
|
||||
// The filter function must be
|
||||
// symmetric (i.e., agnostic to the order of the inputs) and
|
||||
// deterministic (i.e., produces the same result when given the same inputs).
|
||||
// If T is an interface, it is possible that f is called with two values with
|
||||
// different concrete types that both implement T.
|
||||
//
|
||||
// The option passed in may be an Ignore, Transformer, Comparer, Options, or
|
||||
// a previously filtered Option.
|
||||
func FilterValues(f interface{}, opt Option) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid values filter function: %T", f))
|
||||
}
|
||||
if opt := normalizeOption(opt); opt != nil {
|
||||
vf := &valuesFilter{fnc: v, opt: opt}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
vf.typ = ti
|
||||
}
|
||||
return vf
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type valuesFilter struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
opt Option
|
||||
}
|
||||
|
||||
func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
|
||||
return nil
|
||||
}
|
||||
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
|
||||
return f.opt.filter(s, t, vx, vy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f valuesFilter) String() string {
|
||||
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
|
||||
}
|
||||
|
||||
// Ignore is an Option that causes all comparisons to be ignored.
|
||||
// This value is intended to be combined with FilterPath or FilterValues.
|
||||
// It is an error to pass an unfiltered Ignore option to Equal.
|
||||
func Ignore() Option { return ignore{} }
|
||||
|
||||
type ignore struct{ core }
|
||||
|
||||
func (ignore) isFiltered() bool { return false }
|
||||
func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
|
||||
func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
|
||||
func (ignore) String() string { return "Ignore()" }
|
||||
|
||||
// validator is a sentinel Option type to indicate that some options could not
|
||||
// be evaluated due to unexported fields, missing slice elements, or
|
||||
// missing map entries. Both values are validator only for unexported fields.
|
||||
type validator struct{ core }
|
||||
|
||||
func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
return validator{}
|
||||
}
|
||||
if !vx.CanInterface() || !vy.CanInterface() {
|
||||
return validator{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (validator) apply(s *state, vx, vy reflect.Value) {
|
||||
// Implies missing slice element or map entry.
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
s.report(vx.IsValid() == vy.IsValid(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Unable to Interface implies unexported field without visibility access.
|
||||
if !vx.CanInterface() || !vy.CanInterface() {
|
||||
help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
|
||||
var name string
|
||||
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
|
||||
// Named type with unexported fields.
|
||||
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
|
||||
if _, ok := reflect.New(t).Interface().(error); ok {
|
||||
help = "consider using cmpopts.EquateErrors to compare error values"
|
||||
}
|
||||
} else {
|
||||
// Unnamed type with unexported fields. Derive PkgPath from field.
|
||||
var pkgPath string
|
||||
for i := 0; i < t.NumField() && pkgPath == ""; i++ {
|
||||
pkgPath = t.Field(i).PkgPath
|
||||
}
|
||||
name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
|
||||
}
|
||||
panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
|
||||
}
|
||||
|
||||
panic("not reachable")
|
||||
}
|
||||
|
||||
// identRx represents a valid identifier according to the Go specification.
|
||||
const identRx = `[_\p{L}][_\p{L}\p{N}]*`
|
||||
|
||||
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
|
||||
|
||||
// Transformer returns an Option that applies a transformation function that
|
||||
// converts values of a certain type into that of another.
|
||||
//
|
||||
// The transformer f must be a function "func(T) R" that converts values of
|
||||
// type T to those of type R and is implicitly filtered to input values
|
||||
// assignable to T. The transformer must not mutate T in any way.
|
||||
//
|
||||
// To help prevent some cases of infinite recursive cycles applying the
|
||||
// same transform to the output of itself (e.g., in the case where the
|
||||
// input and output types are the same), an implicit filter is added such that
|
||||
// a transformer is applicable only if that exact transformer is not already
|
||||
// in the tail of the Path since the last non-Transform step.
|
||||
// For situations where the implicit filter is still insufficient,
|
||||
// consider using cmpopts.AcyclicTransformer, which adds a filter
|
||||
// to prevent the transformer from being recursively applied upon itself.
|
||||
//
|
||||
// The name is a user provided label that is used as the Transform.Name in the
|
||||
// transformation PathStep (and eventually shown in the Diff output).
|
||||
// The name must be a valid identifier or qualified identifier in Go syntax.
|
||||
// If empty, an arbitrary name is used.
|
||||
func Transformer(name string, f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid transformer function: %T", f))
|
||||
}
|
||||
if name == "" {
|
||||
name = function.NameOf(v)
|
||||
if !identsRx.MatchString(name) {
|
||||
name = "λ" // Lambda-symbol as placeholder name
|
||||
}
|
||||
} else if !identsRx.MatchString(name) {
|
||||
panic(fmt.Sprintf("invalid name: %q", name))
|
||||
}
|
||||
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
tr.typ = ti
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
type transformer struct {
|
||||
core
|
||||
name string
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T) R
|
||||
}
|
||||
|
||||
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
|
||||
|
||||
func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
for i := len(s.curPath) - 1; i >= 0; i-- {
|
||||
if t, ok := s.curPath[i].(Transform); !ok {
|
||||
break // Hit most recent non-Transform step
|
||||
} else if tr == t.trans {
|
||||
return nil // Cannot directly use same Transform
|
||||
}
|
||||
}
|
||||
if tr.typ == nil || t.AssignableTo(tr.typ) {
|
||||
return tr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
|
||||
step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
|
||||
vvx := s.callTRFunc(tr.fnc, vx, step)
|
||||
vvy := s.callTRFunc(tr.fnc, vy, step)
|
||||
step.vx, step.vy = vvx, vvy
|
||||
s.compareAny(step)
|
||||
}
|
||||
|
||||
func (tr transformer) String() string {
|
||||
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
|
||||
}
|
||||
|
||||
// Comparer returns an Option that determines whether two values are equal
|
||||
// to each other.
|
||||
//
|
||||
// The comparer f must be a function "func(T, T) bool" and is implicitly
|
||||
// filtered to input values assignable to T. If T is an interface, it is
|
||||
// possible that f is called with two values of different concrete types that
|
||||
// both implement T.
|
||||
//
|
||||
// The equality function must be:
|
||||
// • Symmetric: equal(x, y) == equal(y, x)
|
||||
// • Deterministic: equal(x, y) == equal(x, y)
|
||||
// • Pure: equal(x, y) does not modify x or y
|
||||
func Comparer(f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid comparer function: %T", f))
|
||||
}
|
||||
cm := &comparer{fnc: v}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
cm.typ = ti
|
||||
}
|
||||
return cm
|
||||
}
|
||||
|
||||
type comparer struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
}
|
||||
|
||||
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
|
||||
|
||||
func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
if cm.typ == nil || t.AssignableTo(cm.typ) {
|
||||
return cm
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
|
||||
eq := s.callTTBFunc(cm.fnc, vx, vy)
|
||||
s.report(eq, reportByFunc)
|
||||
}
|
||||
|
||||
func (cm comparer) String() string {
|
||||
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
|
||||
}
|
||||
|
||||
// Exporter returns an Option that specifies whether Equal is allowed to
|
||||
// introspect into the unexported fields of certain struct types.
|
||||
//
|
||||
// Users of this option must understand that comparing on unexported fields
|
||||
// from external packages is not safe since changes in the internal
|
||||
// implementation of some external package may cause the result of Equal
|
||||
// to unexpectedly change. However, it may be valid to use this option on types
|
||||
// defined in an internal package where the semantic meaning of an unexported
|
||||
// field is in the control of the user.
|
||||
//
|
||||
// In many cases, a custom Comparer should be used instead that defines
|
||||
// equality as a function of the public API of a type rather than the underlying
|
||||
// unexported implementation.
|
||||
//
|
||||
// For example, the reflect.Type documentation defines equality to be determined
|
||||
// by the == operator on the interface (essentially performing a shallow pointer
|
||||
// comparison) and most attempts to compare *regexp.Regexp types are interested
|
||||
// in only checking that the regular expression strings are equal.
|
||||
// Both of these are accomplished using Comparers:
|
||||
//
|
||||
// Comparer(func(x, y reflect.Type) bool { return x == y })
|
||||
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
|
||||
//
|
||||
// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore
|
||||
// all unexported fields on specified struct types.
|
||||
func Exporter(f func(reflect.Type) bool) Option {
|
||||
if !supportExporters {
|
||||
panic("Exporter is not supported on purego builds")
|
||||
}
|
||||
return exporter(f)
|
||||
}
|
||||
|
||||
type exporter func(reflect.Type) bool
|
||||
|
||||
func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// AllowUnexported returns an Options that allows Equal to forcibly introspect
|
||||
// unexported fields of the specified struct types.
|
||||
//
|
||||
// See Exporter for the proper use of this option.
|
||||
func AllowUnexported(types ...interface{}) Option {
|
||||
m := make(map[reflect.Type]bool)
|
||||
for _, typ := range types {
|
||||
t := reflect.TypeOf(typ)
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("invalid struct type: %T", typ))
|
||||
}
|
||||
m[t] = true
|
||||
}
|
||||
return exporter(func(t reflect.Type) bool { return m[t] })
|
||||
}
|
||||
|
||||
// Result represents the comparison result for a single node and
|
||||
// is provided by cmp when calling Result (see Reporter).
|
||||
type Result struct {
|
||||
_ [0]func() // Make Result incomparable
|
||||
flags resultFlags
|
||||
}
|
||||
|
||||
// Equal reports whether the node was determined to be equal or not.
|
||||
// As a special case, ignored nodes are considered equal.
|
||||
func (r Result) Equal() bool {
|
||||
return r.flags&(reportEqual|reportByIgnore) != 0
|
||||
}
|
||||
|
||||
// ByIgnore reports whether the node is equal because it was ignored.
|
||||
// This never reports true if Equal reports false.
|
||||
func (r Result) ByIgnore() bool {
|
||||
return r.flags&reportByIgnore != 0
|
||||
}
|
||||
|
||||
// ByMethod reports whether the Equal method determined equality.
|
||||
func (r Result) ByMethod() bool {
|
||||
return r.flags&reportByMethod != 0
|
||||
}
|
||||
|
||||
// ByFunc reports whether a Comparer function determined equality.
|
||||
func (r Result) ByFunc() bool {
|
||||
return r.flags&reportByFunc != 0
|
||||
}
|
||||
|
||||
// ByCycle reports whether a reference cycle was detected.
|
||||
func (r Result) ByCycle() bool {
|
||||
return r.flags&reportByCycle != 0
|
||||
}
|
||||
|
||||
type resultFlags uint
|
||||
|
||||
const (
|
||||
_ resultFlags = (1 << iota) / 2
|
||||
|
||||
reportEqual
|
||||
reportUnequal
|
||||
reportByIgnore
|
||||
reportByMethod
|
||||
reportByFunc
|
||||
reportByCycle
|
||||
)
|
||||
|
||||
// Reporter is an Option that can be passed to Equal. When Equal traverses
|
||||
// the value trees, it calls PushStep as it descends into each node in the
|
||||
// tree and PopStep as it ascends out of the node. The leaves of the tree are
|
||||
// either compared (determined to be equal or not equal) or ignored and reported
|
||||
// as such by calling the Report method.
|
||||
func Reporter(r interface {
|
||||
// PushStep is called when a tree-traversal operation is performed.
|
||||
// The PathStep itself is only valid until the step is popped.
|
||||
// The PathStep.Values are valid for the duration of the entire traversal
|
||||
// and must not be mutated.
|
||||
//
|
||||
// Equal always calls PushStep at the start to provide an operation-less
|
||||
// PathStep used to report the root values.
|
||||
//
|
||||
// Within a slice, the exact set of inserted, removed, or modified elements
|
||||
// is unspecified and may change in future implementations.
|
||||
// The entries of a map are iterated through in an unspecified order.
|
||||
PushStep(PathStep)
|
||||
|
||||
// Report is called exactly once on leaf nodes to report whether the
|
||||
// comparison identified the node as equal, unequal, or ignored.
|
||||
// A leaf node is one that is immediately preceded by and followed by
|
||||
// a pair of PushStep and PopStep calls.
|
||||
Report(Result)
|
||||
|
||||
// PopStep ascends back up the value tree.
|
||||
// There is always a matching pop call for every push call.
|
||||
PopStep()
|
||||
}) Option {
|
||||
return reporter{r}
|
||||
}
|
||||
|
||||
type reporter struct{ reporterIface }
|
||||
type reporterIface interface {
|
||||
PushStep(PathStep)
|
||||
Report(Result)
|
||||
PopStep()
|
||||
}
|
||||
|
||||
func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
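As a usage sketch for the Reporter option defined above, a hedged example in a hypothetical consumer package that records the path of every unequal leaf via the PushStep/Report/PopStep contract (diffReporter is an illustrative name):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter records the path and values of every node reported unequal.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%v: %v != %v", r.path, vx, vy))
	}
}

func main() {
	var r diffReporter
	cmp.Equal([]int{1, 2, 3}, []int{1, 0, 3}, cmp.Reporter(&r))
	fmt.Println(r.diffs) // one entry, for the element at index 1
}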
||||
|
||||
// normalizeOption normalizes the input options such that all Options groups
|
||||
// are flattened and groups with a single element are reduced to that element.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func normalizeOption(src Option) Option {
|
||||
switch opts := flattenOptions(nil, Options{src}); len(opts) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return opts[0]
|
||||
default:
|
||||
return opts
|
||||
}
|
||||
}
|
||||
|
||||
// flattenOptions copies all options in src to dst as a flat list.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func flattenOptions(dst, src Options) Options {
|
||||
for _, opt := range src {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
continue
|
||||
case Options:
|
||||
dst = flattenOptions(dst, opt)
|
||||
case coreOption:
|
||||
dst = append(dst, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid option type: %T", opt))
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
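// Illustrative sketch, not part of the vendored file: nested Options groups
// are flattened, so the two option sets below behave identically when passed
// to Equal. The opt1, opt2, and opt3 identifiers are hypothetical
// placeholders for concrete options.
//
//	nested := cmp.Options{cmp.Options{opt1, opt2}, opt3}
//	flat := cmp.Options{opt1, opt2, opt3}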
|
378 vendor/github.com/google/go-cmp/cmp/path.go generated vendored Normal file
@ -0,0 +1,378 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// Path is a list of PathSteps describing the sequence of operations to get
|
||||
// from some root type to the current position in the value tree.
|
||||
// The first Path element is always an operation-less PathStep that exists
|
||||
// simply to identify the initial type.
|
||||
//
|
||||
// When traversing structs with embedded structs, the embedded struct will
|
||||
// always be accessed as a field before traversing the fields of the
|
||||
// embedded struct themselves. That is, an exported field from the
|
||||
// embedded struct will never be accessed directly from the parent struct.
|
||||
type Path []PathStep
|
||||
|
||||
// PathStep is a union-type for specific operations to traverse
|
||||
// a value's tree structure. Users of this package never need to implement
|
||||
// this interface, as values of these types are returned by this package.
|
||||
//
|
||||
// Implementations of this interface are
|
||||
// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
|
||||
type PathStep interface {
|
||||
String() string
|
||||
|
||||
// Type is the resulting type after performing the path step.
|
||||
Type() reflect.Type
|
||||
|
||||
// Values is the resulting values after performing the path step.
|
||||
// The type of each valid value is guaranteed to be identical to Type.
|
||||
//
|
||||
// In some cases, one or both may be invalid or have restrictions:
|
||||
// • For StructField, both are not interface-able if the current field
|
||||
// is unexported and the struct type is not explicitly permitted by
|
||||
// an Exporter to traverse unexported fields.
|
||||
// • For SliceIndex, one may be invalid if an element is missing from
|
||||
// either the x or y slice.
|
||||
// • For MapIndex, one may be invalid if an entry is missing from
|
||||
// either the x or y map.
|
||||
//
|
||||
// The provided values must not be mutated.
|
||||
Values() (vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
var (
|
||||
_ PathStep = StructField{}
|
||||
_ PathStep = SliceIndex{}
|
||||
_ PathStep = MapIndex{}
|
||||
_ PathStep = Indirect{}
|
||||
_ PathStep = TypeAssertion{}
|
||||
_ PathStep = Transform{}
|
||||
)
|
||||
|
||||
func (pa *Path) push(s PathStep) {
|
||||
*pa = append(*pa, s)
|
||||
}
|
||||
|
||||
func (pa *Path) pop() {
|
||||
*pa = (*pa)[:len(*pa)-1]
|
||||
}
|
||||
|
||||
// Last returns the last PathStep in the Path.
|
||||
// If the path is empty, this returns a non-nil PathStep that reports a nil Type.
|
||||
func (pa Path) Last() PathStep {
|
||||
return pa.Index(-1)
|
||||
}
|
||||
|
||||
// Index returns the ith step in the Path and supports negative indexing.
|
||||
// A negative index starts counting from the tail of the Path such that -1
|
||||
// refers to the last step, -2 refers to the second-to-last step, and so on.
|
||||
// If index is invalid, this returns a non-nil PathStep that reports a nil Type.
|
||||
func (pa Path) Index(i int) PathStep {
|
||||
if i < 0 {
|
||||
i = len(pa) + i
|
||||
}
|
||||
if i < 0 || i >= len(pa) {
|
||||
return pathStep{}
|
||||
}
|
||||
return pa[i]
|
||||
}
|
||||
|
||||
// String returns the simplified path to a node.
|
||||
// The simplified path only contains struct field accesses.
|
||||
//
|
||||
// For example:
|
||||
// MyMap.MySlices.MyField
|
||||
func (pa Path) String() string {
|
||||
var ss []string
|
||||
for _, s := range pa {
|
||||
if _, ok := s.(StructField); ok {
|
||||
ss = append(ss, s.String())
|
||||
}
|
||||
}
|
||||
return strings.TrimPrefix(strings.Join(ss, ""), ".")
|
||||
}
|
||||
|
||||
// GoString returns the path to a specific node using Go syntax.
|
||||
//
|
||||
// For example:
|
||||
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
||||
func (pa Path) GoString() string {
|
||||
var ssPre, ssPost []string
|
||||
var numIndirect int
|
||||
for i, s := range pa {
|
||||
var nextStep PathStep
|
||||
if i+1 < len(pa) {
|
||||
nextStep = pa[i+1]
|
||||
}
|
||||
switch s := s.(type) {
|
||||
case Indirect:
|
||||
numIndirect++
|
||||
pPre, pPost := "(", ")"
|
||||
switch nextStep.(type) {
|
||||
case Indirect:
|
||||
continue // Next step is indirection, so let them batch up
|
||||
case StructField:
|
||||
numIndirect-- // Automatic indirection on struct fields
|
||||
case nil:
|
||||
pPre, pPost = "", "" // Last step; no need for parenthesis
|
||||
}
|
||||
if numIndirect > 0 {
|
||||
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
|
||||
ssPost = append(ssPost, pPost)
|
||||
}
|
||||
numIndirect = 0
|
||||
continue
|
||||
case Transform:
|
||||
ssPre = append(ssPre, s.trans.name+"(")
|
||||
ssPost = append(ssPost, ")")
|
||||
continue
|
||||
}
|
||||
ssPost = append(ssPost, s.String())
|
||||
}
|
||||
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
|
||||
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
|
||||
}
|
||||
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
|
||||
}
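// Illustrative sketch, not part of the vendored file: a Path is commonly
// inspected inside a FilterPath predicate. This hypothetical option ignores
// any struct field named "Secret", wherever it appears in the value tree.
//
//	ignoreSecret := cmp.FilterPath(func(p cmp.Path) bool {
//		sf, ok := p.Last().(cmp.StructField)
//		return ok && sf.Name() == "Secret"
//	}, cmp.Ignore())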
|
||||
|
||||
type pathStep struct {
|
||||
typ reflect.Type
|
||||
vx, vy reflect.Value
|
||||
}
|
||||
|
||||
func (ps pathStep) Type() reflect.Type { return ps.typ }
|
||||
func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
|
||||
func (ps pathStep) String() string {
|
||||
if ps.typ == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
s := ps.typ.String()
|
||||
if s == "" || strings.ContainsAny(s, "{}\n") {
|
||||
return "root" // Type too simple or complex to print
|
||||
}
|
||||
return fmt.Sprintf("{%s}", s)
|
||||
}
|
||||
|
||||
// StructField represents a struct field access on a field called Name.
|
||||
type StructField struct{ *structField }
|
||||
type structField struct {
|
||||
pathStep
|
||||
name string
|
||||
idx int
|
||||
|
||||
// These fields are used for forcibly accessing an unexported field.
|
||||
// pvx, pvy, and field are only valid if unexported is true.
|
||||
unexported bool
|
||||
mayForce bool // Forcibly allow visibility
|
||||
paddr bool // Was parent addressable?
|
||||
pvx, pvy reflect.Value // Parent values (always addressable)
|
||||
field reflect.StructField // Field information
|
||||
}
|
||||
|
||||
func (sf StructField) Type() reflect.Type { return sf.typ }
|
||||
func (sf StructField) Values() (vx, vy reflect.Value) {
|
||||
if !sf.unexported {
|
||||
return sf.vx, sf.vy // CanInterface reports true
|
||||
}
|
||||
|
||||
// Forcibly obtain read-write access to an unexported struct field.
|
||||
if sf.mayForce {
|
||||
vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
|
||||
vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
|
||||
return vx, vy // CanInterface reports true
|
||||
}
|
||||
return sf.vx, sf.vy // CanInterface reports false
|
||||
}
|
||||
func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
|
||||
|
||||
// Name is the field name.
|
||||
func (sf StructField) Name() string { return sf.name }
|
||||
|
||||
// Index is the index of the field in the parent struct type.
|
||||
// See reflect.Type.Field.
|
||||
func (sf StructField) Index() int { return sf.idx }
|
||||
|
||||
// SliceIndex is an index operation on a slice or array at some index Key.
|
||||
type SliceIndex struct{ *sliceIndex }
|
||||
type sliceIndex struct {
|
||||
pathStep
|
||||
xkey, ykey int
|
||||
isSlice bool // False for reflect.Array
|
||||
}
|
||||
|
||||
func (si SliceIndex) Type() reflect.Type { return si.typ }
|
||||
func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
|
||||
func (si SliceIndex) String() string {
|
||||
switch {
|
||||
case si.xkey == si.ykey:
|
||||
return fmt.Sprintf("[%d]", si.xkey)
|
||||
case si.ykey == -1:
|
||||
// [5->?] means "I don't know where X[5] went"
|
||||
return fmt.Sprintf("[%d->?]", si.xkey)
|
||||
case si.xkey == -1:
|
||||
// [?->3] means "I don't know where Y[3] came from"
|
||||
return fmt.Sprintf("[?->%d]", si.ykey)
|
||||
default:
|
||||
// [5->3] means "X[5] moved to Y[3]"
|
||||
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
|
||||
}
|
||||
}
|
||||
|
||||
// Key is the index key; it may return -1 if in a split state.
|
||||
func (si SliceIndex) Key() int {
|
||||
if si.xkey != si.ykey {
|
||||
return -1
|
||||
}
|
||||
return si.xkey
|
||||
}
|
||||
|
||||
// SplitKeys are the indexes for indexing into slices in the
|
||||
// x and y values, respectively. These indexes may differ due to the
|
||||
// insertion or removal of an element in one of the slices, causing
|
||||
// all of the indexes to be shifted. If an index is -1, then that
|
||||
// indicates that the element does not exist in the associated slice.
|
||||
//
|
||||
// Key is guaranteed to return -1 if and only if the indexes returned
|
||||
// by SplitKeys are not the same. SplitKeys will never return -1 for
|
||||
// both indexes.
|
||||
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
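// Illustrative sketch, not part of the vendored file: how a reporter might
// interpret a SliceIndex step. In a "split" state Key reports -1 and
// SplitKeys reveals which side is missing; ps is a hypothetical cmp.PathStep.
//
//	if si, ok := ps.(cmp.SliceIndex); ok {
//		ix, iy := si.SplitKeys()
//		switch {
//		case iy == -1:
//			fmt.Printf("x[%d] was removed\n", ix)
//		case ix == -1:
//			fmt.Printf("y[%d] was inserted\n", iy)
//		default:
//			fmt.Printf("x[%d] pairs with y[%d]\n", ix, iy)
//		}
//	}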
|
||||
|
||||
// MapIndex is an index operation on a map at some index Key.
|
||||
type MapIndex struct{ *mapIndex }
|
||||
type mapIndex struct {
|
||||
pathStep
|
||||
key reflect.Value
|
||||
}
|
||||
|
||||
func (mi MapIndex) Type() reflect.Type { return mi.typ }
|
||||
func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
|
||||
func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
|
||||
|
||||
// Key is the value of the map key.
|
||||
func (mi MapIndex) Key() reflect.Value { return mi.key }
|
||||
|
||||
// Indirect represents pointer indirection on the parent type.
|
||||
type Indirect struct{ *indirect }
|
||||
type indirect struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (in Indirect) Type() reflect.Type { return in.typ }
|
||||
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
|
||||
func (in Indirect) String() string { return "*" }
|
||||
|
||||
// TypeAssertion represents a type assertion on an interface.
|
||||
type TypeAssertion struct{ *typeAssertion }
|
||||
type typeAssertion struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
|
||||
|
||||
// Transform is a transformation from the parent type to the current type.
|
||||
type Transform struct{ *transform }
|
||||
type transform struct {
|
||||
pathStep
|
||||
trans *transformer
|
||||
}
|
||||
|
||||
func (tf Transform) Type() reflect.Type { return tf.typ }
|
||||
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
|
||||
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
|
||||
|
||||
// Name is the name of the Transformer.
|
||||
func (tf Transform) Name() string { return tf.trans.name }
|
||||
|
||||
// Func is the function pointer to the transformer function.
|
||||
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
|
||||
|
||||
// Option returns the originally constructed Transformer option.
|
||||
// The == operator can be used to detect the exact option used.
|
||||
func (tf Transform) Option() Option { return tf.trans }
|
||||
|
||||
// pointerPath represents a dual-stack of pointers encountered when
|
||||
// recursively traversing the x and y values. This data structure supports
|
||||
// detection of cycles and determining whether the cycles are equal.
|
||||
// In Go, cycles can occur via pointers, slices, and maps.
|
||||
//
|
||||
// The pointerPath uses a map to represent a stack, where descent into a
|
||||
// pointer pushes the address onto the stack, and ascension from a pointer
|
||||
// pops the address from the stack. Thus, when traversing into a pointer from
|
||||
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
|
||||
// by checking whether the pointer has already been visited. The cycle detection
|
||||
// uses a separate stack for the x and y values.
|
||||
//
|
||||
// If a cycle is detected we need to determine whether the two pointers
|
||||
// should be considered equal. The definition of equality chosen by Equal
|
||||
// requires two graphs to have the same structure. To determine this, both the
|
||||
// x and y values must have a cycle where the previous pointers were also
|
||||
// encountered together as a pair.
|
||||
//
|
||||
// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
|
||||
// MapIndex with pointer information for the x and y values.
|
||||
// Suppose px and py are two pointers to compare, we then search the
|
||||
// Path for whether px was ever encountered in the Path history of x, and
|
||||
// similarly so with py. If either side has a cycle, the comparison is only
|
||||
// equal if both px and py have a cycle resulting from the same PathStep.
|
||||
//
|
||||
// Using a map as a stack is more performant as we can perform cycle detection
|
||||
// in O(1) instead of O(N) where N is len(Path).
|
||||
type pointerPath struct {
|
||||
// mx is keyed by x pointers, where the value is the associated y pointer.
|
||||
mx map[value.Pointer]value.Pointer
|
||||
// my is keyed by y pointers, where the value is the associated x pointer.
|
||||
my map[value.Pointer]value.Pointer
|
||||
}
|
||||
|
||||
func (p *pointerPath) Init() {
|
||||
p.mx = make(map[value.Pointer]value.Pointer)
|
||||
p.my = make(map[value.Pointer]value.Pointer)
|
||||
}
|
||||
|
||||
// Push indicates intent to descend into pointers vx and vy where
|
||||
// visited reports whether either has been seen before. If visited before,
|
||||
// equal reports whether both pointers were encountered together.
|
||||
// Pop must be called if and only if the pointers were never visited.
|
||||
//
|
||||
// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
|
||||
// and be non-nil.
|
||||
func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
|
||||
px := value.PointerOf(vx)
|
||||
py := value.PointerOf(vy)
|
||||
_, ok1 := p.mx[px]
|
||||
_, ok2 := p.my[py]
|
||||
if ok1 || ok2 {
|
||||
equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
|
||||
return equal, true
|
||||
}
|
||||
p.mx[px] = py
|
||||
p.my[py] = px
|
||||
return false, false
|
||||
}
|
||||
|
||||
// Pop ascends from pointers vx and vy.
|
||||
func (p pointerPath) Pop(vx, vy reflect.Value) {
|
||||
delete(p.mx, value.PointerOf(vx))
|
||||
delete(p.my, value.PointerOf(vy))
|
||||
}
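// Illustrative sketch, not part of the vendored file: the shape of the
// cycle-detection guard a traversal would place around descending into a
// pointer-like value. The curPtrs variable and reportCycle helper are
// hypothetical stand-ins for the surrounding comparison state.
//
//	if eq, visited := curPtrs.Push(vx, vy); visited {
//		reportCycle(eq) // cycle found: report whether the cycles match, then stop
//		return
//	}
//	defer curPtrs.Pop(vx, vy)
//	// ... descend into vx.Elem() and vy.Elem() ...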
|
||||
|
||||
// isExported reports whether the identifier is exported.
|
||||
func isExported(id string) bool {
|
||||
r, _ := utf8.DecodeRuneInString(id)
|
||||
return unicode.IsUpper(r)
|
||||
}
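// Illustrative examples, not part of the vendored file:
//
//	isExported("Name") // true
//	isExported("name") // false
//	isExported("ΔX")   // true: 'Δ' is an uppercase letter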
|
54 vendor/github.com/google/go-cmp/cmp/report.go generated vendored Normal file
@ -0,0 +1,54 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
// defaultReporter implements the reporter interface.
|
||||
//
|
||||
// As Equal serially calls the PushStep, Report, and PopStep methods, the
|
||||
// defaultReporter constructs a tree-based representation of the compared value
|
||||
// and the result of each comparison (see valueNode).
|
||||
//
|
||||
// When the String method is called, the FormatDiff method transforms the
|
||||
// valueNode tree into a textNode tree, which is a tree-based representation
|
||||
// of the textual output (see textNode).
|
||||
//
|
||||
// Lastly, the textNode.String method produces the final report as a string.
|
||||
type defaultReporter struct {
|
||||
root *valueNode
|
||||
curr *valueNode
|
||||
}
|
||||
|
||||
func (r *defaultReporter) PushStep(ps PathStep) {
|
||||
r.curr = r.curr.PushStep(ps)
|
||||
if r.root == nil {
|
||||
r.root = r.curr
|
||||
}
|
||||
}
|
||||
func (r *defaultReporter) Report(rs Result) {
|
||||
r.curr.Report(rs)
|
||||
}
|
||||
func (r *defaultReporter) PopStep() {
|
||||
r.curr = r.curr.PopStep()
|
||||
}
|
||||
|
||||
// String provides a full report of the differences detected as a structured
|
||||
// literal in pseudo-Go syntax. String may only be called after the entire tree
|
||||
// has been traversed.
|
||||
func (r *defaultReporter) String() string {
|
||||
assert(r.root != nil && r.curr == nil)
|
||||
if r.root.NumDiff == 0 {
|
||||
return ""
|
||||
}
|
||||
ptrs := new(pointerReferences)
|
||||
text := formatOptions{}.FormatDiff(r.root, ptrs)
|
||||
resolveReferences(text)
|
||||
return text.String()
|
||||
}
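// Illustrative sketch, not part of the vendored file: this reporter backs the
// output of Diff, which is typically consumed in tests as follows. The want
// and got values are hypothetical.
//
//	if diff := cmp.Diff(want, got); diff != "" {
//		t.Errorf("mismatch (-want +got):\n%s", diff)
//	}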
|
||||
|
||||
func assert(ok bool) {
|
||||
if !ok {
|
||||
panic("assertion failure")
|
||||
}
|
||||
}
|
432 vendor/github.com/google/go-cmp/cmp/report_compare.go generated vendored Normal file
@ -0,0 +1,432 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// numContextRecords is the number of surrounding equal records to print.
|
||||
const numContextRecords = 2
|
||||
|
||||
type diffMode byte
|
||||
|
||||
const (
|
||||
diffUnknown diffMode = 0
|
||||
diffIdentical diffMode = ' '
|
||||
diffRemoved diffMode = '-'
|
||||
diffInserted diffMode = '+'
|
||||
)
|
||||
|
||||
type typeMode int
|
||||
|
||||
const (
|
||||
// emitType always prints the type.
|
||||
emitType typeMode = iota
|
||||
// elideType never prints the type.
|
||||
elideType
|
||||
// autoType prints the type only for composite kinds
|
||||
// (i.e., structs, slices, arrays, and maps).
|
||||
autoType
|
||||
)
|
||||
|
||||
type formatOptions struct {
|
||||
// DiffMode controls the output mode of FormatDiff.
|
||||
//
|
||||
// If diffUnknown, then produce a diff of the x and y values.
|
||||
// If diffIdentical, then emit values as if they were equal.
|
||||
// If diffRemoved, then only emit x values (ignoring y values).
|
||||
// If diffInserted, then only emit y values (ignoring x values).
|
||||
DiffMode diffMode
|
||||
|
||||
// TypeMode controls whether to print the type for the current node.
|
||||
//
|
||||
// As a general rule of thumb, we always print the type of the next node
|
||||
// after an interface, and always elide the type of the next node after
|
||||
// a slice or map node.
|
||||
TypeMode typeMode
|
||||
|
||||
// formatValueOptions are options specific to printing reflect.Values.
|
||||
formatValueOptions
|
||||
}
|
||||
|
||||
func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
|
||||
opts.DiffMode = d
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
|
||||
opts.TypeMode = t
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithVerbosity(level int) formatOptions {
|
||||
opts.VerbosityLevel = level
|
||||
opts.LimitVerbosity = true
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) verbosity() uint {
|
||||
switch {
|
||||
case opts.VerbosityLevel < 0:
|
||||
return 0
|
||||
case opts.VerbosityLevel > 16:
|
||||
return 16 // some reasonable maximum to avoid shift overflow
|
||||
default:
|
||||
return uint(opts.VerbosityLevel)
|
||||
}
|
||||
}
|
||||
|
||||
const maxVerbosityPreset = 6
|
||||
|
||||
// verbosityPreset modifies the verbosity settings given an index
|
||||
// between 0 and maxVerbosityPreset, inclusive.
|
||||
func verbosityPreset(opts formatOptions, i int) formatOptions {
|
||||
opts.VerbosityLevel = int(opts.verbosity()) + 2*i
|
||||
if i > 0 {
|
||||
opts.AvoidStringer = true
|
||||
}
|
||||
if i >= maxVerbosityPreset {
|
||||
opts.PrintAddresses = true
|
||||
opts.QualifiedNames = true
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// FormatDiff converts a valueNode tree into a textNode tree, where the latter
|
||||
// is a textual representation of the differences detected in the former.
|
||||
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
opts = opts.WithVerbosity(1)
|
||||
} else if opts.verbosity() < 3 {
|
||||
opts = opts.WithVerbosity(3)
|
||||
}
|
||||
|
||||
// Check whether we have specialized formatting for this node.
|
||||
// This is not necessary, but helpful for producing more readable outputs.
|
||||
if opts.CanFormatDiffSlice(v) {
|
||||
return opts.FormatDiffSlice(v)
|
||||
}
|
||||
|
||||
var parentKind reflect.Kind
|
||||
if v.parent != nil && v.parent.TransformerName == "" {
|
||||
parentKind = v.parent.Type.Kind()
|
||||
}
|
||||
|
||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
||||
if v.MaxDepth == 0 {
|
||||
switch opts.DiffMode {
|
||||
case diffUnknown, diffIdentical:
|
||||
// Format Equal.
|
||||
if v.NumDiff == 0 {
|
||||
outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
if v.NumIgnored > 0 && v.NumSame == 0 {
|
||||
return textEllipsis
|
||||
} else if outx.Len() < outy.Len() {
|
||||
return outx
|
||||
} else {
|
||||
return outy
|
||||
}
|
||||
}
|
||||
|
||||
// Format unequal.
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
var list textList
|
||||
outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
|
||||
outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: '-', Value: outx})
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: '+', Value: outy})
|
||||
}
|
||||
return opts.WithTypeMode(emitType).FormatType(v.Type, list)
|
||||
case diffRemoved:
|
||||
return opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
case diffInserted:
|
||||
return opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
}
|
||||
|
||||
// Register slice element to support cycle detection.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
|
||||
}
|
||||
|
||||
// Descend into the child value node.
|
||||
if v.TransformerName != "" {
|
||||
out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
|
||||
return opts.FormatType(v.Type, out)
|
||||
} else {
|
||||
switch k := v.Type.Kind(); k {
|
||||
case reflect.Struct, reflect.Array, reflect.Slice:
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Map:
|
||||
// Register map to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Ptr:
|
||||
// Register pointer to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.FormatDiff(v.Value, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
case reflect.Interface:
|
||||
out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v cannot have children", k))
|
||||
}
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
|
||||
// Derive record name based on the data structure kind.
|
||||
var name string
|
||||
var formatKey func(reflect.Value) string
|
||||
switch k {
|
||||
case reflect.Struct:
|
||||
name = "field"
|
||||
opts = opts.WithTypeMode(autoType)
|
||||
formatKey = func(v reflect.Value) string { return v.String() }
|
||||
case reflect.Slice, reflect.Array:
|
||||
name = "element"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(reflect.Value) string { return "" }
|
||||
case reflect.Map:
|
||||
name = "entry"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
|
||||
}
|
||||
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
} else {
|
||||
maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
// Handle unification.
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical, diffRemoved, diffInserted:
|
||||
var list textList
|
||||
var deferredEllipsis bool // Add final "..." to indicate records were dropped
|
||||
for _, r := range recs {
|
||||
if len(list) == maxLen {
|
||||
deferredEllipsis = true
|
||||
break
|
||||
}
|
||||
|
||||
// Elide struct fields that are zero value.
|
||||
if k == reflect.Struct {
|
||||
var isZero bool
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical:
|
||||
isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
|
||||
case diffRemoved:
|
||||
isZero = value.IsZero(r.Value.ValueX)
|
||||
case diffInserted:
|
||||
isZero = value.IsZero(r.Value.ValueY)
|
||||
}
|
||||
if isZero {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Elide ignored nodes.
|
||||
if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
|
||||
deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
|
||||
if !deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
continue
|
||||
}
|
||||
if out := opts.FormatDiff(r.Value, ptrs); out != nil {
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
}
|
||||
}
|
||||
if deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case diffUnknown:
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
|
||||
// Handle differencing.
|
||||
var numDiffs int
|
||||
var list textList
|
||||
var keys []reflect.Value // invariant: len(list) == len(keys)
|
||||
groups := coalesceAdjacentRecords(name, recs)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle equal records.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing records to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numLo++
|
||||
}
|
||||
for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
|
||||
numHi++ // Avoid pointless coalescing of a single equal record
|
||||
}
|
||||
|
||||
// Format the equal values.
|
||||
for _, r := range recs[:numLo] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
for _, r := range recs[numEqual-numHi : numEqual] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
recs = recs[numEqual:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal records.
|
||||
for _, r := range recs[:ds.NumDiff()] {
|
||||
switch {
|
||||
case opts.CanFormatDiffSlice(r.Value):
|
||||
out := opts.FormatDiffSlice(r.Value)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
case r.Value.NumChildren == r.Value.MaxDepth:
|
||||
outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i)
|
||||
outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
default:
|
||||
out := opts.FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
}
|
||||
recs = recs[ds.NumDiff():]
|
||||
numDiffs += ds.NumDiff()
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(len(recs) == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
assert(len(list) == len(keys))
|
||||
|
||||
// For maps, the default formatting logic uses fmt.Stringer which may
|
||||
// produce ambiguous output. Avoid calling String to disambiguate.
|
||||
if k == reflect.Map {
|
||||
var ambiguous bool
|
||||
seenKeys := map[string]reflect.Value{}
|
||||
for i, currKey := range keys {
|
||||
if currKey.IsValid() {
|
||||
strKey := list[i].Key
|
||||
prevKey, seen := seenKeys[strKey]
|
||||
if seen && prevKey.CanInterface() && currKey.CanInterface() {
|
||||
ambiguous = prevKey.Interface() != currKey.Interface()
|
||||
if ambiguous {
|
||||
break
|
||||
}
|
||||
}
|
||||
seenKeys[strKey] = currKey
|
||||
}
|
||||
}
|
||||
if ambiguous {
|
||||
for i, k := range keys {
|
||||
if k.IsValid() {
|
||||
list[i].Key = formatMapKey(k, true, ptrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
}
|
||||
|
||||
// coalesceAdjacentRecords coalesces the list of records into groups of
|
||||
// adjacent equal, or unequal counts.
|
||||
func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
|
||||
var prevCase int // Arbitrary index into which case last occurred
|
||||
lastStats := func(i int) *diffStats {
|
||||
if prevCase != i {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevCase = i
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, r := range recs {
|
||||
switch rv := r.Value; {
|
||||
case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
|
||||
lastStats(1).NumIgnored++
|
||||
case rv.NumDiff == 0:
|
||||
lastStats(1).NumIdentical++
|
||||
case rv.NumDiff > 0 && !rv.ValueY.IsValid():
|
||||
lastStats(2).NumRemoved++
|
||||
case rv.NumDiff > 0 && !rv.ValueX.IsValid():
|
||||
lastStats(2).NumInserted++
|
||||
default:
|
||||
lastStats(2).NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
|
264 vendor/github.com/google/go-cmp/cmp/report_references.go generated vendored Normal file
@ -0,0 +1,264 @@
|
||||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
const (
|
||||
pointerDelimPrefix = "⟪"
|
||||
pointerDelimSuffix = "⟫"
|
||||
)
|
||||
|
||||
// formatPointer prints the address of the pointer.
|
||||
func formatPointer(p value.Pointer, withDelims bool) string {
|
||||
v := p.Uintptr()
|
||||
if flags.Deterministic {
|
||||
v = 0xdeadf00f // Only used for stable testing purposes
|
||||
}
|
||||
if withDelims {
|
||||
return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
|
||||
}
|
||||
return formatHex(uint64(v))
|
||||
}
|
||||
|
||||
// pointerReferences is a stack of pointers visited so far.
|
||||
type pointerReferences [][2]value.Pointer
|
||||
|
||||
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
|
||||
if deref && vx.IsValid() {
|
||||
vx = vx.Addr()
|
||||
}
|
||||
if deref && vy.IsValid() {
|
||||
vy = vy.Addr()
|
||||
}
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
|
||||
case diffRemoved:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
|
||||
case diffInserted:
|
||||
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
|
||||
}
|
||||
*ps = append(*ps, pp)
|
||||
return pp
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
|
||||
p = value.PointerOf(v)
|
||||
for _, pp := range *ps {
|
||||
if p == pp[0] || p == pp[1] {
|
||||
return p, true
|
||||
}
|
||||
}
|
||||
*ps = append(*ps, [2]value.Pointer{p, p})
|
||||
return p, false
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Pop() {
|
||||
*ps = (*ps)[:len(*ps)-1]
|
||||
}
|
||||
|
||||
// trunkReferences is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for either pointer in a pair of references.
|
||||
type trunkReferences struct{ pp [2]value.Pointer }
|
||||
|
||||
// trunkReference is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for the given pointer reference.
|
||||
type trunkReference struct{ p value.Pointer }
|
||||
|
||||
// leafReference is metadata for a textNode indicating that the value is
|
||||
// truncated as it refers to another part of the tree (i.e., a trunk).
|
||||
type leafReference struct{ p value.Pointer }
|
||||
|
||||
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
|
||||
switch {
|
||||
case pp[0].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
|
||||
case pp[1].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
case pp[0] == pp[1]:
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
default:
|
||||
return &textWrap{Value: s, Metadata: trunkReferences{pp}}
|
||||
}
|
||||
}
|
||||
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
|
||||
}
|
||||
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
|
||||
out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
|
||||
}
|
||||
|
||||
// resolveReferences walks the textNode tree searching for any leaf reference
|
||||
// metadata and resolves each against the corresponding trunk references.
|
||||
// Since pointer addresses in memory are not particularly readable to the user,
|
||||
// it replaces each pointer value with an arbitrary and unique reference ID.
|
||||
func resolveReferences(s textNode) {
|
||||
var walkNodes func(textNode, func(textNode))
|
||||
walkNodes = func(s textNode, f func(textNode)) {
|
||||
f(s)
|
||||
switch s := s.(type) {
|
||||
case *textWrap:
|
||||
walkNodes(s.Value, f)
|
||||
case textList:
|
||||
for _, r := range s {
|
||||
walkNodes(r.Value, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect all trunks and leaves with reference metadata.
|
||||
var trunks, leaves []*textWrap
|
||||
walkNodes(s, func(s textNode) {
|
||||
if s, ok := s.(*textWrap); ok {
|
||||
switch s.Metadata.(type) {
|
||||
case leafReference:
|
||||
leaves = append(leaves, s)
|
||||
case trunkReference, trunkReferences:
|
||||
trunks = append(trunks, s)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// No leaf references to resolve.
|
||||
if len(leaves) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Collect the set of all leaf references to resolve.
|
||||
leafPtrs := make(map[value.Pointer]bool)
|
||||
for _, leaf := range leaves {
|
||||
leafPtrs[leaf.Metadata.(leafReference).p] = true
|
||||
}
|
||||
|
||||
// Collect the set of trunk pointers that are always paired together.
|
||||
// This allows us to assign a single ID to both pointers for brevity.
|
||||
// If a pointer in a pair ever occurs by itself or as a different pair,
|
||||
// then the pair is broken.
|
||||
pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
|
||||
unpair := func(p value.Pointer) {
|
||||
if !pairedTrunkPtrs[p].IsNil() {
|
||||
pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
|
||||
}
|
||||
pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
unpair(p.p) // standalone pointer cannot be part of a pair
|
||||
case trunkReferences:
|
||||
p0, ok0 := pairedTrunkPtrs[p.pp[0]]
|
||||
p1, ok1 := pairedTrunkPtrs[p.pp[1]]
|
||||
switch {
|
||||
case !ok0 && !ok1:
|
||||
// Register the newly seen pair.
|
||||
pairedTrunkPtrs[p.pp[0]] = p.pp[1]
|
||||
pairedTrunkPtrs[p.pp[1]] = p.pp[0]
|
||||
case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
|
||||
// Exact pair already seen; do nothing.
|
||||
default:
|
||||
// Pair conflicts with some other pair; break all pairs.
|
||||
unpair(p.pp[0])
|
||||
unpair(p.pp[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Correlate each pointer referenced by leaves to a unique identifier,
|
||||
// and print the IDs for each trunk that matches those pointers.
|
||||
var nextID uint
|
||||
ptrIDs := make(map[value.Pointer]uint)
|
||||
newID := func() uint {
|
||||
id := nextID
|
||||
nextID++
|
||||
return id
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
if print := leafPtrs[p.p]; print {
|
||||
id, ok := ptrIDs[p.p]
|
||||
if !ok {
|
||||
id = newID()
|
||||
ptrIDs[p.p] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
}
|
||||
case trunkReferences:
|
||||
print0 := leafPtrs[p.pp[0]]
|
||||
print1 := leafPtrs[p.pp[1]]
|
||||
if print0 || print1 {
|
||||
id0, ok0 := ptrIDs[p.pp[0]]
|
||||
id1, ok1 := ptrIDs[p.pp[1]]
|
||||
isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
|
||||
if isPair {
|
||||
var id uint
|
||||
assert(ok0 == ok1) // must be seen together or not at all
|
||||
if ok0 {
|
||||
assert(id0 == id1) // must have the same ID
|
||||
id = id0
|
||||
} else {
|
||||
id = newID()
|
||||
ptrIDs[p.pp[0]] = id
|
||||
ptrIDs[p.pp[1]] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
} else {
|
||||
if print0 && !ok0 {
|
||||
id0 = newID()
|
||||
ptrIDs[p.pp[0]] = id0
|
||||
}
|
||||
if print1 && !ok1 {
|
||||
id1 = newID()
|
||||
ptrIDs[p.pp[1]] = id1
|
||||
}
|
||||
switch {
|
||||
case print0 && print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
|
||||
case print0:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
|
||||
case print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update all leaf references with the unique identifier.
|
||||
for _, leaf := range leaves {
|
||||
if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
|
||||
leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatReference(id uint) string {
|
||||
return fmt.Sprintf("ref#%d", id)
|
||||
}
|
||||
|
||||
func updateReferencePrefix(prefix, ref string) string {
|
||||
if prefix == "" {
|
||||
return pointerDelimPrefix + ref + pointerDelimSuffix
|
||||
}
|
||||
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
|
||||
return pointerDelimPrefix + ref + ": " + suffix
|
||||
}
|
402 vendor/github.com/google/go-cmp/cmp/report_reflect.go generated vendored Normal file
@ -0,0 +1,402 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
type formatValueOptions struct {
|
||||
// AvoidStringer controls whether to avoid calling custom stringer
|
||||
// methods like error.Error or fmt.Stringer.String.
|
||||
AvoidStringer bool
|
||||
|
||||
// PrintAddresses controls whether to print the address of all pointers,
|
||||
// slice elements, and maps.
|
||||
PrintAddresses bool
|
||||
|
||||
// QualifiedNames controls whether FormatType uses the fully qualified name
|
||||
// (including the full package path as opposed to just the package name).
|
||||
QualifiedNames bool
|
||||
|
||||
// VerbosityLevel controls the amount of output to produce.
|
||||
// A higher value produces more output. A value of zero or lower produces
|
||||
// no output (represented using an ellipsis).
|
||||
// If LimitVerbosity is false, then the level is treated as infinite.
|
||||
VerbosityLevel int
|
||||
|
||||
// LimitVerbosity specifies that formatting should respect VerbosityLevel.
|
||||
LimitVerbosity bool
|
||||
}
|
||||
|
||||
// FormatType prints the type as if it were wrapping s.
|
||||
// This may return s as-is depending on the current type and TypeMode mode.
|
||||
func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
|
||||
// Check whether to emit the type or not.
|
||||
switch opts.TypeMode {
|
||||
case autoType:
|
||||
switch t.Kind() {
|
||||
case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
|
||||
if s.Equal(textNil) {
|
||||
return s
|
||||
}
|
||||
default:
|
||||
return s
|
||||
}
|
||||
if opts.DiffMode == diffIdentical {
|
||||
return s // elide type for identical nodes
|
||||
}
|
||||
case elideType:
|
||||
return s
|
||||
}
|
||||
|
||||
// Determine the type label, applying special handling for unnamed types.
|
||||
typeName := value.TypeString(t, opts.QualifiedNames)
|
||||
if t.Name() == "" {
|
||||
// According to Go grammar, certain type literals contain symbols that
|
||||
// do not strongly bind to the next lexicographical token (e.g., *T).
|
||||
switch t.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr:
|
||||
typeName = "(" + typeName + ")"
|
||||
}
|
||||
}
|
||||
return &textWrap{Prefix: typeName, Value: wrapParens(s)}
|
||||
}
|
||||
|
||||
// wrapParens wraps s with a set of parentheses, but avoids it if the
|
||||
// wrapped node itself is already surrounded by a pair of parentheses or braces.
|
||||
// It handles unwrapping one level of pointer-reference nodes.
|
||||
func wrapParens(s textNode) textNode {
|
||||
var refNode *textWrap
|
||||
if s2, ok := s.(*textWrap); ok {
|
||||
// Unwrap a single pointer reference node.
|
||||
switch s2.Metadata.(type) {
|
||||
case leafReference, trunkReference, trunkReferences:
|
||||
refNode = s2
|
||||
if s3, ok := refNode.Value.(*textWrap); ok {
|
||||
s2 = s3
|
||||
}
|
||||
}
|
||||
|
||||
// Already has delimiters that make parentheses unnecessary.
|
||||
hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
|
||||
hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
|
||||
if hasParens || hasBraces {
|
||||
return s
|
||||
}
|
||||
}
|
||||
if refNode != nil {
|
||||
refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
|
||||
return s
|
||||
}
|
||||
return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
|
||||
}
|
||||
|
||||
// FormatValue prints the reflect.Value, taking extra care to avoid descending
|
||||
// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
|
||||
func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
// Check slice element for cycles.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRef, visited := ptrs.Push(v.Addr())
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, false)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
|
||||
}
|
||||
|
||||
// Check whether there is an Error or String method to call.
|
||||
if !opts.AvoidStringer && v.CanInterface() {
|
||||
// Avoid calling Error or String methods on nil receivers since many
|
||||
// implementations crash when doing so.
|
||||
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
|
||||
var prefix, strVal string
|
||||
func() {
|
||||
// Swallow and ignore any panics from String or Error.
|
||||
defer func() { recover() }()
|
||||
switch v := v.Interface().(type) {
|
||||
case error:
|
||||
strVal = v.Error()
|
||||
prefix = "e"
|
||||
case fmt.Stringer:
|
||||
strVal = v.String()
|
||||
prefix = "s"
|
||||
}
|
||||
}()
|
||||
if prefix != "" {
|
||||
return opts.formatString(prefix, strVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether to explicitly wrap the result with the type.
|
||||
var skipType bool
|
||||
defer func() {
|
||||
if !skipType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}()
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return textLine(fmt.Sprint(v.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return textLine(fmt.Sprint(v.Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uint8:
|
||||
if parentKind == reflect.Slice || parentKind == reflect.Array {
|
||||
return textLine(formatHex(v.Uint()))
|
||||
}
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uintptr:
|
||||
return textLine(formatHex(v.Uint()))
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return textLine(fmt.Sprint(v.Float()))
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return textLine(fmt.Sprint(v.Complex()))
|
||||
case reflect.String:
|
||||
return opts.formatString("", v.String())
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
return textLine(formatPointer(value.PointerOf(v), true))
|
||||
case reflect.Struct:
|
||||
var list textList
|
||||
v := makeAddressable(v) // needed for retrieveUnexportedField
|
||||
maxLen := v.NumField()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
vv := v.Field(i)
|
||||
if value.IsZero(vv) {
|
||||
continue // Elide fields with zero values
|
||||
}
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if supportExporters && !isExported(sf.Name) {
|
||||
vv = retrieveUnexportedField(v, sf, true)
|
||||
}
|
||||
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sf.Name, Value: s})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check whether this is a []byte of text data.
|
||||
if t.Elem() == reflect.TypeOf(byte(0)) {
|
||||
b := v.Bytes()
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
||||
out = opts.formatString("", string(b))
|
||||
return opts.WithTypeMode(emitType).FormatType(t, out)
|
||||
}
|
||||
}
|
||||
|
||||
fallthrough
|
||||
case reflect.Array:
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Value: s})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if t.Kind() == reflect.Slice && opts.PrintAddresses {
|
||||
header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
|
||||
out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
|
||||
}
|
||||
return out
|
||||
case reflect.Map:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for _, k := range value.SortKeys(v.MapKeys()) {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sk := formatMapKey(k, false, ptrs)
|
||||
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sk, Value: sv})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
return out
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
out = makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
return &textWrap{Prefix: "&", Value: out}
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
return out
|
||||
case reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
// Interfaces accept different concrete types,
|
||||
// so configure the underlying value to explicitly print the type.
|
||||
skipType = true // Print the concrete type instead
|
||||
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatString(prefix, s string) textNode {
|
||||
maxLen := len(s)
|
||||
maxLines := strings.Count(s, "\n") + 1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
|
||||
maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
|
||||
// For multiline strings, use the triple-quote syntax,
|
||||
// but only use it when printing removed or inserted nodes since
|
||||
// we only want the extra verbosity for those cases.
|
||||
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
|
||||
isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
|
||||
for i := 0; i < len(lines) && isTripleQuoted; i++ {
|
||||
lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
line := lines[i]
|
||||
isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
|
||||
}
|
||||
if isTripleQuoted {
|
||||
var list textList
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
for i, line := range lines {
|
||||
if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
|
||||
comment := commentString(fmt.Sprintf("%d elided lines", numElided))
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
|
||||
break
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
|
||||
}
|
||||
|
||||
// Format the string as a single-line quoted string.
|
||||
if len(s) > maxLen+len(textEllipsis) {
|
||||
return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
|
||||
}
|
||||
return textLine(prefix + formatString(s))
|
||||
}
|
||||
|
||||
// formatMapKey formats v as if it were a map key.
|
||||
// The result is guaranteed to be a single line.
|
||||
func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
|
||||
var opts formatOptions
|
||||
opts.DiffMode = diffIdentical
|
||||
opts.TypeMode = elideType
|
||||
opts.PrintAddresses = disambiguate
|
||||
opts.AvoidStringer = disambiguate
|
||||
opts.QualifiedNames = disambiguate
|
||||
opts.VerbosityLevel = maxVerbosityPreset
|
||||
opts.LimitVerbosity = true
|
||||
s := opts.FormatValue(v, reflect.Map, ptrs).String()
|
||||
return strings.TrimSpace(s)
|
||||
}
|
||||
|
||||
// formatString prints s as a double-quoted or backtick-quoted string.
|
||||
func formatString(s string) string {
|
||||
// Use the quoted string if it is the same length as a raw string literal.
|
||||
// Otherwise, attempt to use the raw string form.
|
||||
qs := strconv.Quote(s)
|
||||
if len(qs) == 1+len(s)+1 {
|
||||
return qs
|
||||
}
|
||||
|
||||
// Disallow newlines to ensure output is a single line.
|
||||
// Only allow printable runes for readability purposes.
|
||||
rawInvalid := func(r rune) bool {
|
||||
return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
|
||||
}
|
||||
if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
|
||||
return "`" + s + "`"
|
||||
}
|
||||
return qs
|
||||
}
|
||||
|
||||
// formatHex prints u as a hexadecimal integer in Go notation.
|
||||
func formatHex(u uint64) string {
|
||||
var f string
|
||||
switch {
|
||||
case u <= 0xff:
|
||||
f = "0x%02x"
|
||||
case u <= 0xffff:
|
||||
f = "0x%04x"
|
||||
case u <= 0xffffff:
|
||||
f = "0x%06x"
|
||||
case u <= 0xffffffff:
|
||||
f = "0x%08x"
|
||||
case u <= 0xffffffffff:
|
||||
f = "0x%010x"
|
||||
case u <= 0xffffffffffff:
|
||||
f = "0x%012x"
|
||||
case u <= 0xffffffffffffff:
|
||||
f = "0x%014x"
|
||||
case u <= 0xffffffffffffffff:
|
||||
f = "0x%016x"
|
||||
}
|
||||
return fmt.Sprintf(f, u)
|
||||
}
|
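The formatString and formatHex helpers above pick the shorter of the escaped and raw string forms, and pad hexadecimal output to a width matching the value's magnitude. A minimal standalone sketch of the same selection logic (illustrative names, not the vendored helpers):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"
)

// quoteOrRaw mirrors the idea behind formatString: use the double-quoted form
// when it needs no escaping, otherwise fall back to a backtick raw literal if
// the contents allow it.
func quoteOrRaw(s string) string {
	qs := strconv.Quote(s)
	if len(qs) == 1+len(s)+1 {
		return qs // no escaping was needed
	}
	invalid := func(r rune) bool {
		return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
	}
	if utf8.ValidString(s) && strings.IndexFunc(s, invalid) < 0 {
		return "`" + s + "`"
	}
	return qs
}

// padHex mirrors the idea behind formatHex: pad to an even number of hex
// digits that is just wide enough for the value.
func padHex(u uint64) string {
	width := 2
	for max := uint64(0xff); u > max && width < 16; max = max<<8 | 0xff {
		width += 2
	}
	return fmt.Sprintf("0x%0*x", width, u)
}

func main() {
	fmt.Println(quoteOrRaw("plain text"))        // "plain text"
	fmt.Println(quoteOrRaw(`has "quotes"`))      // `has "quotes"` (raw form is shorter)
	fmt.Println(quoteOrRaw("tab\tand\nnewline")) // escaped, since it contains a newline
	fmt.Println(padHex(0x1))                     // 0x01
	fmt.Println(padHex(0x1234))                  // 0x1234
}
```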
465
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
Normal file
@@ -0,0 +1,465 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
)
|
||||
|
||||
// CanFormatDiffSlice reports whether we support custom formatting for nodes
|
||||
// that are slices of primitive kinds or strings.
|
||||
func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
|
||||
switch {
|
||||
case opts.DiffMode != diffUnknown:
|
||||
return false // Must be formatting in diff mode
|
||||
case v.NumDiff == 0:
|
||||
return false // No differences detected
|
||||
case !v.ValueX.IsValid() || !v.ValueY.IsValid():
|
||||
return false // Both values must be valid
|
||||
case v.NumIgnored > 0:
|
||||
return false // Some ignore option was used
|
||||
case v.NumTransformed > 0:
|
||||
return false // Some transform option was used
|
||||
case v.NumCompared > 1:
|
||||
return false // More than one comparison was used
|
||||
case v.NumCompared == 1 && v.Type.Name() != "":
|
||||
// The need for cmp to check applicability of options on every element
|
||||
// in a slice is a significant performance detriment for large []byte.
|
||||
// The workaround is to specify Comparer(bytes.Equal),
|
||||
// which enables cmp to compare []byte more efficiently.
|
||||
// If they differ, we still want to provide batched diffing.
|
||||
// The logic disallows named types since they tend to have their own
|
||||
// String method, with nicer formatting than what this provides.
|
||||
return false
|
||||
}
|
||||
|
||||
// Check whether this is an interface with the same concrete types.
|
||||
t := v.Type
|
||||
vx, vy := v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
// Check whether we provide specialized diffing for this type.
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Only slices of primitive types have specialized handling.
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Both slice values have to be non-empty.
|
||||
if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
// If a sufficient number of elements already differ,
|
||||
// use specialized formatting even if length requirement is not met.
|
||||
if v.NumDiff > v.NumSame {
|
||||
return true
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Use specialized string diffing for longer slices or strings.
|
||||
const minLength = 64
|
||||
return vx.Len() >= minLength && vy.Len() >= minLength
|
||||
}
|
||||
|
||||
// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
|
||||
// This provides custom-tailored logic to make printing of differences in
|
||||
// textual strings and slices of primitive kinds more readable.
|
||||
func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
t, vx, vy := v.Type, v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
|
||||
// Auto-detect the type of the data.
|
||||
var isLinedText, isText, isBinary bool
|
||||
var sx, sy string
|
||||
switch {
|
||||
case t.Kind() == reflect.String:
|
||||
sx, sy = vx.String(), vy.String()
|
||||
isText = true // Initial estimate, verify later
|
||||
case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
|
||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
||||
isBinary = true // Initial estimate, verify later
|
||||
case t.Kind() == reflect.Array:
|
||||
// Arrays need to be addressable for slice operations to work.
|
||||
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
|
||||
vx2.Set(vx)
|
||||
vy2.Set(vy)
|
||||
vx, vy = vx2, vy2
|
||||
}
|
||||
if isText || isBinary {
|
||||
var numLines, lastLineIdx, maxLineLen int
|
||||
isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy)
|
||||
for i, r := range sx + sy {
|
||||
if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
|
||||
isBinary = true
|
||||
break
|
||||
}
|
||||
if r == '\n' {
|
||||
if maxLineLen < i-lastLineIdx {
|
||||
maxLineLen = i - lastLineIdx
|
||||
}
|
||||
lastLineIdx = i + 1
|
||||
numLines++
|
||||
}
|
||||
}
|
||||
isText = !isBinary
|
||||
isLinedText = isText && numLines >= 4 && maxLineLen <= 1024
|
||||
}
|
||||
|
||||
// Format the string into printable records.
|
||||
var list textList
|
||||
var delim string
|
||||
switch {
|
||||
// If the text appears to be multi-lined text,
|
||||
// then perform differencing across individual lines.
|
||||
case isLinedText:
|
||||
ssx := strings.Split(sx, "\n")
|
||||
ssy := strings.Split(sy, "\n")
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.Index(0).String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
delim = "\n"
|
||||
|
||||
// If possible, use a custom triple-quote (""") syntax for printing
|
||||
// differences in a string literal. This format is more readable,
|
||||
// but has edge-cases where differences are visually indistinguishable.
|
||||
// This format is avoided under the following conditions:
|
||||
// • A line starts with `"""`
|
||||
// • A line starts with "..."
|
||||
// • A line contains non-printable characters
|
||||
// • Adjacent different lines differ only by whitespace
|
||||
//
|
||||
// For example:
|
||||
// """
|
||||
// ... // 3 identical lines
|
||||
// foo
|
||||
// bar
|
||||
// - baz
|
||||
// + BAZ
|
||||
// """
|
||||
isTripleQuoted := true
|
||||
prevRemoveLines := map[string]bool{}
|
||||
prevInsertLines := map[string]bool{}
|
||||
var list2 textList
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
for _, r := range list {
|
||||
if !r.Value.Equal(textEllipsis) {
|
||||
line, _ := strconv.Unquote(string(r.Value.(textLine)))
|
||||
line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
normLine := strings.Map(func(r rune) rune {
|
||||
if unicode.IsSpace(r) {
|
||||
return -1 // drop whitespace to avoid visually indistinguishable output
|
||||
}
|
||||
return r
|
||||
}, line)
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
|
||||
switch r.Diff {
|
||||
case diffRemoved:
|
||||
isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
|
||||
prevRemoveLines[normLine] = true
|
||||
case diffInserted:
|
||||
isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
|
||||
prevInsertLines[normLine] = true
|
||||
}
|
||||
if !isTripleQuoted {
|
||||
break
|
||||
}
|
||||
r.Value = textLine(line)
|
||||
r.ElideComma = true
|
||||
}
|
||||
if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
|
||||
prevRemoveLines = map[string]bool{}
|
||||
prevInsertLines = map[string]bool{}
|
||||
}
|
||||
list2 = append(list2, r)
|
||||
}
|
||||
if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
|
||||
list2 = list2[:len(list2)-1] // elide single empty line at the end
|
||||
}
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
if isTripleQuoted {
|
||||
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if t != reflect.TypeOf(string("")) {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Always emit type for slices since the triple-quote syntax
|
||||
// looks like a string (not a slice).
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// If the text appears to be single-lined text,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is printed as quoted strings.
|
||||
case isText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
delim = ""
|
||||
|
||||
// If the text appears to be binary data,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is inspired by hexdump.
|
||||
case isBinary:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
|
||||
return textRecord{Diff: d, Value: textLine(s), Comment: comment}
|
||||
},
|
||||
)
|
||||
|
||||
// For all other slices of primitive types,
|
||||
// perform differencing in approximately fixed-sized chunks.
|
||||
// The size of each chunk depends on the width of the element kind.
|
||||
default:
|
||||
var chunkSize int
|
||||
if t.Elem().Kind() == reflect.Bool {
|
||||
chunkSize = 16
|
||||
} else {
|
||||
switch t.Elem().Bits() {
|
||||
case 8:
|
||||
chunkSize = 16
|
||||
case 16:
|
||||
chunkSize = 12
|
||||
case 32:
|
||||
chunkSize = 8
|
||||
default:
|
||||
chunkSize = 8
|
||||
}
|
||||
}
|
||||
list = opts.formatDiffSlice(
|
||||
vx, vy, chunkSize, t.Elem().Kind().String(),
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
|
||||
case reflect.Uint8, reflect.Uintptr:
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
|
||||
}
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Wrap the output with appropriate type information.
|
||||
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if !isText {
|
||||
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
|
||||
// Emit the type for extra clarity (e.g. "string{...}").
|
||||
if t.Kind() == reflect.String {
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != reflect.TypeOf(string("")) {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != reflect.TypeOf([]byte(nil)) {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// formatASCII formats s as an ASCII string.
|
||||
// This is useful for printing binary strings in a semi-legible way.
|
||||
func formatASCII(s string) string {
|
||||
b := bytes.Repeat([]byte{'.'}, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
if ' ' <= s[i] && s[i] <= '~' {
|
||||
b[i] = s[i]
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffSlice(
|
||||
vx, vy reflect.Value, chunkSize int, name string,
|
||||
makeRec func(reflect.Value, diffMode) textRecord,
|
||||
) (list textList) {
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result {
|
||||
return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface())
|
||||
})
|
||||
|
||||
appendChunks := func(v reflect.Value, d diffMode) int {
|
||||
n0 := v.Len()
|
||||
for v.Len() > 0 {
|
||||
n := chunkSize
|
||||
if n > v.Len() {
|
||||
n = v.Len()
|
||||
}
|
||||
list = append(list, makeRec(v.Slice(0, n), d))
|
||||
v = v.Slice(n, v.Len())
|
||||
}
|
||||
return n0 - v.Len()
|
||||
}
|
||||
|
||||
var numDiffs int
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
groups := coalesceAdjacentEdits(name, es)
|
||||
groups = coalesceInterveningIdentical(groups, chunkSize/4)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Print equal.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing equal bytes to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
numLo++
|
||||
}
|
||||
for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
|
||||
numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
|
||||
}
|
||||
|
||||
// Print the equal bytes.
|
||||
appendChunks(vx.Slice(0, numLo), diffIdentical)
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
}
|
||||
appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
|
||||
vx = vx.Slice(numEqual, vx.Len())
|
||||
vy = vy.Slice(numEqual, vy.Len())
|
||||
continue
|
||||
}
|
||||
|
||||
// Print unequal.
|
||||
len0 := len(list)
|
||||
nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
|
||||
vx = vx.Slice(nx, vx.Len())
|
||||
ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
|
||||
vy = vy.Slice(ny, vy.Len())
|
||||
numDiffs += len(list) - len0
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(vx.Len() == 0 && vy.Len() == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
|
||||
// equal or unequal counts.
|
||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
||||
var prevCase int // Arbitrary index into which case last occurred
|
||||
lastStats := func(i int) *diffStats {
|
||||
if prevCase != i {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevCase = i
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case diff.Identity:
|
||||
lastStats(1).NumIdentical++
|
||||
case diff.UniqueX:
|
||||
lastStats(2).NumRemoved++
|
||||
case diff.UniqueY:
|
||||
lastStats(2).NumInserted++
|
||||
case diff.Modified:
|
||||
lastStats(2).NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
|
||||
// equal groups into adjacent unequal groups that currently result in a
|
||||
// dual inserted/removed printout. This acts as a high-pass filter to smooth
|
||||
// out high-frequency changes within the windowSize.
|
||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
||||
groups, groupsOrig := groups[:0], groups
|
||||
for i, ds := range groupsOrig {
|
||||
if len(groups) >= 2 && ds.NumDiff() > 0 {
|
||||
prev := &groups[len(groups)-2] // Unequal group
|
||||
curr := &groups[len(groups)-1] // Equal group
|
||||
next := &groupsOrig[i] // Unequal group
|
||||
hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
|
||||
hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
|
||||
if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
|
||||
*prev = prev.Append(*curr).Append(*next)
|
||||
groups = groups[:len(groups)-1] // Truncate off equal group
|
||||
continue
|
||||
}
|
||||
}
|
||||
groups = append(groups, ds)
|
||||
}
|
||||
return groups
|
||||
}
|
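coalesceAdjacentEdits and coalesceInterveningIdentical above turn a raw edit script into alternating runs of equal and unequal counts, then fold short identical runs sandwiched between unequal ones back together. A self-contained sketch of the first step, using an illustrative edit type in place of the internal diff package:

```go
package main

import "fmt"

// editKind is an illustrative stand-in for the internal diff edit types.
type editKind int

const (
	identity editKind = iota
	uniqueX           // only present in x (removed)
	uniqueY           // only present in y (inserted)
	modified
)

// runStats counts one run of adjacent equal or unequal edits.
type runStats struct {
	NumIdentical, NumRemoved, NumInserted, NumModified int
}

// coalesce groups an edit script into runs, mirroring coalesceAdjacentEdits:
// identical edits extend an "equal" run, everything else extends an "unequal" run.
func coalesce(es []editKind) []runStats {
	var groups []runStats
	prevCase := 0 // 1 = equal run, 2 = unequal run
	last := func(c int) *runStats {
		if prevCase != c {
			groups = append(groups, runStats{})
			prevCase = c
		}
		return &groups[len(groups)-1]
	}
	for _, e := range es {
		switch e {
		case identity:
			last(1).NumIdentical++
		case uniqueX:
			last(2).NumRemoved++
		case uniqueY:
			last(2).NumInserted++
		case modified:
			last(2).NumModified++
		}
	}
	return groups
}

func main() {
	es := []editKind{identity, identity, uniqueX, uniqueY, identity, modified}
	// Four runs: 2 identical, then 1 removed + 1 inserted, then 1 identical, then 1 modified.
	fmt.Printf("%+v\n", coalesce(es))
}
```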
431
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
Normal file
@@ -0,0 +1,431 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
const maxColumnLength = 80
|
||||
|
||||
type indentMode int
|
||||
|
||||
func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
|
||||
// The output of Diff is documented as being unstable to provide future
|
||||
// flexibility in changing the output for more human-readable reports.
|
||||
// This logic intentionally introduces instability to the exact output
|
||||
// so that users can detect accidental reliance on stability early on,
|
||||
// rather than much later when an actual change to the format occurs.
|
||||
if flags.Deterministic || randBool {
|
||||
// Use regular spaces (U+0020).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
} else {
|
||||
// Use non-breaking spaces (U+00a0).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
}
|
||||
return repeatCount(n).appendChar(b, '\t')
|
||||
}
|
||||
|
||||
type repeatCount int
|
||||
|
||||
func (n repeatCount) appendChar(b []byte, c byte) []byte {
|
||||
for ; n > 0; n-- {
|
||||
b = append(b, c)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// textNode is a simplified tree-based representation of structured text.
|
||||
// Possible node types are textWrap, textList, or textLine.
|
||||
type textNode interface {
|
||||
// Len reports the length in bytes of a single-line version of the tree.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are ignored.
|
||||
Len() int
|
||||
// Equal reports whether the two trees are structurally identical.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are compared.
|
||||
Equal(textNode) bool
|
||||
// String returns the string representation of the text tree.
|
||||
// It is not guaranteed that len(x.String()) == x.Len(),
|
||||
// nor that x.String() == y.String() implies that x.Equal(y).
|
||||
String() string
|
||||
|
||||
// formatCompactTo formats the contents of the tree as a single-line string
|
||||
// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
|
||||
// fields are ignored.
|
||||
//
|
||||
// However, not all nodes in the tree should be collapsed as a single-line.
|
||||
// If a node can be collapsed as a single-line, it is replaced by a textLine
|
||||
// node. Since the top-level node cannot replace itself, this also returns
|
||||
// the current node itself.
|
||||
//
|
||||
// This does not mutate the receiver.
|
||||
formatCompactTo([]byte, diffMode) ([]byte, textNode)
|
||||
// formatExpandedTo formats the contents of the tree as a multi-line string
|
||||
// to the provided buffer. In order for column alignment to operate well,
|
||||
// formatCompactTo must be called before calling formatExpandedTo.
|
||||
formatExpandedTo([]byte, diffMode, indentMode) []byte
|
||||
}
|
||||
|
||||
// textWrap is a wrapper that concatenates a prefix and/or a suffix
|
||||
// to the underlying node.
|
||||
type textWrap struct {
|
||||
Prefix string // e.g., "bytes.Buffer{"
|
||||
Value textNode // textWrap | textList | textLine
|
||||
Suffix string // e.g., "}"
|
||||
Metadata interface{} // arbitrary metadata; has no effect on formatting
|
||||
}
|
||||
|
||||
func (s *textWrap) Len() int {
|
||||
return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
|
||||
}
|
||||
func (s1 *textWrap) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(*textWrap); ok {
|
||||
return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s *textWrap) String() string {
|
||||
var d diffMode
|
||||
var n indentMode
|
||||
_, s2 := s.formatCompactTo(nil, d)
|
||||
b := n.appendIndent(nil, d) // Leading indent
|
||||
b = s2.formatExpandedTo(b, d, n) // Main body
|
||||
b = append(b, '\n') // Trailing newline
|
||||
return string(b)
|
||||
}
|
||||
func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
n0 := len(b) // Original buffer length
|
||||
b = append(b, s.Prefix...)
|
||||
b, s.Value = s.Value.formatCompactTo(b, d)
|
||||
b = append(b, s.Suffix...)
|
||||
if _, ok := s.Value.(textLine); ok {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
b = append(b, s.Prefix...)
|
||||
b = s.Value.formatExpandedTo(b, d, n)
|
||||
b = append(b, s.Suffix...)
|
||||
return b
|
||||
}
|
||||
|
||||
// textList is a comma-separated list of textWrap or textLine nodes.
|
||||
// The list may be formatted as multiple lines or a single line at the discretion
|
||||
// of the textList.formatCompactTo method.
|
||||
type textList []textRecord
|
||||
type textRecord struct {
|
||||
Diff diffMode // e.g., 0 or '-' or '+'
|
||||
Key string // e.g., "MyField"
|
||||
Value textNode // textWrap | textLine
|
||||
ElideComma bool // avoid trailing comma
|
||||
Comment fmt.Stringer // e.g., "6 identical fields"
|
||||
}
|
||||
|
||||
// AppendEllipsis appends a new ellipsis node to the list if none already
|
||||
// exists at the end. If ds is non-zero, it coalesces the statistics with the
|
||||
// previous diffStats.
|
||||
func (s *textList) AppendEllipsis(ds diffStats) {
|
||||
hasStats := !ds.IsZero()
|
||||
if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
|
||||
if hasStats {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
|
||||
} else {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
|
||||
}
|
||||
return
|
||||
}
|
||||
if hasStats {
|
||||
(*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
|
||||
}
|
||||
}
|
||||
|
||||
func (s textList) Len() (n int) {
|
||||
for i, r := range s {
|
||||
n += len(r.Key)
|
||||
if r.Key != "" {
|
||||
n += len(": ")
|
||||
}
|
||||
n += r.Value.Len()
|
||||
if i < len(s)-1 {
|
||||
n += len(", ")
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (s1 textList) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textList); ok {
|
||||
if len(s1) != len(s2) {
|
||||
return false
|
||||
}
|
||||
for i := range s1 {
|
||||
r1, r2 := s1[i], s2[i]
|
||||
if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s textList) String() string {
|
||||
return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
|
||||
}
|
||||
|
||||
func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
s = append(textList(nil), s...) // Avoid mutating original
|
||||
|
||||
// Determine whether we can collapse this list as a single line.
|
||||
n0 := len(b) // Original buffer length
|
||||
var multiLine bool
|
||||
for i, r := range s {
|
||||
if r.Diff == diffInserted || r.Diff == diffRemoved {
|
||||
multiLine = true
|
||||
}
|
||||
b = append(b, r.Key...)
|
||||
if r.Key != "" {
|
||||
b = append(b, ": "...)
|
||||
}
|
||||
b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
|
||||
if _, ok := s[i].Value.(textLine); !ok {
|
||||
multiLine = true
|
||||
}
|
||||
if r.Comment != nil {
|
||||
multiLine = true
|
||||
}
|
||||
if i < len(s)-1 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
}
|
||||
// Force multi-lined output when printing a removed/inserted node that
|
||||
// is sufficiently long.
|
||||
if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
|
||||
multiLine = true
|
||||
}
|
||||
if !multiLine {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
|
||||
func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
alignKeyLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return r.Key == "" || !isLine
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
|
||||
)
|
||||
alignValueLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
|
||||
)
|
||||
|
||||
// Format lists of simple lists in a batched form.
|
||||
// If the list is a sequence of only textLine values,
|
||||
// then batch multiple values on a single line.
|
||||
var isSimple bool
|
||||
for _, r := range s {
|
||||
_, isLine := r.Value.(textLine)
|
||||
isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
|
||||
if !isSimple {
|
||||
break
|
||||
}
|
||||
}
|
||||
if isSimple {
|
||||
n++
|
||||
var batch []byte
|
||||
emitBatch := func() {
|
||||
if len(batch) > 0 {
|
||||
b = n.appendIndent(append(b, '\n'), d)
|
||||
b = append(b, bytes.TrimRight(batch, " ")...)
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
for _, r := range s {
|
||||
line := r.Value.(textLine)
|
||||
if len(batch)+len(line)+len(", ") > maxColumnLength {
|
||||
emitBatch()
|
||||
}
|
||||
batch = append(batch, line...)
|
||||
batch = append(batch, ", "...)
|
||||
}
|
||||
emitBatch()
|
||||
n--
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
// Format the list as a multi-lined output.
|
||||
n++
|
||||
for i, r := range s {
|
||||
b = n.appendIndent(append(b, '\n'), d|r.Diff)
|
||||
if r.Key != "" {
|
||||
b = append(b, r.Key+": "...)
|
||||
}
|
||||
b = alignKeyLens[i].appendChar(b, ' ')
|
||||
|
||||
b = r.Value.formatExpandedTo(b, d|r.Diff, n)
|
||||
if !r.ElideComma {
|
||||
b = append(b, ',')
|
||||
}
|
||||
b = alignValueLens[i].appendChar(b, ' ')
|
||||
|
||||
if r.Comment != nil {
|
||||
b = append(b, " // "+r.Comment.String()...)
|
||||
}
|
||||
}
|
||||
n--
|
||||
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
func (s textList) alignLens(
|
||||
skipFunc func(textRecord) bool,
|
||||
lenFunc func(textRecord) int,
|
||||
) []repeatCount {
|
||||
var startIdx, endIdx, maxLen int
|
||||
lens := make([]repeatCount, len(s))
|
||||
for i, r := range s {
|
||||
if skipFunc(r) {
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
startIdx, endIdx, maxLen = i+1, i+1, 0
|
||||
} else {
|
||||
if maxLen < lenFunc(r) {
|
||||
maxLen = lenFunc(r)
|
||||
}
|
||||
endIdx = i + 1
|
||||
}
|
||||
}
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
return lens
|
||||
}
|
||||
|
||||
// textLine is a single-line segment of text and is always a leaf node
|
||||
// in the textNode tree.
|
||||
type textLine []byte
|
||||
|
||||
var (
|
||||
textNil = textLine("nil")
|
||||
textEllipsis = textLine("...")
|
||||
)
|
||||
|
||||
func (s textLine) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s1 textLine) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textLine); ok {
|
||||
return bytes.Equal([]byte(s1), []byte(s2))
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s textLine) String() string {
|
||||
return string(s)
|
||||
}
|
||||
func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
return append(b, s...), s
|
||||
}
|
||||
func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
|
||||
return append(b, s...)
|
||||
}
|
||||
|
||||
type diffStats struct {
|
||||
Name string
|
||||
NumIgnored int
|
||||
NumIdentical int
|
||||
NumRemoved int
|
||||
NumInserted int
|
||||
NumModified int
|
||||
}
|
||||
|
||||
func (s diffStats) IsZero() bool {
|
||||
s.Name = ""
|
||||
return s == diffStats{}
|
||||
}
|
||||
|
||||
func (s diffStats) NumDiff() int {
|
||||
return s.NumRemoved + s.NumInserted + s.NumModified
|
||||
}
|
||||
|
||||
func (s diffStats) Append(ds diffStats) diffStats {
|
||||
assert(s.Name == ds.Name)
|
||||
s.NumIgnored += ds.NumIgnored
|
||||
s.NumIdentical += ds.NumIdentical
|
||||
s.NumRemoved += ds.NumRemoved
|
||||
s.NumInserted += ds.NumInserted
|
||||
s.NumModified += ds.NumModified
|
||||
return s
|
||||
}
|
||||
|
||||
// String prints a human-readable summary of coalesced records.
|
||||
//
|
||||
// Example:
|
||||
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
|
||||
func (s diffStats) String() string {
|
||||
var ss []string
|
||||
var sum int
|
||||
labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
|
||||
counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
|
||||
for i, n := range counts {
|
||||
if n > 0 {
|
||||
ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
|
||||
}
|
||||
sum += n
|
||||
}
|
||||
|
||||
// Pluralize the name (adjusting for some obscure English grammar rules).
|
||||
name := s.Name
|
||||
if sum > 1 {
|
||||
name += "s"
|
||||
if strings.HasSuffix(name, "ys") {
|
||||
name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
|
||||
}
|
||||
}
|
||||
|
||||
// Format the list according to English grammar (with Oxford comma).
|
||||
switch n := len(ss); n {
|
||||
case 0:
|
||||
return ""
|
||||
case 1, 2:
|
||||
return strings.Join(ss, " and ") + " " + name
|
||||
default:
|
||||
return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
|
||||
}
|
||||
}
|
||||
|
||||
type commentString string
|
||||
|
||||
func (s commentString) String() string { return string(s) }
|
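diffStats.String above joins the non-zero counts with an Oxford comma and pluralizes the record name (including the "y" to "ies" case). A standalone sketch of that joining rule, with illustrative names:

```go
package main

import (
	"fmt"
	"strings"
)

// summarize joins labeled counts the way diffStats.String does:
// "x", "x and y", or "x, y, and z" (Oxford comma), followed by a pluralized name.
func summarize(name string, counts map[string]int) string {
	labels := []string{"ignored", "identical", "removed", "inserted", "modified"}
	var parts []string
	sum := 0
	for _, l := range labels {
		if n := counts[l]; n > 0 {
			parts = append(parts, fmt.Sprintf("%d %v", n, l))
			sum += n
		}
	}
	if sum > 1 {
		name += "s"
		if strings.HasSuffix(name, "ys") {
			name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
		}
	}
	switch n := len(parts); n {
	case 0:
		return ""
	case 1, 2:
		return strings.Join(parts, " and ") + " " + name
	default:
		return strings.Join(parts[:n-1], ", ") + ", and " + parts[n-1] + " " + name
	}
}

func main() {
	fmt.Println(summarize("field", map[string]int{"ignored": 5}))
	// 5 ignored fields
	fmt.Println(summarize("entry", map[string]int{"removed": 1, "inserted": 2, "modified": 3}))
	// 1 removed, 2 inserted, and 3 modified entries
}
```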
121
vendor/github.com/google/go-cmp/cmp/report_value.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import "reflect"
|
||||
|
||||
// valueNode represents a single node within a report, which is a
|
||||
// structured representation of the value tree, containing information
|
||||
// regarding which nodes are equal or not.
|
||||
type valueNode struct {
|
||||
parent *valueNode
|
||||
|
||||
Type reflect.Type
|
||||
ValueX reflect.Value
|
||||
ValueY reflect.Value
|
||||
|
||||
// NumSame is the number of leaf nodes that are equal.
|
||||
// All descendants are equal only if NumDiff is 0.
|
||||
NumSame int
|
||||
// NumDiff is the number of leaf nodes that are not equal.
|
||||
NumDiff int
|
||||
// NumIgnored is the number of leaf nodes that are ignored.
|
||||
NumIgnored int
|
||||
// NumCompared is the number of leaf nodes that were compared
|
||||
// using an Equal method or Comparer function.
|
||||
NumCompared int
|
||||
// NumTransformed is the number of non-leaf nodes that were transformed.
|
||||
NumTransformed int
|
||||
// NumChildren is the number of transitive descendants of this node.
|
||||
// This counts from zero; thus, leaf nodes have no descendants.
|
||||
NumChildren int
|
||||
// MaxDepth is the maximum depth of the tree. This counts from zero;
|
||||
// thus, leaf nodes have a depth of zero.
|
||||
MaxDepth int
|
||||
|
||||
// Records is a list of struct fields, slice elements, or map entries.
|
||||
Records []reportRecord // If populated, implies Value is not populated
|
||||
|
||||
// Value is the result of a transformation, pointer indirection, or
|
||||
// type assertion.
|
||||
Value *valueNode // If populated, implies Records is not populated
|
||||
|
||||
// TransformerName is the name of the transformer.
|
||||
TransformerName string // If non-empty, implies Value is populated
|
||||
}
|
||||
type reportRecord struct {
|
||||
Key reflect.Value // Invalid for slice element
|
||||
Value *valueNode
|
||||
}
|
||||
|
||||
func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
|
||||
vx, vy := ps.Values()
|
||||
child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
|
||||
switch s := ps.(type) {
|
||||
case StructField:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
|
||||
case SliceIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Value: child})
|
||||
case MapIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
|
||||
case Indirect:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case TypeAssertion:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case Transform:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
parent.TransformerName = s.Name()
|
||||
parent.NumTransformed++
|
||||
default:
|
||||
assert(parent == nil) // Must be the root step
|
||||
}
|
||||
return child
|
||||
}
|
||||
|
||||
func (r *valueNode) Report(rs Result) {
|
||||
assert(r.MaxDepth == 0) // May only be called on leaf nodes
|
||||
|
||||
if rs.ByIgnore() {
|
||||
r.NumIgnored++
|
||||
} else {
|
||||
if rs.Equal() {
|
||||
r.NumSame++
|
||||
} else {
|
||||
r.NumDiff++
|
||||
}
|
||||
}
|
||||
assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
|
||||
|
||||
if rs.ByMethod() {
|
||||
r.NumCompared++
|
||||
}
|
||||
if rs.ByFunc() {
|
||||
r.NumCompared++
|
||||
}
|
||||
assert(r.NumCompared <= 1)
|
||||
}
|
||||
|
||||
func (child *valueNode) PopStep() (parent *valueNode) {
|
||||
if child.parent == nil {
|
||||
return nil
|
||||
}
|
||||
parent = child.parent
|
||||
parent.NumSame += child.NumSame
|
||||
parent.NumDiff += child.NumDiff
|
||||
parent.NumIgnored += child.NumIgnored
|
||||
parent.NumCompared += child.NumCompared
|
||||
parent.NumTransformed += child.NumTransformed
|
||||
parent.NumChildren += child.NumChildren + 1
|
||||
if parent.MaxDepth < child.MaxDepth+1 {
|
||||
parent.MaxDepth = child.MaxDepth + 1
|
||||
}
|
||||
return parent
|
||||
}
|
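valueNode aggregates results upward: Report classifies each leaf comparison, and PopStep folds a child's counters and depth into its parent. A condensed, illustrative sketch of that push/report/pop pattern (not the vendored types):

```go
package main

import "fmt"

// node is an illustrative, pared-down analogue of valueNode's counters.
type node struct {
	parent             *node
	numSame, numDiff   int
	numChildren, depth int
}

// push creates a child whose results will later be folded into n.
func (n *node) push() *node { return &node{parent: n} }

// report records a single leaf comparison on a leaf node.
func (n *node) report(equal bool) {
	if equal {
		n.numSame++
	} else {
		n.numDiff++
	}
}

// pop folds the child's counters and depth into its parent, as PopStep does.
func (n *node) pop() *node {
	p := n.parent
	if p == nil {
		return nil
	}
	p.numSame += n.numSame
	p.numDiff += n.numDiff
	p.numChildren += n.numChildren + 1
	if p.depth < n.depth+1 {
		p.depth = n.depth + 1
	}
	return p
}

func main() {
	root := &node{}
	for _, eq := range []bool{true, false, true} {
		leaf := root.push()
		leaf.report(eq)
		leaf.pop()
	}
	fmt.Printf("same=%d diff=%d children=%d depth=%d\n",
		root.numSame, root.numDiff, root.numChildren, root.depth)
	// same=2 diff=1 children=3 depth=1
}
```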
13
vendor/github.com/google/gofuzz/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
- 1.3
|
||||
- 1.2
|
||||
- tip
|
||||
|
||||
install:
|
||||
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
|
||||
|
||||
script:
|
||||
- go test -cover
|
67
vendor/github.com/google/gofuzz/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
# How to contribute #
|
||||
|
||||
We'd love to accept your patches and contributions to this project. There are
|
||||
just a few small guidelines you need to follow.
|
||||
|
||||
|
||||
## Contributor License Agreement ##
|
||||
|
||||
Contributions to any Google project must be accompanied by a Contributor
|
||||
License Agreement. This is not a copyright **assignment**; it simply gives
|
||||
Google permission to use and redistribute your contributions as part of the
|
||||
project.
|
||||
|
||||
* If you are an individual writing original source code and you're sure you
|
||||
own the intellectual property, then you'll need to sign an [individual
|
||||
CLA][].
|
||||
|
||||
* If you work for a company that wants to allow you to contribute your work,
|
||||
then you'll need to sign a [corporate CLA][].
|
||||
|
||||
You generally only need to submit a CLA once, so if you've already submitted
|
||||
one (even if it was for a different project), you probably don't need to do it
|
||||
again.
|
||||
|
||||
[individual CLA]: https://developers.google.com/open-source/cla/individual
|
||||
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
|
||||
|
||||
|
||||
## Submitting a patch ##
|
||||
|
||||
1. It's generally best to start by opening a new issue describing the bug or
|
||||
feature you're intending to fix. Even if you think it's relatively minor,
|
||||
it's helpful to know what people are working on. Mention in the initial
|
||||
issue that you are planning to work on that bug or feature so that it can
|
||||
be assigned to you.
|
||||
|
||||
1. Follow the normal process of [forking][] the project, and set up a new
|
||||
branch to work in. It's important that each group of changes be done in
|
||||
separate branches in order to ensure that a pull request only includes the
|
||||
commits related to that bug or feature.
|
||||
|
||||
1. Go makes it very simple to ensure properly formatted code, so always run
|
||||
`go fmt` on your code before committing it. You should also run
|
||||
[golint][] over your code. As noted in the [golint readme][], it's not
|
||||
strictly necessary that your code be completely "lint-free", but this will
|
||||
help you find common style issues.
|
||||
|
||||
1. Any significant changes should almost always be accompanied by tests. The
|
||||
project already has good test coverage, so look at some of the existing
|
||||
tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
|
||||
are invaluable tools for seeing which parts of your code aren't being
|
||||
exercised by your tests.
|
||||
|
||||
1. Do your best to have [well-formed commit messages][] for each change.
|
||||
This provides consistency throughout the project, and ensures that commit
|
||||
messages can be formatted properly by various git tools.
|
||||
|
||||
1. Finally, push the commits to your fork and submit a [pull request][].
|
||||
|
||||
[forking]: https://help.github.com/articles/fork-a-repo
|
||||
[golint]: https://github.com/golang/lint
|
||||
[golint readme]: https://github.com/golang/lint/blob/master/README
|
||||
[gocov]: https://github.com/axw/gocov
|
||||
[gocov-html]: https://github.com/matm/gocov-html
|
||||
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
|
||||
[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
|
||||
[pull request]: https://help.github.com/articles/creating-a-pull-request
|
202
vendor/github.com/google/gofuzz/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
71
vendor/github.com/google/gofuzz/README.md
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
gofuzz
|
||||
======
|
||||
|
||||
gofuzz is a library for populating Go objects with random values.
|
||||
|
||||
[](https://godoc.org/github.com/google/gofuzz)
|
||||
[](https://travis-ci.org/google/gofuzz)
|
||||
|
||||
This is useful for testing:
|
||||
|
||||
* Do your project's objects really serialize/unserialize correctly in all cases?
|
||||
* Is there an incorrectly formatted object that will cause your project to panic?
|
||||
|
||||
Import with ```import "github.com/google/gofuzz"```
|
||||
|
||||
You can use it on single variables:
|
||||
```go
|
||||
f := fuzz.New()
|
||||
var myInt int
|
||||
f.Fuzz(&myInt) // myInt gets a random value.
|
||||
```
|
||||
|
||||
You can use it on maps:
|
||||
```go
|
||||
f := fuzz.New().NilChance(0).NumElements(1, 1)
|
||||
var myMap map[ComplexKeyType]string
|
||||
f.Fuzz(&myMap) // myMap will have exactly one element.
|
||||
```
|
||||
|
||||
Customize the chance of getting a nil pointer:
|
||||
```go
|
||||
f := fuzz.New().NilChance(.5)
|
||||
var fancyStruct struct {
|
||||
A, B, C, D *string
|
||||
}
|
||||
f.Fuzz(&fancyStruct) // About half the pointers should be set.
|
||||
```
|
||||
|
||||
You can even customize the randomization completely if needed:
|
||||
```go
|
||||
type MyEnum string
|
||||
const (
|
||||
A MyEnum = "A"
|
||||
B MyEnum = "B"
|
||||
)
|
||||
type MyInfo struct {
|
||||
Type MyEnum
|
||||
AInfo *string
|
||||
BInfo *string
|
||||
}
|
||||
|
||||
f := fuzz.New().NilChance(0).Funcs(
|
||||
func(e *MyInfo, c fuzz.Continue) {
|
||||
switch c.Intn(2) {
|
||||
case 0:
|
||||
e.Type = A
|
||||
c.Fuzz(&e.AInfo)
|
||||
case 1:
|
||||
e.Type = B
|
||||
c.Fuzz(&e.BInfo)
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
var myObject MyInfo
|
||||
f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
|
||||
```
|
||||
|
||||
See more examples in ```example_test.go```.
|
||||
|
||||
Happy testing!
|
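The examples above all use a time-based seed via fuzz.New(); for reproducing a failing case, the fuzzer can be seeded explicitly with NewWithSeed (defined in fuzz.go below), which makes runs deterministic. A short usage sketch:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

func main() {
	type item struct {
		Name string
		N    int
	}
	var a, b item
	// Same seed => same pseudo-random stream => identical fuzzed values,
	// which is handy when reproducing a failing test case.
	fuzz.NewWithSeed(42).Fuzz(&a)
	fuzz.NewWithSeed(42).Fuzz(&b)
	fmt.Println(a == b) // true
}
```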
18
vendor/github.com/google/gofuzz/doc.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
/*
|
||||
Copyright 2014 Google Inc. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package fuzz is a library for populating Go objects with random values.
|
||||
package fuzz
|
506
vendor/github.com/google/gofuzz/fuzz.go
generated
vendored
Normal file
@@ -0,0 +1,506 @@
/*
Copyright 2014 Google Inc. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fuzz

import (
    "fmt"
    "math/rand"
    "reflect"
    "regexp"
    "time"
)

// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
type fuzzFuncMap map[reflect.Type]reflect.Value

// Fuzzer knows how to fill any object with random fields.
type Fuzzer struct {
    fuzzFuncs         fuzzFuncMap
    defaultFuzzFuncs  fuzzFuncMap
    r                 *rand.Rand
    nilChance         float64
    minElements       int
    maxElements       int
    maxDepth          int
    skipFieldPatterns []*regexp.Regexp
}

// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
// RandSource, NilChance, or NumElements in any order.
func New() *Fuzzer {
    return NewWithSeed(time.Now().UnixNano())
}

func NewWithSeed(seed int64) *Fuzzer {
    f := &Fuzzer{
        defaultFuzzFuncs: fuzzFuncMap{
            reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
        },

        fuzzFuncs:   fuzzFuncMap{},
        r:           rand.New(rand.NewSource(seed)),
        nilChance:   .2,
        minElements: 1,
        maxElements: 10,
        maxDepth:    100,
    }
    return f
}

// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
//
// Each entry in fuzzFuncs must be a function taking two parameters.
// The first parameter must be a pointer or map. It is the variable that
// function will fill with random data. The second parameter must be a
// fuzz.Continue, which will provide a source of randomness and a way
// to automatically continue fuzzing smaller pieces of the first parameter.
//
// These functions are called sensibly, e.g., if you wanted custom string
// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
// called and passed the address of strings. Maps and pointers will always
// be made/new'd for you, ignoring the NilChange option. For slices, it
// doesn't make much sense to pre-create them--Fuzzer doesn't know how
// long you want your slice--so take a pointer to a slice, and make it
// yourself. (If you don't want your map/pointer type pre-made, take a
// pointer to it, and make it yourself.) See the examples for a range of
// custom functions.
func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
    for i := range fuzzFuncs {
        v := reflect.ValueOf(fuzzFuncs[i])
        if v.Kind() != reflect.Func {
            panic("Need only funcs!")
        }
        t := v.Type()
        if t.NumIn() != 2 || t.NumOut() != 0 {
            panic("Need 2 in and 0 out params!")
        }
        argT := t.In(0)
        switch argT.Kind() {
        case reflect.Ptr, reflect.Map:
        default:
            panic("fuzzFunc must take pointer or map type")
        }
        if t.In(1) != reflect.TypeOf(Continue{}) {
            panic("fuzzFunc's second parameter must be type fuzz.Continue")
        }
        f.fuzzFuncs[argT] = v
    }
    return f
}

// RandSource causes f to get values from the given source of randomness.
// Use if you want deterministic fuzzing.
func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
    f.r = rand.New(s)
    return f
}

// NilChance sets the probability of creating a nil pointer, map, or slice to
// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
func (f *Fuzzer) NilChance(p float64) *Fuzzer {
    if p < 0 || p > 1 {
        panic("p should be between 0 and 1, inclusive.")
    }
    f.nilChance = p
    return f
}

// NumElements sets the minimum and maximum number of elements that will be
// added to a non-nil map or slice.
func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
    if atLeast > atMost {
        panic("atLeast must be <= atMost")
    }
    if atLeast < 0 {
        panic("atLeast must be >= 0")
    }
    f.minElements = atLeast
    f.maxElements = atMost
    return f
}

func (f *Fuzzer) genElementCount() int {
    if f.minElements == f.maxElements {
        return f.minElements
    }
    return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
}

func (f *Fuzzer) genShouldFill() bool {
    return f.r.Float64() > f.nilChance
}

// MaxDepth sets the maximum number of recursive fuzz calls that will be made
// before stopping. This includes struct members, pointers, and map and slice
// elements.
func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
    f.maxDepth = d
    return f
}

// Skip fields which match the supplied pattern. Call this multiple times if needed
// This is useful to skip XXX_ fields generated by protobuf
func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer {
    f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
    return f
}

// Fuzz recursively fills all of obj's fields with something random. First
// this tries to find a custom fuzz function (see Funcs). If there is no
// custom function this tests whether the object implements fuzz.Interface and,
// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if
// there is a default fuzz function provided by this package. If all of that
// fails, this will generate random values for all primitive fields and then
// recurse for all non-primitives.
//
// This is safe for cyclic or tree-like structs, up to a limit. Use the
// MaxDepth method to adjust how deep you need it to recurse.
//
// obj must be a pointer. Only exported (public) fields can be set (thanks,
// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
// fields.
func (f *Fuzzer) Fuzz(obj interface{}) {
    v := reflect.ValueOf(obj)
    if v.Kind() != reflect.Ptr {
        panic("needed ptr!")
    }
    v = v.Elem()
    f.fuzzWithContext(v, 0)
}

// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
// obj's type will not be called and obj will not be tested for fuzz.Interface
// conformance. This applies only to obj and not other instances of obj's
// type.
// Not safe for cyclic or tree-like structs!
// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
// Intended for tests, so will panic on bad input or unimplemented fields.
func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
    v := reflect.ValueOf(obj)
    if v.Kind() != reflect.Ptr {
        panic("needed ptr!")
    }
    v = v.Elem()
    f.fuzzWithContext(v, flagNoCustomFuzz)
}

const (
    // Do not try to find a custom fuzz function. Does not apply recursively.
    flagNoCustomFuzz uint64 = 1 << iota
)

func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
    fc := &fuzzerContext{fuzzer: f}
    fc.doFuzz(v, flags)
}

// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
// be thread-safe.
type fuzzerContext struct {
    fuzzer   *Fuzzer
    curDepth int
}

func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
    if fc.curDepth >= fc.fuzzer.maxDepth {
        return
    }
    fc.curDepth++
    defer func() { fc.curDepth-- }()

    if !v.CanSet() {
        return
    }

    if flags&flagNoCustomFuzz == 0 {
        // Check for both pointer and non-pointer custom functions.
        if v.CanAddr() && fc.tryCustom(v.Addr()) {
            return
        }
        if fc.tryCustom(v) {
            return
        }
    }

    if fn, ok := fillFuncMap[v.Kind()]; ok {
        fn(v, fc.fuzzer.r)
        return
    }
    switch v.Kind() {
    case reflect.Map:
        if fc.fuzzer.genShouldFill() {
            v.Set(reflect.MakeMap(v.Type()))
            n := fc.fuzzer.genElementCount()
            for i := 0; i < n; i++ {
                key := reflect.New(v.Type().Key()).Elem()
                fc.doFuzz(key, 0)
                val := reflect.New(v.Type().Elem()).Elem()
                fc.doFuzz(val, 0)
                v.SetMapIndex(key, val)
            }
            return
        }
        v.Set(reflect.Zero(v.Type()))
    case reflect.Ptr:
        if fc.fuzzer.genShouldFill() {
            v.Set(reflect.New(v.Type().Elem()))
            fc.doFuzz(v.Elem(), 0)
            return
        }
        v.Set(reflect.Zero(v.Type()))
    case reflect.Slice:
        if fc.fuzzer.genShouldFill() {
            n := fc.fuzzer.genElementCount()
            v.Set(reflect.MakeSlice(v.Type(), n, n))
            for i := 0; i < n; i++ {
                fc.doFuzz(v.Index(i), 0)
            }
            return
        }
        v.Set(reflect.Zero(v.Type()))
    case reflect.Array:
        if fc.fuzzer.genShouldFill() {
            n := v.Len()
            for i := 0; i < n; i++ {
                fc.doFuzz(v.Index(i), 0)
            }
            return
        }
        v.Set(reflect.Zero(v.Type()))
    case reflect.Struct:
        for i := 0; i < v.NumField(); i++ {
            skipField := false
            fieldName := v.Type().Field(i).Name
            for _, pattern := range fc.fuzzer.skipFieldPatterns {
                if pattern.MatchString(fieldName) {
                    skipField = true
                    break
                }
            }
            if !skipField {
                fc.doFuzz(v.Field(i), 0)
            }
        }
    case reflect.Chan:
        fallthrough
    case reflect.Func:
        fallthrough
    case reflect.Interface:
        fallthrough
    default:
        panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
    }
}

// tryCustom searches for custom handlers, and returns true iff it finds a match
// and successfully randomizes v.
func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
    // First: see if we have a fuzz function for it.
    doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
    if !ok {
        // Second: see if it can fuzz itself.
        if v.CanInterface() {
            intf := v.Interface()
            if fuzzable, ok := intf.(Interface); ok {
                fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
                return true
            }
        }
        // Finally: see if there is a default fuzz function.
        doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
        if !ok {
            return false
        }
    }

    switch v.Kind() {
    case reflect.Ptr:
        if v.IsNil() {
            if !v.CanSet() {
                return false
            }
            v.Set(reflect.New(v.Type().Elem()))
        }
    case reflect.Map:
        if v.IsNil() {
            if !v.CanSet() {
                return false
            }
            v.Set(reflect.MakeMap(v.Type()))
        }
    default:
        return false
    }

    doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
        fc:   fc,
        Rand: fc.fuzzer.r,
    })})
    return true
}

// Interface represents an object that knows how to fuzz itself. Any time we
// find a type that implements this interface we will delegate the act of
// fuzzing itself.
type Interface interface {
    Fuzz(c Continue)
}

// Continue can be passed to custom fuzzing functions to allow them to use
// the correct source of randomness and to continue fuzzing their members.
type Continue struct {
    fc *fuzzerContext

    // For convenience, Continue implements rand.Rand via embedding.
    // Use this for generating any randomness if you want your fuzzing
    // to be repeatable for a given seed.
    *rand.Rand
}

// Fuzz continues fuzzing obj. obj must be a pointer.
func (c Continue) Fuzz(obj interface{}) {
    v := reflect.ValueOf(obj)
    if v.Kind() != reflect.Ptr {
        panic("needed ptr!")
    }
    v = v.Elem()
    c.fc.doFuzz(v, 0)
}

// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
// obj's type will not be called and obj will not be tested for fuzz.Interface
// conformance. This applies only to obj and not other instances of obj's
// type.
func (c Continue) FuzzNoCustom(obj interface{}) {
    v := reflect.ValueOf(obj)
    if v.Kind() != reflect.Ptr {
        panic("needed ptr!")
    }
    v = v.Elem()
    c.fc.doFuzz(v, flagNoCustomFuzz)
}

// RandString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func (c Continue) RandString() string {
    return randString(c.Rand)
}

// RandUint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func (c Continue) RandUint64() uint64 {
    return randUint64(c.Rand)
}

// RandBool returns true or false randomly.
func (c Continue) RandBool() bool {
    return randBool(c.Rand)
}

func fuzzInt(v reflect.Value, r *rand.Rand) {
    v.SetInt(int64(randUint64(r)))
}

func fuzzUint(v reflect.Value, r *rand.Rand) {
    v.SetUint(randUint64(r))
}

func fuzzTime(t *time.Time, c Continue) {
    var sec, nsec int64
    // Allow for about 1000 years of random time values, which keeps things
    // like JSON parsing reasonably happy.
    sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
    c.Fuzz(&nsec)
    *t = time.Unix(sec, nsec)
}

var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
    reflect.Bool: func(v reflect.Value, r *rand.Rand) {
        v.SetBool(randBool(r))
    },
    reflect.Int:     fuzzInt,
    reflect.Int8:    fuzzInt,
    reflect.Int16:   fuzzInt,
    reflect.Int32:   fuzzInt,
    reflect.Int64:   fuzzInt,
    reflect.Uint:    fuzzUint,
    reflect.Uint8:   fuzzUint,
    reflect.Uint16:  fuzzUint,
    reflect.Uint32:  fuzzUint,
    reflect.Uint64:  fuzzUint,
    reflect.Uintptr: fuzzUint,
    reflect.Float32: func(v reflect.Value, r *rand.Rand) {
        v.SetFloat(float64(r.Float32()))
    },
    reflect.Float64: func(v reflect.Value, r *rand.Rand) {
        v.SetFloat(r.Float64())
    },
    reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
        panic("unimplemented")
    },
    reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
        panic("unimplemented")
    },
    reflect.String: func(v reflect.Value, r *rand.Rand) {
        v.SetString(randString(r))
    },
    reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
        panic("unimplemented")
    },
}

// randBool returns true or false randomly.
func randBool(r *rand.Rand) bool {
    if r.Int()&1 == 1 {
        return true
    }
    return false
}

type charRange struct {
    first, last rune
}

// choose returns a random unicode character from the given range, using the
// given randomness source.
func (r *charRange) choose(rand *rand.Rand) rune {
    count := int64(r.last - r.first)
    return r.first + rune(rand.Int63n(count))
}

var unicodeRanges = []charRange{
    {' ', '~'},           // ASCII characters
    {'\u00a0', '\u02af'}, // Multi-byte encoded characters
    {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
}

// randString makes a random string up to 20 characters long. The returned string
// may include a variety of (valid) UTF-8 encodings.
func randString(r *rand.Rand) string {
    n := r.Intn(20)
    runes := make([]rune, n)
    for i := range runes {
        runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
    }
    return string(runes)
}

// randUint64 makes random 64 bit numbers.
// Weirdly, rand doesn't have a function that gives you 64 random bits.
func randUint64(r *rand.Rand) uint64 {
    return uint64(r.Uint32())<<32 | uint64(r.Uint32())
}
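Pulling the knobs documented above together: a run can be made reproducible with `NewWithSeed` (or `RandSource`), protobuf bookkeeping fields can be ignored with `SkipFieldsWithPattern`, and recursion bounded with `MaxDepth`. A minimal sketch; the seed, the pattern, and the `MyProtoMsg` type are illustrative assumptions, not part of this package:

```go
package main

import (
    "fmt"
    "regexp"

    "github.com/google/gofuzz"
)

// MyProtoMsg stands in for a generated protobuf message.
type MyProtoMsg struct {
    Name          string
    Labels        map[string]string
    XXX_sizecache int32
}

func main() {
    f := fuzz.NewWithSeed(42).                             // fixed seed => reproducible values
        NilChance(0.1).                                    // mostly non-nil maps and pointers
        NumElements(1, 3).                                 // keep maps and slices small
        MaxDepth(5).                                       // bound recursion on nested types
        SkipFieldsWithPattern(regexp.MustCompile(`^XXX_`)) // leave protobuf bookkeeping alone

    var msg MyProtoMsg
    f.Fuzz(&msg)
    fmt.Printf("%+v\n", msg) // XXX_sizecache stays zero; the other fields are fuzzed
}
```

Running this twice prints identical output, since the fixed seed pins the random source.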